RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory

TL;DR: Delete the cached .pth file.

Here’s where mine was located: /home/user/.cache/torch/hub/checkpoints/convnext_tiny_22k_224.pth

I was trying to use convnext_tiny_22k_224 from timm and I kept getting this stack trace:

RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_5929/ in <module>
----> 1 learn = train(path, config)

/tmp/ipykernel_5929/ in train(path, config)
     22                             metrics=[accuracy, F1Score(average='macro')],
     23                             cbs=WandbCallback,
---> 24                             wd=config.wd
     25                            )
     26 #         if config.reduce_lr: learn = learn.add_cb(ReduceLROnPlateau())

~/.local/lib/python3.7/site-packages/fastai/vision/ in vision_learner(dls, arch, normalize, n_out, pretrained, loss_func, opt_func, lr, splitter, cbs, metrics, path, model_dir, wd, wd_bn_bias, train_bn, moms, cut, init, custom_head, concat_pool, pool, lin_ftrs, ps, first_bn, bn_final, lin_first, y_range, **kwargs)
    214                       first_bn=first_bn, bn_final=bn_final, lin_first=lin_first, y_range=y_range, **kwargs)
    215     if normalize: _add_norm(dls, meta, pretrained)
--> 216     if isinstance(arch, str): model = create_timm_model(arch, n_out, default_split, pretrained, **model_args)
    217     else: model = create_vision_model(arch, n_out, pretrained=pretrained, **model_args)

~/.local/lib/python3.7/site-packages/fastai/vision/ in create_timm_model(arch, n_out, cut, pretrained, n_in, init, custom_head, concat_pool, pool, lin_ftrs, ps, first_bn, bn_final, lin_first, y_range, **kwargs)
    185                      concat_pool=True, pool=True, lin_ftrs=None, ps=0.5, first_bn=True, bn_final=False, lin_first=False, y_range=None, **kwargs):
    186     "Create custom architecture using `arch`, `n_in` and `n_out` from the `timm` library"
--> 187     body = TimmBody(arch, pretrained, None, n_in, **kwargs)
    188     nf = body.model.num_features
    189     return add_head(body, nf, n_out, init=init, head=custom_head, concat_pool=concat_pool, pool=body.needs_pool,

~/.local/lib/python3.7/site-packages/fastai/vision/ in __init__(self, arch, pretrained, cut, n_in, **kwargs)
    175     def __init__(self, arch:str, pretrained:bool=True, cut=None, n_in:int=3, **kwargs):
    176         super().__init__()
--> 177         model = timm.create_model(arch, pretrained=pretrained, num_classes=0, in_chans=n_in, **kwargs)
    178         self.needs_pool = model.default_cfg.get('pool_size', None)
    179         self.model = model if cut is None else cut_model(model, cut)

~/.local/lib/python3.7/site-packages/timm/models/ in create_model(model_name, pretrained, pretrained_cfg, checkpoint_path, scriptable, exportable, no_jit, **kwargs)
     69     create_fn = model_entrypoint(model_name)
     70     with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit):
---> 71         model = create_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg, **kwargs)
     73     if checkpoint_path:

~/.local/lib/python3.7/site-packages/timm/models/ in convnext_tiny_in22k(pretrained, **kwargs)
    493 def convnext_tiny_in22k(pretrained=False, **kwargs):
    494     model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), **kwargs)
--> 495     model = _create_convnext('convnext_tiny_in22k', pretrained=pretrained, **model_args)
    496     return model

~/.local/lib/python3.7/site-packages/timm/models/ in _create_convnext(variant, pretrained, **kwargs)
    366         pretrained_filter_fn=checkpoint_filter_fn,
    367         feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
--> 368         **kwargs)
    369     return model

~/.local/lib/python3.7/site-packages/timm/models/ in build_model_with_cfg(model_cls, variant, pretrained, pretrained_cfg, model_cfg, feature_cfg, pretrained_strict, pretrained_filter_fn, pretrained_custom_load, kwargs_filter, **kwargs)
    549                 in_chans=kwargs.get('in_chans', 3),
    550                 filter_fn=pretrained_filter_fn,
--> 551                 strict=pretrained_strict)
    553     # Wrap the model in a feature extraction module if enabled

~/.local/lib/python3.7/site-packages/timm/models/ in load_pretrained(model, pretrained_cfg, num_classes, in_chans, filter_fn, strict)
    244'Loading pretrained weights from url ({pretrained_loc})')
    245         state_dict = load_state_dict_from_url(
--> 246             pretrained_loc, map_location='cpu', progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH)
    247     elif load_from == 'hf-hub':
    248'Loading pretrained weights from Hugging Face hub ({pretrained_loc})')

/opt/conda/lib/python3.7/site-packages/torch/ in load_state_dict_from_url(url, model_dir, map_location, progress, check_hash, file_name)
    588     if _is_legacy_zip_format(cached_file):
    589         return _legacy_zip_load(cached_file, model_dir, map_location)
--> 590     return torch.load(cached_file, map_location=map_location)

/opt/conda/lib/python3.7/site-packages/torch/ in load(f, map_location, pickle_module, **pickle_load_args)
    598             # reset back to the original position.
    599             orig_position = opened_file.tell()
--> 600             with _open_zipfile_reader(opened_file) as opened_zipfile:
    601                 if _is_torchscript_zip(opened_zipfile):
    602                     warnings.warn("'torch.load' received a zip file that looks like a TorchScript archive"

/opt/conda/lib/python3.7/site-packages/torch/ in __init__(self, name_or_buffer)
    240 class _open_zipfile_reader(_opener):
    241     def __init__(self, name_or_buffer) -> None:
--> 242         super(_open_zipfile_reader, self).__init__(torch._C.PyTorchFileReader(name_or_buffer))

RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory

I finally figured out what I was doing wrong. When I had initially started downloading the file, I interrupted it before it finished, so the .pth file was incomplete — torch.load cannot read a truncated zip archive, hence the "failed finding central directory" error. Once I removed that file and allowed it to redownload fully, the issue resolved itself. Hope this helps somebody else in a similar situation.