Error running DataLoader on Google Cloud

From Chapter 11, ‘Mid-level Data API’, I ran the following on my Google Cloud notebook and got an error.

from fastai.text.all import *
dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')

I got a file not found error.

---------------------------------------------------------------------------
FileNotFoundError                         Traceback (most recent call last)
/tmp/ipykernel_2134/2545780926.py in <module>
      1 from fastai.text.all import *
      2 
----> 3 dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')

/opt/conda/lib/python3.7/site-packages/fastai/text/data.py in from_folder(cls, path, train, valid, valid_pct, seed, vocab, text_vocab, is_lm, tok_tfm, seq_len, backwards, **kwargs)
    250         "Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
    251         splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
--> 252         blocks = [TextBlock.from_folder(path, text_vocab, is_lm, seq_len, backwards, tok=tok_tfm)]
    253         if not is_lm: blocks.append(CategoryBlock(vocab=vocab))
    254         get_items = partial(get_text_files, folders=[train,valid]) if valid_pct is None else get_text_files

/opt/conda/lib/python3.7/site-packages/fastai/text/data.py in from_folder(cls, path, vocab, is_lm, seq_len, backwards, min_freq, max_vocab, **kwargs)
    238     def from_folder(cls, path, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
    239         "Build a `TextBlock` from a `path`"
--> 240         return cls(Tokenizer.from_folder(path, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
    241                    backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)
    242 

/opt/conda/lib/python3.7/site-packages/fastai/text/core.py in from_folder(cls, path, tok, rules, **kwargs)
    276         if tok is None: tok = WordTokenizer()
    277         output_dir = tokenize_folder(path, tok=tok, rules=rules, **kwargs)
--> 278         res = cls(tok, counter=load_pickle(output_dir/fn_counter_pkl),
    279                   lengths=load_pickle(output_dir/fn_lengths_pkl), rules=rules, mode='folder')
    280         res.path,res.output_dir = path,output_dir

/opt/conda/lib/python3.7/site-packages/fastcore/xtras.py in load_pickle(fn)
    203 def load_pickle(fn):
    204     "Load a pickle file from a file name or opened file"
--> 205     with open_file(fn, 'rb') as f: return pickle.load(f)
    206 
    207 # Cell

/opt/conda/lib/python3.7/site-packages/fastcore/xtras.py in open_file(fn, mode, **kwargs)
    193     elif fn.suffix=='.gz' : return gzip.GzipFile(fn, mode, **kwargs)
    194     elif fn.suffix=='.zip': return zipfile.ZipFile(fn, mode, **kwargs)
--> 195     else: return open(fn,mode, **kwargs)
    196 
    197 # Cell

FileNotFoundError: [Errno 2] No such file or directory: '/home/jupyter/.fastai/data/imdb_tok/counter.pkl'
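
Looking at the traceback, load_pickle is failing on imdb_tok/counter.pkl, which suggests the imdb_tok folder was created but counter.pkl was never written into it. A quick check of what is actually in that folder (paths assumed from the traceback above):

from pathlib import Path
tok_path = Path.home()/'.fastai'/'data'/'imdb_tok'
print(tok_path.exists())                                     # does the tokenized-output folder exist at all?
if tok_path.exists(): print(list(tok_path.iterdir())[:10])   # first few entries; counter.pkl should be among them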

Hi Kevin
%matplotlib inline
%reload_ext autoreload
!pip install -Uqq fastbook
import fastbook
fastbook.setup_book()
from fastai import *
from fastai.text.all import *
dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')

This works for me.
Silly questions: do you have a Google Drive? Did it prompt you to sign in? Do you have enough free space?
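
If it helps, something like this should show how much space is free and whether the dataset actually downloaded (a quick sketch; /home/jupyter is just the path from your traceback, and it uses the imports from the cell above):

!df -h /home/jupyter
path = untar_data(URLs.IMDB)   # returns the existing download without re-fetching if it is already there
print(path)
print(path.ls())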

Regards, Conwyn

This did not work for me… Kindly assist. I am still receiving the same error:

FileNotFoundError: [Errno 2] No such file or directory: '/home/jupyter/.fastai/data/imdb_tok/counter.pkl'
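
A possible workaround, assuming imdb_tok is just a leftover from an earlier interrupted tokenization run: delete that folder and re-run the cell so the tokenizer recreates it from scratch.

import shutil
from pathlib import Path

tok_path = Path.home()/'.fastai'/'data'/'imdb_tok'
if tok_path.exists(): shutil.rmtree(tok_path)   # remove the incomplete tokenized output

from fastai.text.all import *
dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')  # tokenization runs again and rewrites counter.pkl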