No such file or directory: '/home/ahmad/multifit/data/wiki/fa-2/models/fsp15k/spm/spm.vocab'

I tried to run MLDoc-JA-multifit_fp16.ipynb notebook of multifit project for Persian.

It’s in:

I created a data directory named fa-2 by running prepare_wiki.sh fa
and changed ja-100 to fa-2, but when I run it, I get the following error:

FileNotFoundError                         Traceback (most recent call last)

~/multifit/notebooks/ml.py in <module>
50
51
---> 52 wiki_dataset.load_lm_databunch(bs=128, bptt=70).show_batch()
53
54

~/multifit/multifit/datasets/dataset.py in load_lm_databunch(self, bs, bptt)
208 data_loader=self.load_unsupervised_data,
209 bptt=bptt,
--> 210 bs=bs)
211
212 with (self.cache_path / "itos.pkl").open('wb') as f:

~/multifit/multifit/datasets/dataset.py in load_n_cache_databunch(self, name, bunch_class, data_loader, bs, **args)
252 print(f"Running tokenization: '{name}' ...")
253 train_df, valid_df = data_loader()
--> 254 databunch = self.databunch_from_df(bunch_class, train_df, valid_df, **args)
255 databunch.save(name)
256 print(f"Data {name}, trn: {len(databunch.train_ds)}, val: {len(databunch.valid_ds)}")

~/multifit/multifit/datasets/dataset.py in databunch_from_df(self, bunch_class, train_df, valid_df, **args)
265 mark_fields=True,
266 text_cols=list(train_df.columns.values)[1:],
--> 267 **args)
268 return databunch
269
269

~/multifit/fastai_contrib/text_data.py in make_data_bunch_from_df(cls, path, train_df, valid_df, tokenizer, vocab, classes, text_cols, label_cols, label_delim, chunksize, max_vocab, min_freq, mark_fields, include_bos, include_eos, processor, **kwargs)
146 TextList.from_df(valid_df, path, cols=text_cols, processor=processor))
147 if cls == TextLMDataBunch:
--> 148 src = src.label_for_lm()
149 else:
150 if label_delim is not None:

~/miniconda3/lib/python3.7/site-packages/fastai/data_block.py in _inner(*args, **kwargs)
478 self.valid = fv(*args, from_item_lists=True, **kwargs)
479 self.__class__ = LabelLists
--> 480 self.process()
481 return self
482 return _inner

~/miniconda3/lib/python3.7/site-packages/fastai/data_block.py in process(self)
532 "Process the inner datasets."
533 xp,yp = self.get_processors()
--> 534 for ds,n in zip(self.lists, ['train','valid','test']): ds.process(xp, yp, name=n)
535 #progress_bar clear the outputs so in some case warnings issued during processing disappear.
536 for ds in self.lists:

~/miniconda3/lib/python3.7/site-packages/fastai/data_block.py in process(self, xp, yp, name, max_warn_items)
712 p.warns = []
713 self.x,self.y = self.x[~filt],self.y[~filt]
--> 714 self.x.process(xp)
715 return self
716

~/miniconda3/lib/python3.7/site-packages/fastai/data_block.py in process(self, processor)
82 if processor is not None: self.processor = processor
83 self.processor = listify(self.processor)
---> 84 for p in self.processor: p.process(self)
85 return self
86
86

~/multifit/fastai_contrib/text_data.py in process(self, ds)
122 class SPProcessor2(SPProcessor):
123 def process(self, ds):
--> 124 super().process(ds)
125 ds.vocab.sp_model = self.sp_model
126 ds.vocab.sp_vocab = self.sp_vocab

~/miniconda3/lib/python3.7/site-packages/fastai/text/data.py in process(self, ds)
466 self.sp_model,self.sp_vocab = cache_dir/'spm.model',cache_dir/'spm.vocab'
467 if not getattr(self, 'vocab', False):
--> 468 with open(self.sp_vocab, 'r', encoding=self.enc) as f: self.vocab = Vocab([line.split('\t')[0] for line in f.readlines()])
469 if self.n_cpus <= 1: ds.items = self._encode_batch(ds.items)
470 else:

FileNotFoundError: [Errno 2] No such file or directory: '/home/ahmad/multifit/data/wiki/fa-2/models/fsp15k/spm/spm.vocab'

In fact, the spm.vocab file was never created — the SentencePiece model-training step appears not to have run before tokenization.