Another Dataloader issue: TypeError: unsupported operand type(s) for *: 'NoneType' and 'int'
from fastai.nlp import *

# Train a Naive-Bayes-weighted logistic regression ("dotprod NB") sentiment
# classifier on the IMDB aclImdb dataset using fastai's old ML-course API.
# NOTE: the original paste used typographic quotes (' ') which are invalid
# Python syntax; they are restored to straight quotes here.
PATH = 'data/aclImdb/'
names = ['neg', 'pos']

# Load raw review texts and 0/1 labels from the neg/pos subfolders.
trn, trn_y = texts_labels_from_folders(f'{PATH}train', names)
val, val_y = texts_labels_from_folders(f'{PATH}test', names)

# Bag-of-words term-document matrices (fit vocabulary on train only).
veczr = CountVectorizer(tokenizer=tokenize)
trn_term_doc = veczr.fit_transform(trn)
val_term_doc = veczr.transform(val)

# 2000 = max features kept per document by from_bow.
# NOTE(review): the reported TypeError ("unsupported operand type(s) for *:
# 'NoneType' and 'int'") originates inside fastai's dataloader, where
# self.num_workers is None when it computes num_workers*10 — presumably the
# installed fastai version fails to detect the CPU count or from_bow does not
# forward a num_workers default; verify against the installed
# fastai/dataloader.py rather than this script.
md = TextClassifierData.from_bow(trn_term_doc, trn_y, val_term_doc, val_y, 2000)
learner = md.dotprod_nb_learner()
learner.fit(0.02, 1, wds=1e-6, cycle_len=1)
Epoch
0% 0/1 [00:00<?, ?it/s]
0%| | 0/391 [00:00<?, ?it/s]
TypeError Traceback (most recent call last)
in ()
----> 1 learner.fit(0.02, 1, wds=1e-6, cycle_len=1)
~/fastai/courses/ml1/fastai/learner.py in fit(self, lrs, n_cycle, wds, **kwargs)
250 self.sched = None
251 layer_opt = self.get_layer_opt(lrs, wds)
--> 252 return self.fit_gen(self.model, self.data, layer_opt, n_cycle, **kwargs)
253
254 def warm_up(self, lr, wds=None):
~/fastai/courses/ml1/fastai/learner.py in fit_gen(self, model, data, layer_opt, n_cycle, cycle_len, cycle_mult, cycle_save_name, best_save_name, use_clr, use_clr_beta, metrics, callbacks, use_wd_sched, norm_wds, wds_sched_mult, **kwargs)
197 n_epoch = sum_geom(cycle_len if cycle_len else 1, cycle_mult, n_cycle)
198 return fit(model, data, n_epoch, layer_opt.opt, self.crit,
--> 199 metrics=metrics, callbacks=callbacks, reg_fn=self.reg_fn, clip=self.clip, **kwargs)
200
201 def get_layer_groups(self): return self.models.get_layer_groups()
~/fastai/courses/ml1/fastai/model.py in fit(model, data, epochs, opt, crit, metrics, callbacks, stepper, **kwargs)
120 i = 0
121 if all_val: val_iter = IterBatch(data.val_dl)
--> 122 for (*x,y) in t:
123 batch_num += 1
124 for cb in callbacks: cb.on_batch_begin()
~/anaconda3/lib/python3.6/site-packages/tqdm/_tqdm.py in iter(self)
953 """, fp_write=getattr(self.fp, 'write', sys.stderr.write))
954
--> 955 for obj in iterable:
956 yield obj
957 # Update and possibly print the progressbar.
~/fastai/courses/ml1/fastai/dataloader.py in iter(self)
81 with ThreadPoolExecutor(max_workers=self.num_workers) as e:
82 # avoid py3.6 issue where queue is infinite and can result in memory exhaustion
--> 83 for c in chunk_iter(iter(self.batch_sampler), self.num_workers*10):
84 for batch in e.map(self.get_batch, c): yield get_tensor(batch, self.pin_memory)
85
TypeError: unsupported operand type(s) for *: ‘NoneType’ and ‘int’