I am not sure whether this notebook is from the 2017 or the 2018 vintage of the course, but I am posting it in this discussion anyway.
In the cell after defining `md = TextClassifierData.from_bow(…`, we define `learner = md.dotprod_nb_learner()` and then run `learner.fit(0.02, 1, wds=1e-5, cycle_len=1)`.
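For reference, the whole cell looks roughly like this in my notebook; the arguments to `from_bow` are my best recollection from the lesson notebook, so treat those exact names (`trn_term_doc`, `trn_y`, `val_term_doc`, `val_y`, `sl`) as assumptions:

```python
# Roughly the cell in question (from_bow argument names are my assumption
# based on the lesson notebook, not copied from my actual cell)
md = TextClassifierData.from_bow(trn_term_doc, trn_y, val_term_doc, val_y, sl)
learner = md.dotprod_nb_learner()
learner.fit(0.02, 1, wds=1e-5, cycle_len=1)
```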
It is here that I run into an error (full traceback below):
```
TypeError                                 Traceback (most recent call last)
in <module>()
      1 learner = md.dotprod_nb_learner()
----> 2 learner.fit(0.02, 1, wds=1e-5, cycle_len=1)

~/Documents/fastai/courses/dl1/fastai/learner.py in fit(self, lrs, n_cycle, wds, **kwargs)
    223         self.sched = None
    224         layer_opt = self.get_layer_opt(lrs, wds)
--> 225         return self.fit_gen(self.model, self.data, layer_opt, n_cycle, **kwargs)
    226
    227     def warm_up(self, lr, wds=None):

~/Documents/fastai/courses/dl1/fastai/learner.py in fit_gen(self, model, data, layer_opt, n_cycle, cycle_len, cycle_mult, cycle_save_name, best_save_name, use_clr, metrics, callbacks, use_wd_sched, norm_wds, wds_sched_mult, **kwargs)
    170         n_epoch = sum_geom(cycle_len if cycle_len else 1, cycle_mult, n_cycle)
    171         return fit(model, data, n_epoch, layer_opt.opt, self.crit,
--> 172             metrics=metrics, callbacks=callbacks, reg_fn=self.reg_fn, clip=self.clip, **kwargs)
    173
    174     def get_layer_groups(self): return self.models.get_layer_groups()

~/Documents/fastai/courses/dl1/fastai/model.py in fit(model, data, epochs, opt, crit, metrics, callbacks, stepper, **kwargs)
     91         t = tqdm(iter(data.trn_dl), leave=False, total=num_batch)
     92         i = 0
---> 93         for (*x,y) in t:
     94             batch_num += 1
     95             for cb in callbacks: cb.on_batch_begin()

~/anaconda3/envs/py36/lib/python3.6/site-packages/tqdm/_tqdm.py in __iter__(self)
    895                 """, fp_write=getattr(self.fp, 'write', sys.stderr.write))
    896
--> 897             for obj in iterable:
    898                 yield obj
    899             # Update and possibly print the progressbar.

~/Documents/fastai/courses/dl1/fastai/dataloader.py in __iter__(self)
     81         with ThreadPoolExecutor(max_workers=self.num_workers) as e:
     82             # avoid py3.6 issue where queue is infinite and can result in memory exhaustion
---> 83             for c in chunk_iter(iter(self.batch_sampler), self.num_workers*10):
     84                 for batch in e.map(self.get_batch, c): yield get_tensor(batch, self.pin_memory)
     85
```
To those of you who are good at debugging: can you figure out what is going wrong here?
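In case it helps anyone dig in, here is a minimal check I plan to try myself, to see whether the DataLoader raises the same TypeError on its own, outside of `fit()` and tqdm. This is just a sketch, assuming `md` is defined as in the cell above:

```python
# Pull one batch straight from the training DataLoader, bypassing fit() and tqdm.
# If this raises the same TypeError, the problem is in the DataLoader's
# get_batch/get_tensor path rather than in the training loop in model.py.
it = iter(md.trn_dl)
batch = next(it)
print(type(batch), len(batch))  # expecting something unpackable as (*x, y)
```

If this reproduces the error, then the `for (*x,y) in t:` line in `model.py` is just where the exception surfaces, and the real issue is in what the DataLoader yields.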