NotImplementedError when running learner.fit of lesson4 on Colab

Hello,
I wanted to train my own NLP learner using the code from the lesson4 notebook.
I used LanguageModelData.from_dataframes for the preprocessing; everything else is the same as in the notebook.
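
For reference, the setup looks roughly like this. This is only a sketch: PATH, train_df, val_df, test_df and the 'text' column name are placeholders, and the hyperparameters are the lesson4 defaults rather than my exact values.

from fastai.learner import *
from fastai.nlp import *
from fastai.lm_rnn import *
from torchtext import data

# Placeholders: PATH points at a working directory, and each dataframe holds
# the raw documents in a 'text' column.
TEXT = data.Field(lower=True, tokenize='spacy')
bs, bptt = 64, 70
md = LanguageModelData.from_dataframes(PATH, TEXT, 'text',
                                        train_df, val_df, test_df,
                                        bs=bs, bptt=bptt, min_freq=10)

# Model and learner as in the lesson4 notebook.
em_sz, nh, nl = 200, 500, 3
opt_fn = partial(optim.Adam, betas=(0.7, 0.99))
learner = md.get_model(opt_fn, em_sz, nh, nl,
                       dropouti=0.05, dropout=0.05, wdrop=0.1,
                       dropoute=0.02, dropouth=0.05)
learner.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)
learner.clip = 0.3

learner.fit(3e-3, 1, wds=1e-6, cycle_len=10)   # this is the call that fails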

But when I executed learner.fit, I got a NotImplementedError.
I am not able to find the cause of the problem.
Could you please tell me what’s going wrong?


NotImplementedError Traceback (most recent call last)
<ipython-input> in <module>()
----> 1 learner.fit(3e-3, 1, wds=1e-6, cycle_len=10)

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in fit(self, lrs, n_cycle, wds, **kwargs)
97 self.sched = None
98 layer_opt = self.get_layer_opt(lrs, wds)
---> 99 self.fit_gen(self.model, self.data, layer_opt, n_cycle, **kwargs)
100
101 def lr_find(self, start_lr=1e-5, end_lr=10, wds=None):

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in fit_gen(self, model, data, layer_opt, n_cycle, cycle_len, cycle_mult, cycle_save_name, metrics, callbacks, **kwargs)
87 n_epoch = sum_geom(cycle_len if cycle_len else 1, cycle_mult, n_cycle)
88 fit(model, data, n_epoch, layer_opt.opt, self.crit,
---> 89 metrics=metrics, callbacks=callbacks, reg_fn=self.reg_fn, clip=self.clip, **kwargs)
90
91 def get_layer_groups(self): return self.models.get_layer_groups()

/usr/local/lib/python3.6/dist-packages/fastai/model.py in fit(model, data, epochs, opt, crit, metrics, callbacks, **kwargs)
82 for (*x,y) in t:
83 batch_num += 1
---> 84 loss = stepper.step(V(x),V(y))
85 avg_loss = avg_loss * avg_mom + loss * (1-avg_mom)
86 debias_loss = avg_loss / (1 - avg_mom**batch_num)

/usr/local/lib/python3.6/dist-packages/fastai/model.py in step(self, xs, y)
38 def step(self, xs, y):
39 xtra = []
---> 40 output = self.m(*xs)
41 if isinstance(output,(tuple,list)): output,*xtra = output
42 self.opt.zero_grad()

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
489 result = self._slow_forward(*input, **kwargs)
490 else:
--> 491 result = self.forward(*input, **kwargs)
492 for hook in self._forward_hooks.values():
493 hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py in forward(self, input)
89 def forward(self, input):
90 for module in self._modules.values():
---> 91 input = module(input)
92 return input
93

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
489 result = self._slow_forward(*input, **kwargs)
490 else:
--> 491 result = self.forward(*input, **kwargs)
492 for hook in self._forward_hooks.values():
493 hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/fastai/lm_rnn.py in forward(self, input)
39
40 def forward(self, input):
---> 41 emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)
42 emb = self.dropouti(emb)
43

/usr/local/lib/python3.6/dist-packages/fastai/rnn_reg.py in embedded_dropout(embed, words, dropout, scale)
54 padding_idx = embed.padding_idx
55 if padding_idx is None: padding_idx = -1
---> 56 X = embed._backend.Embedding.apply(words, masked_embed_weight,
57 padding_idx, embed.max_norm, embed.norm_type,
58 embed.scale_grad_by_freq, embed.sparse

/usr/local/lib/python3.6/dist-packages/torch/nn/backends/backend.py in __getattr__(self, name)
8 fn = self.function_classes.get(name)
9 if fn is None:
---> 10 raise NotImplementedError
11 return fn
12

NotImplementedError:
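
The NotImplementedError comes from the last frame: torch/nn/backends/backend.py raises it whenever an unregistered function class is looked up on the backend, and on newer PyTorch builds (roughly 0.4 and later, which is likely what Colab installed here) the old THNN hook that embed._backend.Embedding refers to no longer exists, so the lookup in fastai 0.7's embedded_dropout fails. Two common workarounds at the time were to install a pre-0.4 PyTorch, or to patch embedded_dropout to go through the public torch.nn.functional.embedding instead of the private backend. The version below is only a sketch of such a patch, written against the stock fastai 0.7 rnn_reg.py:

import torch.nn.functional as F

def embedded_dropout(embed, words, dropout=0.1, scale=None):
    # Drop whole embedding rows with probability `dropout`, rescale the rest,
    # then look the words up with the masked weight matrix.
    if dropout:
        mask = embed.weight.data.new_empty((embed.weight.size(0), 1)).bernoulli_(1 - dropout)
        masked_embed_weight = mask.expand_as(embed.weight) / (1 - dropout) * embed.weight
    else:
        masked_embed_weight = embed.weight
    if scale is not None:
        masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
    # F.embedding takes the same arguments that embed._backend.Embedding.apply received,
    # except that padding_idx stays None instead of being replaced by -1.
    return F.embedding(words, masked_embed_weight, embed.padding_idx, embed.max_norm,
                       embed.norm_type, embed.scale_grad_by_freq, embed.sparse)

After editing the rnn_reg.py shown in the traceback (or monkey-patching the function before building the learner) and restarting the runtime, the call no longer reaches the removed backend.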


Yes, I have the same issue.

Hi, were you able to solve it?

Hello, yes. I ran the whole thing on Colab; here is my blog post explaining the full process, with the code added at the end: