If I disable the GPU, to trace back the error, I get this:
RuntimeError Traceback (most recent call last)
<ipython-input-19-f2e08e2ffc17> in <module>()
----> 1 learn.lr_find(); learn.recorder.plot()
/usr/local/lib/python3.6/dist-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, **kwargs)
30 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
31 a = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 32 learn.fit(a, start_lr, callbacks=[cb], **kwargs)
33
34 def to_fp16(learn:Learner, loss_scale:float=512., flat_master:bool=False)->Learner:
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
176 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
177 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 178 callbacks=self.callbacks+callbacks)
179
180 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
/usr/local/lib/python3.6/dist-packages/fastai/utils/mem.py in wrapper(*args, **kwargs)
83
84 try:
---> 85 return func(*args, **kwargs)
86 except Exception as e:
87 if "CUDA out of memory" in str(e) or tb_clear_frames=="1":
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
98 except Exception as e:
99 exception = e
--> 100 raise e
101 finally: cb_handler.on_train_end(exception)
102
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
88 for xb,yb in progress_bar(data.train_dl, parent=pbar):
89 xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 90 loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
91 if cb_handler.on_batch_end(loss): break
92
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
22
23 if not loss_func: return to_detach(out), yb[0].detach()
---> 24 loss = loss_func(out, *yb)
25
26 if opt is not None:
/usr/local/lib/python3.6/dist-packages/fastai/layers.py in __call__(self, input, target, **kwargs)
229 if self.floatify: target = target.float()
230 input = input.view(-1,input.shape[-1]) if self.is_2d else input.view(-1)
--> 231 return self.func.__call__(input, target.view(-1), **kwargs)
232
233 def CrossEntropyFlat(*args, axis:int=-1, **kwargs):
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py in forward(self, input, target)
902 def forward(self, input, target):
903 return F.cross_entropy(input, target, weight=self.weight,
--> 904 ignore_index=self.ignore_index, reduction=self.reduction)
905
906
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
1968 if size_average is not None or reduce is not None:
1969 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 1970 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
1971
1972
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
1788 .format(input.size(0), target.size(0)))
1789 if dim == 2:
-> 1790 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
1791 elif dim == 4:
1792 ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at /pytorch/aten/src/THNN/generic/ClassNLLCriterion.c:93
So the wrong loss function is being used: the error indicates a target label falls outside the expected class range, and since the task is binary (predicting between 0 and 1), it should be using Binary Cross Entropy rather than plain Cross Entropy.