I want to use mixup for a multi-class image classification problem and simply added .mixup()
to my already working learner, but got the following error:
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>()
      1 learn = cnn_learner(data,models.resnet18).mixup()
----> 2 learn.lr_find()

/opt/conda/lib/python3.6/site-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
30 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
31 epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 32 learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
33
34 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
194 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
195 if defaults.extra_callbacks is not None: callbacks += defaults.extra_callbacks
---> 196 fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
197
198 def create_opt(self, lr:Floats, wd:Floats=0.)->None:

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
97 cb_handler.on_epoch_begin()
98 for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
---> 99 xb, yb = cb_handler.on_batch_begin(xb, yb)
100 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
101 if cb_handler.on_batch_end(loss): break

/opt/conda/lib/python3.6/site-packages/fastai/callback.py in on_batch_begin(self, xb, yb, train)
276 self.state_dict.update(dict(last_input=xb, last_target=yb, train=train,
277 stop_epoch=False, skip_step=False, skip_zero=False, skip_bwd=False))
---> 278 self('batch_begin', mets = not self.state_dict['train'])
    279 return self.state_dict['last_input'], self.state_dict['last_target']
    280

/opt/conda/lib/python3.6/site-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
248 if call_mets:
249 for met in self.metrics: self._call_and_update(met, cb_name, **kwargs)
---> 250 for cb in self.callbacks: self._call_and_update(cb, cb_name, **kwargs)
251
252 def set_dl(self, dl:DataLoader):

/opt/conda/lib/python3.6/site-packages/fastai/callback.py in _call_and_update(self, cb, cb_name, **kwargs)
    238 def _call_and_update(self, cb, cb_name, **kwargs)->None:
    239     "Call `cb_name` on `cb` and update the inner state."
--> 240     new = ifnone(getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs), dict())
241 for k,v in new.items():
242 if k not in self.state_dict:

/opt/conda/lib/python3.6/site-packages/fastai/callbacks/mixup.py in on_batch_begin(self, last_input, last_target, train, **kwargs)
26 new_input = (last_input * lambd.view(lambd.size(0),1,1,1) + x1 * (1-lambd).view(lambd.size(0),1,1,1))
27 if self.stack_y:
---> 28 new_target = torch.cat([last_target[:,None].float(), y1[:,None].float(), lambd[:,None].float()], 1)
29 else:
30 if len(last_target.shape) == 2:

RuntimeError: invalid argument 0: Tensors must have same number of dimensions: got 2 and 3 at /opt/conda/conda-bld/pytorch_1549630534704/work/aten/src/THC/generic/THCTensorMath.cu:74
Is there some bug or am I doing something wrong?