Getting an error in simple tabular classification

I hadn't used fastai for a while and recently updated to the latest version. I am using the following code, which seems pretty standard for a simple tabular classification problem; everything is taken straight from the fastai tabular tutorial, with no custom loss or anything fancy.

from fastai.tabular.all import *

procs = [Categorify, FillMissing, Normalize]
dls = TabularDataLoaders.from_df(df_final, procs=procs, cat_names=cat_names, cont_names=cont_names,
                                 y_names="Churn", y_block=CategoryBlock(),
                                 valid_idx=df_notshared[test_mask].index.tolist(), bs=1024, n_jobs=8)
# Precision and Recall are factory functions in fastai and need to be instantiated
learn = tabular_learner(dls, metrics=[accuracy, Precision(), Recall()])
learn = learn.to_fp16()
learn.fit_one_cycle(10)

But then I get the following error when running this:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-10-184a41e6b7e2> in <module>
----> 1 learn.fit_one_cycle(10)

c:\work\ml\fastai\fastai\callback\schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
    110     scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
    111               'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
--> 112     self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
    113 
    114 # Cell

c:\work\ml\fastai\fastai\learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
    216             self.opt.set_hypers(lr=self.lr if lr is None else lr)
    217             self.n_epoch = n_epoch
--> 218             self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
    219 
    220     def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None

c:\work\ml\fastai\fastai\learner.py in _with_events(self, f, event_type, ex, final)
    158 
    159     def _with_events(self, f, event_type, ex, final=noop):
--> 160         try: self(f'before_{event_type}');  f()
    161         except ex: self(f'after_cancel_{event_type}')
    162         self(f'after_{event_type}');  final()

c:\work\ml\fastai\fastai\learner.py in _do_fit(self)
    207         for epoch in range(self.n_epoch):
    208             self.epoch=epoch
--> 209             self._with_events(self._do_epoch, 'epoch', CancelEpochException)
    210 
    211     def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):

c:\work\ml\fastai\fastai\learner.py in _with_events(self, f, event_type, ex, final)
    158 
    159     def _with_events(self, f, event_type, ex, final=noop):
--> 160         try: self(f'before_{event_type}');  f()
    161         except ex: self(f'after_cancel_{event_type}')
    162         self(f'after_{event_type}');  final()

c:\work\ml\fastai\fastai\learner.py in _do_epoch(self)
    201 
    202     def _do_epoch(self):
--> 203         self._do_epoch_train()
    204         self._do_epoch_validate()
    205 

c:\work\ml\fastai\fastai\learner.py in _do_epoch_train(self)
    193     def _do_epoch_train(self):
    194         self.dl = self.dls.train
--> 195         self._with_events(self.all_batches, 'train', CancelTrainException)
    196 
    197     def _do_epoch_validate(self, ds_idx=1, dl=None):

c:\work\ml\fastai\fastai\learner.py in _with_events(self, f, event_type, ex, final)
    158 
    159     def _with_events(self, f, event_type, ex, final=noop):
--> 160         try: self(f'before_{event_type}');  f()
    161         except ex: self(f'after_cancel_{event_type}')
    162         self(f'after_{event_type}');  final()

c:\work\ml\fastai\fastai\learner.py in all_batches(self)
    164     def all_batches(self):
    165         self.n_iter = len(self.dl)
--> 166         for o in enumerate(self.dl): self.one_batch(*o)
    167 
    168     def _do_one_batch(self):

c:\work\ml\fastai\fastai\learner.py in one_batch(self, i, b)
    189         b = self._set_device(b)
    190         self._split(b)
--> 191         self._with_events(self._do_one_batch, 'batch', CancelBatchException)
    192 
    193     def _do_epoch_train(self):

c:\work\ml\fastai\fastai\learner.py in _with_events(self, f, event_type, ex, final)
    158 
    159     def _with_events(self, f, event_type, ex, final=noop):
--> 160         try: self(f'before_{event_type}');  f()
    161         except ex: self(f'after_cancel_{event_type}')
    162         self(f'after_{event_type}');  final()

c:\work\ml\fastai\fastai\learner.py in _do_one_batch(self)
    170         self('after_pred')
    171         if len(self.yb):
--> 172             self.loss_grad = self.loss_func(self.pred, *self.yb)
    173             self.loss = self.loss_grad.clone()
    174         self('after_loss')

c:\work\ml\fastai\fastai\losses.py in __call__(self, inp, targ, **kwargs)
     33         if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long()
     34         if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1)
---> 35         return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs)
     36 
     37 # Cell

~\miniconda3\envs\fastai\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    887             result = self._slow_forward(*input, **kwargs)
    888         else:
--> 889             result = self.forward(*input, **kwargs)
    890         for hook in itertools.chain(
    891                 _global_forward_hooks.values(),

~\miniconda3\envs\fastai\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
   1045     def forward(self, input: Tensor, target: Tensor) -> Tensor:
   1046         assert self.weight is None or isinstance(self.weight, Tensor)
-> 1047         return F.cross_entropy(input, target, weight=self.weight,
   1048                                ignore_index=self.ignore_index, reduction=self.reduction)
   1049 

~\miniconda3\envs\fastai\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
   2678     """
   2679     if has_torch_function_variadic(input, target):
-> 2680         return handle_torch_function(
   2681             cross_entropy,
   2682             (input, target),

~\miniconda3\envs\fastai\lib\site-packages\torch\overrides.py in handle_torch_function(public_api, relevant_args, *args, **kwargs)
   1200         # Use `public_api` instead of `implementation` so __torch_function__
   1201         # implementations can do equality/identity comparisons.
-> 1202         result = overloaded_arg.__torch_function__(public_api, types, args, kwargs)
   1203 
   1204         if result is not NotImplemented:

c:\work\ml\fastai\fastai\torch_core.py in __torch_function__(self, func, types, args, kwargs)
    330         convert=False
    331         if _torch_handled(args, self._opt, func): convert,types = type(self),(torch.Tensor,)
--> 332         res = super().__torch_function__(func, types, args=args, kwargs=kwargs)
    333         if convert: res = convert(res)
    334         if isinstance(res, TensorBase): res.set_meta(self, as_copy=True)

~\miniconda3\envs\fastai\lib\site-packages\torch\tensor.py in __torch_function__(cls, func, types, args, kwargs)
    960 
    961         with _C.DisableTorchFunction():
--> 962             ret = func(*args, **kwargs)
    963             return _convert(ret, cls)
    964 

~\miniconda3\envs\fastai\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
   2691     if size_average is not None or reduce is not None:
   2692         reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2693     return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
   2694 
   2695 

~\miniconda3\envs\fastai\lib\site-packages\torch\nn\functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
   2386         )
   2387     if dim == 2:
-> 2388         ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
   2389     elif dim == 4:
   2390         ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

RuntimeError: expected scalar type Long but found Float
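
The last frame is the real hint: F.cross_entropy takes float logits but requires the targets to be integer (Long) class indices. A minimal standalone reproduction with made-up tensors (nothing to do with my data) shows the same error on this PyTorch version:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 2)            # float predictions: 4 samples, 2 classes
targets = torch.tensor([0, 1, 1, 0])  # Long class indices: this works
F.cross_entropy(logits, targets)

# float targets trigger the same failure:
# RuntimeError: expected scalar type Long but found Float
F.cross_entropy(logits, targets.float())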

Found the problem… My y_names column ("Churn") was also in the cont_names list, so the Normalize proc treated the target as a continuous feature and turned it into a normalized float, while CrossEntropyLoss expects Long class indices.
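
For anyone who runs into this: make sure the target column is excluded from cont_names (and cat_names) before building the DataLoaders, and sanity-check a batch. A quick sketch with the same variable names as above (valid_idx omitted for brevity):

# drop the target from the feature lists so Normalize never touches it
y_name = "Churn"
cont_names = [c for c in cont_names if c != y_name]
cat_names = [c for c in cat_names if c != y_name]

dls = TabularDataLoaders.from_df(df_final, procs=procs, cat_names=cat_names,
                                 cont_names=cont_names, y_names=y_name,
                                 y_block=CategoryBlock(), bs=1024)

# the target batch should be integer class indices, not floats
b = dls.one_batch()
print(b[-1].dtype)  # torch.int64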
