Prediction of a scalar with a CNN

I’m trying to implement the ‘BBox only’ part of lesson 8 from the 2018 course using fastai2. I’m not sure if it’s relevant, but I’m posting it here because I’m getting the exact same error:

My implementation:

def get_bbox_dls(df, sz=128, bs=128):
    """Build DataLoaders yielding (image, bbox, dummy label) batches.

    Parameters
    ----------
    df : items source forwarded to ``DataBlock.dataloaders``
    sz : int, side length images are squished to
    bs : int, batch size

    Returns
    -------
    fastai ``DataLoaders`` with one input (the image) and two targets
    (the bbox and a dummy label).

    NOTE(review): relies on notebook-scope globals ``path``, ``img2bbox``
    and ``get_train_imgs`` — confirm they are defined before calling.
    """
    getters = [
        lambda o: path/'train'/o,   # image file path
        lambda o: img2bbox[o],      # bbox of the largest object in the image
        lambda o: [''],             # dummy label so bb_pad collation works
    ]
    dblock = DataBlock(
        blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
        get_items=get_train_imgs,
        getters=getters, n_inp=1,
        splitter=RandomSplitter(seed=47),
        item_tfms=Resize(sz, method='squish'),
        batch_tfms=[*aug_transforms(), Normalize.from_stats(*imagenet_stats)])
    return dblock.dataloaders(df, bs=bs)

img2bbox holds the mapping from img_path to the bbox of the largest object. I’m using BBoxLblBlock to reuse the underlying bb_pad implementation, but the model isn’t supposed to predict any label yet. I’ve modified L1Loss accordingly to work with both ys:

class CustomL1Loss(nn.L1Loss):
    """L1 loss on bounding-box coordinates that ignores any label targets.

    fastai unpacks every element of ``yb`` into the loss function; the
    ``BBoxLblBlock`` contributes a label target that is not being trained
    on yet, so it is accepted and discarded here. ``lbl_targets`` is now
    optional, so the loss also works for bbox-only pipelines.

    NOTE(review): the traceback in this post shows ``L1Loss.__init__``
    receiving the prediction/target tensors — that happens when the CLASS
    is passed to the Learner. Pass an instance instead:
    ``Learner(..., loss_func=CustomL1Loss())``.
    """

    def forward(self, input, bbox_targets, lbl_targets=None):
        # lbl_targets is intentionally unused: only bbox regression is trained.
        return F.l1_loss(input, bbox_targets, reduction=self.reduction)

Some debugging done.
The bbox_targets coming out of dataloaders are of shape (bs,1,4)

_,x,_ = dls.one_batch(); x.shape

Working with random tensors causes no issues with CustomL1Loss:

# Sanity check: the loss runs fine on random tensors of the batch shapes,
# so the failure must occur before forward() is ever reached.
loss_fn = CustomL1Loss()
preds = torch.randn(8, 1, 4)
bbox_y = torch.randn(8, 1, 4)
lbl_y = torch.randn(8, 1)
loss_fn(preds, bbox_y, lbl_y)

EDIT: Here is the whole stack trace of error

RuntimeError                              Traceback (most recent call last)

<ipython-input-52-bb1de44e7349> in <module>()
----> 1 learn.fit_one_cycle(1,lr_max=1e-4)

7 frames

/usr/local/lib/python3.6/dist-packages/fastai2/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
    110     scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
    111               'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
--> 112     self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
    113 
    114 # Cell

/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
    188                     try:
    189                         self.epoch=epoch;          self('begin_epoch')
--> 190                         self._do_epoch_train()
    191                         self._do_epoch_validate()
    192                     except CancelEpochException:   self('after_cancel_epoch')

/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in _do_epoch_train(self)
    161         try:
    162             self.dl = self.dls.train;                        self('begin_train')
--> 163             self.all_batches()
    164         except CancelTrainException:                         self('after_cancel_train')
    165         finally:                                             self('after_train')

/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in all_batches(self)
    139     def all_batches(self):
    140         self.n_iter = len(self.dl)
--> 141         for o in enumerate(self.dl): self.one_batch(*o)
    142 
    143     def one_batch(self, i, b):

/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in one_batch(self, i, b)
    147             self.pred = self.model(*self.xb);                self('after_pred')
    148             if len(self.yb) == 0: return
--> 149             self.loss = self.loss_func(self.pred, *self.yb); self('after_loss')
    150             if not self.training: return
    151             self.loss.backward();                            self('after_backward')

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py in __init__(self, size_average, reduce, reduction)
     83 
     84     def __init__(self, size_average=None, reduce=None, reduction='mean'):
---> 85         super(L1Loss, self).__init__(size_average, reduce, reduction)
     86 
     87     def forward(self, input, target):

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py in __init__(self, size_average, reduce, reduction)
     10         super(_Loss, self).__init__()
     11         if size_average is not None or reduce is not None:
---> 12             self.reduction = _Reduction.legacy_get_string(size_average, reduce)
     13         else:
     14             self.reduction = reduction

/usr/local/lib/python3.6/dist-packages/torch/nn/_reduction.py in legacy_get_string(size_average, reduce, emit_warning)
     34         reduce = True
     35 
---> 36     if size_average and reduce:
     37         ret = 'mean'
     38     elif reduce:

RuntimeError: bool value of Tensor with more than one value is ambiguous