Segmentation problem with target size different from input size

Hi all,

I have been through a lot of forum threads discussing errors in the cross-entropy function that end in "Expected input batch_size ({}) to match target batch_size ({})." As far as I can tell none of the solutions I have found work for my problem, and I'm at a loss for what to try next.

I am trying to predict an image segmentation mask for an image of size (513, 25), but only do segmentation on the middle column of pixels, which means my targets have size (513, 1).

I load my data via SegmentationItemList and everything seems to work fine.

codes = array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
bs = 2
data = (SegmentationItemList.from_folder(path/'train')
.split_by_rand_pct(0.2)
.label_from_func(get_y_fn, classes=codes)
.databunch(bs=bs))

When I run data.train_ds it yields:

LabelList (15 items)
x: SegmentationItemList
Image (3, 513, 25),Image (3, 513, 25),Image (3, 513, 25),Image (3, 513, 25),Image (3, 513, 25)
y: SegmentationLabelList
ImageSegment (1, 513, 1),ImageSegment (1, 513, 1),ImageSegment (1, 513, 1),ImageSegment (1, 513, 1),ImageSegment (1, 513, 1)
Path: Data/train
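
The shapes above look right to me, though I haven't checked what actually comes out of the dataloader (as opposed to what the dataset stores); a quick sanity check would be something like:

x, y = data.one_batch()  # returns one collated training batch as tensors
print(x.shape, y.shape)  # I'd expect something like (2, 3, 513, 25) and (2, 1, 513, 1), but haven't verified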

I then create my learner:

learn = cnn_learner(data, models.resnet34)
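
One thing I'm not sure about: as far as I know cnn_learner builds a classification head that outputs one prediction per image, while segmentation needs per-pixel predictions. Should I be using unet_learner instead? Untested guess:

learn = unet_learner(data, models.resnet34)  # U-Net head produces per-pixel output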

Then when I try running learn.fit_one_cycle(2) it yields this error:


ValueError Traceback (most recent call last)
&lt;ipython-input&gt; in &lt;module&gt;
----> 1 learn.fit_one_cycle(2)

/opt/anaconda3/lib/python3.7/site-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, final_div, wd, callbacks, tot_epochs, start_epoch)
20 callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor, pct_start=pct_start,
21 final_div=final_div, tot_epochs=tot_epochs, start_epoch=start_epoch))
---> 22 learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
23
24 def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, wd:float=None):

/opt/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
198 callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
199 if defaults.extra_callbacks is not None: callbacks += defaults.extra_callbacks
---> 200 fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
201
202 def create_opt(self, lr:Floats, wd:Floats=0.)->None:

/opt/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
99 for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
100 xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 101 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
102 if cb_handler.on_batch_end(loss): break
103

/opt/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
28
29 if not loss_func: return to_detach(out), yb[0].detach()
---> 30 loss = loss_func(out, *yb)
31
32 if opt is not None:

/opt/anaconda3/lib/python3.7/site-packages/fastai/layers.py in __call__(self, input, target, **kwargs)
235 if self.floatify: target = target.float()
236 input = input.view(-1,input.shape[-1]) if self.is_2d else input.view(-1)
---> 237 return self.func.__call__(input, target.view(-1), **kwargs)
238
239 def CrossEntropyFlat(*args, axis:int=-1, **kwargs):

/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
---> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)

/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
940 def forward(self, input, target):
941 return F.cross_entropy(input, target, weight=self.weight,
–> 942 ignore_index=self.ignore_index, reduction=self.reduction)
943
944

/opt/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
2054 if size_average is not None or reduce is not None:
2055 reduction = _Reduction.legacy_get_string(size_average, reduce)
---> 2056 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
2057
2058

/opt/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
1867 if input.size(0) != target.size(0):
1868 raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).'
---> 1869 .format(input.size(0), target.size(0)))
1870 if dim == 2:
1871 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

ValueError: Expected input batch_size (2) to match target batch_size (25650).
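
For what it's worth, 25650 = 2 × 513 × 25, i.e. the flattened target covers every pixel of the full (513, 25) input (not the 2 × 513 × 1 = 1026 values I'd expect from my (513, 1) masks), while the model output has only a batch dimension of 2, i.e. one prediction per image, which would fit a classification head. The same mismatch can be reproduced in plain PyTorch (shapes assumed from the traceback above):

import torch
import torch.nn.functional as F

out = torch.randn(2, 11)                              # classification-style output: (batch, n_classes)
target = torch.zeros(2 * 513 * 25, dtype=torch.long)  # flattened per-pixel target: 25650 values
F.cross_entropy(out, target)                          # ValueError: Expected input batch_size (2) to match target batch_size (25650)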

Any insight into this would be a huge help!