Expected input batch_size (8) to match target batch_size (524288)

Hi. I’m trying to do multiclass image segmentation. I created a custom data set from 256x256 images with .tif extension. I’m using google colab. My code is a combination of lesson 3 camvid and lesson 7 minstar:

from fastai.vision import *
from fastai.callbacks.hooks import *
from fastai.utils.mem import *

from google.colab import drive
drive.mount('/content/drive')
path = Path('/content/drive/My Drive/New1')
path_lbl = Path('/content/drive/My Drive/New1/Mask_4class')
path_img = Path('/content/drive/My Drive/New1/Image')

fnames = get_image_files(path_img)
img_f = fnames[3]
img = open_image(img_f)
img.show(figsize=(5,5))

get_y_fn = lambda x: path_lbl/f'{x.stem}{x.suffix}'
mask = open_mask(get_y_fn(img_f))
mask.show(figsize=(5,5), alpha=1)
src_size = np.array(mask.shape[1:])
src_size,mask.data

codes = np.loadtxt(path/'codes.txt', dtype=str); codes

size = src_size
free = gpu_mem_get_free_no_cache()
# the max size of bs depends on the available GPU RAM
if free > 8200: bs=8
else:           bs=4
print(f"using bs={bs}, have {free}MB of GPU RAM free")

src = (SegmentationItemList.from_folder(path_img)
   .split_by_rand_pct(0.2)
   .label_from_func(get_y_fn, classes=codes))

data = (src.transform(get_transforms(), size=size, tfm_y=True)
    .databunch(bs=bs)
    .normalize())

data
ImageDataBunch;

Train: LabelList (24 items)
x: SegmentationItemList
Image (3, 256, 256),Image (3, 256, 256),Image (3, 256, 256),Image (3, 256, 256),Image (3, 256,256)
y: SegmentationLabelList
ImageSegment (1, 256, 256),ImageSegment (1, 256, 256),ImageSegment (1, 256, 256),ImageSegment (1, 256, 256),ImageSegment (1, 256, 256)
Path: /content/drive/My Drive/New1/Image;

Valid: LabelList (5 items)
x: SegmentationItemList
Image (3, 256, 256),Image (3, 256, 256),Image (3, 256, 256),Image (3, 256, 256),Image (3, 256, 256)
y: SegmentationLabelList
ImageSegment (1, 256, 256),ImageSegment (1, 256, 256),ImageSegment (1, 256,    256),ImageSegment (1, 256, 256),ImageSegment (1, 256, 256)
Path: /content/drive/My Drive/New1/Image;

Test: None

def conv2(ni,nf): return conv_layer(ni,nf,stride=2)

def conv_and_res(ni,nf): return nn.Sequential(conv2(ni, nf), res_block(nf))
model = nn.Sequential(
  conv_and_res(3, 8),#256
  conv_and_res(8, 16),#128
  conv_and_res(16, 32),#64
  conv_and_res(32, 64),#32
  conv_and_res(64, 128),#16
  conv_and_res(128, 64),#8
  conv_and_res(64, 32),#4
  conv_and_res(32, 16),#2
  conv2(16, 10),
  Flatten()
)
model.eval() 

name2id = {v:k for k,v in enumerate(codes)}
void_code = name2id['Else']

def acc_camvid(input, target):
   target = target.squeeze(1)
   mask = target != void_code
   return (input.argmax(dim=1)[mask]==target[mask]).float().mean()

metrics=acc_camvid
wd=1e-2

learn = Learner(data, model, metrics=metrics, wd=wd)


lr_find(learn)
learn.recorder.plot()

The error is:

LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-112-dd390b1c8108> in <module>()
----> 1 lr_find(learn)
  2 learn.recorder.plot()

7 frames
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in nll_loss(input, target, weight,  size_average, ignore_index, reduce, reduction)
   1869                          .format(input.size(0), target.size(0)))
   1870     if dim == 2:
-> 1871         ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
   1872     elif dim == 4:
   1873         ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

RuntimeError: multi-target not supported at /pytorch/aten/src/THCUNN/generic/ClassNLLCriterion.cu:15