Would appreciate any help on how to troubleshoot a tensor size mismatch error. I’m using the super-resolution notebook as a template, but I keep getting a size mismatch error.
Initially, I thought it was because my images are rectangular (one of the dimensions in the error message matched one of my image dimensions), so I resized everything to 135 x 135 squares, but no luck.
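For reference, here’s the quick sanity check I used to confirm the resize actually stuck; just a minimal sketch, assuming the trnval_df and filename columns from the pipeline at the bottom of this post:

from PIL import Image

# minimal sketch (assumes trnval_df from below): confirm every image
# on disk really is 135x135 after resizing
for col in ('filename_x', 'filename_y'):
    sizes = {Image.open(fname).size for fname in trnval_df[col]}
    print(col, sizes)  # expect {(135, 135)} for both columns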
It fails on the .fit command, and here’s the end of the error output:
~/anaconda3/envs/dl_2019_06/lib/python3.6/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
24 if not is_listy(xb): xb = [xb]
25 if not is_listy(yb): yb = [yb]
---> 26 out = model(*xb)
27 out = cb_handler.on_loss_begin(out)
28
~/anaconda3/envs/dl_2019_06/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
~/anaconda3/envs/dl_2019_06/lib/python3.6/site-packages/fastai/layers.py in forward(self, x)
153 for l in self.layers:
154 res.orig = x
--> 155 nres = l(res)
156 # We have to remove res.orig to avoid hanging refs and therefore memory leaks
157 res.orig = None
~/anaconda3/envs/dl_2019_06/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
~/anaconda3/envs/dl_2019_06/lib/python3.6/site-packages/fastai/layers.py in forward(self, x)
170 self.dense=dense
171
--> 172 def forward(self, x): return torch.cat([x,x.orig], dim=1) if self.dense else (x+x.orig)
173
174 def res_block(nf, dense:bool=False, norm_type:Optional[NormType]=NormType.Batch, bottle:bool=False, **conv_kwargs):
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 1. Got 135 and 136 in dimension 2 at /opt/conda/conda-bld/pytorch_1556653099582/work/aten/src/THC/generic/THCTensorMath.cu:71
I should also point out that I’ve checked that all my input data have dimensions (135, 135, 3), and I have no idea where the 136 in the error message is coming from!
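My best guess so far: the traceback ends in a skip connection (x + x.orig), so maybe an odd input size gets rounded somewhere on the way down and comes back one pixel bigger on the way up. Here’s a toy plain-PyTorch sketch (not the notebook’s actual model) showing how a 135 input can turn into 136:

import torch
import torch.nn as nn

# toy down/up-sample pair: a stride-2 conv with padding=1 maps 135 -> 68
# (it rounds up), and doubling 68 gives 136, which can no longer be added
# to the original 135-sized activation in a skip connection
x = torch.randn(1, 3, 135, 135)
down = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=1)
up = nn.Upsample(scale_factor=2)
print(up(down(x)).shape)  # torch.Size([1, 8, 136, 136])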
Here’s my data pipeline. I’m using a pandas DataFrame with corresponding filenames for the crappy and non-crappy images (filename_y, filename_x):
from fastai.vision import *  # get_transforms, ImageImageList, imagenet_stats

# light augmentation only: small zoom, no flips/rotation/lighting/warp
image_gen = get_transforms(do_flip=False, max_rotate=0.0, max_zoom=1.2,
                           max_lighting=0.0, max_warp=0.0)
# items are the crappy images (filename_y), labels the clean ones (filename_x)
trn_src = (ImageImageList.from_df(trnval_df, path="/", cols='filename_y')
           .split_by_rand_pct(valid_pct=0.1, seed=7)
           .label_from_df(cols='filename_x'))
# targets get the same transforms/size (tfm_y=True) and are normalized too
trn_data = (trn_src.transform(image_gen, size=(135,135), tfm_y=True)
            .databunch(bs=batch_size)
            .normalize(imagenet_stats, do_y=True))
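In case it helps anyone reproduce this, here’s the one-liner I’ve been using to peek at what the DataBunch actually feeds the model (fastai v1’s one_batch); the shapes in the comment are what I’d expect to see, not what I’ve verified:

# inspect one training batch to see the actual tensor shapes fed to .fit
x, y = trn_data.one_batch()
print(x.shape, y.shape)  # expecting torch.Size([bs, 3, 135, 135]) for both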