RuntimeError: The size of tensor a (96) must match the size of tensor b (256) at non-singleton dimension 0

Hi, I am working on mnist_basics from https://github.com/fastai/fastbook/blob/master/04_mnist_basics.ipynb.

Executing learn.fit(10, lr=lr) gives the error below. How did my input tensor become size 96? I believe the error comes from the batch_accuracy function.

epoch train_loss valid_loss batch_accuracy time
0 0.001440 0.000002 None 00:01

RuntimeError Traceback (most recent call last)
~/fastai2/fastai2/learner.py in one_batch(self, i, b)
158 self._split(b); self(‘begin_batch’)
–> 159 self.pred = self.model(*self.xb); self(‘after_pred’)
160 if len(self.yb) == 0: return

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
531 else:
–> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
86 def forward(self, input):
—> 87 return F.linear(input, self.weight, self.bias)
88

/opt/conda/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1369 # fused op is marginally faster
-> 1370 ret = torch.addmm(bias, input, weight.t())
1371 else:

RuntimeError: Expected object of scalar type Float but got scalar type Byte for argument #2 ‘mat1’ in call to _th_addmm

During handling of the above exception, another exception occurred:

RuntimeError Traceback (most recent call last)
in
----> 1 learn.fit(10, lr=lr)

/opt/conda/lib/python3.7/site-packages/fastcore/utils.py in _f(*args, **kwargs)
429 init_args.update(log)
430 setattr(inst, ‘init_args’, init_args)
–> 431 return inst if to_return else f(*args, **kwargs)
432 return _f
433

~/fastai2/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
199 self.epoch=epoch; self(‘begin_epoch’)
200 self._do_epoch_train()
–> 201 self._do_epoch_validate()
202 except CancelEpochException: self(‘after_cancel_epoch’)
203 finally: self(‘after_epoch’)

~/fastai2/fastai2/learner.py in _do_epoch_validate(self, ds_idx, dl)
181 try:
182 self.dl = dl; self(‘begin_validate’)
–> 183 with torch.no_grad(): self.all_batches()
184 except CancelValidException: self(‘after_cancel_validate’)
185 finally: self(‘after_validate’)

~/fastai2/fastai2/learner.py in all_batches(self)
151 def all_batches(self):
152 self.n_iter = len(self.dl)
–> 153 for o in enumerate(self.dl): self.one_batch(*o)
154
155 def one_batch(self, i, b):

~/fastai2/fastai2/learner.py in one_batch(self, i, b)
165 self.opt.zero_grad()
166 except CancelBatchException: self(‘after_cancel_batch’)
–> 167 finally: self(‘after_batch’)
168
169 def _do_begin_fit(self, n_epoch):

~/fastai2/fastai2/learner.py in call(self, event_name)
132 def ordered_cbs(self, event): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, event)]
133
–> 134 def call(self, event_name): L(event_name).map(self._call_one)
135 def _call_one(self, event_name):
136 assert hasattr(event, event_name)

/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in map(self, f, *args, **kwargs)
374 else f.format if isinstance(f,str)
375 else f.getitem)
–> 376 return self._new(map(g, self))
377
378 def filter(self, f, negate=False, **kwargs):

/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in _new(self, items, *args, **kwargs)
325 @property
326 def _xtra(self): return None
–> 327 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
328 def getitem(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
329 def copy(self): return self._new(self.items.copy())

/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in call(cls, x, args, **kwargs)
45 return x
46
—> 47 res = super().call(
((x,) + args), **kwargs)
48 res._newchk = 0
49 return res

/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in init(self, items, use_list, match, *rest)
316 if items is None: items = []
317 if (use_list is not None) or not _is_array(items):
–> 318 items = list(items) if use_list else _listify(items)
319 if match is not None:
320 if is_coll(match): match = len(match)

/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in _listify(o)
252 if isinstance(o, list): return o
253 if isinstance(o, str) or _is_array(o): return [o]
–> 254 if is_iter(o): return list(o)
255 return [o]
256

/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in call(self, *args, **kwargs)
218 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
219 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
–> 220 return self.fn(*fargs, **kwargs)
221
222 # Cell

~/fastai2/fastai2/learner.py in _call_one(self, event_name)
135 def _call_one(self, event_name):
136 assert hasattr(event, event_name)
–> 137 [cb(event_name) for cb in sort_by_run(self.cbs)]
138
139 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)

~/fastai2/fastai2/learner.py in (.0)
135 def _call_one(self, event_name):
136 assert hasattr(event, event_name)
–> 137 [cb(event_name) for cb in sort_by_run(self.cbs)]
138
139 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)

~/fastai2/fastai2/callback/core.py in call(self, event_name)
22 _run = (event_name not in _inner_loop or (self.run_train and getattr(self, ‘training’, True)) or
23 (self.run_valid and not getattr(self, ‘training’, False)))
—> 24 if self.run and _run: getattr(self, event_name, noop)()
25 if event_name==‘after_fit’: self.run=True #Reset self.run to True at each end of fit
26

~/fastai2/fastai2/learner.py in after_batch(self)
421 if len(self.yb) == 0: return
422 mets = self._train_mets if self.training else self._valid_mets
–> 423 for met in mets: met.accumulate(self.learn)
424 if not self.training: return
425 self.lrs.append(self.opt.hypers[-1][‘lr’])

~/fastai2/fastai2/learner.py in accumulate(self, learn)
346 def accumulate(self, learn):
347 bs = find_bs(learn.yb)
–> 348 self.total += to_detach(self.func(learn.pred, *learn.yb))*bs
349 self.count += bs
350 @property

in batch_accuracy(xb, yb)
1 def batch_accuracy(xb,yb): #Define metrics
2 preds = xb.sigmoid()
----> 3 correct = (preds>0.5)==yb
4 return correct.float().mean()

/opt/conda/lib/python3.7/site-packages/torch/tensor.py in wrapped(*args, **kwargs)
26 def wrapped(*args, **kwargs):
27 try:
—> 28 return f(*args, **kwargs)
29 except TypeError:
30 return NotImplemented

RuntimeError: The size of tensor a (96) must match the size of tensor b (256) at non-singleton dimension 0