TfmdDL for prediction

I am trying to write prediction code for my fastai ResNet model, but without success.
I need a different set of transformations for prediction than the ones I used in the original training dataloader,
so I tried to build a new TfmdDL, but I can't call infer.get_preds with this dl.
(I have my own item_tfms that produces a TensorImage; after that I added the batch_tfms from the original dataloader for the ImageNet normalization the ResNet expects.)

# preprocess, resize and Image2PILImage are my own item transforms; the rest are fastai's
item_tfms = Pipeline([preprocess, resize, Image2PILImage, ToTensor])
batch_tfms = Pipeline([IntToFloatTensor,
                       Brightness(max_lighting=.05, p=1, draw=False, batch=True),
                       Normalize.from_stats(*imagenet_stats)])
tdl = TfmdDL(imgs, after_item=item_tfms, after_batch=batch_tfms, device=device)
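
For reference, here is a minimal way to inspect what tdl actually yields (types and shapes only; assuming the TfmdDL can be iterated directly):

# grab one batch and check whether it comes out batched as (bs, 3, H, W) or as single items
b = next(iter(tdl))
if isinstance(b, tuple):
    for t in b: print(type(t), getattr(t, 'shape', None))
else:
    print(type(b), getattr(b, 'shape', None))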

When calling infer.get_preds I get:

infer.get_preds(dl=tdl)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')

23 frames
/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _do_one_batch(self)
163 def _do_one_batch(self):
–> 164 self.pred = self.model(*self.xb)
165 self(‘after_pred’)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
726 else:
–> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(

/usr/local/lib/python3.6/dist-packages/fastai/layers.py in forward(self, x)
396 res.orig = x
–> 397 nres = l(res)
398 # We have to remove res.orig to avoid hanging refs and therefore memory leaks

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
726 else:
–> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py in forward(self, input)
116 for module in self:
–> 117 input = module(input)
118 return input

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
726 else:
–> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in forward(self, input)
422 def forward(self, input: Tensor) -> Tensor:
–> 423 return self._conv_forward(input, self.weight)
424

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight)
419 return F.conv2d(input, weight, self.bias, self.stride,
–> 420 self.padding, self.dilation, self.groups)
421

RuntimeError: Expected 4-dimensional input for 4-dimensional weight [64, 3, 7, 7], but got 3-dimensional input of size [3, 235, 224] instead

During handling of the above exception, another exception occurred:

TypeError Traceback (most recent call last)
in ()
----> 1 line_infer.get_preds(dl=tdl)

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in get_preds(self, ds_idx, dl, with_input, with_decoded, with_loss, act, inner, reorder, cbs, **kwargs)
233 if with_loss: ctx_mgrs.append(self.loss_not_reduced())
234 with ContextManagers(ctx_mgrs):
–> 235 self._do_epoch_validate(dl=dl)
236 if act is None: act = getattr(self.loss_func, ‘activation’, noop)
237 res = cb.all_tensors()

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _do_epoch_validate(self, ds_idx, dl)
186 if dl is None: dl = self.dls[ds_idx]
187 self.dl = dl
–> 188 with torch.no_grad(): self._with_events(self.all_batches, ‘validate’, CancelValidException)
189
190 def _do_epoch(self):

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
153
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
157 finally: self(f'after_{event_type}') ;final()

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in all_batches(self)
159 def all_batches(self):
160 self.n_iter = len(self.dl)
–> 161 for o in enumerate(self.dl): self.one_batch(*o)
162
163 def _do_one_batch(self):

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in one_batch(self, i, b)
177 self.iter = i
178 self._split(b)
–> 179 self._with_events(self._do_one_batch, ‘batch’, CancelBatchException)
180
181 def _do_epoch_train(self):

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
--> 157 finally: self(f'after_{event_type}') ;final()
158
159 def all_batches(self):

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in __call__(self, event_name)
131 def ordered_cbs(self, event): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, event)]
132
--> 133 def __call__(self, event_name): L(event_name).map(self._call_one)
134
135 def _call_one(self, event_name):

/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in map(self, f, gen, *args, **kwargs)
224 def range(cls, a, b=None, step=None): return cls(range_of(a, b=b, step=step))
225
–> 226 def map(self, f, *args, gen=False, **kwargs): return self._new(map_ex(self, f, *args, gen=gen, **kwargs))
227 def argwhere(self, f, negate=False, **kwargs): return self._new(argwhere(self, f, negate, **kwargs))
228 def filter(self, f=noop, negate=False, gen=False, **kwargs):

/usr/local/lib/python3.6/dist-packages/fastcore/basics.py in map_ex(iterable, f, gen, *args, **kwargs)
541 res = map(g, iterable)
542 if gen: return res
–> 543 return list(res)
544
545 # Cell

/usr/local/lib/python3.6/dist-packages/fastcore/basics.py in __call__(self, *args, **kwargs)
531 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
532 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 533 return self.fn(*fargs, **kwargs)
534
535 # Cell

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in _call_one(self, event_name)
135 def _call_one(self, event_name):
136 assert hasattr(event, event_name), event_name
–> 137 [cb(event_name) for cb in sort_by_run(self.cbs)]
138
139 def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in <listcomp>(.0)
135 def _call_one(self, event_name):
136 assert hasattr(event, event_name), event_name
–> 137 [cb(event_name) for cb in sort_by_run(self.cbs)]
138
139 def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)

/usr/local/lib/python3.6/dist-packages/fastai/callback/core.py in __call__(self, event_name)
42 (self.run_valid and not getattr(self, 'training', False)))
43 res = None
--> 44 if self.run and _run: res = getattr(self, event_name, noop)()
45 if event_name=='after_fit': self.run=True #Reset self.run to True at each end of fit
46 return res

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in after_batch(self)
451 if len(self.yb) == 0: return
452 mets = self._train_mets if self.training else self._valid_mets
–> 453 for met in mets: met.accumulate(self.learn)
454 if not self.training: return
455 self.lrs.append(self.opt.hypers[-1][‘lr’])

/usr/local/lib/python3.6/dist-packages/fastai/learner.py in accumulate(self, learn)
375 def accumulate(self, learn):
376 bs = find_bs(learn.yb)
–> 377 self.total += learn.to_detach(self.func(learn.pred, *learn.yb))*bs
378 self.count += bs
379 @property

TypeError: accuracy() got multiple values for argument 'axis'
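
From the RuntimeError it looks like the model received a single 3-d image of size [3, 235, 224] rather than a 4-d batch; the [64, 3, 7, 7] weight in the message is the usual ResNet stem convolution, which expects input of shape (bs, 3, H, W). A purely illustrative sketch of that shape difference:

import torch
x_single = torch.rand(3, 235, 224)  # what the model apparently received
x_batch = x_single.unsqueeze(0)     # (1, 3, 235, 224): the 4-d shape conv2d expects
print(x_single.dim(), x_batch.dim())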

So I tried to feed the batches from tdl to the model manually:

ys = []
for xb in tdl:
    with torch.no_grad():
        yb = infer.model(xb)
        yb = yb.to('cpu')
        ys += [yb]

but got:


TypeError Traceback (most recent call last)
in ()
----> 1 get_ipython().run_cell_magic('time', '', "ys = []\nfor xb in tdl:\n    with torch.no_grad():\n        # xb = xb.to(device)\n        # xb = batch_tfms(xb)\n        # xb = torch.tensor(xb)\n        yb = m(xb)\n        yb = yb.to('cpu')\n        ys += [yb]")

18 frames
/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py in run_cell_magic(self, magic_name, line, cell)
2115 magic_arg_s = self.var_expand(line, stack_depth)
2116 with self.builtin_trap:
-> 2117 result = fn(magic_arg_s, cell)
2118 return result
2119

in time(self, line, cell, local_ns)

/usr/local/lib/python3.6/dist-packages/IPython/core/magic.py in <lambda>(f, *a, **k)
186 # but it’s overkill for just that one bit of state.
187 def magic_deco(arg):
–> 188 call = lambda f, *a, **k: f(*a, **k)
189
190 if callable(arg):

/usr/local/lib/python3.6/dist-packages/IPython/core/magics/execution.py in time(self, line, cell, local_ns)
1191 else:
1192 st = clock2()
-> 1193 exec(code, glob, local_ns)
1194 end = clock2()
1195 out = None

in ()

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
–> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),

/usr/local/lib/python3.6/dist-packages/fastai/layers.py in forward(self, x)
395 for l in self.layers:
396 res.orig = x
–> 397 nres = l(res)
398 # We have to remove res.orig to avoid hanging refs and therefore memory leaks
399 res.orig = None

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
–> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py in forward(self, input)
115 def forward(self, input):
116 for module in self:
–> 117 input = module(input)
118 return input
119

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
–> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in forward(self, input)
421
422 def forward(self, input: Tensor) -> Tensor:
–> 423 return self._conv_forward(input, self.weight)
424
425 class Conv3d(_ConvNd):

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight)
418 _pair(0), self.dilation, self.groups)
419 return F.conv2d(input, weight, self.bias, self.stride,
–> 420 self.padding, self.dilation, self.groups)
421
422 def forward(self, input: Tensor) -> Tensor:

/usr/local/lib/python3.6/dist-packages/fastai/torch_core.py in __torch_function__(self, func, types, args, kwargs)
315 def __torch_function__(self, func, types, args=(), kwargs=None):
316 with torch._C.DisableTorchFunction(): ret = _convert(func(*args, **(kwargs or {})), self.__class__)
--> 317 if isinstance(ret, TensorBase): ret.set_meta(self, as_copy=True)
318 return ret
319

/usr/local/lib/python3.6/dist-packages/fastai/torch_core.py in set_meta(self, x, as_copy)
278 "Set all metadata in __dict__"
279 if not hasattr(x,'__dict__'): return
--> 280 self.__dict__ = deepcopy(x.__dict__) if as_copy else x.__dict__
281
282 # Cell

/usr/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
–> 150 y = copier(x, memo)
151 else:
152 try:

/usr/lib/python3.6/copy.py in _deepcopy_dict(x, memo, deepcopy)
238 memo[id(x)] = y
239 for key, value in x.items():
–> 240 y[deepcopy(key, memo)] = deepcopy(value, memo)
241 return y
242 d[dict] = _deepcopy_dict

/usr/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
159 copier = getattr(x, "__deepcopy__", None)
160 if copier:
–> 161 y = copier(memo)
162 else:
163 reductor = dispatch_table.get(cls)

/usr/local/lib/python3.6/dist-packages/torch/tensor.py in __deepcopy__(self, memo)
43 relevant_args = (self,)
44 if type(self) is not Tensor and has_torch_function(relevant_args):
---> 45 return handle_torch_function(Tensor.__deepcopy__, relevant_args, self, memo)
46 if not self.is_leaf:
47 raise RuntimeError("Only Tensors created explicitly by the user "

/usr/local/lib/python3.6/dist-packages/torch/overrides.py in handle_torch_function(public_api, relevant_args, *args, **kwargs)
1061 # Use public_api instead of implementation so __torch_function__
1062 # implementations can do equality/identity comparisons.
-> 1063 result = overloaded_arg.__torch_function__(public_api, types, args, kwargs)
1064
1065 if result is not NotImplemented:

/usr/local/lib/python3.6/dist-packages/fastai/torch_core.py in __torch_function__(self, func, types, args, kwargs)
314
315 def __torch_function__(self, func, types, args=(), kwargs=None):
--> 316 with torch._C.DisableTorchFunction(): ret = _convert(func(*args, **(kwargs or {})), self.__class__)
317 if isinstance(ret, TensorBase): ret.set_meta(self, as_copy=True)
318 return ret

/usr/local/lib/python3.6/dist-packages/torch/tensor.py in __deepcopy__(self, memo)
75 self._backward_hooks)
76 else:
---> 77 new_tensor = self.new()
78 new_tensor.set_(new_storage, self.storage_offset(), self.size(), self.stride())
79 new_tensor.requires_grad = self.requires_grad

TypeError: new() missing 1 required positional argument: 'x'

At last, I found that casting xb to a regular torch tensor (and ignoring the PyTorch warning…) helps. So this works:

ys = []
for xb in tdl:
    with torch.no_grad():
        xb = torch.tensor(xb)  # cast to a plain torch.Tensor (this is what triggers the PyTorch warning)
        yb = m(xb)
        yb = yb.to('cpu')
        ys += [yb]

but it doesn't seem to be the right solution…
I've spent a very long time on all this, and I'd be very happy for any help.
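
For completeness, a sketch of how ys from that loop could be turned into final predictions (assuming a single-label classification head, so softmax over the class dimension):

import torch
preds = torch.cat(ys)           # (n_items, n_classes) raw outputs
probs = preds.softmax(dim=1)    # class probabilities
pred_idx = probs.argmax(dim=1)  # predicted class index per image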
Thanks!