learn.get_preds() gives an error

Since yesterday, when I run learn.get_preds() I am getting the following error:
'NoneType' object cannot be interpreted as an integer

I am getting this error for both a tabular_learner and a cnn_learner. Any idea what the reason could be?

We need a bit more information here. How are you setting up your Learner? How are you setting up your DataLoaders? Was your model exported and then loaded in with load_learner? etc, etc

Here is my code; I am practicing with the PetFinder competition on Kaggle.

splits = RandomSplitter(valid_pct=0.2)(range_of(train))
to = TabularPandas(train, procs=procs, cat_names=cat_names, cont_names=cont_names,
                   y_names=dep_var, y_block=CategoryBlock, splits=splits)

dls = to.dataloaders(256)
learn = tabular_learner(dls, layers=[1000, 500, 100], y_range=(0, 4),
                        metrics=[accuracy, CohenKappa(weights='quadratic')])
learn.fit_one_cycle(15, 4e-3, wd=1e-6)

to_test = learn.dls.train_ds.new(test)
to_test.process()
dl = TabDataLoader(to_test)
preds, _ = learn.get_preds(dl=dl, reorder=True)
labels = np.argmax(preds, 1)

I have the same problem with a cnn_learner:

dls = ImageDataLoaders.from_df(img_label, path, folder='train_images',
                               item_tfms=RandomResizedCrop(224, min_scale=0.5),
                               batch_tfms=aug_transforms())
dls.train.show_batch(max_n=8, nrows=2, unique=True)
learn1 = cnn_learner(dls, resnet18, metrics=error_rate)
learn1.fine_tune(9, freeze_epochs=1, base_lr=3e-3)

test_items = get_image_files(path/'test_images')
test_dl = dls.test_dl(test_items)
img_preds, _ = learn1.get_preds(dl=test_dl)

I also saved the model and exported it for later use, and got the same error.
It is worth mentioning that the same code was working before yesterday night.

What does len(dl) give you here? Also try dl = learn.dls.test_dl(test)
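
Something like this (a minimal sketch of that second suggestion, assuming test is the same raw test DataFrame as above):

# Build the test set through the Learner's own DataLoaders, so the
# training-time procs, batch size and device are applied automatically.
dl = learn.dls.test_dl(test)
print(len(dl))                  # quick sanity check that batches were created
preds, _ = learn.get_preds(dl=dl)
labels = np.argmax(preds, 1)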

What may be happening here is that it has tied train_images to your image path. What does this give you:

test_items = get_image_files(path/'test_images')
test_dl = dls.test_dl(test_items)
len(test_dl)

len(dl) gives 249

len(test_dl) gives 227

The code is working now; I did not change anything.
Anyway, thanks @muellerzr for your responses.


I figured out that this error shows up when I run my Kaggle kernel on a GPU, but when I run it on the CPU there is no problem. Do you know what the problem is?
This is the full error:


RuntimeError                              Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
    153     def _with_events(self, f, event_type, ex, final=noop):
--> 154         try: self(f'before_{event_type}'); f()
    155         except ex: self(f'after_cancel_{event_type}')

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in all_batches(self)
    159         self.n_iter = len(self.dl)
--> 160         for o in enumerate(self.dl): self.one_batch(*o)
    161

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in one_batch(self, i, b)
    177         self._split(b)
--> 178         self._with_events(self._do_one_batch, 'batch', CancelBatchException)
    179

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
    153     def _with_events(self, f, event_type, ex, final=noop):
--> 154         try: self(f'before_{event_type}'); f()
    155         except ex: self(f'after_cancel_{event_type}')

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in _do_one_batch(self)
    162     def _do_one_batch(self):
--> 163         self.pred = self.model(*self.xb)
    164         self('after_pred')

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(

/opt/conda/lib/python3.7/site-packages/fastai/tabular/model.py in forward(self, x_cat, x_cont)
     47         if self.n_emb != 0:
---> 48             x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
     49             x = torch.cat(x, 1)

/opt/conda/lib/python3.7/site-packages/fastai/tabular/model.py in <listcomp>(.0)
     47         if self.n_emb != 0:
---> 48             x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
     49             x = torch.cat(x, 1)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/sparse.py in forward(self, input)
    125             input, self.weight, self.padding_idx, self.max_norm,
--> 126             self.norm_type, self.scale_grad_by_freq, self.sparse)
    127

/opt/conda/lib/python3.7/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
   1851         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1852     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
   1853

RuntimeError: Input, output and indices must be on the current device

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/fastai/torch_core.py in to_concat(xs, dim)
    270     #   in this case we return a big list
--> 271     try:    return retain_type(torch.cat(xs, dim=dim), xs[0])
    272     except: return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])

TypeError: expected Tensor as element 0 in argument 0, but got NoneType

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
<ipython-input> in <module>
      2 to_test.process()
      3 dl = TabDataLoader(to_test)
----> 4 preds,_ = learn.get_preds(dl=dl, reorder=True)
      5 labels = np.argmax(preds, 1)

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in get_preds(self, ds_idx, dl, with_input, with_decoded, with_loss, act, inner, reorder, cbs, **kwargs)
    235         if with_loss: ctx_mgrs.append(self.loss_not_reduced())
    236         with ContextManagers(ctx_mgrs):
--> 237             self._do_epoch_validate(dl=dl)
    238             if act is None: act = getattr(self.loss_func, 'activation', noop)
    239             res = cb.all_tensors()

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in _do_epoch_validate(self, ds_idx, dl)
    185         if dl is None: dl = self.dls[ds_idx]
    186         self.dl = dl
--> 187         with torch.no_grad(): self._with_events(self.all_batches, 'validate', CancelValidException)
    188
    189     def _do_epoch(self):

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
    154         try: self(f'before_{event_type}'); f()
    155         except ex: self(f'after_cancel_{event_type}')
--> 156         finally: self(f'after_{event_type}'); final()
    157
    158     def all_batches(self):

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in __call__(self, event_name)
    130     def ordered_cbs(self, event): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, event)]
    131
--> 132     def __call__(self, event_name): L(event_name).map(self._call_one)
    133
    134     def _call_one(self, event_name):

/opt/conda/lib/python3.7/site-packages/fastcore/foundation.py in map(self, f, gen, *args, **kwargs)
    177     def range(cls, a, b=None, step=None): return cls(range_of(a, b=b, step=step))
    178
--> 179     def map(self, f, *args, gen=False, **kwargs): return self._new(map_ex(self, f, *args, gen=gen, **kwargs))
    180     def argwhere(self, f, negate=False, **kwargs): return self._new(argwhere(self, f, negate, **kwargs))
    181     def filter(self, f=noop, negate=False, gen=False, **kwargs):

/opt/conda/lib/python3.7/site-packages/fastcore/basics.py in map_ex(iterable, f, gen, *args, **kwargs)
    605     res = map(g, iterable)
    606     if gen: return res
--> 607     return list(res)
    608
    609 # Cell

/opt/conda/lib/python3.7/site-packages/fastcore/basics.py in __call__(self, *args, **kwargs)
    595         if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
    596         fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 597         return self.func(*fargs, **kwargs)
    598
    599 # Cell

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in _call_one(self, event_name)
    134     def _call_one(self, event_name):
    135         assert hasattr(event, event_name), event_name
--> 136         [cb(event_name) for cb in sort_by_run(self.cbs)]
    137
    138     def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)

/opt/conda/lib/python3.7/site-packages/fastai/learner.py in <listcomp>(.0)
    134     def _call_one(self, event_name):
    135         assert hasattr(event, event_name), event_name
--> 136         [cb(event_name) for cb in sort_by_run(self.cbs)]
    137
    138     def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)

/opt/conda/lib/python3.7/site-packages/fastai/callback/core.py in __call__(self, event_name)
     42                (self.run_valid and not getattr(self, 'training', False)))
     43         res = None
---> 44         if self.run and _run: res = getattr(self, event_name, noop)()
     45         if event_name=='after_fit': self.run=True #Reset self.run to True at each end of fit
     46         return res

/opt/conda/lib/python3.7/site-packages/fastai/callback/core.py in after_validate(self)
    122         if not hasattr(self, 'preds'): return
    123         if self.with_input: self.inputs = detuplify(to_concat(self.inputs, dim=self.concat_dim))
--> 124         if not self.save_preds: self.preds = detuplify(to_concat(self.preds, dim=self.concat_dim))
    125         if not self.save_targs: self.targets = detuplify(to_concat(self.targets, dim=self.concat_dim))
    126         if self.with_loss: self.losses = to_concat(self.losses)

/opt/conda/lib/python3.7/site-packages/fastai/torch_core.py in to_concat(xs, dim)
    271     try:    return retain_type(torch.cat(xs, dim=dim), xs[0])
    272     except: return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
--> 273             for i in range_of(o_)) for o_ in xs], L())
    274
    275 # Cell

/opt/conda/lib/python3.7/site-packages/fastai/torch_core.py in <listcomp>(.0)
    271     try:    return retain_type(torch.cat(xs, dim=dim), xs[0])
    272     except: return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
--> 273             for i in range_of(o_)) for o_ in xs], L())
    274
    275 # Cell

/opt/conda/lib/python3.7/site-packages/fastcore/basics.py in range_of(a, b, step)
    479     "All indices of collection `a`, if `a` is a collection, otherwise `range`"
    480     if is_coll(a): a = len(a)
--> 481     return list(range(a,b,step) if step is not None else range(a,b) if b is not None else range(a))
    482
    483 # Cell

TypeError: 'NoneType' object cannot be interpreted as an integer

If you are loading a model using load_learner, make sure to set the cpu flag to False (e.g. load_learner(model, cpu=False)). Otherwise get_preds will end up with tensors on both the CPU and the GPU and will throw the error "'NoneType' object cannot be interpreted as an integer".
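
For example, a minimal sketch of that fix (assuming the Learner was exported with learn.export() to a file named export.pkl; the file name is just illustrative):

from fastai.tabular.all import *

# Load the exported Learner onto the GPU, so the model weights and the
# test batches end up on the same device.
learn = load_learner(path/'export.pkl', cpu=False)

# Build the test DataLoader through the Learner so it reuses the
# training-time procs and the Learner's device.
dl = learn.dls.test_dl(test)
preds, _ = learn.get_preds(dl=dl)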