GPU is recognized but not being used?

I’m trying to run

lr_find(learn, start_lr=1e-4, end_lr=10, num_it=100) #, start_lr=1e-2, end_lr=10, num_it=200
learn.recorder.plot()

It gives me an error that states it’s not finding my GPU. I tried the following from the docs page:

python -c 'import fastai.utils; fastai.utils.show_install(1)'
watch -n 1 nvidia-smi

The output shows my GPU. I’m not sure why I’m getting the below error. Anyone see anything I’m missing?

LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.

RuntimeError Traceback (most recent call last)
in
----> 1 lr_find(learn, start_lr=1e-4, end_lr=10, num_it=100) #, start_lr=1e-2, end_lr=10, num_it=200
2 learn.recorder.plot()

~/anaconda3/lib/python3.7/site-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
30 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
31 epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 32 learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
33
34 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,

~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
198 callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
199 if defaults.extra_callbacks is not None: callbacks += defaults.extra_callbacks
---> 200 fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
201
202 def create_opt(self, lr:Floats, wd:Floats=0.)->None:

~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
99 for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
100 xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 101 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
102 if cb_handler.on_batch_end(loss): break
103

~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
24 if not is_listy(xb): xb = [xb]
25 if not is_listy(yb): yb = [yb]
---> 26 out = model(*xb)
27 out = cb_handler.on_loss_begin(out)
28

~/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
---> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)

~/anaconda3/lib/python3.7/site-packages/fastai/tabular/models.py in forward(self, x_cat, x_cont)
32 def forward(self, x_cat:Tensor, x_cont:Tensor) -> Tensor:
33 if self.n_emb != 0:
---> 34 x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
35 x = torch.cat(x, 1)
36 x = self.emb_drop(x)

~/anaconda3/lib/python3.7/site-packages/fastai/tabular/models.py in (.0)
32 def forward(self, x_cat:Tensor, x_cont:Tensor) -> Tensor:
33 if self.n_emb != 0:
---> 34 x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
35 x = torch.cat(x, 1)
36 x = self.emb_drop(x)

~/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
---> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)

~/anaconda3/lib/python3.7/site-packages/torch/nn/modules/sparse.py in forward(self, input)
115 return F.embedding(
116 input, self.weight, self.padding_idx, self.max_norm,
---> 117 self.norm_type, self.scale_grad_by_freq, self.sparse)
118
119 def extra_repr(self):

~/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
1504 # remove once script supports set_grad_enabled
1505 no_grad_embedding_renorm(weight, input, max_norm, norm_type)
---> 1506 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
1507
1508

RuntimeError: _th_index_select not supported on CPUType for Half

Never mind. I reinstalled pytorch by itself and now I’m not getting the error message.

2 Likes