Could these errors be caused by running on a CPU rather than a GPU? I can't figure out why I am getting the errors below in lesson4-collab.ipynb. (The `RuntimeError: Dimension out of range (expected to be in range of [-1, 0], but got 1)` suggests the loss function is receiving a 1-D model output where it expects a 2-D batch of predictions.)
learn.lr_find()
learn.recorder.plot(skip_end=15)
> LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
> ---------------------------------------------------------------------------
> RuntimeError Traceback (most recent call last)
> <ipython-input-23-ebd3a191e924> in <module>()
> ----> 1 learn.lr_find()
> 2 learn.recorder.plot(skip_end=15)
>
> ~/anaconda3/lib/python3.7/site-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, **kwargs)
> 26 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
> 27 a = int(np.ceil(num_it/len(learn.data.train_dl)))
> ---> 28 learn.fit(a, start_lr, callbacks=[cb], **kwargs)
> 29
> 30 def to_fp16(learn:Learner, loss_scale:float=512., flat_master:bool=False)->Learner:
>
> ~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
> 160 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
> 161 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
> --> 162 callbacks=self.callbacks+callbacks)
> 163
> 164 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
>
> ~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
> 92 except Exception as e:
> 93 exception = e
> ---> 94 raise e
> 95 finally: cb_handler.on_train_end(exception)
> 96
>
> ~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
> 82 for xb,yb in progress_bar(data.train_dl, parent=pbar):
> 83 xb, yb = cb_handler.on_batch_begin(xb, yb)
> ---> 84 loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
> 85 if cb_handler.on_batch_end(loss): break
> 86
>
> ~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
> 20
> 21 if not loss_func: return to_detach(out), yb[0].detach()
> ---> 22 loss = loss_func(out, *yb)
> 23
> 24 if opt is not None:
>
> ~/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
> 1742 if size_average is not None or reduce is not None:
> 1743 reduction = _Reduction.legacy_get_string(size_average, reduce)
> -> 1744 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
> 1745
> 1746
>
> ~/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py in log_softmax(input, dim, _stacklevel, dtype)
> 1135 dim = torch.jit._unwrap_optional(dim)
> 1136 if dtype is None:
> -> 1137 ret = input.log_softmax(dim)
> 1138 else:
> 1139 _dtype = torch.jit._unwrap_optional(dtype)
>
> RuntimeError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
And here is a related error, raised from the same loss-function call path when fitting:
learn.fit_one_cycle(5, 5e-3)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-85-c1ed9d459fcc> in <module>()
----> 1 learn.fit_one_cycle(5, 5e-3)
~/anaconda3/lib/python3.7/site-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, wd, callbacks, **kwargs)
18 callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor,
19 pct_start=pct_start, **kwargs))
---> 20 learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
21
22 def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, **kwargs:Any):
~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
160 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
161 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 162 callbacks=self.callbacks+callbacks)
163
164 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
92 except Exception as e:
93 exception = e
---> 94 raise e
95 finally: cb_handler.on_train_end(exception)
96
~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
82 for xb,yb in progress_bar(data.train_dl, parent=pbar):
83 xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 84 loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
85 if cb_handler.on_batch_end(loss): break
86
~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
20
21 if not loss_func: return to_detach(out), yb[0].detach()
---> 22 loss = loss_func(out, *yb)
23
24 if opt is not None:
~/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
1742 if size_average is not None or reduce is not None:
1743 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 1744 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
1745
1746
~/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py in log_softmax(input, dim, _stacklevel, dtype)
1135 dim = torch.jit._unwrap_optional(dim)
1136 if dtype is None:
-> 1137 ret = input.log_softmax(dim)
1138 else:
1139 _dtype = torch.jit._unwrap_optional(dtype)
RuntimeError: Dimension out of range (expected to be in range of [-1, 0], but got 1)