After running `fit_one_cycle`, I get the following error:
RuntimeError Traceback (most recent call last)
<ipython-input-21-be1ab4476b35> in <module>
----> 1 learn.fit_one_cycle(5, slice(lr))
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, wd, callbacks, **kwargs)
19 callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor,
20 pct_start=pct_start, **kwargs))
---> 21 learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
22
23 def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, **kwargs:Any):
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
164 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
165 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 166 callbacks=self.callbacks+callbacks)
167
168 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
92 except Exception as e:
93 exception = e
---> 94 raise e
95 finally: cb_handler.on_train_end(exception)
96
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
87 if hasattr(data,'valid_dl') and data.valid_dl is not None and data.valid_ds is not None:
88 val_loss = validate(model, data.valid_dl, loss_func=loss_func,
---> 89 cb_handler=cb_handler, pbar=pbar)
90 else: val_loss=None
91 if cb_handler.on_epoch_end(val_loss): break
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/basic_train.py in validate(model, dl, loss_func, cb_handler, pbar, average, n_batch)
52 if not is_listy(yb): yb = [yb]
53 nums.append(yb[0].shape[0])
---> 54 if cb_handler and cb_handler.on_batch_end(val_losses[-1]): break
55 if n_batch and (len(nums)>=n_batch): break
56 nums = np.array(nums, dtype=np.float32)
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/callback.py in on_batch_end(self, loss)
237 "Handle end of processing one batch with `loss`."
238 self.state_dict['last_loss'] = loss
--> 239 stop = np.any(self('batch_end', not self.state_dict['train']))
240 if self.state_dict['train']:
241 self.state_dict['iteration'] += 1
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
185 def __call__(self, cb_name, call_mets=True, **kwargs)->None:
186 "Call through to all of the `CallbakHandler` functions."
--> 187 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
188 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
189
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/callback.py in <listcomp>(.0)
185 def __call__(self, cb_name, call_mets=True, **kwargs)->None:
186 "Call through to all of the `CallbakHandler` functions."
--> 187 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
188 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
189
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/callback.py in on_batch_end(self, last_output, last_target, **kwargs)
272 if not is_listy(last_target): last_target=[last_target]
273 self.count += last_target[0].size(0)
--> 274 self.val += last_target[0].size(0) * self.func(last_output, *last_target).detach().cpu()
275
276 def on_epoch_end(self, **kwargs):
~/anaconda3/envs/nfastai/lib/python3.6/site-packages/fastai/metrics.py in accuracy_thresh(y_pred, y_true, thresh, sigmoid)
20 "Compute accuracy when `y_pred` and `y_true` are the same size."
21 if sigmoid: y_pred = y_pred.sigmoid()
---> 22 return ((y_pred>thresh)==y_true.byte()).float().mean()
23
24 def dice(input:FloatTensor, targs:LongTensor, iou:bool=False)->Rank0Tensor:
RuntimeError: The size of tensor a (418) must match the size of tensor b (8) at non-singleton dimension 1
Someone suggested the problem is in the `accuracy_thresh` metric: the error shows my model's predictions have size 418 along dimension 1 while the targets have size 8, so the element-wise comparison in `(y_pred>thresh)==y_true.byte()` fails. `accuracy_thresh` assumes `y_pred` and `y_true` are the same size (multi-label, one-hot targets), which apparently doesn't match my data. How should I change the metric (or my data/labels) to fix this?
Here is the output of my `show_install()`:
=== Software ===
python version : 3.6.7
fastai version : 1.0.36
torch version : 1.0.1
nvidia driver : 390.116
torch cuda ver : 9.0.176
torch cuda is : available
torch cudnn ver : 7301
torch cudnn is : enabled
=== Hardware ===
nvidia gpus : 2
torch available : 2
- gpu0 : 8119MB | GeForce GTX 1080
- gpu1 : 8119MB | GeForce GTX 1080
=== Environment ===
platform : Linux-4.15.0-48-generic-x86_64-with-debian-buster-sid
distro : #51-Ubuntu SMP Wed Apr 3 08:28:49 UTC 2019
conda env : Unknown
python : /home/jgmeyer2/anaconda3/envs/nfastai/bin/python
sys.path : /home/jgmeyer2/anaconda3/envs/nfastai/lib/python36.zip
/home/jgmeyer2/anaconda3/envs/nfastai/lib/python3.6
/home/jgmeyer2/anaconda3/envs/nfastai/lib/python3.6/lib-dynload
/home/jgmeyer2/.local/lib/python3.6/site-packages
/home/jgmeyer2/anaconda3/envs/nfastai/lib/python3.6/site-packages
/home/jgmeyer2/anaconda3/envs/nfastai/lib/python3.6/site-packages/IPython/extensions
/home/jgmeyer2/.ipython