I want to apply what I learned in Lesson 3 to a different dataset with binary labels, but I am getting the error below and don't know why. Everything before this step works fine.
lr_find(learn)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-27-29bd39493606> in <module>
----> 1 lr_find(learn)
/opt/anaconda3/lib/python3.6/site-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, **kwargs)
28 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
29 a = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 30 learn.fit(a, start_lr, callbacks=[cb], **kwargs)
31
32 def to_fp16(learn:Learner, loss_scale:float=512., flat_master:bool=False)->Learner:
/opt/anaconda3/lib/python3.6/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
160 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
161 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 162 callbacks=self.callbacks+callbacks)
163
164 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
/opt/anaconda3/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
72 cb_handler = CallbackHandler(callbacks, metrics)
73 pbar = master_bar(range(epochs))
---> 74 cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics)
75
76 exception=False
/opt/anaconda3/lib/python3.6/site-packages/fastai/callback.py in on_train_begin(self, epochs, pbar, metrics)
192 self.state_dict['n_epochs'],self.state_dict['pbar'],self.state_dict['metrics'] = epochs,pbar,metrics
193 names = [(met.name if hasattr(met, 'name') else camel2snake(met.__class__.__name__)) for met in self.metrics]
--> 194 self('train_begin', metrics_names=names)
195
196 def on_epoch_begin(self)->None:
/opt/anaconda3/lib/python3.6/site-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
185 "Call through to all of the `CallbakHandler` functions."
186 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
--> 187 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
188
189 def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
/opt/anaconda3/lib/python3.6/site-packages/fastai/callback.py in <listcomp>(.0)
185 "Call through to all of the `CallbakHandler` functions."
186 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
--> 187 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
188
189 def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
/opt/anaconda3/lib/python3.6/site-packages/fastai/callbacks/fp16.py in on_train_begin(self, **kwargs)
69 self.learn.data.test_dl.add_tfm(to_half)
70 #Get a copy of the model params in FP32
---> 71 self.model_params, self.master_params = get_master(self.learn.layer_groups, self.flat_master)
72 #Changes the optimizer so that the optimization step is done in FP32.
73 opt = self.learn.opt
/opt/anaconda3/lib/python3.6/site-packages/fastai/callbacks/fp16.py in get_master(layer_groups, flat_master)
23 return model_params, master_params
24 else:
---> 25 master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
26 for mp in master_params:
27 for param in mp: param.requires_grad = True
/opt/anaconda3/lib/python3.6/site-packages/fastai/callbacks/fp16.py in <listcomp>(.0)
23 return model_params, master_params
24 else:
---> 25 master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
26 for mp in master_params:
27 for param in mp: param.requires_grad = True
/opt/anaconda3/lib/python3.6/site-packages/fastai/callbacks/fp16.py in <listcomp>(.0)
23 return model_params, master_params
24 else:
---> 25 master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
26 for mp in master_params:
27 for param in mp: param.requires_grad = True
RuntimeError: cuda runtime error (59) : device-side assert triggered at /opt/conda/conda-bld/pytorch-nightly_1540411964561/work/aten/src/THC/THCTensorCopy.cu:102
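
Since the trace goes through fastai's fp16 callback, this is the learner I created with to_fp16(). In case it helps, here is a minimal check I was planning to run to narrow things down. It is only a sketch based on the usual suspect for a device-side assert, i.e. target labels falling outside the range the loss expects; I am assuming learn.data.c reports the number of classes here, as in the lesson notebooks.

# Sanity check on the labels (my own sketch, not lesson code).
# A device-side assert is often raised by an earlier kernel, e.g. the loss
# being fed targets outside [0, n_classes - 1], so print both and compare.
n_classes = learn.data.c                    # number of classes the DataBunch reports
x, y = next(iter(learn.data.train_dl))      # grab one training batch
print('n_classes:', n_classes)
print('label range:', y.min().item(), 'to', y.max().item())  # expect within [0, n_classes - 1]

If that doesn't show anything, I'll try rerunning on the CPU (or restarting with CUDA_LAUNCH_BLOCKING=1) to get a clearer stack trace, since the CUDA error above hides where the assert actually fired.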