Lesson 2 segmentation classes

codes = array(['Sky', 'Building', 'Pole', 'Road', 'Sidewalk', 'Tree',
               'Sign', 'Fence', 'Car', 'Pedestrian', 'Cyclist', 'Void'])

Is 'Void' one of the classes?

How many classes are there in total in this example?
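For what it's worth, both questions can be checked straight from the codes array itself. A minimal sketch, assuming the CamVid label list from the lesson notebook:

```python
import numpy as np

# Class list from the lesson notebook -- 'Void' is itself one of the labels
codes = np.array(['Sky', 'Building', 'Pole', 'Road', 'Sidewalk', 'Tree',
                  'Sign', 'Fence', 'Car', 'Pedestrian', 'Cyclist', 'Void'])

print(len(codes))          # 12 -> twelve classes in total
print('Void' in codes)     # True -> 'Void' counts as a class of its own
```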


RuntimeError Traceback (most recent call last)
~\.conda\envs\fastai\lib\site-packages\fastai\basic_train.py in fit(epochs, learn, callbacks, metrics)
100 xb, yb = cb_handler.on_batch_begin(xb, yb)
--> 101 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
102 if cb_handler.on_batch_end(loss): break

~\.conda\envs\fastai\lib\site-packages\fastai\basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
32 if opt is not None:
---> 33 loss,skip_bwd = cb_handler.on_backward_begin(loss)
34 if not skip_bwd: loss.backward()

~\.conda\envs\fastai\lib\site-packages\fastai\callback.py in on_backward_begin(self, loss)
289 "Handle gradient calculation on loss."
--> 290 self.smoothener.add_value(loss.float().detach().cpu())
291 self.state_dict['last_loss'], self.state_dict['smooth_loss'] = loss, self.smoothener.smooth

RuntimeError: CUDA error: device-side assert triggered

During handling of the above exception, another exception occurred:

RuntimeError Traceback (most recent call last)
in
50
51 learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd, bottle=True).to_fp16()
---> 52 lr_find(learn)
53 learn.recorder.plot()

~\.conda\envs\fastai\lib\site-packages\fastai\train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
39 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
40 epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 41 learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
42
43 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,

~\.conda\envs\fastai\lib\site-packages\fastai\basic_train.py in fit(self, epochs, lr, wd, callbacks)
198 else: self.opt.lr,self.opt.wd = lr,wd
199 callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
--> 200 fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
201
202 def create_opt(self, lr:Floats, wd:Floats=0.)->None:

~\.conda\envs\fastai\lib\site-packages\fastai\basic_train.py in fit(epochs, learn, callbacks, metrics)
110 exception = e
111 raise
--> 112 finally: cb_handler.on_train_end(exception)
113
114 loss_func_name2activ = {'cross_entropy_loss': F.softmax, 'nll_loss': torch.exp, 'poisson_nll_loss': torch.exp,

~\.conda\envs\fastai\lib\site-packages\fastai\callback.py in on_train_end(self, exception)
321 def on_train_end(self, exception:Union[bool,Exception])->None:
322 "Handle end of training, exception is an Exception or False if no exceptions during training."
--> 323 self('train_end', exception=exception)
324
325 @property

~\.conda\envs\fastai\lib\site-packages\fastai\callback.py in __call__(self, cb_name, call_mets, **kwargs)
249 if call_mets:
250 for met in self.metrics: self._call_and_update(met, cb_name, **kwargs)
--> 251 for cb in self.callbacks: self._call_and_update(cb, cb_name, **kwargs)
252
253 def set_dl(self, dl:DataLoader):

~\.conda\envs\fastai\lib\site-packages\fastai\callback.py in _call_and_update(self, cb, cb_name, **kwargs)
239 def _call_and_update(self, cb, cb_name, **kwargs)->None:
240 "Call cb_name on cb and update the inner state."
--> 241 new = ifnone(getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs), dict())
242 for k,v in new.items():
243 if k not in self.state_dict:

~\.conda\envs\fastai\lib\site-packages\fastai\callbacks\lr_finder.py in on_train_end(self, **kwargs)
33 def on_train_end(self, **kwargs:Any)->None:
34 "Cleanup learn model weights disturbed during LRFinder exploration."
---> 35 self.learn.load('tmp', purge=False)
36 if hasattr(self.learn.model, 'reset'): self.learn.model.reset()
37 for cb in self.callbacks:

~\.conda\envs\fastai\lib\site-packages\fastai\basic_train.py in load(self, file, device, strict, with_opt, purge, remove_module)
265 elif isinstance(device, int): device = torch.device('cuda', device)
266 source = self.path/self.model_dir/f'{file}.pth' if is_pathlike(file) else file
--> 267 state = torch.load(source, map_location=device)
268 if set(state.keys()) == {'model', 'opt'}:
269 model_state = state['model']

~\.conda\envs\fastai\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
384 f = f.open('rb')
385 try:
--> 386 return _load(f, map_location, pickle_module, **pickle_load_args)
387 finally:
388 if new_fd:

~\.conda\envs\fastai\lib\site-packages\torch\serialization.py in _load(f, map_location, pickle_module, **pickle_load_args)
571 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
572 unpickler.persistent_load = persistent_load
--> 573 result = unpickler.load()
574
575 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)

~\.conda\envs\fastai\lib\site-packages\torch\serialization.py in persistent_load(saved_id)
534 obj = data_type(size)
535 obj._torch_load_uninitialized = True
--> 536 deserialized_objects[root_key] = restore_location(obj, location)
537 storage = deserialized_objects[root_key]
538 if view_metadata is not None:

~\.conda\envs\fastai\lib\site-packages\torch\serialization.py in restore_location(storage, location)
404 elif isinstance(map_location, torch.device):
405 def restore_location(storage, location):
--> 406 return default_restore_location(storage, str(map_location))
407 else:
408 def restore_location(storage, location):

~\.conda\envs\fastai\lib\site-packages\torch\serialization.py in default_restore_location(storage, location)
117 def default_restore_location(storage, location):
118 for _, _, fn in _package_registry:
--> 119 result = fn(storage, location)
120 if result is not None:
121 return result

~\.conda\envs\fastai\lib\site-packages\torch\serialization.py in _cuda_deserialize(obj, location)
97 storage_type = getattr(torch.cuda, type(obj).__name__)
98 with torch.cuda.device(device):
---> 99 return storage_type(obj.size())
100 else:
101 return obj.cuda(device)

RuntimeError: CUDA error: device-side assert triggered

I am getting this error when running lr_find. Is it caused by a mismatch in the classes?
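A class mismatch is a plausible cause: with a segmentation loss, a device-side assert is commonly raised when some mask pixels hold values outside the range 0..len(codes)-1. Below is a minimal sketch of how one might check the label files for that; the path and file pattern are only assumptions, so adjust them to your dataset layout.

```python
from pathlib import Path
import numpy as np
from PIL import Image

path_lbl = Path('camvid/labels')   # hypothetical location of the mask PNGs
n_classes = 12                     # len(codes) for the list above

# Every mask pixel must be a class index in [0, n_classes - 1];
# out-of-range values make the cross-entropy loss assert on the GPU.
bad = []
for f in sorted(path_lbl.glob('*.png')):
    m = np.array(Image.open(f))
    if m.max() >= n_classes:
        bad.append((f.name, int(m.max())))

print('files with out-of-range labels:', bad[:5])
```

Re-running once with the environment variable CUDA_LAUNCH_BLOCKING=1, or briefly on the CPU, usually surfaces the actual assertion message instead of the later failure inside learn.load.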