Fast progress — I have now upgraded to the newest 0.2.2, but I get the following error:
RuntimeError Traceback (most recent call last)
<ipython-input-14-495233eaf2b4> in <module>
----> 1 learn.fit_one_cycle(4)
e:\fastcore\fastcore\utils.py in _f(*args, **kwargs)
428 init_args.update(log)
429 setattr(inst, 'init_args', init_args)
--> 430 return inst if to_return else f(*args, **kwargs)
431 return _f
432
e:\fastai2\fastai2\callback\schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
111 scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
112 'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
--> 113 self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
114
115 # Cell
e:\fastcore\fastcore\utils.py in _f(*args, **kwargs)
428 init_args.update(log)
429 setattr(inst, 'init_args', init_args)
--> 430 return inst if to_return else f(*args, **kwargs)
431 return _f
432
e:\fastai2\fastai2\learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
198 try:
199 self.epoch=epoch; self('begin_epoch')
--> 200 self._do_epoch_train()
201 self._do_epoch_validate()
202 except CancelEpochException: self('after_cancel_epoch')
e:\fastai2\fastai2\learner.py in _do_epoch_train(self)
173 try:
174 self.dl = self.dls.train; self('begin_train')
--> 175 self.all_batches()
176 except CancelTrainException: self('after_cancel_train')
177 finally: self('after_train')
e:\fastai2\fastai2\learner.py in all_batches(self)
151 def all_batches(self):
152 self.n_iter = len(self.dl)
--> 153 for o in enumerate(self.dl): self.one_batch(*o)
154
155 def one_batch(self, i, b):
e:\fastai2\fastai2\data\load.py in __iter__(self)
96 self.randomize()
97 self.before_iter()
---> 98 for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
99 if self.device is not None: b = to_device(b, self.device)
100 yield self.after_batch(b)
E:\Anaconda3\envs\fastai2\lib\site-packages\torch\utils\data\dataloader.py in __init__(self, loader)
717 # before it starts, and __del__ tries to join but will get:
718 # AssertionError: can only join a started process.
--> 719 w.start()
720 self._index_queues.append(index_queue)
721 self._workers.append(w)
E:\Anaconda3\envs\fastai2\lib\multiprocessing\process.py in start(self)
110 'daemonic processes are not allowed to have children'
111 _cleanup()
--> 112 self._popen = self._Popen(self)
113 self._sentinel = self._popen.sentinel
114 # Avoid a refcycle if the target function holds an indirect
E:\Anaconda3\envs\fastai2\lib\multiprocessing\context.py in _Popen(process_obj)
221 @staticmethod
222 def _Popen(process_obj):
--> 223 return _default_context.get_context().Process._Popen(process_obj)
224
225 class DefaultContext(BaseContext):
E:\Anaconda3\envs\fastai2\lib\multiprocessing\context.py in _Popen(process_obj)
320 def _Popen(process_obj):
321 from .popen_spawn_win32 import Popen
--> 322 return Popen(process_obj)
323
324 class SpawnContext(BaseContext):
E:\Anaconda3\envs\fastai2\lib\multiprocessing\popen_spawn_win32.py in __init__(self, process_obj)
87 try:
88 reduction.dump(prep_data, to_child)
---> 89 reduction.dump(process_obj, to_child)
90 finally:
91 set_spawning_popen(None)
E:\Anaconda3\envs\fastai2\lib\multiprocessing\reduction.py in dump(obj, file, protocol)
58 def dump(obj, file, protocol=None):
59 '''Replacement for pickle.dump() using ForkingPickler.'''
---> 60 ForkingPickler(file, protocol).dump(obj)
61
62 #
E:\Anaconda3\envs\fastai2\lib\site-packages\torch\multiprocessing\reductions.py in reduce_tensor(tensor)
240 ref_counter_offset,
241 event_handle,
--> 242 event_sync_required) = storage._share_cuda_()
243 tensor_offset = tensor.storage_offset()
244 shared_cache[handle] = StorageWeakRef(storage)
RuntimeError: cuda runtime error (801) : operation not supported at C:\w\1\s\tmp_conda_3.7_100118\conda\conda-bld\pytorch_1579082551706\work\torch/csrc/generic/StorageSharing.cpp:245
Thank you for your attention.