Windows 10, PyTorch 0.4.1

When I'm testing /examples/cifar.ipynb, right where you launch the training, I'm getting this error:

PicklingError                             Traceback (most recent call last)
in <module>()
      1 learn = Learner(data, wrn_22(), metrics=accuracy).to_fp16()
----> 2 learn.fit_one_cycle(30, 3e-3, wd=0.4, div_factor=10, pct_start=0.5)

c:\users\gerar\fastai\fastai\train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, wd, **kwargs)
     16     cbs = [OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor,
     17                              pct_start=pct_start, **kwargs)]
---> 18     learn.fit(cyc_len, max_lr, wd=wd, callbacks=cbs)
     19
     20 def lr_find(learn:Learner, start_lr:float=1e-5, end_lr:float=10, num_it:int=100, **kwargs:Any):

c:\users\gerar\fastai\fastai\basic_train.py in fit(self, epochs, lr, wd, callbacks)
    127         callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
    128         fit(epochs, self.model, self.loss_fn, opt=self.opt, data=self.data, metrics=self.metrics,
--> 129             callbacks=self.callbacks+callbacks)
    130
    131     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

c:\users\gerar\fastai\fastai\basic_train.py in fit(epochs, model, loss_fn, opt, data, callbacks, metrics)
     80     except Exception as e:
     81         exception = e
---> 82         raise e
     83     finally: cb_handler.on_train_end(exception)
     84

c:\users\gerar\fastai\fastai\basic_train.py in fit(epochs, model, loss_fn, opt, data, callbacks, metrics)
     64         cb_handler.on_epoch_begin()
     65
---> 66         for xb,yb in progress_bar(data.train_dl, parent=pbar):
     67             xb, yb = cb_handler.on_batch_begin(xb, yb)
     68             loss,_ = loss_batch(model, xb, yb, loss_fn, opt, cb_handler)

~\Anaconda3\lib\site-packages\fastprogress\fastprogress.py in __iter__(self)
     59         self.update(0)
     60         try:
---> 61             for i,o in enumerate(self._gen):
     62                 yield o
     63                 if self.auto_update: self.update(i+1)

c:\users\gerar\fastai\fastai\data.py in __iter__(self)
     45     def __iter__(self):
     46         "Process and returns items from DataLoader."
---> 47         self.gen = map(self.proc_batch, self.dl)
     48         return iter(self.gen)
     49

~\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in __iter__(self)
    499
    500     def __iter__(self):
--> 501         return _DataLoaderIter(self)
    502
    503     def __len__(self):

~\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in __init__(self, loader)
    287             for w in self.workers:
    288                 w.daemon = True  # ensure that the worker exits on process exit
--> 289                 w.start()
    290
    291             _update_worker_pids(id(self), tuple(w.pid for w in self.workers))

~\Anaconda3\lib\multiprocessing\process.py in start(self)
    110                'daemonic processes are not allowed to have children'
    111         _cleanup()
--> 112         self._popen = self._Popen(self)
    113         self._sentinel = self._popen.sentinel
    114         # Avoid a refcycle if the target function holds an indirect

~\Anaconda3\lib\multiprocessing\context.py in _Popen(process_obj)
    221     @staticmethod
    222     def _Popen(process_obj):
--> 223         return _default_context.get_context().Process._Popen(process_obj)
    224
    225 class DefaultContext(BaseContext):

~\Anaconda3\lib\multiprocessing\context.py in _Popen(process_obj)
    320         def _Popen(process_obj):
    321             from .popen_spawn_win32 import Popen
--> 322             return Popen(process_obj)
    323
    324     class SpawnContext(BaseContext):

~\Anaconda3\lib\multiprocessing\popen_spawn_win32.py in __init__(self, process_obj)
     63         try:
     64             reduction.dump(prep_data, to_child)
---> 65             reduction.dump(process_obj, to_child)
     66         finally:
     67             set_spawning_popen(None)

~\Anaconda3\lib\multiprocessing\reduction.py in dump(obj, file, protocol)
     58 def dump(obj, file, protocol=None):
     59     '''Replacement for pickle.dump() using ForkingPickler.'''
---> 60     ForkingPickler(file, protocol).dump(obj)
     61
     62 #

PicklingError: Can't pickle <function pad at 0x000002076D5EE510>: it's not the same object as fastai.vision.transform.pad

fastai requires PyTorch 1.0, not any earlier version, so expect bugs if you're working with 0.4.1.
As for your error, it's due to PyTorch multiprocessing, which is still a bit buggy on Windows. Set num_workers to 0.
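
If you're curious why this only bites on Windows: there, multiprocessing uses the spawn start method, so each DataLoader worker is a fresh process and everything handed to it (the dataset, including its transforms) has to be pickled, which is exactly what fails in your traceback. A quick way to check the start method on your machine (plain Python, nothing fastai-specific):

import multiprocessing as mp

# 'spawn' on Windows: worker processes start fresh, so their arguments
# must be picklable. On Linux the default is 'fork', so nothing is pickled.
print(mp.get_start_method())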

Is there any source to install PyTorch 1.0 on Windows 10, or instructions to compile it from source?

I'm trying to compile it from source, but it keeps throwing errors.

I managed to compile PyTorch 1.0 on Windows 10, but I get exactly the same error from this notebook with fastai 1.0.6. Where do you set num_workers = 0?

Whenever you create a DataBunch, you can pass that parameter.
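
For example, in the cifar notebook you could create the DataBunch along these lines (the path, batch size and transforms are just what that example uses and the exact factory name may differ in very early 1.0.x releases; the only relevant part is the num_workers=0 keyword):

from fastai import *
from fastai.vision import *

path = untar_data(URLs.CIFAR)
ds_tfms = ([*rand_pad(4, 32), flip_lr(p=0.5)], [])
# num_workers=0 keeps all data loading in the main process,
# so nothing has to be pickled for worker subprocesses on Windows.
data = ImageDataBunch.from_folder(path, valid='test', ds_tfms=ds_tfms,
                                  bs=512, num_workers=0).normalize(cifar_stats)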

Thanks, working fine now