RuntimeError: Could not infer dtype of NoneType

I’m hitting the error RuntimeError: Could not infer dtype of NoneType while training a text classifier with fastai, as follows:

learn.freeze_to(-2)
learn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
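
For context, the learner was built along the standard ULMFiT steps, roughly like this (a minimal sketch of the setup; the paths, file names and hyperparameters below are placeholders, not my exact code):

from fastai.text import *  # fastai v1

path = Path('data')
# 'texts.csv' and 'ft_enc' are illustrative names, not my real files
data_clas = TextClasDataBunch.from_csv(path, 'texts.csv', bs=32)
learn = text_classifier_learner(data_clas, drop_mult=0.5)
learn.load_encoder('ft_enc')
# ...followed by the freeze_to / fit_one_cycle calls above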

The error seems to originate inside the fastai library, in fastai.torch_core.tensor. The funny thing is that two lines above the failing return we can read this comment:

# XXX: Pytorch bug in dataloader using num_workers>0; TODO: create repro and report

Am I hitting this bug? If so, what could be a workaround? One thing that confuses me is that, judging from the trace, the DataLoader is taking the num_workers == 0 same-process branch, so maybe this isn't that bug at all.
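
Since the failing line is tensor([s[1] for s in samples]), i.e. it builds a tensor from the batch labels, my next step is to scan the datasets for None labels. Here is the check I intend to run (a rough sketch; the train_ds/valid_ds attribute names and the (text, label) indexing are my assumptions about the fastai v1 API):

# Look for samples whose label is None, since a None label is exactly what
# makes torch.tensor fail in pad_collate. Assumes ds[i] yields a
# (text, label) pair, matching what the DataLoader hands to collate_fn.
for name, ds in [('train', learn.data.train_ds), ('valid', learn.data.valid_ds)]:
    missing = [i for i in range(len(ds)) if ds[i][1] is None]
    if missing:
        print(f'{name}: {len(missing)} samples with label None, first at index {missing[0]}')
    else:
        print(f'{name}: no None labels found')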

Here is the full stack trace:

 0.00% [0/1 00:00<00:00]
epoch   train_loss   valid_loss   accuracy
 Interrupted
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-19-7115bd4f1526> in <module>()
      1 learn.freeze_to(-2)
----> 2 learn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))

/usr/local/lib/python3.6/dist-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, wd, callbacks, **kwargs)
     18     callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor,
     19                                         pct_start=pct_start, **kwargs))
---> 20     learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
     21 
     22 def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, **kwargs:Any):

/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
    160         callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
    161         fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 162             callbacks=self.callbacks+callbacks)
    163 
    164     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
     92     except Exception as e:
     93         exception = e
---> 94         raise e
     95     finally: cb_handler.on_train_end(exception)
     96 

/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
     87             if hasattr(data,'valid_dl') and data.valid_dl is not None:
     88                 val_loss = validate(model, data.valid_dl, loss_func=loss_func,
---> 89                                        cb_handler=cb_handler, pbar=pbar)
     90             else: val_loss=None
     91             if cb_handler.on_epoch_end(val_loss): break

/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in validate(model, dl, loss_func, cb_handler, pbar, average, n_batch)
     47     with torch.no_grad():
     48         val_losses,nums = [],[]
---> 49         for xb,yb in progress_bar(dl, parent=pbar, leave=(pbar is not None)):
     50             if cb_handler: xb, yb = cb_handler.on_batch_begin(xb, yb, train=False)
     51             val_losses.append(loss_batch(model, xb, yb, loss_func, cb_handler=cb_handler))

/usr/local/lib/python3.6/dist-packages/fastprogress/fastprogress.py in __iter__(self)
     63         self.update(0)
     64         try:
---> 65             for i,o in enumerate(self._gen):
     66                 yield o
     67                 if self.auto_update: self.update(i+1)

/usr/local/lib/python3.6/dist-packages/fastai/basic_data.py in __iter__(self)
     45     def __iter__(self):
     46         "Process and returns items from `DataLoader`."
---> 47         for b in self.dl:
     48             y = b[1][0] if is_listy(b[1]) else b[1]
     49             if not self.skip_size1 or y.size(0) != 1: yield self.proc_batch(b)

/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
    613         if self.num_workers == 0:  # same-process loading
    614             indices = next(self.sample_iter)  # may raise StopIteration
--> 615             batch = self.collate_fn([self.dataset[i] for i in indices])
    616             if self.pin_memory:
    617                 batch = pin_memory_batch(batch)

/usr/local/lib/python3.6/dist-packages/fastai/text/data.py in pad_collate(samples, pad_idx, pad_first)
     90         if pad_first: res[-len(s[0]):,i] = LongTensor(s[0])
     91         else:         res[:len(s[0]):,i] = LongTensor(s[0])
---> 92     return res, tensor([s[1] for s in samples])
     93 
     94 def _get_processor(tokenizer:Tokenizer=None, vocab:Vocab=None, chunksize:int=10000, max_vocab:int=60000,

/usr/local/lib/python3.6/dist-packages/fastai/torch_core.py in tensor(x, *rest)
     68     # XXX: Pytorch bug in dataloader using num_workers>0; TODO: create repro and report
     69     if is_listy(x) and len(x)==0: return tensor(0)
---> 70     return torch.tensor(x) if is_listy(x) else as_tensor(x)
     71 
     72 def np_address(x:np.ndarray)->int:

RuntimeError: Could not infer dtype of NoneType
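
For what it's worth, the failing call is easy to reproduce in isolation; any None among the labels makes torch.tensor unable to infer a dtype:

import torch

# Same failure mode as pad_collate's tensor([s[1] for s in samples])
# when one of the samples carries a None label
torch.tensor([0, 1, None])  # RuntimeError: Could not infer dtype of NoneType

So it looks like at least one of my samples is reaching pad_collate with a None label.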