The Learner was working with that DataLoader.
learn.lr_find()
It got up to 10% and then threw a new error related to the DataLoader. It looks like it is trying to collate the images into a batch. However, I don’t want it to do that — the collation is handled in the first layer of the model.
This is the error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-28-d81c6bd29d71> in <module>
----> 1 learn.lr_find()
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/callback/schedule.py in lr_find(self, start_lr, end_lr, num_it, stop_div, show_plot, suggestions)
226 n_epoch = num_it//len(self.dls.train) + 1
227 cb=LRFinder(start_lr=start_lr, end_lr=end_lr, num_it=num_it, stop_div=stop_div)
--> 228 with self.no_logging(): self.fit(n_epoch, cbs=cb)
229 if show_plot: self.recorder.plot_lr_find()
230 if suggestions:
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastcore/utils.py in _f(*args, **kwargs)
428 init_args.update(log)
429 setattr(inst, 'init_args', init_args)
--> 430 return inst if to_return else f(*args, **kwargs)
431 return _f
432
~/Documents/seg/seg/models/archs/mask_rcnn.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
114 try:
115 self.epoch=epoch; self('begin_epoch')
--> 116 self._do_epoch_train()
117 self._do_epoch_validate()
118 except CancelEpochException: self('after_cancel_epoch')
~/Documents/seg/seg/models/archs/mask_rcnn.py in _do_epoch_train(self)
89 try:
90 self.dl = self.dls.train; self('begin_train')
---> 91 self.all_batches()
92 except CancelTrainException: self('after_cancel_train')
93 finally: self('after_train')
~/Documents/seg/seg/models/archs/mask_rcnn.py in all_batches(self)
60 def all_batches(self):
61 self.n_iter = len(self.dl)
---> 62 for o in enumerate(self.dl): self.one_batch(*o)
63
64 def one_batch(self, i, b):
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py in __iter__(self)
96 self.randomize()
97 self.before_iter()
---> 98 for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
99 if self.device is not None: b = to_device(b, self.device)
100 yield self.after_batch(b)
~/anaconda3/envs/seg/lib/python3.7/site-packages/torch/utils/data/dataloader.py in __next__(self)
343
344 def __next__(self):
--> 345 data = self._next_data()
346 self._num_yielded += 1
347 if self._dataset_kind == _DatasetKind.Iterable and \
~/anaconda3/envs/seg/lib/python3.7/site-packages/torch/utils/data/dataloader.py in _next_data(self)
951 if len(self._task_info[self._rcvd_idx]) == 2:
952 data = self._task_info.pop(self._rcvd_idx)[1]
--> 953 return self._process_data(data)
954
955 assert not self._shutdown and self._tasks_outstanding > 0
~/anaconda3/envs/seg/lib/python3.7/site-packages/torch/utils/data/dataloader.py in _process_data(self, data)
994 self._try_put_index()
995 if isinstance(data, ExceptionWrapper):
--> 996 data.reraise()
997 return data
998
~/anaconda3/envs/seg/lib/python3.7/site-packages/torch/_utils.py in reraise(self)
393 # (https://bugs.python.org/issue2651), so we work around it.
394 msg = KeyErrorMessage(msg)
--> 395 raise self.exc_type(msg)
RuntimeError: Caught RuntimeError in DataLoader worker process 10.
Original Traceback (most recent call last):
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/torch/utils/data/_utils/worker.py", line 178, in _worker_loop
data = fetcher.fetch(index)
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 34, in fetch
data = next(self.dataset_iter)
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py", line 107, in create_batches
yield from map(self.do_batch, self.chunkify(res))
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py", line 128, in do_batch
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py", line 127, in create_batch
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py", line 46, in fa_collate
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py", line 46, in <listcomp>
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py", line 45, in fa_collate
return (default_collate(t) if isinstance(b, _collate_types)
File "/home/david/anaconda3/envs/seg/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 55, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [3, 966, 1296] at entry 0 and [3, 1004, 1002] at entry 1