I’m trying to run image regression and this persistent issue is showing up again in Colab. I thought it was tied to the warp
transform, but that does not seem to be the case: setting max_warp=0
worked (and still does) for the PETs notebook.
# Repro: image regression pipeline (ImageBlock -> PointBlock) that crashes in
# show_batch. NOTE(review): per the traceback, the failure happens inside
# AffineCoordTfm's point branch — torch.inverse() on the batch of affine
# matrices raises "singular U", i.e. at least one matrix in the batch is
# non-invertible; presumably a degenerate augmentation draw — confirm against
# the fastai2 version in use.
dblock = DataBlock(blocks=(ImageBlock, PointBlock),
                   get_items=get_image_files,
                   splitter=splitter,
                   get_y=get_ctr)
item_tfms = []
batch_tfms = [*aug_transforms(size=(120,160), max_warp=0), Normalize(*imagenet_stats)]
# Fix: item_tfms was defined but never used (the DataBlock call had already
# closed) — forward it to databunch alongside batch_tfms so the two transform
# lists are wired up symmetrically.
dbunch = dblock.databunch(path, path=path, bs=64, item_tfms=item_tfms, batch_tfms=batch_tfms)
dbunch.show_batch(max_n=9, figsize=(9,6))
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-22-70822119f58e> in <module>()
----> 1 dbunch.show_batch(max_n=9, figsize=(9,6))
14 frames
/usr/local/lib/python3.6/dist-packages/fastai2/data/core.py in show_batch(self, b, max_n, ctxs, show, **kwargs)
76
77 def show_batch(self, b=None, max_n=10, ctxs=None, show=True, **kwargs):
---> 78 if b is None: b = self.one_batch()
79 if not show: return self._pre_show_batch(b, max_n=max_n)
80 show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)
/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in one_batch(self)
120 def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
121 def one_batch(self):
--> 122 with self.fake_l.no_multiproc(): return first(self)
/usr/local/lib/python3.6/dist-packages/fastai2/core/utils.py in first(x)
148 def first(x):
149 "First element of `x`; i.e. a shortcut for `next(iter(x))`"
--> 150 return next(iter(x))
151
152 #Cell
/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in __iter__(self)
90 self.randomize()
91 self.before_iter()
---> 92 for b in _loaders[self.fake_l.num_workers==0](self.fake_l): yield self.after_batch(b)
93 self.after_iter()
94 if hasattr(self, 'it'): delattr(self, 'it')
/usr/local/lib/python3.6/dist-packages/fastai2/core/transform.py in __call__(self, o)
199 self.fs.append(t)
200
--> 201 def __call__(self, o): return compose_tfms(o, tfms=self.fs, split_idx=self.split_idx)
202 def __repr__(self): return f"Pipeline: {self.fs}"
203 def __getitem__(self,i): return self.fs[i]
/usr/local/lib/python3.6/dist-packages/fastai2/core/transform.py in compose_tfms(x, tfms, is_enc, reverse, **kwargs)
147 for f in tfms:
148 if not is_enc: f = f.decode
--> 149 x = f(x, **kwargs)
150 return x
151
/usr/local/lib/python3.6/dist-packages/fastai2/vision/augment.py in __call__(self, b, split_idx, **kwargs)
31 def __call__(self, b, split_idx=None, **kwargs):
32 self.before_call(b, split_idx=split_idx)
---> 33 return super().__call__(b, split_idx=split_idx, **kwargs) if self.do else b
34
35 #Cell
/usr/local/lib/python3.6/dist-packages/fastai2/core/transform.py in __call__(self, x, **kwargs)
85 @property
86 def use_as_item(self): return ifnone(self.as_item_force, self.as_item)
---> 87 def __call__(self, x, **kwargs): return self._call('encodes', x, **kwargs)
88 def decode (self, x, **kwargs): return self._call('decodes', x, **kwargs)
89 def setup(self, items=None): return self.setups(items)
/usr/local/lib/python3.6/dist-packages/fastai2/core/transform.py in _call(self, fn, x, split_idx, **kwargs)
94 f = getattr(self, fn)
95 if self.use_as_item or not is_listy(x): return self._do_call(f, x, **kwargs)
---> 96 res = tuple(self._do_call(f, x_, **kwargs) for x_ in x)
97 return retain_type(res, x)
98
/usr/local/lib/python3.6/dist-packages/fastai2/core/transform.py in <genexpr>(.0)
94 f = getattr(self, fn)
95 if self.use_as_item or not is_listy(x): return self._do_call(f, x, **kwargs)
---> 96 res = tuple(self._do_call(f, x_, **kwargs) for x_ in x)
97 return retain_type(res, x)
98
/usr/local/lib/python3.6/dist-packages/fastai2/core/transform.py in _do_call(self, f, x, **kwargs)
98
99 def _do_call(self, f, x, **kwargs):
--> 100 return x if f is None else retain_type(f(x, **kwargs), x, f.returns_none(x))
101
102 add_docs(Transform, decode="Delegate to `decodes` to undo transform", setup="Delegate to `setups` to set up transform")
/usr/local/lib/python3.6/dist-packages/fastai2/core/dispatch.py in __call__(self, *args, **kwargs)
96 if not f: return args[0]
97 if self.inst is not None: f = types.MethodType(f, self.inst)
---> 98 return f(*args, **kwargs)
99
100 def __get__(self, inst, owner):
/usr/local/lib/python3.6/dist-packages/fastai2/vision/augment.py in encodes(self, x)
315 def encodes(self, x:TensorImage): return self._encode(x, self.mode)
316 def encodes(self, x:TensorMask): return self._encode(x, self.mode_mask)
--> 317 def encodes(self, x:(TensorPoint, TensorBBox)): return self._encode(x, self.mode, reverse=True)
318
319 #Cell
/usr/local/lib/python3.6/dist-packages/fastai2/vision/augment.py in _encode(self, x, mode, reverse)
311 def _encode(self, x, mode, reverse=False):
312 coord_func = None if len(self.coord_fs)==0 or self.split_idx else partial(compose_tfms, tfms=self.coord_fs, reverse=reverse)
--> 313 return x.affine_coord(self.mat, coord_func, sz=self.size, mode=mode, pad_mode=self.pad_mode)
314
315 def encodes(self, x:TensorImage): return self._encode(x, self.mode)
/usr/local/lib/python3.6/dist-packages/fastai2/vision/augment.py in affine_coord(x, mat, coord_tfm, sz, mode, pad_mode)
260 if sz is None: sz = x._meta.get('sz', None)
261 if coord_tfm is not None: x = coord_tfm(x, invert=True)
--> 262 if mat is not None: x = (x - mat[:,:,2].unsqueeze(1)) @ torch.inverse(mat[:,:,:2].transpose(1,2))
263 return TensorPoint(x, sz=sz)
264
RuntimeError: inverse_cuda: For batch 0: U(17437184,17437184) is zero, singular U.
Thoughts?
@fanyi how did you go about doing that?
Only issue I have with that is these are the AffineCoordTfms:
[AffineCoordTfm: False (TensorBBox,object) -> encodes
(TensorPoint,object) -> encodes
(TensorImage,object) -> encodes
(TensorMask,object) -> encodes ]
Which I don’t want to exclude