I have tried this also:
class FastAIElasticTransform(ItemTransform):
def __init__(self,probability):
self.probability=probability
self.transform=ElasticTransform(p=self.probability)
def encodes(self,x):
np_image = x[0]
np_mask = x[1]
# apply transform
transformed = self.transform(image=np_image,mask=np_mask)
tensor_image = transformed["image"]
tensor_mask = transformed["mask"]
return (tensor_image,tensor_mask)
class FastAIGridDistortion(ItemTransform):
def __init__(self,probability):
self.probability=probability
self.transform=GridDistortion(p=self.probability)
def encodes(self,x):
np_image = x[0]
np_mask = x[1]
# apply transform
transformed = self.transform(image=np_image,mask=np_mask)
tensor_image = transformed["image"]
tensor_mask = transformed["mask"]
return (tensor_image,tensor_mask)
class FastAIOpticalDistortion(ItemTransform):
def __init__(self,probability):
self.probability=probability
self.transform=OpticalDistortion(p=self.probability)
def encodes(self,x):
np_image = x[0]
np_mask = x[1]
# apply transform
transformed = self.transform(image=np_image,mask=np_mask)
tensor_image = transformed["image"]
tensor_mask = transformed["mask"]
return (tensor_image,tensor_mask)
manual = DataBlock(blocks=(ImageBlock, MaskBlock(codes)),
get_items=partial(get_image_files,folders=[manual_name]),
get_y=get_y_fn,
splitter=RandomSplitter(valid_pct=0.1,seed=2020),
item_tfms=[Resize((size,size)),FastAIElasticTransform(0.25),
FastAIGridDistortion(0.25),
FastAIOpticalDistortion(0.25)],
batch_tfms=[*aug_transforms(mult=1.0, do_flip=True,
flip_vert=True, max_rotate=10.,
max_zoom=1.1,max_warp=0.2,
p_affine=0.75, p_lighting=0)
,Normalize.from_stats(*imagenet_stats)]
)
manual.summary(path_images)
dls = manual.dataloaders(path_images,bs=bs)
dls.show_batch(vmin=0,vmax=1,figsize=(12, 9))
With this error
Setting-up type transforms pipelines
Collecting items from ../datasets/Images
Found 621 items
2 datasets of sizes 559,62
Setting up Pipeline: PILBase.create
Setting up Pipeline: <lambda> -> PILBase.create
Building one sample
Pipeline: PILBase.create
starting from
../datasets/Images/manual/165.png
applying PILBase.create gives
PILImage mode=RGB size=1002x1004
Pipeline: <lambda> -> PILBase.create
starting from
../datasets/Images/manual/165.png
applying <lambda> gives
../datasets/Labels/manual/165.png
applying PILBase.create gives
PILMask mode=L size=1002x1004
Final sample: (PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
Setting up after_item: Pipeline: AddMaskCodes -> FastAIElasticTransform -> FastAIGridDistortion -> FastAIOpticalDistortion -> Resize -> ToTensor
Setting up before_batch: Pipeline:
Setting up after_batch: Pipeline: IntToFloatTensor -> AffineCoordTfm -> LightingTfm -> Normalize
Building one batch
Applying item_tfms to the first sample:
Pipeline: AddMaskCodes -> FastAIElasticTransform -> FastAIGridDistortion -> FastAIOpticalDistortion -> Resize -> ToTensor
starting from
(PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
applying AddMaskCodes gives
(PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
applying FastAIElasticTransform gives
(PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
applying FastAIGridDistortion gives
(PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
applying FastAIOpticalDistortion gives
(PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
applying Resize gives
(PILImage mode=RGB size=1002x1002, PILMask mode=L size=1002x1002)
applying ToTensor gives
(TensorImage of size 3x1002x1002, TensorMask of size 1002x1002)
Adding the next 3 samples
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-150-4a5443614798> in <module>
12 ,Normalize.from_stats(*imagenet_stats)]
13 )
---> 14 manual.summary(path_images)
15 dls = manual.dataloaders(path_images,bs=bs)
16 dls.show_batch(vmin=0,vmax=1,figsize=(12, 9))
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastai2/data/block.py in summary(self, source, bs, **kwargs)
163 s = [_apply_pipeline(dls.train.after_item, dsets.train[0])]
164 print(f"\nAdding the next {bs-1} samples")
--> 165 s += [dls.train.after_item(dsets.train[i]) for i in range(1, bs)]
166 else:
167 print("No item_tfms to apply")
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastai2/data/block.py in <listcomp>(.0)
163 s = [_apply_pipeline(dls.train.after_item, dsets.train[0])]
164 print(f"\nAdding the next {bs-1} samples")
--> 165 s += [dls.train.after_item(dsets.train[i]) for i in range(1, bs)]
166 else:
167 print("No item_tfms to apply")
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastcore/transform.py in __call__(self, o)
183 self.fs.append(t)
184
--> 185 def __call__(self, o): return compose_tfms(o, tfms=self.fs, split_idx=self.split_idx)
186 def __repr__(self): return f"Pipeline: {' -> '.join([f.name for f in self.fs if f.name != 'noop'])}"
187 def __getitem__(self,i): return self.fs[i]
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastcore/transform.py in compose_tfms(x, tfms, is_enc, reverse, **kwargs)
136 for f in tfms:
137 if not is_enc: f = f.decode
--> 138 x = f(x, **kwargs)
139 return x
140
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastcore/transform.py in __call__(self, x, **kwargs)
102 def __call__(self, x, **kwargs):
103 if not _is_tuple(x): return super().__call__(x, **kwargs)
--> 104 return retain_type(super().__call__(list(x), **kwargs), x)
105
106 def decode(self, x, **kwargs):
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastcore/transform.py in __call__(self, x, **kwargs)
70 @property
71 def name(self): return getattr(self, '_name', _get_name(self))
---> 72 def __call__(self, x, **kwargs): return self._call('encodes', x, **kwargs)
73 def decode (self, x, **kwargs): return self._call('decodes', x, **kwargs)
74 def __repr__(self): return f'{self.name}: {self.encodes} {self.decodes}'
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastcore/transform.py in _call(self, fn, x, split_idx, **kwargs)
80 def _call(self, fn, x, split_idx=None, **kwargs):
81 if split_idx!=self.split_idx and self.split_idx is not None: return x
---> 82 return self._do_call(getattr(self, fn), x, **kwargs)
83
84 def _do_call(self, f, x, **kwargs):
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastcore/transform.py in _do_call(self, f, x, **kwargs)
84 def _do_call(self, f, x, **kwargs):
85 if not _is_tuple(x):
---> 86 return x if f is None else retain_type(f(x, **kwargs), x, f.returns_none(x))
87 res = tuple(self._do_call(f, x_, **kwargs) for x_ in x)
88 return retain_type(res, x)
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/fastcore/dispatch.py in __call__(self, *args, **kwargs)
96 if not f: return args[0]
97 if self.inst is not None: f = MethodType(f, self.inst)
---> 98 return f(*args, **kwargs)
99
100 def __get__(self, inst, owner):
<ipython-input-149-dc21e5018226> in encodes(self, x)
10
11 # apply transform
---> 12 transformed = self.transform(image=np_image,mask=np_mask)
13 tensor_image = transformed["image"]
14
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/albumentations/core/transforms_interface.py in __call__(self, force_apply, **kwargs)
85 )
86 kwargs[self.save_key][id(self)] = deepcopy(params)
---> 87 return self.apply_with_params(params, **kwargs)
88
89 return kwargs
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/albumentations/core/transforms_interface.py in apply_with_params(self, params, force_apply, **kwargs)
98 target_function = self._get_target_function(key)
99 target_dependencies = {k: kwargs[k] for k in self.target_dependence.get(key, [])}
--> 100 res[key] = target_function(arg, **dict(params, **target_dependencies))
101 else:
102 res[key] = None
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/albumentations/augmentations/transforms.py in apply(self, img, random_state, interpolation, **params)
1301 self.value,
1302 np.random.RandomState(random_state),
-> 1303 self.approximate,
1304 )
1305
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/albumentations/augmentations/functional.py in wrapped_function(img, *args, **kwargs)
52 def wrapped_function(img, *args, **kwargs):
53 shape = img.shape
---> 54 result = func(img, *args, **kwargs)
55 result = result.reshape(shape)
56 return result
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/albumentations/augmentations/functional.py in elastic_transform(img, alpha, sigma, alpha_affine, interpolation, border_mode, value, random_state, approximate)
1140 cv2.warpAffine, M=matrix, dsize=(width, height), flags=interpolation, borderMode=border_mode, borderValue=value
1141 )
-> 1142 img = warp_fn(img)
1143
1144 if approximate:
~/anaconda3/envs/segmentation/lib/python3.7/site-packages/albumentations/augmentations/functional.py in __process_fn(img)
178 img = np.dstack(chunks)
179 else:
--> 180 img = process_fn(img, **kwargs)
181 return img
182
TypeError: Expected Ptr<cv::UMat> for argument 'src'