Ohhh, I understand now!
Is it also necessary to add super().__init__(**kwargs)
to the AlbumentationWrapper?
I have tried that, and I get the following error:
class SegmentationAlbumentationsTransform(ItemTransform):
    """Apply an albumentations pipeline to an (image, mask) sample pair.

    `split_idx = 0` restricts the transform to the training split.
    Assumes each sample `x` unpacks into a PIL image and its segmentation
    mask; both are converted to arrays for albumentations and back to
    fastai `PILImage`/`PILMask` afterwards.
    """
    split_idx = 0

    def __init__(self, aug, **kwargs):
        # Forward any Transform-level kwargs to the fastai base class.
        super().__init__(**kwargs)
        self.aug = aug

    def encodes(self, x):
        image, mask = x
        augmented = self.aug(image=np.array(image), mask=np.array(mask))
        return PILImage.create(augmented["image"]), PILMask.create(augmented["mask"])
# One of three geometric distortions, chosen at random, applied to 20% of
# training samples (albumentations `OneOf` inside a `Compose`).
distortions = OneOf(
    [OpticalDistortion(p=0.3), GridDistortion(p=0.1), ElasticTransform(p=0.3)],
    p=0.2,
)
transformPipeline = SegmentationAlbumentationsTransform(Compose([distortions], p=1))

# fastai GPU batch augmentations: flips, small rotations/zoom/warp, no lighting.
batch_augs = aug_transforms(
    mult=1.0, do_flip=True, flip_vert=True, max_rotate=10.,
    max_zoom=1.1, max_warp=0.2, p_affine=0.75, p_lighting=0,
)

dataset1 = DataBlock(
    blocks=(ImageBlock, MaskBlock(codes)),
    get_items=partial(get_image_files, folders=[dataset1_name]),
    get_y=get_y_fn,
    splitter=RandomSplitter(valid_pct=0.1, seed=2020),
    item_tfms=[Resize((size, size)), transformPipeline],
    batch_tfms=[*batch_augs, Normalize.from_stats(*imagenet_stats)],
)
dataset1.summary(path_images)
dls = dataset1.dataloaders(path_images, bs=bs)
dls.show_batch(vmin=0, vmax=1, figsize=(12, 9))
Setting-up type transforms pipelines
Collecting items from ../datasets/Images
Found 621 items
2 datasets of sizes 559,62
Setting up Pipeline: PILBase.create
Setting up Pipeline: <lambda> -> PILBase.create
Building one sample
Pipeline: PILBase.create
starting from
../datasets/Images/manual/165.png
applying PILBase.create gives
PILImage mode=RGB size=1002x1004
Pipeline: <lambda> -> PILBase.create
starting from
../datasets/Images/manual/165.png
applying <lambda> gives
../datasets/Labels/manual/165.png
applying PILBase.create gives
PILMask mode=L size=1002x1004
Final sample: (PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
Setting up after_item: Pipeline: AddMaskCodes -> SegmentationAlbumentationsTransform -> Resize -> ToTensor
Setting up before_batch: Pipeline:
Setting up after_batch: Pipeline: IntToFloatTensor -> AffineCoordTfm -> LightingTfm -> Normalize
Could not do one pass in your dataloader, there is something wrong in it
Building one batch
Applying item_tfms to the first sample:
Pipeline: AddMaskCodes -> SegmentationAlbumentationsTransform -> Resize -> ToTensor
starting from
(PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
applying AddMaskCodes gives
(PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004)
applying SegmentationAlbumentationsTransform gives
[PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004]
applying Resize gives
[PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004]
applying ToTensor gives
[PILImage mode=RGB size=1002x1004, PILMask mode=L size=1002x1004]
Adding the next 3 samples
No before_batch transform to apply
Collating items in a batch
Error! It's not possible to collate your items in a batch
PILImage is not collatable
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-21-3028df996ec0> in <module>
9 p_affine=0.75, p_lighting=0),Normalize.from_stats(*imagenet_stats)]
10 )
---> 11 manual.summary(path_images)
12 dls = manual.dataloaders(path_images,bs=bs)
13 dls.show_batch(vmin=0,vmax=1,figsize=(12, 9))
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/block.py in summary(self, source, bs, **kwargs)
181 why = _find_fail_collate(s)
182 print("Make sure all parts of your samples are tensors of the same size" if why is None else why)
--> 183 raise e
184
185 if len([f for f in dls.train.after_batch.fs if f.name != 'noop'])!=0:
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/block.py in summary(self, source, bs, **kwargs)
175 print("\nCollating items in a batch")
176 try:
--> 177 b = dls.train.create_batch(s)
178 b = retain_types(b, s[0] if is_listy(s) else s)
179 except Exception as e:
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py in create_batch(self, b)
124 def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
125 def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
--> 126 def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
127 def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
128 def to(self, device): self.device = device
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py in fa_collate(t)
44 b = t[0]
45 return (default_collate(t) if isinstance(b, _collate_types)
---> 46 else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
47 else default_collate(t))
48
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py in <listcomp>(.0)
44 b = t[0]
45 return (default_collate(t) if isinstance(b, _collate_types)
---> 46 else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
47 else default_collate(t))
48
~/anaconda3/envs/seg/lib/python3.7/site-packages/fastai2/data/load.py in fa_collate(t)
45 return (default_collate(t) if isinstance(b, _collate_types)
46 else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
---> 47 else default_collate(t))
48
49 # Cell
~/anaconda3/envs/seg/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py in default_collate(batch)
79 return [default_collate(samples) for samples in transposed]
80
---> 81 raise TypeError(default_collate_err_msg_format.format(elem_type))
TypeError: default_collate: batch must contain tensors, numpy arrays, numbers, dicts or lists; found <class 'fastai2.vision.core.PILImage'>
If I delete `split_idx = 0`,
it works!