Hello, I am a newbie in DL and fastai. I am trying to create segmented images with the help of unet_learner.
My code is below:
# get_transforms clone with a deterministic (non-random) validation pipeline
def preprocess(do_flip: bool = True, flip_vert: bool = False, max_rotate: float = 10.,
               max_zoom: float = 1.1, max_lighting: float = 0.2, max_warp: float = 0.2,
               p_affine: float = 0.75, p_lighting: float = 0.75,
               xtra_tfms: Optional[Collection[Transform]] = None) -> Collection[Transform]:
    """Build flip/rotate/zoom/warp/lighting transform lists.

    Returns a 2-tuple ``(train_tfms, valid_tfms)``: the first element is the
    randomized training list, the second is the deterministic validation list
    (a single non-random crop/pad).
    """
    # NOTE(review): rand_crop IS random — each call crops a different patch,
    # so the training list is not free of randomness despite the heading.
    res = [rand_crop()]
    if do_flip:
        res.append(dihedral_affine() if flip_vert else flip_affine(p=0.5))
    if max_warp:
        res.append(symmetric_warp(magnitude=(-max_warp, max_warp), p=p_affine))
    if max_rotate:
        res.append(rotate(degrees=(-max_rotate, max_rotate), p=p_affine))
    if max_zoom > 1:
        res.append(rand_zoom(scale=(1., max_zoom), p=p_affine))
    if max_lighting:
        res.append(brightness(change=(0.5 * (1 - max_lighting), 0.5 * (1 + max_lighting)),
                              p=p_lighting))
        res.append(contrast(scale=(1 - max_lighting, 1 / (1 - max_lighting)),
                            p=p_lighting))
    # (train tfms, valid tfms) — validation gets only a deterministic crop/pad
    return (res + listify(xtra_tfms), [crop_pad(is_random=False)])
# Build the segmentation DataBunch: 80/20 random split, mask labels derived
# from get_y_fn, transforms applied to both image and mask (tfm_y=True),
# normalized with ImageNet statistics.
src = SegmentationItemList.from_folder(path_img)
src = src.random_split_by_pct(valid_pct=0.2)
src = src.label_from_func(get_y_fn, classes=[0, 1])
data = (src.transform(preprocess(), size=size, tfm_y=True)
           .databunch(bs=bs)
           .normalize(imagenet_stats))
# Weight decay for the optimizer.
wd = 1e-2
# U-Net learner with an ImageNet-pretrained ResNet-34 encoder.
learn = unet_learner(data, models.resnet34, wd=wd, metrics=metrics)
# Peak learning rate for the one-cycle schedule.
lr = 3e-3
# Train 5 epochs; pct_start=0.9 is the fraction of the cycle spent
# increasing the learning rate before annealing.
learn.fit_one_cycle(5, slice(lr), pct_start=0.9)
# Items to predict on (no labels needed for inference).
data_test = SegmentationItemList.from_folder(path_train_pred)
# Create the mask for the first test image.
learn.predict(data_test[0])[0]
# See the initial image exactly as the model saw it.
# FIX: preprocess() returns a (train_tfms, valid_tfms) tuple; passing the
# whole tuple applies the RANDOM training transforms (rand_crop, warp, ...),
# which is why only a random part of the image appeared. Apply only the
# deterministic validation list, preprocess()[1], to get the entire image.
data_test[0].apply_tfms(preprocess()[1], size=size)
As a result, I receive a mask of only some part of the initial image, and the same part of the image itself. How can I get the entire (not just a part of the) masked initial image? As I understand it, I need to get rid of the random transforms on the test set (keeping only the resizing transforms), but I can't figure out how to do it.