Custom Transforms with CV2

I am trying to apply Contrast Limited Adaptive Histogram Equalization (CLAHE) to my training images. The function I am using is:

def apply_clahe(img, clahe):
    """Equalize the lightness (L) channel of ``img`` with CLAHE.

    Parameters:
        img: channels-first (C, H, W) float image with values in [0, 1]
             -- assumed from the fastai pipeline; TODO confirm with caller.
        clahe: a ``cv2.CLAHE`` object created with ``cv2.createCLAHE``.

    Returns:
        Channels-last (H, W, C) float32 RGB array in [0, 1].
    """
    # (1, 2, 0) maps (C, H, W) -> (H, W, C). The original (2, 1, 0) also
    # swapped height and width, i.e. it transposed the image.
    img = np.float32(img).transpose(1, 2, 0)
    # cv2.CLAHE.apply asserts its input is 8-bit or 16-bit single-channel --
    # passing float32 is what raises the "-215:Assertion failed" error --
    # so scale to 0-255 and convert to uint8 first.
    img8 = np.uint8(np.clip(img, 0.0, 1.0) * 255)
    # fastai images are RGB, so use the RGB<->LAB conversion codes
    # consistently (the original mixed BGR2LAB in with LAB2RGB out).
    lab = cv2.cvtColor(img8, cv2.COLOR_RGB2LAB)
    lab_planes = list(cv2.split(lab))
    lab_planes[0] = clahe.apply(lab_planes[0])
    lab = cv2.merge(lab_planes)
    rgb = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)
    # Return to the float [0, 1] range expected downstream.
    return np.float32(rgb) / 255.0
My custom transform definition is:
def _apply_ill(x):
    """Pixel transform: run CLAHE (via the module-level ``clahe1``) on ``x``
    and wrap the result back into a fastai ``Image``."""
    equalized = apply_clahe(x, clahe1)
    return Image(equalized)


# Register the function as a fastai pixel-level transform.
apply_ill = TfmPixel(_apply_ill)

Then I add this transform to the standard transforms:

# Standard fastai augmentations, with the CLAHE transform appended to both
# the training (index 0) and validation (index 1) transform lists.
_tfms = get_transforms(do_flip=True, flip_vert=True, max_zoom=1.4)
tfms = [tfm_list + [apply_ill()] for tfm_list in _tfms]

Finally I define my databunch as:
# NOTE: the original line used curly "smart" quotes around '.', which is a
# Python syntax error; they are replaced with straight quotes here.
data = ImageDataBunch.from_folder(dataPath, train='.', valid_pct=0.2,
                                  ds_tfms=tfms, size=(200, 200),
                                  num_workers=1, bs=32).normalize(imagenet_stats)

I am getting the following error which is attributed to CV2:
Exception: It’s not possible to apply those transforms to your dataset: OpenCV(3.4.2) C:\Miniconda3\conda-bld\opencv-suite_1534379934306\work\modules\imgproc\src\clahe.cpp:351: error: (-215:Assertion failed) _src.type() == (((0) & ((1 << 3) - 1)) + (((1)-1) << 3)) || _src.type() == (((2) & ((1 << 3) - 1)) + (((1)-1) << 3)) in function ‘`anonymous-namespace’::CLAHE_Impl::apply’

I am new to CV2. Can anyone help me in deciphering the error?

The following worked for me -

def custom_transform(img):
    """CLAHE pixel transform for fastai: equalize the L (lightness) channel.

    Parameters:
        img: a PyTorch tensor from the fastai pipeline (not a fastai Image
             object), RGB, values in [0, 1].

    Returns:
        float32 tensor scaled back to the [0, 1] range.
    """
    arr = image2np(img) * 255   # to a numpy HWC array in range 0-255
    arr = arr.astype(np.uint8)  # CLAHE requires 8-bit (or 16-bit) input

    # fastai delivers RGB, so convert with the RGB<->LAB codes. The original
    # used COLOR_BGR2LAB/COLOR_LAB2BGR, which swaps the R and B weights when
    # computing lightness and therefore equalizes a slightly wrong L channel.
    lab = cv2.cvtColor(arr, cv2.COLOR_RGB2LAB)
    lab_planes = list(cv2.split(lab))  # list() so item assignment is safe
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(16, 16))
    lab_planes[0] = clahe.apply(lab_planes[0])
    lab = cv2.merge(lab_planes)
    rgb = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)

    return pil2tensor(rgb, dtype=np.float32) / 255  # back to a [0, 1] tensor

# Wrap the function as a fastai pixel-level transform.
custom_trans = TfmPixel(custom_transform)

# Standard fastai augmentations, lightly tuned.
transforms = get_transforms(
    flip_vert=False,
    max_rotate=5,
    p_lighting=0.2,
)

# Prepend the CLAHE transform to the training transforms...
transforms[0].insert(0, custom_trans())
# ...and to the validation transforms as well, if you need it there too.
transforms[1].insert(0, custom_trans())
1 Like