HingeEmbeddingLoss error: Siamese network

Hi, I am trying to use the HingeEmbeddingLoss function of PyTorch.
I assumed it accepts as input a tuple of images plus a target saying whether the images are similar or dissimilar (1 or 0). I pass a tuple of images through my Siamese network, which outputs a 256-length tensor.

But I get the following error:
hinge_embedding_loss(): argument 'input' (position 1) must be Tensor, not tuple

What am I missing?
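
Here is a standalone snippet that reproduces the mismatch, if I read the PyTorch docs correctly (F.hinge_embedding_loss wants a single Tensor of distances, and targets of 1/-1 rather than 1/0):

import torch
import torch.nn.functional as F

emb1, emb2 = torch.randn(4, 256), torch.randn(4, 256)  # one 256-length embedding per image
target = torch.tensor([1., -1., 1., -1.])  # 1 = similar, -1 = dissimilar

# F.hinge_embedding_loss((emb1, emb2), target)
# TypeError: hinge_embedding_loss(): argument 'input' (position 1) must be Tensor, not tuple

dist = F.pairwise_distance(emb1, emb2)  # single distance tensor, shape (4,)
loss = F.hinge_embedding_loss(dist, target)  # this works

My actual setup is below: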
class SiamImageItemList(ImageItemList):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # self._label_cls = FloatList

    def __len__(self)->int: return len(self.items) or 1 
    
    def get(self, i):
        # The first half of the items yields similar pairs, the second half dissimilar pairs
        match = 1
        if i >= len(self.items)//2:
            match = 0
        fn = self.items[i]
        img1 = super().get(i)  # returns an Image object
        imgs = self.xtra.Image.values
        ids = self.xtra.Id.values
        wcls = ids[i]
        simgs = imgs[ids == wcls]  # filenames with the same id
        dimgs = imgs[ids != wcls]  # filenames with a different id
        if len(simgs) == 1 and match == 1:
            fn2 = fn  # only image of its class, so pair it with itself
        else:
            while True:
                np.random.shuffle(simgs)
                np.random.shuffle(dimgs)
                if match == 0 or simgs[0] != fn:  # don't pair an image with itself
                    fn2 = simgs[0] if match == 1 else dimgs[0]
                    break
            fn2 = self.items[np.where(imgs == fn2)[0][0]]
        img2 = super().open(fn2)  # returns an Image object
        return SiamImage(img1, img2)

class SiamImage(ItemBase):
    def __init__(self, img1, img2):  # both should be of Image type
        self.img1, self.img2 = img1, img2
        # mean and std are per-channel normalisation tensors defined elsewhere
        self.obj = (img1, img2)
        self.data = [(img1.data - mean[..., None, None]) / std[..., None, None],
                     (img2.data - mean[..., None, None]) / std[..., None, None]]
    def apply_tfms(self, tfms, *args, **kwargs):
        self.img1 = self.img1.apply_tfms(tfms, *args, **kwargs)
        self.img2 = self.img2.apply_tfms(tfms, *args, **kwargs)
        self.data = [(self.img1.data - mean[..., None, None]) / std[..., None, None],
                     (self.img2.data - mean[..., None, None]) / std[..., None, None]]
        return self
    def __repr__(self): return f'{self.__class__.__name__} {self.img1.shape, self.img2.shape}'
    def to_one(self):
        # undo the normalisation and stitch the pair side by side for display
        return Image(mean[..., None, None] + torch.cat(self.data, 2) * std[..., None, None])
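
For context, SiameseNet is essentially a shared encoder applied to both images, returning one 256-length embedding per image. The stand-in below is not my real architecture, just the shape of it; I suspect the tuple it returns is exactly what hinge_embedding_loss then receives as input, since fastai calls model(*xb) on the two tensors in SiamImage.data:

import torch.nn as nn

class SiameseNet(nn.Module):
    # placeholder layers, not my actual encoder
    def __init__(self):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1))
        self.head = nn.Linear(16, 256)

    def encode(self, x):
        return self.head(self.body(x).flatten(1))

    def forward(self, x1, x2):
        # one embedding per image -> the model output is a tuple
        return self.encode(x1), self.encode(x2)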

learn = Learner(data, SiameseNet(), loss_func=F.hinge_embedding_loss,
                path=path, metrics=[acc_02],
                callback_fns=[partial(GradientClipping, clip=1)])
learn.callback_fns.append(partial(SaveModelCallback, monitor='val_loss', mode='min'))
learn.callback_fns.append(partial(ReduceLROnPlateauCallback, min_delta=1e-5, patience=3))
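
Would a wrapper along these lines be the right way to adapt the loss? It unpacks the tuple into a single distance tensor and remaps my 1/0 labels to the 1/-1 that hinge_embedding_loss expects (a sketch, untested; the name siamese_hinge_loss is mine):

import torch.nn.functional as F

def siamese_hinge_loss(preds, target, margin=1.0):
    emb1, emb2 = preds  # tuple of two (batch, 256) embeddings
    dist = F.pairwise_distance(emb1, emb2)
    target = target.float() * 2 - 1  # 1 stays 1 (similar), 0 becomes -1 (dissimilar)
    return F.hinge_embedding_loss(dist, target, margin=margin)

# then: learn = Learner(data, SiameseNet(), loss_func=siamese_hinge_loss, ...)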