fastai.vision.image.Image to torch.HalfTensor

I’d like to experiment with fp16 inference. Previously I was training with fp16 and setting back to fp32 for inference so that my data and weight type were both dtype torch.FloatTensor.

If I leave my weights as torch.HalfTensor, how can I convert a fastai.vision.image.Image object to torch.HalfTensor as well?

I’ve tried img.data.to(torch.float16), but it doesn’t seem to change the dtype in place (tensor.to() returns a new tensor rather than modifying the original).

1 Like

I think img.data.half() should work.

2 Likes

I’m trying to do inference in fastai 1.0.61 while staying inside the fp16 regime. I’ve taken the fastai.vision.image.Image and converted it to fp16 as you noted:

halfIm = vision.Image(img.data.half())

And confirmed that this produces a new image with a float16 tensor:

halfIm.data

tensor([[[0.0000, 0.0000, 0.0000,  ..., 0.0000, 0.0000, 0.0000],
         [0.0000, 0.0000, 0.0000,  ..., 0.0000, 0.0000, 0.0000],
         [0.0000, 0.0000, 0.0000,  ..., 0.0000, 0.0000, 0.0000],
         ...,
       dtype=torch.float16)

However, when calling predict, I still get an error indicating that the tensor is not a HalfTensor (RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.cuda.HalfTensor) should be the same):

learn.predict(halfIm)

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-143-2782c1b936ed> in <module>
----> 1 learn.predict(halfIm)

/opt/conda/lib/python3.7/site-packages/fastai/basic_train.py in predict(self, item, return_x, batch_first, with_dropout, **kwargs)
    371         "Return predicted class, label and probabilities for `item`."
    372         batch = self.data.one_item(item)
--> 373         res = self.pred_batch(batch=batch, with_dropout=with_dropout)
    374         raw_pred,x = grab_idx(res,0,batch_first=batch_first),batch[0]
    375         norm = getattr(self.data,'norm',False)

/opt/conda/lib/python3.7/site-packages/fastai/basic_train.py in pred_batch(self, ds_type, batch, reconstruct, with_dropout, activ)
    350         activ = ifnone(activ, _loss_func2activ(self.loss_func))
    351         with torch.no_grad():
--> 352             if not with_dropout: preds = loss_batch(self.model.eval(), xb, yb, cb_handler=cb_handler)
    353             else: preds = loss_batch(self.model.eval().apply(self.apply_dropout), xb, yb, cb_handler=cb_handler)
    354             res = activ(preds[0])

/opt/conda/lib/python3.7/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
     24     if not is_listy(xb): xb = [xb]
     25     if not is_listy(yb): yb = [yb]
---> 26     out = model(*xb)
     27     out = cb_handler.on_loss_begin(out)
     28 

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    575             result = self._slow_forward(*input, **kwargs)
    576         else:
--> 577             result = self.forward(*input, **kwargs)
    578         for hook in self._forward_hooks.values():
    579             hook_result = hook(self, input, result)

/opt/conda/lib/python3.7/site-packages/fastai/layers.py in forward(self, x)
    134         for l in self.layers:
    135             res.orig = x
--> 136             nres = l(res)
    137             # We have to remove res.orig to avoid hanging refs and therefore memory leaks
    138             res.orig = None

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    575             result = self._slow_forward(*input, **kwargs)
    576         else:
--> 577             result = self.forward(*input, **kwargs)
    578         for hook in self._forward_hooks.values():
    579             hook_result = hook(self, input, result)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(self, input)
     98     def forward(self, input):
     99         for module in self:
--> 100             input = module(input)
    101         return input
    102 

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    575             result = self._slow_forward(*input, **kwargs)
    576         else:
--> 577             result = self.forward(*input, **kwargs)
    578         for hook in self._forward_hooks.values():
    579             hook_result = hook(self, input, result)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/conv.py in forward(self, input)
    375 
    376     def forward(self, input):
--> 377         return self._conv_forward(input, self.weight)
    378 
    379 class Conv3d(_ConvNd):

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight)
    372                             _pair(0), self.dilation, self.groups)
    373         return F.conv2d(input, weight, self.bias, self.stride,
--> 374                         self.padding, self.dilation, self.groups)
    375 
    376     def forward(self, input):

RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.cuda.HalfTensor) should be the same

I believe that I’m properly converting the input, so I’m not sure what else to modify here.