I’ve got more adventures with fp16 at inference time — this time with normalize/denormalize.
learn.predict(data)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-42-f62f0b041cab> in <module>()
13 data = data.type(torch.HalfTensor)
14 data = data.cuda()
---> 15 cat, t1, t2 = learn.predict(data)
~/anaconda3/lib/python3.7/site-packages/fastai/basic_train.py in predict(self, item, **kwargs)
249 "Return prect class, label and probabilities for `item`."
250 self.callbacks.append(RecordOnCPU())
--> 251 batch = self.data.one_item(item)
252 res = self.pred_batch(batch=batch)
253 pred = res[0]
~/anaconda3/lib/python3.7/site-packages/fastai/basic_data.py in one_item(self, item, detach, denorm)
147 ds = self.single_ds
148 with ds.set_item(item):
--> 149 return self.one_batch(ds_type=DatasetType.Single, detach=detach, denorm=denorm)
150
151 def show_batch(self, rows:int=5, ds_type:DatasetType=DatasetType.Train, **kwargs)->None:
~/anaconda3/lib/python3.7/site-packages/fastai/basic_data.py in one_batch(self, ds_type, detach, denorm)
134 w = self.num_workers
135 self.num_workers = 0
--> 136 try: x,y = next(iter(dl))
137 finally: self.num_workers = w
138 if detach: x,y = to_detach(x),to_detach(y)
~/anaconda3/lib/python3.7/site-packages/fastai/basic_data.py in __iter__(self)
70 for b in self.dl:
71 #y = b[1][0] if is_listy(b[1]) else b[1] # XXX: Why is this line here?
---> 72 yield self.proc_batch(b)
73
74 @classmethod
~/anaconda3/lib/python3.7/site-packages/fastai/basic_data.py in proc_batch(self, b)
63 "Proces batch `b` of `TensorImage`."
64 b = to_device(b, self.device)
---> 65 for f in listify(self.tfms): b = f(b)
66 return b
67
~/anaconda3/lib/python3.7/site-packages/fastai/vision/data.py in _normalize_batch(b, mean, std, do_x, do_y)
74 x,y = b
75 mean,std = mean.to(x.device),std.to(x.device)
---> 76 if do_x: x = normalize(x,mean,std)
77 if do_y and len(y.shape) == 4: y = normalize(y,mean,std)
78 return x,y
~/anaconda3/lib/python3.7/site-packages/fastai/vision/data.py in normalize(x, mean, std)
64 def normalize(x:TensorImage, mean:FloatTensor,std:FloatTensor)->TensorImage:
65 "Normalize `x` with `mean` and `std`."
---> 66 return (x-mean[...,None,None]) / std[...,None,None]
67
68 def denormalize(x:TensorImage, mean:FloatTensor,std:FloatTensor, do_x:bool=True)->TensorImage:
RuntimeError: expected type torch.cuda.FloatTensor but got torch.cuda.HalfTensor
So I converted the mean and std to the type of the input data:
def normalize(x: "TensorImage", mean: "FloatTensor", std: "FloatTensor") -> "TensorImage":
    "Normalize `x` with `mean` and `std`, casting the stats to `x`'s device and dtype."
    # Cast the stats to x's dtype so an fp16 batch normalizes without the
    # "expected torch.cuda.FloatTensor but got torch.cuda.HalfTensor" error,
    # and to x's *actual* device — unlike `mean.type(x.type())`, which goes
    # through a type string and would land on the default CUDA device.
    mean, std = mean.to(x.device, x.dtype), std.to(x.device, x.dtype)
    # Unsqueeze the per-channel stats to (C, 1, 1) so they broadcast over H and W.
    return (x - mean[..., None, None]) / std[..., None, None]
def denormalize(x: "TensorImage", mean: "FloatTensor", std: "FloatTensor", do_x: bool = True) -> "TensorImage":
    "Denormalize `x` with `mean` and `std`, returning a CPU tensor."
    # Pass-through path: no arithmetic, so no casting needed — preserves
    # the input dtype exactly as before.
    if not do_x:
        return x.cpu()
    # Promote x UP to the stats' dtype (float32) instead of casting the stats
    # DOWN to x's dtype: arithmetic on CPU HalfTensors is not implemented
    # ("mul" not implemented for 'torch.HalfTensor'), so an fp16 batch must
    # be promoted before the multiply.
    # NOTE(review): assumes `mean`/`std` live on CPU here, as they do when
    # called from fastai's denorm path — confirm for other callers.
    x = x.cpu().to(mean.dtype)
    return x * std[..., None, None] + mean[..., None, None]
and got this:
~/anaconda3/lib/python3.7/site-packages/fastai/vision/data.py in denormalize(x, mean, std, do_x)
70 "Denormalize `x` with `mean` and `std`."
71 mean,std = mean.type(x.type()),std.type(x.type())
---> 72 return x.cpu()*std[...,None,None] + mean[...,None,None] if do_x else x.cpu()
73
74 def _normalize_batch(b:Tuple[Tensor,Tensor], mean:FloatTensor, std:FloatTensor, do_x:bool=True, do_y:bool=False)->Tuple[Tensor,Tensor]:
RuntimeError: "mul" not implemented for 'torch.HalfTensor'
After that, I changed denormalize to convert x to the type of mean instead:
def denormalize(x:TensorImage, mean:FloatTensor,std:FloatTensor, do_x:bool=True)->TensorImage:
    "Denormalize `x` with `mean` and `std`."
    # Promote x to the stats' dtype (and device) before any arithmetic, so
    # half-precision inputs never hit CPU HalfTensor limitations.
    x = x.type(mean.type())
    if not do_x:
        return x.cpu()
    scaled = x.cpu() * std[..., None, None]
    return scaled + mean[..., None, None]
It worked, but I’d like to fix it properly — this looks hacky. Any ideas how?