I am trying to replicate this experiment - https://becominghuman.ai/extract-a-feature-vector-for-any-image-with-pytorch-9717561d1d4c
but, with a model I trained on resnext50. Here is how I proceed
# to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
# all the main external libs we'll use
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
PATH = "/home/ubuntu/datadrive/SLEEVE_LENGTH_CROPPED_PREPARED_SQUARED/"
sz=299
arch=resnext50
bs=28
tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
#tfms = tfms_from_model(arch, sz)
data = ImageClassifierData.from_paths(PATH, tfms=tfms, val_name="valid", bs=bs, num_workers=4)
learn = ConvLearner.pretrained(arch, data, precompute=True, ps=0.5)
learn.precompute=False
learn.load('rxt50_sleeve_length')
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
from PIL import Image
scaler = transforms.Resize((299, 299))
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
to_tensor = transforms.ToTensor()
model = learn.model
layer = model._modules.get('10')
def get_vector(image_name):
# 1. Load the image with Pillow library
img = Image.open(image_name)
# 2. Create a PyTorch Variable with the transformed image
t_img = Variable(normalize(to_tensor(scaler(img))).unsqueeze(0))
# 3. Create a vector of zeros that will hold our feature vector
my_embedding = torch.zeros(1000)
# 4. Define a function that will copy the output of a layer
def copy_data(m, i, o):
my_embedding.copy_(o.data)
# 5. Attach that function to our selected layer
h = layer.register_forward_hook(copy_data)
# 6. Run the model on our transformed image
model(t_img)
# 7. Detach our copy function from the layer
h.remove()
# 8. Return the feature vector
return my_embedding
Here, I try to extract features from (10): BatchNorm1d(4096, eps=1e-05, momentum=0.1, affine=True) layer of the net.
The function get_vector() runs fine on ResNet18 as mentioned in the article but when I try to run it on my model, I get the following error -
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-150-679a3b08470c> in <module>()
----> 1 p = get_vector('/home/ubuntu/datadrive/SLEEVE_LENGTH_CROPPED_PREPARED_SQUARED/train/cap_sleeves/11487246404239-Roadster-Women-Blue-Checked-A-Line-Dress-2141487246403875-1-148921074751.jpg.jpg')
2 p
<ipython-input-149-2e49da99815b> in get_vector(image_name)
27 h = layer.register_forward_hook(copy_data)
28 # 6. Run the model on our transformed image
---> 29 model(t_img)
30 # 7. Detach our copy function from the layer
31 h.remove()
~/.conda/envs/fastai/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
355 result = self._slow_forward(*input, **kwargs)
356 else:
--> 357 result = self.forward(*input, **kwargs)
358 for hook in self._forward_hooks.values():
359 hook_result = hook(self, input, result)
~/.conda/envs/fastai/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
65 def forward(self, input):
66 for module in self._modules.values():
---> 67 input = module(input)
68 return input
69
~/.conda/envs/fastai/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
355 result = self._slow_forward(*input, **kwargs)
356 else:
--> 357 result = self.forward(*input, **kwargs)
358 for hook in self._forward_hooks.values():
359 hook_result = hook(self, input, result)
~/.conda/envs/fastai/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
280 def forward(self, input):
281 return F.conv2d(input, self.weight, self.bias, self.stride,
--> 282 self.padding, self.dilation, self.groups)
283
284
~/.conda/envs/fastai/lib/python3.6/site-packages/torch/nn/functional.py in conv2d(input, weight, bias, stride, padding, dilation, groups)
88 _pair(0), groups, torch.backends.cudnn.benchmark,
89 torch.backends.cudnn.deterministic, torch.backends.cudnn.enabled)
---> 90 return f(input, weight, bias)
91
92
RuntimeError: Expected object of type torch.FloatTensor but found type torch.cuda.FloatTensor for argument #2 'weight'
How do I rectify it ?