[Solved] Error while loading a fastai model in PyTorch

I am trying to load a model trained in fastai and run inference in pure PyTorch. Can someone please take a look and tell me what I am doing wrong?

Thanks

```
import torch
import torchvision

resnet34 = torchvision.models.resnet34(pretrained=False)
model = torch.nn.Sequential(*list(resnet34.children())[:-2])

import torch.nn as nn

class AdaptiveConcatPool2d(nn.Module):
    "Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`."
    def __init__(self):
        "Output will be 2*sz or 2 if sz is None"
        super().__init__()
        sz = 1
        self.ap,self.mp = nn.AdaptiveAvgPool2d(sz), nn.AdaptiveMaxPool2d(sz)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)

class Lambda(nn.Module):
    "An easy way to create a pytorch layer for a simple `func`."
    def __init__(self, func):
        "create a layer that simply calls `func` with `x`"
        super().__init__()
        self.func=func

def forward(self, x): return self.func(x)

model1 = nn.Sequential(AdaptiveConcatPool2d(),
                       Lambda(lambda x: x.view(x.size(0),-1)),
                       nn.BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
                       nn.Dropout(p=0.25),
                       nn.Linear(in_features=1024, out_features=512, bias=True),
                       nn.ReLU(inplace=True),
                       nn.BatchNorm1d(512, eps=1e-05,momentum=0.1, affine=True, track_running_stats=True),
                       nn.Dropout(0.5),
                       nn.Linear(in_features=512, out_features=102, bias=True))

model2  = nn.Sequential(model, model1)
for param in model2.parameters():
            param.requires_grad = False

state_dict = torch.load('Flower224resnet34freeze.pth', map_location='cpu')

model2.load_state_dict(state_dict['model'], strict=False)

model = model2
a = torch.rand((1, 3, 224, 224))
output = model(a)
```

I am getting the following error when running this:

```
---------------------------------------------------------------------------
NotImplementedError                       Traceback (most recent call last)
<ipython-input-21-31e9ef03dacc> in <module>
----> 1 output = model(a)

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
     90     def forward(self, input):
     91         for module in self._modules.values():
---> 92             input = module(input)
     93         return input
     94 

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
     90     def forward(self, input):
     91         for module in self._modules.values():
---> 92             input = module(input)
     93         return input
     94 

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in forward(self, *input)
     83             registered hooks while the latter silently ignores them.
     84         """
---> 85         raise NotImplementedError
     86 
     87     def register_buffer(self, name, tensor):

NotImplementedError:
```

In the code that you have pasted, your forward methods are not indented, so they aren't part of the classes. Each module therefore falls back to the default forward it inherits from nn.Module, which is a single line of code that raises NotImplementedError. Try indenting your forward methods correctly.

https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
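For reference, here is a minimal sketch of the fix using the Lambda layer from the post above: forward must be indented inside the class body, at the same level as __init__.

```
import torch
import torch.nn as nn

class Lambda(nn.Module):
    "An easy way to create a pytorch layer for a simple `func`."
    def __init__(self, func):
        super().__init__()
        self.func = func

    def forward(self, x):  # class-level, same indentation as __init__
        return self.func(x)

layer = Lambda(lambda x: x.view(x.size(0), -1))
print(layer(torch.rand(2, 3, 4)).shape)  # torch.Size([2, 12])
```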

Man, I’m really stupid, didn’t see that. Thanks @stephenjohnson.

Glad to be of help. Sometimes it just takes a second pair of eyes.

Hey, Salil, can I see your code after you solved this?

```
import torch
import torchvision

resnet34 = torchvision.models.resnet34(pretrained=False)
model = torch.nn.Sequential(*list(resnet34.children())[:-2])

import torch.nn as nn

class AdaptiveConcatPool2d(nn.Module):
    "Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`."
    def __init__(self):
        "Output will be 2*sz or 2 if sz is None"
        super().__init__()
        sz = 1
        self.ap, self.mp = nn.AdaptiveAvgPool2d(sz), nn.AdaptiveMaxPool2d(sz)

    def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)  # class-level, not inside __init__

class Lambda(nn.Module):
    "An easy way to create a pytorch layer for a simple `func`."
    def __init__(self, func):
        "create a layer that simply calls `func` with `x`"
        super().__init__()
        self.func=func

    def forward(self, x): return self.func(x)

model1 = nn.Sequential(AdaptiveConcatPool2d(),
                       Lambda(lambda x: x.view(x.size(0),-1)),
                       nn.BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
                       nn.Dropout(p=0.25),
                       nn.Linear(in_features=1024, out_features=512, bias=True),
                       nn.ReLU(inplace=True),
                       nn.BatchNorm1d(512, eps=1e-05,momentum=0.1, affine=True, track_running_stats=True),
                       nn.Dropout(0.5),
                       nn.Linear(in_features=512, out_features=102, bias=True))

model2 = nn.Sequential(model, model1)

# freeze all parameters, since we only want inference
for param in model2.parameters():
    param.requires_grad = False

state_dict = torch.load('Flower224resnet34freeze.pth', map_location='cpu')

# fastai saves a dict with the weights under the 'model' key
model2.load_state_dict(state_dict['model'], strict=False)

model = model2
a = torch.rand((1, 3, 224, 224))
output = model(a)
```
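One extra note from me (not in the original post): for inference you generally want to put the model in evaluation mode first, so that Dropout is disabled and the BatchNorm layers use their running statistics; with a batch size of 1, BatchNorm1d in training mode will in fact raise an error.

```
model2.eval()                  # disable Dropout, use BatchNorm running stats
with torch.no_grad():          # gradients are not needed for inference
    output = model2(torch.rand(1, 3, 224, 224))
print(output.shape)            # torch.Size([1, 102])
```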

You can choose map_location according to your device.
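A short sketch of common map_location choices, using the same checkpoint file as above:

```
import torch

# force all tensors onto the CPU
state_dict = torch.load('Flower224resnet34freeze.pth', map_location='cpu')

# or map them onto a specific GPU
state_dict = torch.load('Flower224resnet34freeze.pth', map_location='cuda:0')

# or pick the device dynamically
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
state_dict = torch.load('Flower224resnet34freeze.pth', map_location=device)
```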
