How to create a custom PyTorch class

Hey,

I am trying to create my own CNN for images of size 854x854, following chapters 13 and 14 in the book. Starting with only 2 layers, here is my code:

from torch.nn import Module, Sequential, Conv2d, BatchNorm2d, ReLU, MaxPool2d, Linear

class Net(Module):
    def __init__(self):
        super(Net, self).__init__()

        self.cnn_layers = Sequential(
            # Conv layer 1
            Conv2d(3, 6, kernel_size=7, stride=2, padding=2),
            BatchNorm2d(6),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),

            # Conv layer 2
            Conv2d(6, 6, kernel_size=3, stride=2, padding=1),
            BatchNorm2d(6),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
        )

        self.linear_layers = Sequential(
            Linear(6 * 214 * 214, 8)
        )

    # forward pass
    def forward(self, x):
        x = self.cnn_layers(x)
        x = x.view(x.size(0), -1)  # flatten everything except the batch dimension
        x = self.linear_layers(x)
        return x

Then I define my learner:

learn = Learner(dls, Net, metrics=error_rate, loss_func=nn.CrossEntropyLoss())

When I try to use lr_find or fit_one_cycle, I get the following error:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-26-d31966f904ce> in <module>
----> 1 learn.fit_one_cycle(4,2e-3)

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
    105                   moms=None, cbs=None, reset_opt=False):
    106     "Fit `self.model` for `n_epoch` using the 1cycle policy."
--> 107     if self.opt is None: self.create_opt()
    108     self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
    109     lr_max = np.array([h['lr'] for h in self.opt.hypers])

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in create_opt(self)
    138     def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)
    139     def create_opt(self):
--> 140         self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
    141         if not self.wd_bn_bias:
    142             for p in self._bn_bias_state(True ): p['do_wd'] = False

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/torch_core.py in trainable_params(m)
    569 def trainable_params(m):
    570     "Return all trainable parameters of `m`"
--> 571     return [p for p in m.parameters() if p.requires_grad]
    572 
    573 # Cell

TypeError: parameters() missing 1 required positional argument: 'self'

Where am I going wrong here? :sweat_smile:
Cheers,
R

The custom network itself looks correct; the mistake is in how the learner is built. Learner expects an instantiated model, not the class itself: passing Net means fastai ends up calling parameters() on the class object, which is why the traceback complains that the 'self' argument is missing.

learn = Learner(dls, Net(), metrics=error_rate, loss_func=nn.CrossEntropyLoss())
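
To see the difference quickly (a minimal sketch, not part of the original post):

Net.parameters()    # TypeError: parameters() missing 1 required positional argument: 'self'
Net().parameters()  # OK: a generator over the model's trainable tensors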

Thank you!

I now get the following error. What is the best way to check that the dimensions going into the linear layer are correct?

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-20-d18828793d27> in <module>
----> 1 lr_min,lr_steep=learn.lr_find()
      2 print(f"Minimum/10: {lr_min:.2e}, steepest point: {lr_steep:.2e}")

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/callback/schedule.py in lr_find(self, start_lr, end_lr, num_it, stop_div, show_plot, suggestions)
    220     n_epoch = num_it//len(self.dls.train) + 1
    221     cb=LRFinder(start_lr=start_lr, end_lr=end_lr, num_it=num_it, stop_div=stop_div)
--> 222     with self.no_logging(): self.fit(n_epoch, cbs=cb)
    223     if show_plot: self.recorder.plot_lr_find()
    224     if suggestions:

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
    210             self.opt.set_hypers(lr=self.lr if lr is None else lr)
    211             self.n_epoch = n_epoch
--> 212             self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
    213 
    214     def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
    158 
    159     def _with_events(self, f, event_type, ex, final=noop):
--> 160         try: self(f'before_{event_type}');  f()
    161         except ex: self(f'after_cancel_{event_type}')
    162         self(f'after_{event_type}');  final()

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in _do_fit(self)
    201         for epoch in range(self.n_epoch):
    202             self.epoch=epoch
--> 203             self._with_events(self._do_epoch, 'epoch', CancelEpochException)
    204 
    205     def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
    158 
    159     def _with_events(self, f, event_type, ex, final=noop):
--> 160         try: self(f'before_{event_type}');  f()
    161         except ex: self(f'after_cancel_{event_type}')
    162         self(f'after_{event_type}');  final()

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in _do_epoch(self)
    195 
    196     def _do_epoch(self):
--> 197         self._do_epoch_train()
    198         self._do_epoch_validate()
    199 

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in _do_epoch_train(self)
    187     def _do_epoch_train(self):
    188         self.dl = self.dls.train
--> 189         self._with_events(self.all_batches, 'train', CancelTrainException)
    190 
    191     def _do_epoch_validate(self, ds_idx=1, dl=None):

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
    158 
    159     def _with_events(self, f, event_type, ex, final=noop):
--> 160         try: self(f'before_{event_type}');  f()
    161         except ex: self(f'after_cancel_{event_type}')
    162         self(f'after_{event_type}');  final()

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in all_batches(self)
    164     def all_batches(self):
    165         self.n_iter = len(self.dl)
--> 166         for o in enumerate(self.dl): self.one_batch(*o)
    167 
    168     def _do_one_batch(self):

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in one_batch(self, i, b)
    183         b_on_device = tuple( e.to(device=self.dls.device) for e in b if hasattr(e, "to")) if self.dls.device is not None else b
    184         self._split(b_on_device)
--> 185         self._with_events(self._do_one_batch, 'batch', CancelBatchException)
    186 
    187     def _do_epoch_train(self):

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
    158 
    159     def _with_events(self, f, event_type, ex, final=noop):
--> 160         try: self(f'before_{event_type}');  f()
    161         except ex: self(f'after_cancel_{event_type}')
    162         self(f'after_{event_type}');  final()

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/fastai/learner.py in _do_one_batch(self)
    167 
    168     def _do_one_batch(self):
--> 169         self.pred = self.model(*self.xb)
    170         self('after_pred')
    171         if len(self.yb):

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

<ipython-input-18-acbe5db720e8> in forward(self, x)
     34         x = self.cnn_layers(x)
     35         x = x.view(x.size(0), -1)
---> 36         x = self.linear_layers(x)
     37         return x

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/torch/nn/modules/container.py in forward(self, input)
    115     def forward(self, input):
    116         for module in self:
--> 117             input = module(input)
    118         return input
    119 

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/torch/nn/modules/linear.py in forward(self, input)
     91 
     92     def forward(self, input: Tensor) -> Tensor:
---> 93         return F.linear(input, self.weight, self.bias)
     94 
     95     def extra_repr(self) -> str:

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/torch/nn/functional.py in linear(input, weight, bias)
   1685     if not torch.jit.is_scripting():
   1686         if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
-> 1687             return handle_torch_function(linear, tens_ops, input, weight, bias=bias)
   1688     if input.dim() == 2 and bias is not None:
   1689         # fused op is marginally faster

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/torch/overrides.py in handle_torch_function(public_api, relevant_args, *args, **kwargs)
   1061         # Use `public_api` instead of `implementation` so __torch_function__
   1062         # implementations can do equality/identity comparisons.
-> 1063         result = overloaded_arg.__torch_function__(public_api, types, args, kwargs)
   1064 
   1065         if result is not NotImplemented:

~/miniconda3/envs/fastai_V2/lib/python3.8/site-packages/torch/nn/functional.py in linear(input, weight, bias)
   1688     if input.dim() == 2 and bias is not None:
   1689         # fused op is marginally faster
-> 1690         ret = torch.addmm(bias, input, weight.t())
   1691     else:
   1692         output = input.matmul(weight.t())

RuntimeError: mat1 dim 1 must match mat2 dim 0

I usually debug this by printing the tensor shapes inside the forward function. It works, but it is probably not the best method :smiley:
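
For example, here is a minimal sketch (assuming the Net class from above and 854x854 RGB inputs): run a dummy batch through the conv stack once and read off the flattened size the linear layer has to accept.

import torch

net = Net()
with torch.no_grad():
    # one fake 3-channel 854x854 image, batch size 1
    out = net.cnn_layers(torch.zeros(1, 3, 854, 854))

print(out.shape)                # torch.Size([1, 6, 53, 53]) for these layer settings
print(out.view(1, -1).size(1))  # 16854 = 6 * 53 * 53

So the linear layer should be Linear(6 * 53 * 53, 8) rather than Linear(6 * 214 * 214, 8), which is exactly the shape mismatch the RuntimeError is pointing at.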


It is all about getting the matrix dimensions right. Also, use fastai's loss function CrossEntropyLossFlat instead of nn.CrossEntropyLoss.
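
Something like this (a sketch, assuming dls and the fixed Net from above):

from fastai.vision.all import *

# CrossEntropyLossFlat flattens predictions and targets before computing
# cross entropy, which plays nicely with fastai's metrics and prediction decoding
learn = Learner(dls, Net(), metrics=error_rate, loss_func=CrossEntropyLossFlat())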
