Creating a CNN with Learner

I have an image classification assignment and we are not supposed to use transfer learning, so I’m trying to create a CNN from scratch using torch.nn and Learner.
So far I’m having a lot of trouble matching the dimensions of the output layer to what nn.CrossEntropyLoss expects (it is a multiclass classification problem). I have looked around the net but could not find anything that helps.
In a previous model I tried to manually shrink the feature maps in each channel down to 6x6 (the number of classes is 6), but then I got stuck again. So I’m looking for a general way/tip to get out of this situation.

Thanks a lot

Edit 1: Note: the data object passed to the Learner is of type ImageDataBunch.
Here is the code:

class Model(nn.Module):
    def __init__(self, inp_dim=150, inp_channels=3):
        super(Model, self).__init__()
        self.g1 = nn.Sequential(
            nn.Conv2d(inp_channels, 10, 3),
            nn.ReLU(),
            nn.Flatten()
        )
    def forward(self, x):
        x = self.g1(x)
        print(x.shape)
        # x = x.view(-1, 6)
        return x

model = Model().cuda()
# doc(Learner)
learner = Learner(data, model, loss_func=nn.CrossEntropyLoss())

learner.fit_one_cycle(2)

I’m getting the following error:

RuntimeError                              Traceback (most recent call last)
<ipython-input-75-97c25af695bc> in <module>
----> 1 learner.fit_one_cycle(2)

/opt/conda/envs/fastai/lib/python3.6/site-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, final_div, wd, callbacks, tot_epochs, start_epoch)
     21     callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor, pct_start=pct_start,
     22                                        final_div=final_div, tot_epochs=tot_epochs, start_epoch=start_epoch))
---> 23     learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
     24 
     25 def fit_fc(learn:Learner, tot_epochs:int=1, lr:float=defaults.lr,  moms:Tuple[float,float]=(0.95,0.85), start_pct:float=0.72,

/opt/conda/envs/fastai/lib/python3.6/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
    198         else: self.opt.lr,self.opt.wd = lr,wd
    199         callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
--> 200         fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
    201 
    202     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

/opt/conda/envs/fastai/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
     97             cb_handler.set_dl(learn.data.train_dl)
     98             cb_handler.on_epoch_begin()
---> 99             for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
    100                 xb, yb = cb_handler.on_batch_begin(xb, yb)
    101                 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)

/opt/conda/envs/fastai/lib/python3.6/site-packages/fastprogress/fastprogress.py in __iter__(self)
     45         except Exception as e:
     46             self.on_interrupt()
---> 47             raise e
     48 
     49     def update(self, val):

/opt/conda/envs/fastai/lib/python3.6/site-packages/fastprogress/fastprogress.py in __iter__(self)
     39         if self.total != 0: self.update(0)
     40         try:
---> 41             for i,o in enumerate(self.gen):
     42                 if i >= self.total: break
     43                 yield o

/opt/conda/envs/fastai/lib/python3.6/site-packages/fastai/basic_data.py in __iter__(self)
     73     def __iter__(self):
     74         "Process and returns items from `DataLoader`."
---> 75         for b in self.dl: yield self.proc_batch(b)
     76 
     77     @classmethod

/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/utils/data/dataloader.py in __next__(self)
    343 
    344     def __next__(self):
--> 345         data = self._next_data()
    346         self._num_yielded += 1
    347         if self._dataset_kind == _DatasetKind.Iterable and \

/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/utils/data/dataloader.py in _next_data(self)
    836             if len(self._task_info[self._rcvd_idx]) == 2:
    837                 data = self._task_info.pop(self._rcvd_idx)[1]
--> 838                 return self._process_data(data)
    839 
    840             assert not self._shutdown and self._tasks_outstanding > 0

/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/utils/data/dataloader.py in _process_data(self, data)
    879         self._try_put_index()
    880         if isinstance(data, ExceptionWrapper):
--> 881             data.reraise()
    882         return data
    883 

/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/_utils.py in reraise(self)
    392             # (https://bugs.python.org/issue2651), so we work around it.
    393             msg = KeyErrorMessage(msg)
--> 394         raise self.exc_type(msg)

RuntimeError: Caught RuntimeError in DataLoader worker process 1.
Original Traceback (most recent call last):
  File "/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/utils/data/_utils/worker.py", line 178, in _worker_loop
    data = fetcher.fetch(index)
  File "/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/utils/data/_utils/fetch.py", line 47, in fetch
    return self.collate_fn(data)
  File "/opt/conda/envs/fastai/lib/python3.6/site-packages/fastai/torch_core.py", line 127, in data_collate
    return torch.utils.data.dataloader.default_collate(to_data(batch))
  File "/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/utils/data/_utils/collate.py", line 79, in default_collate
    return [default_collate(samples) for samples in transposed]
  File "/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/utils/data/_utils/collate.py", line 79, in <listcomp>
    return [default_collate(samples) for samples in transposed]
  File "/opt/conda/envs/fastai/lib/python3.6/site-packages/torch/utils/data/_utils/collate.py", line 55, in default_collate
    return torch.stack(batch, 0, out=out)
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 150 and 136 in dimension 2 at /opt/conda/conda-bld/pytorch_1579022034529/work/aten/src/TH/generic/THTensor.cpp:612

Hi @Narang

You could try using cnn_learner instead of building the model directly with torch.nn.
If you are trying to train from scratch, I would suggest setting the pretrained argument to False when using cnn_learner.

Here’s the link to the docs:
https://docs.fast.ai/vision.learner.html#cnn_learner
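
For example, a minimal sketch (assuming fastai v1 and that data is your ImageDataBunch; resnet34 is just a placeholder architecture):

from fastai.vision import cnn_learner, models, accuracy

# pretrained=False builds the architecture with random weights, so no transfer learning is involved
learn = cnn_learner(data, models.resnet34, metrics=accuracy, pretrained=False)
learn.fit_one_cycle(2)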

@Narang did you ensure all your samples are the same size, i.e. did you add in a resize transform? The error doesn’t have anything to do with the model, I think.
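
For example, something like this (a sketch assuming fastai v1 and an ImageNet-style folder layout under a hypothetical path; the key part is the size argument, which resizes every image so batches can be collated):

from fastai.vision import ImageDataBunch, get_transforms

# size=150 forces every sample to 150x150, which avoids the
# "Got 150 and 136 in dimension 2" collate error in the traceback
data = ImageDataBunch.from_folder(path, valid_pct=0.2,
                                  ds_tfms=get_transforms(), size=150)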

We are not actually allowed to import any models :frowning:

My last linear layer didn’t have the correct dimensions. It took me too long to figure out, sorry.
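
For anyone who lands here with the same problem, here is a minimal sketch of a head whose output shape matches what nn.CrossEntropyLoss expects (assuming 150x150 inputs and 6 classes, as in the original post; the exact feature count depends on your conv stack):

import torch.nn as nn

class Model(nn.Module):
    def __init__(self, inp_dim=150, inp_channels=3, n_classes=6):
        super().__init__()
        conv_out = inp_dim - 2  # a 3x3 conv with no padding: 150 -> 148
        self.g1 = nn.Sequential(
            nn.Conv2d(inp_channels, 10, 3),
            nn.ReLU(),
            nn.Flatten(),
            # the Linear layer maps the flattened conv features to one logit per class,
            # giving the (batch, n_classes) output that nn.CrossEntropyLoss expects
            nn.Linear(10 * conv_out * conv_out, n_classes),
        )

    def forward(self, x):
        return self.g1(x)

An nn.AdaptiveAvgPool2d layer before Flatten also works and avoids computing the flattened feature count by hand.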