Cannot use ResNext models

I’m trying to use ResNeXt models in my Kaggle kernel. I have tried various ResNeXt models from several repos, and they all give me an error.

Error Type one: failing to read or load the model at all. Sometimes the issue is with cloning or installing the repo; other times the model simply cannot be found, or an error occurs as with PyTorch’s hub.load.

Error Type two: after the model loads, an error occurs in learn.lr_find or learn.fit_one_cycle where the input and output sizes don’t match. A sample error is below.

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-21-c7a9c29f9dd1> in <module>
----> 1 learn.lr_find()
      2 learn.recorder.plot()

/opt/conda/lib/python3.6/site-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
     30     cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
     31     epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 32     learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
     33 
     34 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
    200         callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
    201         self.cb_fns_registered = True
--> 202         fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
    203 
    204     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
     99             for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
    100                 xb, yb = cb_handler.on_batch_begin(xb, yb)
--> 101                 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
    102                 if cb_handler.on_batch_end(loss): break
    103 

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
     28 
     29     if not loss_func: return to_detach(out), yb[0].detach()
---> 30     loss = loss_func(out, *yb)
     31 
     32     if opt is not None:

/opt/conda/lib/python3.6/site-packages/fastai/layers.py in __call__(self, input, target, **kwargs)
    236         if self.floatify: target = target.float()
    237         input = input.view(-1,input.shape[-1]) if self.is_2d else input.view(-1)
--> 238         return self.func.__call__(input, target.view(-1), **kwargs)
    239 
    240 def CrossEntropyFlat(*args, axis:int=-1, **kwargs):

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    545             result = self._slow_forward(*input, **kwargs)
    546         else:
--> 547             result = self.forward(*input, **kwargs)
    548         for hook in self._forward_hooks.values():
    549             hook_result = hook(self, input, result)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
    599                                                   self.weight,
    600                                                   pos_weight=self.pos_weight,
--> 601                                                   reduction=self.reduction)
    602 
    603 

/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight)
   2096 
   2097     if not (target.size() == input.size()):
-> 2098         raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
   2099 
   2100     return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)

ValueError: Target size (torch.Size([96])) must be the same as input size (torch.Size([16000]))

Can someone show me a concrete, working example of using ResNeXt in a Kaggle kernel? Please help.