Error in fit_one_cycle method

I am getting "TypeError: 'str' object cannot be interpreted as an integer" when calling fit_one_cycle on the classifier.
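For context, the classifier is set up roughly as follows (a sketch from memory, so argument values are illustrative rather than exact; `data_clas` stands for my classification DataBunch and `'ft_enc'` is the encoder saved after fine-tuning the language model):

```python
from fastai.text import text_classifier_learner

# Illustrative setup; data_clas is the TextClasDataBunch built earlier.
classifier = text_classifier_learner(data_clas, drop_mult=0.5)
classifier.load_encoder('ft_enc')  # reuse the fine-tuned LM encoder
classifier.fit_one_cycle(1)        # this call raises the TypeError below
```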

```
TypeError                                 Traceback (most recent call last)
<ipython-input-...> in <module>()
      2 classifier.load_encoder('ft_enc')
      3 print("Here")
----> 4 classifier.fit_one_cycle(1)
      5 classifier.freeze_to(-1)
      6 classifier.fit_one_cycle(cyc_len=1, max_lr=slice(1e-1, 1e-1), moms=(0.8, 0.7))

~/last_try/envs/ulm/lib/python3.6/site-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, wd, callbacks, **kwargs)
     20     callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor,
     21                                        pct_start=pct_start, **kwargs))
---> 22     learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
     23
     24 def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, **kwargs:Any):

~/last_try/envs/ulm/lib/python3.6/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
    172         callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
    173         fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 174             callbacks=self.callbacks+callbacks)
    175
    176     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

~/last_try/envs/ulm/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
     94     except Exception as e:
     95         exception = e
---> 96         raise e
     97     finally: cb_handler.on_train_end(exception)
     98

~/last_try/envs/ulm/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
     84         for xb,yb in progress_bar(data.train_dl, parent=pbar):
     85             xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 86             loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
     87             if cb_handler.on_batch_end(loss): break
     88

~/last_try/envs/ulm/lib/python3.6/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
     17     if not is_listy(xb): xb = [xb]
     18     if not is_listy(yb): yb = [yb]
---> 19     out = model(*xb)
     20     out = cb_handler.on_loss_begin(out)
     21

~/last_try/envs/ulm/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

~/last_try/envs/ulm/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
     90     def forward(self, input):
     91         for module in self._modules.values():
---> 92             input = module(input)
     93         return input
     94

~/last_try/envs/ulm/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

~/last_try/envs/ulm/lib/python3.6/site-packages/fastai/text/models.py in forward(self, input)
    167         self.reset()
    168         raw_outputs, outputs = [],[]
--> 169         for i in range(0, sl, self.bptt):
    170             r, o = super().forward(input[:,i: min(i+self.bptt, sl)])
    171             if i>(sl-self.max_seq):
```

The same fit_one_cycle call works fine when training the language-model learner; the error only appears with the classifier. Please suggest a solution. A small check I ran while debugging is sketched below.
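The failing frame is `for i in range(0, sl, self.bptt)` in fastai/text/models.py, and range() raises exactly this TypeError when one of its arguments is a string, so my guess is that bptt (or whatever value feeds it) ends up as a str somewhere. A minimal sketch reproducing the same message (the value '70' and the length 400 are purely illustrative):

```python
# range() only accepts integers; a str argument reproduces the reported error.
bptt = '70'  # illustrative: e.g. a value read from a config file without int()
try:
    for i in range(0, 400, bptt):
        pass
except TypeError as e:
    print(e)  # 'str' object cannot be interpreted as an integer

# Casting the suspect value to int lets the same loop run:
for i in range(0, 400, int(bptt)):
    pass
```

If that is what is happening here, the place to check would be how bptt is set when the classification data and learner are created.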