PeakMemMetric broken between 1.0.45 and 1.0.48 releases

Hi

I think recent changes to callbacks have broken the PeakMemMetric. My notebook was fine until I upgraded. Now I'm getting this error trace:

TypeError                                 Traceback (most recent call last)
<ipython-input-6-c80ed7386cef> in <module>
     33 #
     34 learn = tabular_learner(data, layers=[1000,500], ps=[0.001,0.01], emb_drop=0.04, metrics=metrics, use_bn=True, callback_fns=PeakMemMetric)
---> 35 learn.lr_find()
     36 learn.recorder.plot(suggestion=True)

e:\Anaconda3\lib\site-packages\fastai\train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
     30     cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
     31     epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 32     learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
     33 
     34 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=False, clip:float=None,

e:\Anaconda3\lib\site-packages\fastai\basic_train.py in fit(self, epochs, lr, wd, callbacks)
    189         if defaults.extra_callbacks is not None: callbacks += defaults.extra_callbacks
    190         fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 191             callbacks=self.callbacks+callbacks)
    192 
    193     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

e:\Anaconda3\lib\site-packages\fastai\basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
     99                                        cb_handler=cb_handler, pbar=pbar)
    100             else: val_loss=None
--> 101             if cb_handler.on_epoch_end(val_loss): break
    102     except Exception as e:
    103         exception = e

e:\Anaconda3\lib\site-packages\fastai\callback.py in on_epoch_end(self, val_loss)
    299         "Epoch is done, process `val_loss`."
    300         self.state_dict['last_metrics'] = [val_loss] if val_loss is not None else None
--> 301         self('epoch_end', call_mets = val_loss is not None)
    302         self.state_dict['epoch'] += 1
    303         return self.state_dict['stop_training']

e:\Anaconda3\lib\site-packages\fastai\callback.py in __call__(self, cb_name, call_mets, **kwargs)
    233         if call_mets:
    234             for met in self.metrics: self._call_and_update(met, cb_name, **kwargs)
--> 235         for cb in self.callbacks: self._call_and_update(cb, cb_name, **kwargs)
    236 
    237     def set_dl(self, dl:DataLoader):

e:\Anaconda3\lib\site-packages\fastai\callback.py in _call_and_update(self, cb, cb_name, **kwargs)
    223     def _call_and_update(self, cb, cb_name, **kwargs)->None:
    224         "Call `cb_name` on `cb` and update the inner state."
--> 225         new = ifnone(getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs), dict())
    226         for k,v in new.items():
    227             if k not in self.state_dict:

e:\Anaconda3\lib\site-packages\fastai\callbacks\mem.py in on_epoch_end(self, last_metrics, **kwargs)
     63         elif gpu_used > 0: gpu_peak -= gpu_used
     64         # The numbers are deltas in MBs (beginning of the epoch and the end)
---> 65         return add_metrics(last_metrics, [cpu_used, cpu_peak, gpu_used, gpu_peak])

e:\Anaconda3\lib\site-packages\fastai\torch_core.py in add_metrics(last_metrics, mets)
    394     "Return a dictionary for updating `last_metrics` with `mets`."
    395     mets = listify(mets)
--> 396     return {'last_metrics': last_metrics + mets}
    397 
    398 def try_save(state:Dict, path:Path, fname:PathOrStr):

TypeError: unsupported operand type(s) for +: 'NoneType' and 'list'

Is there a different way we should use this now, or is it a bug?

Yes, that bug is known and has been fixed in master. You’ll have it in 1.0.49, or you can do a dev install.

That was fast. Thank you!

EDIT: Confirmed that upgrading to 1.0.49 fixed the issue.