Debugging a Learner error in a slightly modified version of the lesson 3 planet notebook

Hello all, I am looking at the lesson 3 planet notebook, but I made a copy for a one-off I am exploring for another project. I have a data frame with filenames and 'yes'/'no' labels as its only columns ('tags' being the label column), and I'm using that data frame to load the data object. A couple of rows of the data frame look something like:
Index  image_filename       tags
0      <path_to_file.png>   yes
1      <path_to_file.png>   no

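In case it's useful, here is a toy sketch of how that frame is shaped (pandas, with placeholder filenames; my real paths are different):

import pandas as pd

# Toy stand-in for my real DataFrame: same two columns, placeholder paths
df = pd.DataFrame({
    'image_filename': ['images/0.png', 'images/1.png'],
    'tags':           ['yes', 'no'],
})
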
I'm struggling to debug an error I'm seeing in the fit_one_cycle method when fitting the model head.

I'd like to note that I can get around this by specifying metrics=error_rate, as in the lesson 2 notebook, but not with any other metric, such as the ones provided in the lesson 3 notebook or torch.nn.BCELoss. Does anyone have insight into why that would be?
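For reference, this is the variant that does train cleanly (only the metric is swapped; everything else is unchanged):

learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(5, slice(1e-3))  # runs to completion, no size-mismatch error

And here is my full code, extracted from the notebook, followed by the error message.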

from fastai.vision import *  # notebook-style import, as in the lesson

np.random.seed(42)

# tfms was defined earlier with get_transforms(), as in the lesson notebook
src = (ImageList.from_df(df, path='')
       .split_by_rand_pct(0.2)
       .label_from_df('tags'))
data = (src.transform(tfms, size=224)
        .databunch()
        .normalize(imagenet_stats))
data.show_batch(rows=3, figsize=(12,9))  # this does show me images with labels

arch = models.resnet50
acc_02 = partial(accuracy_thresh, thresh=0.2)
f_score = partial(fbeta, thresh=0.2)
learn = cnn_learner(data, arch, metrics=[acc_02, f_score])

learn.lr_find()
learn.recorder.plot()  # this suggests an lr of 0.001
lr = 0.001
learn.fit_one_cycle(5, slice(lr))

and I get the following error:
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>()
----> 1 learn.fit_one_cycle(5, lr)

9 frames
/usr/local/lib/python3.6/dist-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, final_div, wd, callbacks, tot_epochs, start_epoch)
     21     callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor, pct_start=pct_start,
     22                                        final_div=final_div, tot_epochs=tot_epochs, start_epoch=start_epoch))
---> 23     learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
     24 
     25 def fit_fc(learn:Learner, tot_epochs:int=1, lr:float=defaults.lr, moms:Tuple[float,float]=(0.95,0.85), start_pct:float=0.72,

/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
    198         else: self.opt.lr,self.opt.wd = lr,wd
    199         callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
--> 200         fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
    201 
    202     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
    104             if not cb_handler.skip_validate and not learn.data.empty_val:
    105                 val_loss = validate(learn.model, learn.data.valid_dl, loss_func=learn.loss_func,
--> 106                                     cb_handler=cb_handler, pbar=pbar)
    107             else: val_loss=None
    108             if cb_handler.on_epoch_end(val_loss): break

/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in validate(model, dl, loss_func, cb_handler, pbar, average, n_batch)
     61             if not is_listy(yb): yb = [yb]
     62             nums.append(first_el(yb).shape[0])
---> 63             if cb_handler and cb_handler.on_batch_end(val_losses[-1]): break
     64             if n_batch and (len(nums)>=n_batch): break
     65         nums = np.array(nums, dtype=np.float32)

/usr/local/lib/python3.6/dist-packages/fastai/callback.py in on_batch_end(self, loss)
    306         "Handle end of processing one batch with loss."
    307         self.state_dict['last_loss'] = loss
--> 308         self('batch_end', call_mets = not self.state_dict['train'])
    309         if self.state_dict['train']:
    310             self.state_dict['iteration'] += 1

/usr/local/lib/python3.6/dist-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
    248         "Call through to all of the CallbackHandler functions."
    249         if call_mets:
--> 250             for met in self.metrics: self._call_and_update(met, cb_name, **kwargs)
    251         for cb in self.callbacks: self._call_and_update(cb, cb_name, **kwargs)
    252 

/usr/local/lib/python3.6/dist-packages/fastai/callback.py in _call_and_update(self, cb, cb_name, **kwargs)
    239     def _call_and_update(self, cb, cb_name, **kwargs)->None:
    240         "Call cb_name on cb and update the inner state."
--> 241         new = ifnone(getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs), dict())
    242         for k,v in new.items():
    243             if k not in self.state_dict:

/usr/local/lib/python3.6/dist-packages/fastai/callback.py in on_batch_end(self, last_output, last_target, **kwargs)
    342         if not is_listy(last_target): last_target=[last_target]
    343         self.count += first_el(last_target).size(0)
--> 344         val = self.func(last_output, *last_target)
    345         if self.world:
    346             val = val.clone()

/usr/local/lib/python3.6/dist-packages/fastai/metrics.py in accuracy_thresh(y_pred, y_true, thresh, sigmoid)
     33     "Computes accuracy when y_pred and y_true are the same size."
     34     if sigmoid: y_pred = y_pred.sigmoid()
---> 35     return ((y_pred>thresh).byte()==y_true.byte()).float().mean()
     36 
     37 def top_k_accuracy(input:Tensor, targs:Tensor, k:int=5)->Rank0Tensor:

/usr/local/lib/python3.6/dist-packages/torch/tensor.py in wrapped(*args, **kwargs)
     26     def wrapped(*args, **kwargs):
     27         try:
---> 28             return f(*args, **kwargs)
     29         except TypeError:
     30             return NotImplemented

RuntimeError: The size of tensor a (2) must match the size of tensor b (64) at non-singleton dimension 1
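
In case it helps, the same size mismatch reproduces with bare tensors. My (possibly wrong) reading is that my single-label 'yes'/'no' targets come through as class indices of shape [64], while the model output is [64, 2]; accuracy_thresh compares the two element-wise, so it seems to expect one-hot targets of the same shape as the predictions:

import torch

y_pred = torch.randn(64, 2)          # model output: a score per class for a batch of 64
y_true = torch.randint(0, 2, (64,))  # single-label targets: class indices, not one-hot

# The same comparison accuracy_thresh does internally (after the sigmoid):
((y_pred.sigmoid() > 0.2).byte() == y_true.byte()).float().mean()
# RuntimeError: The size of tensor a (2) must match the size of tensor b (64)
# at non-singleton dimension 1

If that reading is right, it would also explain why error_rate works here: it argmaxes the predictions down to a single class index per row before comparing against the targets.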