# HELP understanding fastai callbacks

Can someone tell me how, in the code below, loss.backward() isn't called on the validation data? I don't see loss.backward() inside any conditional statement, so how come it doesn't run when validating?

def one_batch(xb, yb, cb):
    """Run a single training batch through the callback-driven loop.

    Every cb.* hook may veto the remaining steps by returning a falsy
    value: begin_batch -> forward/loss -> after_loss -> backward ->
    after_backward -> opt.step.
    """
    if not cb.begin_batch(xb, yb):
        return
    preds = cb.learn.model(xb)
    loss = cb.learn.loss_func(preds, yb)
    if not cb.after_loss(loss):
        return
    loss.backward()
    if cb.after_backward():
        cb.learn.opt.step()

def all_batches(dl, cb):
    """Run one_batch for every (xb, yb) pair the dataloader yields.

    Stops early as soon as cb.do_stop() reports True after a batch.
    """
    for xb, yb in dl:
        one_batch(xb, yb, cb)
        if cb.do_stop():
            break

def fit(epochs, learn, cb):
    """Callback-driven training loop: train each epoch, then validate.

    Every cb.* hook may veto its stage by returning a falsy value.

    Fix: the pasted version's `if cb.begin_validate():` branch had no
    body — the validation line was lost in the paste. Restored the
    `with torch.no_grad(): all_batches(...)` call that the surrounding
    discussion refers to.
    """
    import torch  # local import: this snippet has no import block of its own

    if not cb.begin_fit(learn):
        return
    for epoch in range(epochs):
        if not cb.begin_epoch(epoch):
            continue
        all_batches(learn.data.train_dl, cb)

        if cb.begin_validate():
            # no_grad(): autograd records nothing during validation, so
            # calling loss.backward() there would raise — presumably the
            # validation-mode callbacks also veto the backward step via
            # after_loss; confirm against the callback implementation.
            with torch.no_grad():
                all_batches(learn.data.valid_dl, cb)
        if cb.do_stop() or not cb.after_epoch():
            break
    cb.after_fit()

Because the validation pass runs inside `with torch.no_grad():`, so no_grad() doesn't allow any gradients to be computed even if backward() is called.

1 Like

@bluesky314 but take a look at this simpler version of fit

def one_batch(xb, yb, learn):
    """Minimal training step: forward pass, loss, backward, optimizer step.

    Fix: the original never cleared gradients, so .grad accumulated
    across batches; zero_grad() after the step keeps each batch's
    update independent.
    """
    loss = learn.loss_func(learn.model(xb), yb)
    loss.backward()
    learn.opt.step()
    learn.opt.zero_grad()  # was missing: gradients otherwise accumulate across batches

def all_batch(dl, learn):
    """Run one_batch over every (inputs, targets) pair in the dataloader."""
    for inputs, targets in dl:
        one_batch(inputs, targets, learn)

def fit(epoch, learn):
    """Train `learn` for `epoch` passes over the training data.

    Fixes vs the original:
    - `learn.model.train = True` assigned a bool over nn.Module.train,
      clobbering the method instead of enabling training mode; call
      train()/eval() instead.
    - `epoch` was accepted but ignored; loop over it, matching the
      callback version of fit.
    """
    for _ in range(epoch):
        learn.model.train()  # enable training-mode layers (dropout, batchnorm)
        all_batch(learn.data.train_dl, learn)
    learn.model.eval()       # back to inference mode once training is done

when i tried to fit this i got the following error

RuntimeError Traceback (most recent call last)
in
----> 1 fit(1, learn)

in fit(epoch, learn)
15
16 learn.model.train = False

in all_batch(dl, learn)
8 def all_batch(dl, learn):
9 for xb,yb in dl:
β> 10 one_batch(xb,yb, learn)
11
12 def fit(epoch, learn):

in one_batch(xb, yb, learn)
2 loss = learn.loss_func(learn.model(xb), yb)
3
----> 4 loss.backward()
5 learn.opt.step()

D:\Program_files\lib\site-packages\torch\tensor.py in backward(self, gradient, retain_graph, create_graph)
105 products. Defaults to `False`.
106 ββ"