How can i get correct accuracy on Test data with my fastai model

I have this model.I am trying to get test accuracy on my model. I have 2 different classes.Although I get 0.96 accuracy on train and valid data, I couldn’t get true rate on Test data. Output of test data is showed at below. When I try to get results on second class, Metrics show 0.0 values

-- coding: utf-8 --

“”"
Spyder Editor

This is a temporary script file.
“”"

from fastai.vision import *
import torch as t

path = Path(r’C:\Users\Admin\Desktop\saglika1’)

np.random.seed(41)
data = ImageDataBunch.from_folder(path, train=“Training”, valid_pct=0.1, test=“Test”,
ds_tfms=get_transforms(), size=(256,256), bs=32, num_workers=4).normalize()

data.classes, data.c, len(data.train_ds), len(data.valid_ds)

print(“Number of examples in training:”, len(data.train_ds))
print(“Number of examples in validation:”, len(data.valid_ds))

xb,yb = data.one_batch()
xb.shape,yb.shape

data.show_batch(rows=3, figsize=(10,10))

def conv_block(ni, nf, size=3, stride=1):
for_pad = lambda s: s if s > 2 else 3
return nn.Sequential(
nn.Conv2d(ni, nf, kernel_size=size, stride=stride,
padding=(for_pad(size) - 1)//2, bias=False),
nn.BatchNorm2d(nf),
nn.LeakyReLU(negative_slope=0.1, inplace=True)
)

def triple_conv(ni, nf):
return nn.Sequential(
conv_block(ni, nf),
conv_block(nf, ni, size=1),
conv_block(ni, nf)
)

def maxpooling():
return nn.MaxPool2d(2, stride=2)

model = nn.Sequential(
conv_block(3, 8),
maxpooling(),
conv_block(8, 16),
maxpooling(),
triple_conv(16, 32),
maxpooling(),
triple_conv(32, 64),
maxpooling(),
triple_conv(64, 128),
maxpooling(),
triple_conv(128, 256),
conv_block(256, 128, size=1),
conv_block(128, 256),
conv_layer(256, 3),
Flatten(),
nn.Linear(507, 2)
)

if t.cuda.is_available():
model.to(“cuda”)

learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)

learn.fit_one_cycle(50, max_lr=3e-3)

learn.recorder.plot_losses()

probs,targets = learn.get_preds(ds_type=DatasetType.Test)

accuracy(probs,targets)

data.classes, data.c, len(data.train_ds), len(data.test_ds)

probs = np.argmax(probs, axis=1)
correct = 0
for idx, pred in enumerate(probs):
if pred == targets[idx]:
correct += 1
accuracy = correct / len(probs)
print(len(probs), correct, accuracy)

from sklearn.metrics import confusion_matrix
np.set_printoptions(threshold=np.inf) # shows whole confusion matrix
cm1 = confusion_matrix(targets, probs)
print(cm1)

from sklearn.metrics import classification_report
y_true1 = targets
y_pred1 = probs
target_names = [‘INMEVAR’, ‘INMEYOK’]
print(classification_report(y_true1, y_pred1, target_names=target_names))

interp = ClassificationInterpretation.from_learner(learn)

cm_fig = interp.plot_confusion_matrix(return_fig=True)
ax = cm_fig.gca()
ax.set_ylim(interp.data.c - .5, - .5);

RESULTS:

 precision    recall  f1-score   support

 INMEVAR       1.00      0.52      0.69       325
 INMEYOK       0.00      0.00      0.00         0

accuracy                                   0.52       325

macro avg 0.50 0.26 0.34 325
weighted avg 1.00 0.52 0.69 325

C:\Users\Admin\Downloads\Winpython64-3.7.7.1\WPy64-3771\python-3.7.7.amd64\lib\site-packages\sklearn\metrics_classification.py:1221: UndefinedMetricWarning: Recall and F-score are ill-defined and being set to 0.0 in labels with no true samples. Use zero_division parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))