Hidden layer of pretrained model gives different output every time

I figured out how to get the output of an intermediate layer, and when I started playing with that output I noticed that the hidden layer gives a different output every time I initialize a new model:

arch = resnext101_64
PATH = 'somepath'
bs = 50
sz = 500
data = get_data(sz, bs, val_idxs, label_csv)
learn = ConvLearner.pretrained(arch, data, precompute=False, ps=0)
learn.save('testn1.model')
learn.load('testn1.model')
model = learn.model
model = model.eval()

Get an nn.Module minus the last n layers:

import torch.nn as nn

class nnBottom(nn.Module):
    """Wraps a model, keeping everything except its last two child modules."""
    def __init__(self, original_model):
        super(nnBottom, self).__init__()
        # drop the last two top-level layers of the original model
        self.features = nn.Sequential(*list(original_model.children())[:-2])

    def forward(self, x):
        return self.features(x)

nnbot = nnBottom(model)
nnbot = nnbot.eval()
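(To see exactly which layers that [:-2] slice keeps and which it drops, and whether any of fastai's randomly initialized head layers survive the cut, one could just print the children. A minimal sketch, using model from above:)

# Sketch: list the top-level children so it's clear what [:-2] removes.
children = list(model.children())
print('dropped:', children[-2:])      # the two modules nnBottom cuts off
print('kept tail:', children[-4:-2])  # the last modules that remain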
from torchvision import transforms
from torch.autograd import Variable
from PIL import Image

imsize = 500
loader = transforms.Compose([transforms.ToTensor(),
                             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

def image_loader(image_name):
    """load image, returns cuda tensor"""
    image = Image.open(image_name)
    image = loader(image).float()
    image = Variable(image, requires_grad=True)
    image = image.unsqueeze(0)  # this is for VGG, may not be needed for ResNet
    return image.cuda()  # assumes that you're using GPU
image = image_loader('data/someimage.png')
latent = nnbot(image).data.cpu().numpy()
latent.tolist()[0]

[0.15148745477199554,
0.0,
0.0,
0.0,
0.15419943630695343, … ]

Run it all again, with the exact same code:

latent.tolist()[0]

[0.5094183087348938,
0.0,
0.753930926322937,
0.22921140491962433,
0.24140295386314392, … ]
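(One way I could narrow it down: call the same nnbot twice in one session. If a stochastic layer like dropout were firing at inference time, the two latents would already differ; if they match, the variation must come from re-initialization. A quick check:)

# Quick check: two forward passes with the same weights in one session.
latent1 = nnbot(image).data.cpu().numpy()
latent2 = nnbot(image).data.cpu().numpy()
print((latent1 == latent2).all())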

Is this because of the dropout layers? Can I make it deterministic somehow?
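(If the culprit is random initialization, one thing I could try is seeding every RNG source before building the learner. A minimal sketch, nothing fastai-specific, and the seed value is arbitrary:)

# Sketch: seed all RNG sources before ConvLearner.pretrained(...) so any
# randomly initialized layers come out the same on every run.
import random
import numpy as np
import torch

seed = 42  # arbitrary
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True  # make cuDNN kernels repeatable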