Problem setting up TensorFlow with a base model

I'm trying to build an image recognizer with TensorFlow. I'm importing a pre-trained model through TensorFlow Hub, but I'm running into problems with the data format.
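From the TensorFlow Hub docs, my understanding is that an image feature-vector module wants a float32 batch of RGB images scaled to [0, 1]. This is roughly the pattern I'm aiming for (the placeholder shape and the [0, 1] scaling are just my reading of the docs, so I may have it wrong):

import tensorflow as tf
import tensorflow_hub as hub

module = hub.Module("https://tfhub.dev/google/imagenet/nasnet_large/feature_vector/1")
height, width = hub.get_expected_image_size(module)  # 331x331 for nasnet_large, I believe

# A batch of RGB images scaled to [0, 1], as I understand the common image signature.
images = tf.placeholder(tf.float32, shape=[None, height, width, 3])
features = module(images)  # expected shape: [batch_size, num_features]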

import sys

import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image
from sklearn.model_selection import train_test_split


class ImportData:
    def __init__(self, width, height):
        print("Starting...")
        images, labels = [], []
        PATH = ".../fastai/courses/dl1/data/dogbreed/"
        categories = np.array([])
        file_len = 0

        # First pass: collect the unique category names (capped at 1000 rows).
        with open(f'{PATH}labels.csv', 'r') as file:
            seen = set()  # set for fast O(1) amortized lookup
            next(file)    # Skip the first row, which holds the column names.
            for line in file:
                file_len += 1
                category = line.split(",")[1][:-1]
                if category in seen:
                    continue  # skip duplicate

                seen.add(category)
                categories = np.append(categories, category)

                if file_len > 1000:
                    break

        # Second pass: load the images and build a one-hot label per row.
        with open(f'{PATH}labels.csv', 'r') as file:
            next(file)  # Skip the first row, which holds the column names.

            for i, line in enumerate(file):
                sys.stdout.write("Progress: %d%%   \r" % (round(i/file_len*100, 1)))
                sys.stdout.flush()
                label = np.array([0]*len(categories))
                name = line.split(",")[0]
                category = line.split(",")[1][:-1]
                label[np.where(categories == category)] = 1

                img_path = f'{PATH}train/{name}.jpg'
                try:
                    im = Image.open(img_path)
                    im = im.resize([width, height], Image.ANTIALIAS)
                except:
                    print("Skipping a corrupted file:", img_path)
                    continue

                pixels = np.array(im.convert('L').getdata())
                images.append(pixels/255.0)
                labels.append(label)
                if i > 1000:
                    break

        train_images, test_images, train_labels, test_labels = \
            train_test_split(images, labels, test_size=0.2, random_state=0)

        class train:
            def __init__(self):
                self.images = []
                self.labels = []
                self.batch_counter = 0

            def next_batch(self, num):
                if self.batch_counter + num >= len(self.labels):
                    # Wrap around: take the tail, then refill from the start.
                    batch_images = self.images[self.batch_counter:]
                    batch_labels = self.labels[self.batch_counter:]
                    left = num - len(batch_labels)
                    batch_images.extend(self.images[:left])
                    batch_labels.extend(self.labels[:left])
                    self.batch_counter = left
                else:
                    batch_images = self.images[self.batch_counter:self.batch_counter+num]
                    batch_labels = self.labels[self.batch_counter:self.batch_counter+num]
                    self.batch_counter += num

                return (batch_images, batch_labels)

        class test:
            def __init__(self):
                self.images = []
                self.labels = []

        self.train = train()
        self.test = test()

        self.train.images = train_images
        self.train.labels = train_labels
        self.test.images = test_images
        self.test.labels = test_labels
input_data = ImportData(img_shape[0],img_shape[1])

module = hub.Module("https://tfhub.dev/google/imagenet/nasnet_large/feature_vector/1")
features = module(input_data.train.images)
logits = tf.layers.dense(features, num_classes)
probabilities = tf.nn.softmax(logits)

And the error message is the following:
TypeError: Can't convert 'images': Expected float32, got array([0.47843137, 0.46666667, 0.45882353, ..., 0.45098039, 0.37254902,
0.30980392]) of type 'ndarray' instead.
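Based on the error, I'm passing a plain Python list of float64 arrays (flat grayscale pixel vectors), while the module apparently expects a single float32 tensor. This is the kind of conversion I've been guessing at, but I don't know if it's the right approach (the reshape dimensions and the channel repeat are my own assumptions, since my loader converts everything to grayscale):

import numpy as np

# My guess: pack the list of flat pixel vectors into one float32 array of shape
# [batch, height, width, 1], then repeat the grayscale channel three times,
# because I believe the ImageNet modules expect RGB input.
train_images = np.asarray(input_data.train.images, dtype=np.float32)
train_images = train_images.reshape(-1, img_shape[1], img_shape[0], 1)  # height, width
train_images = np.repeat(train_images, 3, axis=-1)

features = module(train_images)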

I have been reading some tutorials, but they all seem to handle the data differently.

So I'm looking for straightforward TensorFlow Hub tutorials; the ones I've found all handle the data in ways I can't adapt to my own dataset. Is there someone with knowledge of TensorFlow Hub who might help me?