I tried to train the inception_v3 model from torchvision in Google Colab and I got an error:
RuntimeError: Expected 3D (unbatched) or 4D (batched) input to conv2d, but got input of size: [1, 1000]
Could someone please give me some directions to solve this error?
# Image-classification data pipeline: collect image files from `path`,
# label each one by its parent folder name, and resize to 299x299
# (the input resolution Inception v3 was trained at).
data = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    get_items=get_image_files,
    get_y=parent_label,
    splitter=RandomSplitter(seed=42),  # seeded so the split is reproducible
    item_tfms=Resize(299),
)
dls = data.dataloaders(path, batch_size=32)
import torchvision.models as models
# Bind the architecture *function* (not an instantiated network):
# fastai's vision_learner expects a callable it will invoke itself.
model = models.inception_v3
def _inception_v3_split(m):
    """Split the model into three parameter groups for discriminative
    learning rates: early backbone layers, late backbone layers, and
    the head fastai attaches after the cut.
    """
    body = m[0]
    head = m[1:]
    layer_groups = L(body[:23], body[23:], head)
    return layer_groups.map(params)
# Register custom metadata so vision_learner knows how to cut this
# torchvision architecture and split it into parameter groups.
# NOTE(review): the [1, 1000] conv2d error suggests the cut is not being
# applied as intended (the body still emits the 1000-class logits) —
# confirm that `cut=-2` actually strips inception_v3's classifier head,
# and that aux_logits are handled, before relying on this meta.
_inception_v3_meta = {
    'cut': -2,
    'split': _inception_v3_split,
    'stats': imagenet_stats,
}
model_meta[models.inception_v3] = dict(_inception_v3_meta)

learn = vision_learner(dls, model, pretrained=True, metrics=accuracy)