MemoryError when calling vgg = Vgg16()

Does Vgg16() work on a t2.micro instance? When I run the following code in lesson1.ipynb:

vgg = Vgg16()
# Grab a few images at a time for training and validation.
# NB: They must be in subdirectories named based on their category
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size*2)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)

I get this:

---------------------------------------------------------------------------
MemoryError                               Traceback (most recent call last)
<ipython-input-8-2b6861506a11> in <module>()
----> 1 vgg = Vgg16()
      2 # Grab a few images at a time for training and validation.
      3 # NB: They must be in subdirectories named based on their category
      4 batches = vgg.get_batches(path+'train', batch_size=batch_size)
      5 val_batches = vgg.get_batches(path+'valid', batch_size=batch_size*2)

/home/ubuntu/nbs/courses/deeplearning1/nbs/vgg16.pyc in __init__(self)
     45     def __init__(self):
     46         self.FILE_PATH = 'http://files.fast.ai/models/'
---> 47         self.create()
     48         self.get_classes()
     49 

/home/ubuntu/nbs/courses/deeplearning1/nbs/vgg16.pyc in create(self)
    132 
    133         model.add(Flatten())
--> 134         self.FCBlock()
    135         self.FCBlock()
    136         model.add(Dense(1000, activation='softmax'))

/home/ubuntu/nbs/courses/deeplearning1/nbs/vgg16.pyc in FCBlock(self)
    111         """
    112         model = self.model
--> 113         model.add(Dense(4096, activation='relu'))
    114         model.add(Dropout(0.5))
    115 

/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/models.pyc in add(self, layer)
    306                  output_shapes=[self.outputs[0]._keras_shape])
    307         else:
--> 308             output_tensor = layer(self.outputs[0])
    309             if type(output_tensor) is list:
    310                 raise Exception('All layers in a Sequential model '

/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/engine/topology.pyc in __call__(self, x, mask)
    485                                     '`layer.build(batch_input_shape)`')
    486             if len(input_shapes) == 1:
--> 487                 self.build(input_shapes[0])
    488             else:
    489                 self.build(input_shapes)

/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/layers/core.pyc in build(self, input_shape)
    693 
    694         self.W = self.init((input_dim, self.output_dim),
--> 695                            name='{}_W'.format(self.name))
    696         if self.bias:
    697             self.b = K.zeros((self.output_dim,),

/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/initializations.pyc in glorot_uniform(shape, name, dim_ordering)
     57     fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
     58     s = np.sqrt(6. / (fan_in + fan_out))
---> 59     return uniform(shape, s, name=name)
     60 
     61 

/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/initializations.pyc in uniform(shape, scale, name)
     30 
     31 def uniform(shape, scale=0.05, name=None):
---> 32     return K.random_uniform_variable(shape, -scale, scale, name=name)
     33 
     34 

/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/backend/theano_backend.pyc in random_uniform_variable(shape, low, high, dtype, name)
    138 
    139 def random_uniform_variable(shape, low, high, dtype=_FLOATX, name=None):
--> 140     return variable(np.random.uniform(low=low, high=high, size=shape),
    141                     dtype=dtype, name=name)
    142 

mtrand.pyx in mtrand.RandomState.uniform (numpy/random/mtrand/mtrand.c:17350)()

mtrand.pyx in mtrand.cont2_array_sc (numpy/random/mtrand/mtrand.c:3092)()

MemoryError:

Hi Matthew,

I ran into the same problem. I believe you're running a t2.micro instance? Basically, Python is complaining that there is insufficient RAM to build the model. Just stop the instance and change its type to t2.large (although personally I found that t2.medium works as well).
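
To put a rough number on it: VGG16 has on the order of 138 million parameters, and the Dense(4096) layer the traceback dies in holds the biggest chunk of them. A quick back-of-envelope (approximate figures, assuming 4-byte float32 weights):

# Rough estimate of VGG16's parameter memory; figures are approximate.
fc1  = 25088 * 4096   # first Dense(4096): Flatten() of a 512x7x7 feature map
fc2  = 4096 * 4096    # second Dense(4096)
fc3  = 4096 * 1000    # final Dense(1000, activation='softmax')
conv = 14714688       # all convolutional layers combined (approx.)
total = fc1 + fc2 + fc3 + conv
print('~%dM weights, ~%d MB as float32' % (total / 1e6, total * 4 / 1e6))
# -> ~138M weights, ~550 MB -- and that's before Theano's working buffers,
#    the Python process itself, and the pretrained weight file that gets
#    loaded next, which together exceed a t2.micro's 1 GB of RAM.

That single 25088 x 4096 matrix alone is roughly 400 MB, which is why the allocation inside np.random.uniform is exactly where it falls over.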

Hope this helps.
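
If you'd rather script the resize than click through the EC2 console, here is a rough boto3 sketch (the instance ID is a placeholder; run this from your local machine with AWS credentials configured, not on the instance itself):

# Sketch: resize a stopped EC2 instance with boto3.
import boto3

ec2 = boto3.client('ec2')
instance_id = 'i-xxxxxxxxxxxxxxxxx'   # replace with your instance's ID

ec2.stop_instances(InstanceIds=[instance_id])
ec2.get_waiter('instance_stopped').wait(InstanceIds=[instance_id])

# The instance type can only be changed while the instance is stopped.
ec2.modify_instance_attribute(InstanceId=instance_id,
                              InstanceType={'Value': 't2.large'})

ec2.start_instances(InstanceIds=[instance_id])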
