Hi,
I am trying to use my Ubuntu 16.04 machine, which I set up following https://github.com/fastai/courses/blob/master/setup/install-gpu.sh. When I run the lesson 1 notebook, I get a MemoryError (the GPU runs out of memory). I am not sure what is causing it.
---------------------------------------------------------------------------
MemoryError Traceback (most recent call last)
<ipython-input-8-2b6861506a11> in <module>()
----> 1 vgg = Vgg16()
2 # Grab a few images at a time for training and validation.
3 # NB: They must be in subdirectories named based on their category
4 batches = vgg.get_batches(path+'train', batch_size=batch_size)
5 val_batches = vgg.get_batches(path+'valid', batch_size=batch_size*2)
/home/dongguo/Documents/courses/deeplearning1/nbs/vgg16.pyc in __init__(self)
30 def __init__(self):
31 self.FILE_PATH = 'http://files.fast.ai/models/'
---> 32 self.create()
33 self.get_classes()
34
/home/dongguo/Documents/courses/deeplearning1/nbs/vgg16.pyc in create(self)
74
75 model.add(Flatten())
---> 76 self.FCBlock()
77 self.FCBlock()
78 model.add(Dense(1000, activation='softmax'))
/home/dongguo/Documents/courses/deeplearning1/nbs/vgg16.pyc in FCBlock(self)
59 def FCBlock(self):
60 model = self.model
---> 61 model.add(Dense(4096, activation='relu'))
62 model.add(Dropout(0.5))
63
/home/dongguo/anaconda2/lib/python2.7/site-packages/keras/models.pyc in add(self, layer)
330 output_shapes=[self.outputs[0]._keras_shape])
331 else:
--> 332 output_tensor = layer(self.outputs[0])
333 if isinstance(output_tensor, list):
334 raise TypeError('All layers in a Sequential model '
/home/dongguo/anaconda2/lib/python2.7/site-packages/keras/engine/topology.pyc in __call__(self, x, mask)
544 '`layer.build(batch_input_shape)`')
545 if len(input_shapes) == 1:
--> 546 self.build(input_shapes[0])
547 else:
548 self.build(input_shapes)
/home/dongguo/anaconda2/lib/python2.7/site-packages/keras/layers/core.pyc in build(self, input_shape)
796 name='{}_W'.format(self.name),
797 regularizer=self.W_regularizer,
--> 798 constraint=self.W_constraint)
799 if self.bias:
800 self.b = self.add_weight((self.output_dim,),
/home/dongguo/anaconda2/lib/python2.7/site-packages/keras/engine/topology.pyc in add_weight(self, shape, initializer, name, trainable, regularizer, constraint)
416 """
417 initializer = initializations.get(initializer)
--> 418 weight = initializer(shape, name=name)
419 if regularizer is not None:
420 self.add_loss(regularizer(weight))
/home/dongguo/anaconda2/lib/python2.7/site-packages/keras/initializations.pyc in glorot_uniform(shape, name, dim_ordering)
64 fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
65 s = np.sqrt(6. / (fan_in + fan_out))
---> 66 return uniform(shape, s, name=name)
67
68
/home/dongguo/anaconda2/lib/python2.7/site-packages/keras/initializations.pyc in uniform(shape, scale, name, dim_ordering)
31
32 def uniform(shape, scale=0.05, name=None, dim_ordering='th'):
---> 33 return K.random_uniform_variable(shape, -scale, scale, name=name)
34
35
/home/dongguo/anaconda2/lib/python2.7/site-packages/keras/backend/theano_backend.pyc in random_uniform_variable(shape, low, high, dtype, name)
187 def random_uniform_variable(shape, low, high, dtype=None, name=None):
188 return variable(np.random.uniform(low=low, high=high, size=shape),
--> 189 dtype=dtype, name=name)
190
191
/home/dongguo/anaconda2/lib/python2.7/site-packages/keras/backend/theano_backend.pyc in variable(value, dtype, name)
85 else:
86 value = np.asarray(value, dtype=dtype)
---> 87 variable = theano.shared(value=value, name=name, strict=False)
88 variable._keras_shape = value.shape
89 variable._uses_learning_phase = False
/home/dongguo/anaconda2/lib/python2.7/site-packages/theano/compile/sharedvalue.pyc in shared(value, name, strict, allow_downcast, **kwargs)
266 try:
267 var = ctor(value, name=name, strict=strict,
--> 268 allow_downcast=allow_downcast, **kwargs)
269 utils.add_tag_trace(var)
270 return var
/home/dongguo/anaconda2/lib/python2.7/site-packages/theano/sandbox/cuda/var.pyc in float32_shared_constructor(value, name, strict, allow_downcast, borrow, broadcastable, target)
186 # type.broadcastable is guaranteed to be a tuple, which this next
187 # function requires
--> 188 deviceval = type_support_filter(value, type.broadcastable, False, None)
189
190 try:
MemoryError: ('Error allocating 411041792 bytes of device memory (out of memory).', "you might consider using 'theano.shared(..., borrow=True)'")