Segmentation fault with PyTorch .backward() in Progressive Growing of GANs

I am trying to implement Progressive Growing of GANs in PyTorch. As a toy dataset I am using MNIST instead of CelebA, which was used in the original paper.
During training, when I call the .backward() method on the discriminator loss, the process stops with a segmentation fault. Click here for the GitHub repo of the code.
System configuration:

  • 8 GB RAM
  • running on CPU
  • Python 3.6

The network is quite small, so I doubt the crash is caused by running out of memory.
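
In case it helps to localize the crash, the standard-library faulthandler module can print the Python traceback when the interpreter segfaults (this is a generic debugging sketch, not part of the repo):

    import faulthandler

    # Print the Python-level traceback to stderr when the process
    # receives SIGSEGV, so the line that triggers the crash is visible.
    faulthandler.enable()

    # ...training code runs as usual after this point.

Running the script with `python -X faulthandler` gives the same behaviour without any code changes.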

    def train(self, num_of_epochs=1):
        smoothing_on = False
        for epoch in range(num_of_epochs):
            for batch_no, (G_data, D_data) in enumerate(zip(self.G.data_loader, self.D.data_loader)):

                # Load the batch (pre-0.4 PyTorch, hence the Variable wrappers)
                G_data = Variable(G_data)
                D_data = Variable(D_data[0])

                # Compute the least-squares GAN losses
                if smoothing_on:
                    outputs = self.D(D_data, with_smoothing=True)
                    real_loss = torch.mean((outputs - 1) ** 2)
                    outputs = self.G(G_data, with_smoothing=True)
                    fake_loss = torch.mean((self.D(outputs) - 1) ** 2)
                else:
                    outputs = self.D(D_data)
                    real_loss = torch.mean((outputs - 1) ** 2)
                    outputs = self.G(G_data)
                    fake_loss = torch.mean((self.D(outputs) - 1) ** 2)

                # Backprop + optimize D (this backward() is where it crashes)
                d_loss = real_loss + fake_loss
                self.reset_grad()
                d_loss.backward()
                self.D.optimizer.step()

                # Train G so that D recognizes G(z) as real
                g_loss = fake_loss
                self.reset_grad()
                g_loss.backward()
                self.G.optimizer.step()

            # Fade in the newest layer a bit more each epoch
            if smoothing_on:
                self.G.smoothing_factor += 0.2
                self.D.smoothing_factor += 0.2
            # Grow the networks on a fixed schedule
            if epoch % 10 == 0 and epoch != 0:
                self.G.add_layer()
                self.D.add_layer()
                smoothing_on = False
            elif epoch % 5:
                self.G.add_smoothing_branch()
                self.D.add_smoothing_branch()
                smoothing_on = True
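
One thing I am unsure about, although I believe it normally raises a RuntimeError rather than a segfault: g_loss is the same tensor as fake_loss, and the graph behind it has already been freed by d_loss.backward(), so the second backward() call walks already-freed buffers. Below is a sketch of the variant I am considering, which recomputes the generator loss from a fresh forward pass (same self.G, self.D, G_data and reset_grad as in the code above):

    # Recompute the generator loss so that g_loss.backward() traverses
    # its own, still-intact graph instead of the one freed by d_loss.backward().
    outputs = self.G(G_data)
    g_loss = torch.mean((self.D(outputs) - 1) ** 2)
    self.reset_grad()
    g_loss.backward()
    self.G.optimizer.step()

Alternatively, calling d_loss.backward(retain_graph=True) should keep the shared graph alive for the second backward pass, at the cost of extra memory. Neither change makes the original segfault on d_loss.backward() go away, which is the part I cannot explain.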