Hi there,
I tried super resolution based on Jeremy's code, but my generated pictures have some side effects (maybe not checkerboard artifacts, but something like that). I can't figure out how to eliminate this. It would be nice if you guys could help me.
I used a different training setup, but the architecture seems to be the same. Here is my code:
def conv_block(input, filters=64, size=(3, 3), strides=(1, 1), padding='same', act=True):
    """Conv2D -> BatchNormalization, with an optional trailing ReLU.

    When ``act`` is False the normalized pre-activation tensor is returned,
    which lets a residual block add the skip connection before activating.
    """
    conv = Conv2D(filters, kernel_size=size, strides=strides, padding=padding)(input)
    normalized = BatchNormalization()(conv)
    return Activation('relu')(normalized) if act else normalized
def res_block(input, filters=64, size=(3, 3)):
    """Residual block: two conv blocks plus an identity skip connection.

    The second conv block has no activation so the addition happens on the
    pre-activation tensor, as in the original SRResNet layout.
    """
    shortcut = input
    out = conv_block(input, filters=filters, size=size)
    out = conv_block(out, filters=filters, size=size, act=False)
    return add([out, shortcut])
def up_block(x, filters=64, size=(3, 3)):
    """Double the spatial resolution, then apply a conv block.

    NOTE(review): upsample-then-convolve is the usual remedy for
    checkerboard artifacts caused by Conv2DTranspose, so this stage is
    probably not the source of the reported artifacts — confirm.
    """
    upsampled = UpSampling2D()(x)
    return conv_block(upsampled, filters=filters, size=size)
# Generator network: a 9x9 stem, four residual blocks, two 2x upsampling
# stages (4x total scale), and a 9x9 projection back to 3 RGB channels.
low_res_input = Input((None, None, 3))  # any spatial size, 3 channels (lr_shape + (3,))
x = conv_block(low_res_input, filters=64, size=(9, 9))
for _ in range(4):
    x = res_block(x, filters=64, size=(3, 3))
x = up_block(x, filters=64, size=(3, 3))
x = up_block(x, filters=64, size=(3, 3))
# NOTE(review): the final 'relu' clips the output at 0 and is unbounded above;
# depending on how targets are scaled this can distort colors — verify the
# output range matches the training targets.
predicted_output = Conv2D(3, kernel_size=(9, 9), strides=(1, 1), activation='relu', padding='same')(x)
I use this custom objective; I think this formulation is more understandable:
# Frozen VGG16 feature extractor used for the perceptual loss.
# VGG16's ImageNet weights expect mean-subtracted BGR input, hence the
# channel flip in the preprocessing Lambda.
vgg_input = Input(hr_shape + (3,))
imagenet_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)
preprocessed = Lambda(lambda t: (t - imagenet_mean)[:, :, :, ::-1])(vgg_input)
vgg = VGG16(include_top=False, input_tensor=preprocessed)
for layer in vgg.layers:
    layer.trainable = False
vgg_featurizer = Model(vgg_input, vgg.get_layer('block2_conv1').output)
def custom_objective(y_true, y_pred):
    """Perceptual loss: RMS distance between VGG feature maps.

    Both the target and the prediction are passed through the frozen
    ``vgg_featurizer``; the squared difference is averaged over every
    non-batch axis before the square root, and a leading axis is added
    to the per-sample result.
    """
    feat_true = vgg_featurizer(y_true)
    feat_pred = vgg_featurizer(y_pred)
    err = feat_true - feat_pred
    reduce_axes = list(range(1, K.ndim(err)))
    rms = K.sqrt(K.mean(err ** 2, reduce_axes))
    return K.expand_dims(rms, 0)
Here are some generated examples:
https://imgur.com/PNUXb3d
https://imgur.com/0PSILDl
Please help me — I've tried a lot of things unsuccessfully, and I'm getting a bit frustrated.