I was playing with some Keras samples, defining models through subclassing, but I can't get them to work:
from keras import layers, Model
from keras.datasets import mnist
from keras.callbacks import TensorBoard
import numpy as np
import datetime
import os
class Encoder(Model):
    def __init__(self, name: str = "encoder"):
        super(Encoder, self).__init__(name=name)
        self._conv16 = layers.Conv2D(16, (3, 3), activation='relu', padding='same', name="conv16")
        self._conv8 = layers.Conv2D(8, (3, 3), activation='relu', padding='same', name="conv8")
        self._max_pool = layers.MaxPooling2D((2, 2), padding='same', name="max_pool")

    def call(self, inputs: np.ndarray):
        # layers created inline, fresh on every call
        x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(inputs)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        encoded = layers.MaxPooling2D((2, 2), padding='same')(x)
        # the variant using the layers defined in __init__, which fails for me:
        # x = self._conv16(inputs)
        # x = self._max_pool(x)
        # x = self._conv8(x)
        # x = self._max_pool(x)
        # x = self._conv8(x)
        # encoded = self._max_pool(x)
        return encoded
class Decoder(Model):
    def __init__(self, name: str = "decoder"):
        super(Decoder, self).__init__(name=name)
        self._conv16 = layers.Conv2D(16, (3, 3), activation='relu', padding='same', name="conv16")
        self._conv8 = layers.Conv2D(8, (3, 3), activation='relu', padding='same', name="conv8")
        self._conv1 = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same', name="conv1")
        self._up_sampling = layers.UpSampling2D((2, 2), name="up_samp")

    def call(self, inputs: np.ndarray):
        # layers created inline, fresh on every call
        x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(inputs)
        x = layers.UpSampling2D((2, 2))(x)
        x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        x = layers.UpSampling2D((2, 2))(x)
        x = layers.Conv2D(16, (3, 3), activation='relu')(x)  # no 'same' padding: 16x16 -> 14x14
        x = layers.UpSampling2D((2, 2))(x)
        decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
        # the variant using the layers defined in __init__, which fails for me:
        # x = self._conv8(inputs)
        # x = self._up_sampling(x)
        # x = self._conv8(x)
        # x = self._up_sampling(x)
        # x = self._conv16(x)
        # x = self._up_sampling(x)
        # decoded = self._conv1(x)
        return decoded
class Autoencoder(Model):
    def __init__(self, name: str = "autoencoder"):
        super(Autoencoder, self).__init__(name=name)
        self.encoder = Encoder()
        self.decoder = Decoder()

    def call(self, inputs: np.ndarray):
        encoded = self.encoder(inputs)
        reconstructed = self.decoder(encoded)
        return reconstructed
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
print(x_train.shape)
print(x_test.shape)
autoencoder = Autoencoder()
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test))
First of all, I can't get the commented code working. Whenever I switch to it, I get errors such as "ValueError: Input 0 of layer "conv8" is incompatible with the layer: expected axis -1 of input shape to have value 1, but received input with shape (60000, 56, 56, 8)". I can't spot a difference between the two model definitions.
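As far as I can tell, a message like that appears when a layer that has already built its weights for one channel count is called again on an input with a different channel count. Here is a minimal sketch of that behaviour (the standalone conv8 layer is just for illustration); I'm not sure whether this is really what happens in my commented code:

import numpy as np
from keras import layers

conv8 = layers.Conv2D(8, (3, 3), activation='relu', padding='same', name="conv8")
# the first call builds the kernel for inputs with 1 channel
conv8(np.zeros((1, 28, 28, 1), dtype='float32'))
# calling it again with 8 input channels raises the same kind of ValueError:
# "expected axis -1 of input shape to have value 1, but received ..."
conv8(np.zeros((1, 56, 56, 8), dtype='float32'))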
Secondly, running the above code gives me:
ValueError: tf.function only supports singleton tf.Variables created on the first call. Make sure the tf.Variable is only created once or created outside tf.function. See …
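My understanding is that fit() wraps the training step in a tf.function, and that creating fresh layers (and therefore fresh tf.Variables) inside call() on every invocation is what it is complaining about. If I understand the retracing rules correctly, a sketch like this should hit the same restriction (apply_conv is just an illustrative name):

import tensorflow as tf
from keras import layers

@tf.function
def apply_conv(x):
    # a brand-new Conv2D, with brand-new weights, is created on every trace
    return layers.Conv2D(8, (3, 3), padding='same')(x)

apply_conv(tf.zeros((1, 28, 28, 1)))  # first trace: creating variables is allowed
apply_conv(tf.zeros((1, 56, 56, 1)))  # retrace: new tf.Variables -> the ValueError above

which looks like the same failure mode as my fit() call above.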