My use case is the following: I am creating a dimensionality reducing AutoEncoder with Tensorflow. I have implemented three custom layers and with that a model
class ConvLayer(Layer):
    """Conv1D -> BatchNormalization -> Activation bundled as one reusable layer.

    Args:
        filter: number of convolution filters (output channels).
        kernel: kernel size of the 1-D convolution.
        act: activation name passed to keras Activation (e.g. 'selu').
    """

    def __init__(self, filter, kernel, act, **kwargs):
        # Bug fix: the original called Layer.__init__ twice (once bare, once
        # with **kwargs), re-initializing base-layer state. One call forwarding
        # **kwargs (name, dtype, ...) is the correct form.
        super().__init__(**kwargs)
        self.filter = filter
        self.kernel = kernel
        self.act = act

    def build(self, input_shape):
        # Sub-layers are created lazily here; input_shape is unused because
        # Conv1D infers its input channel count on first call.
        self.conv = Conv1D(self.filter, self.kernel, padding='same')
        self.norm = BatchNormalization()
        self.acti = Activation(self.act)

    def get_config(self):
        # Serialize the constructor arguments so the layer can be rebuilt by
        # tf.keras.models.load_model(..., custom_objects={...}).
        config = super().get_config()
        config.update({
            "filter": self.filter,
            "kernel": self.kernel,
            "act": self.act
        })
        return config

    def call(self, inputs):
        x = self.conv(inputs)
        x = self.norm(x)
        return self.acti(x)
class _Conv1DTranspose(Layer):
    """Emulate Conv1DTranspose via Conv2DTranspose on a singleton width axis.

    The input (batch, steps, channels) is expanded to (batch, steps, 1,
    channels), run through a (kernel, 1) Conv2DTranspose, and squeezed back.

    Args:
        filter: number of transposed-convolution filters.
        kernel: kernel size along the time axis.
    """

    def __init__(self, filter, kernel, **kwargs):
        # Bug fix: the original invoked Layer.__init__ twice (bare, then with
        # **kwargs). A single forwarding call is sufficient and correct.
        super().__init__(**kwargs)
        self.filter = filter
        self.kernel = kernel

    def build(self, input_shape):
        # Lambda wrappers keep the expand/squeeze steps serializable as layers.
        self.first = Lambda(lambda x: K.expand_dims(x, axis=2))
        self.conv = Conv2DTranspose(self.filter, (self.kernel, 1), padding='same')
        self.second = Lambda(lambda x: K.squeeze(x, axis=2))

    def get_config(self):
        # Expose constructor args for model (de)serialization.
        config = super().get_config()
        config.update({
            "filter": self.filter,
            "kernel": self.kernel
        })
        return config

    def call(self, inputs):
        x = self.first(inputs)
        x = self.conv(x)
        return self.second(x)
class DeconvLayer(Layer):
    """_Conv1DTranspose -> BatchNormalization -> Activation as one layer.

    Mirror of ConvLayer for the decoder side of the autoencoder.

    Args:
        filter: number of transposed-convolution filters.
        kernel: kernel size along the time axis.
        act: activation name passed to keras Activation (e.g. 'selu').
    """

    def __init__(self, filter, kernel, act, **kwargs):
        # Bug fix: the original called Layer.__init__ twice; a single call
        # forwarding **kwargs replaces both.
        super().__init__(**kwargs)
        self.filter = filter
        self.kernel = kernel
        self.act = act

    def build(self, input_shape):
        # Sub-layers built lazily; shapes are inferred on first call.
        self.conv = _Conv1DTranspose(self.filter, self.kernel)
        self.norm = BatchNormalization()
        self.acti = Activation(self.act)

    def get_config(self):
        # Serialize constructor args so load_model can restore the layer.
        config = super().get_config()
        config.update({
            "filter": self.filter,
            "kernel": self.kernel,
            "act": self.act
        })
        return config

    def call(self, inputs):
        x = self.conv(inputs)
        x = self.norm(x)
        return self.acti(x)
def create_model(latent_dim):
    """Build the encoder/decoder pair of the dimensionality-reducing autoencoder.

    The encoder compresses a 1-D sequence (total pooling factor 5*2*2 = 20)
    down to a `latent_dim`-sized vector; the decoder mirrors it with
    upsampling and transposed convolutions, ending in a sigmoid output.

    Args:
        latent_dim: size of the bottleneck (latent) vector.

    Returns:
        (encoder, decoder): two uncompiled keras Sequential models.
    """
    encoder = Sequential([
        ConvLayer(128, 2, 'selu'),
        ConvLayer(128, 2, 'selu'),
        ConvLayer(128, 2, 'selu'),
        ConvLayer(128, 2, 'selu'),
        MaxPooling1D(5),
        ConvLayer(64, 2, 'selu'),
        ConvLayer(64, 2, 'selu'),
        ConvLayer(64, 2, 'selu'),
        ConvLayer(64, 2, 'selu'),
        MaxPooling1D(2),
        ConvLayer(32, 2, 'selu'),
        ConvLayer(32, 2, 'selu'),
        ConvLayer(32, 2, 'selu'),
        ConvLayer(32, 2, 'selu'),
        MaxPooling1D(2),
        Flatten(),
        Dense(latent_dim, activation='selu'),
    ], name='Encoder')
    decoder = Sequential([
        Dense(latent_dim * 32, activation='selu'),
        # Bug fix / generalization: the original Reshape((50, 32)) only matched
        # Dense(latent_dim * 32) when latent_dim == 50 and raised a shape error
        # otherwise. Reshape((latent_dim, 32)) is identical for latent_dim == 50
        # and correct for any other bottleneck size.
        Reshape((latent_dim, 32)),
        UpSampling1D(2),
        DeconvLayer(32, 2, 'selu'),
        DeconvLayer(32, 2, 'selu'),
        DeconvLayer(32, 2, 'selu'),
        DeconvLayer(32, 2, 'selu'),
        UpSampling1D(2),
        DeconvLayer(64, 2, 'selu'),
        DeconvLayer(64, 2, 'selu'),
        DeconvLayer(64, 2, 'selu'),
        DeconvLayer(64, 2, 'selu'),
        UpSampling1D(5),
        DeconvLayer(128, 2, 'selu'),
        DeconvLayer(128, 2, 'selu'),
        DeconvLayer(128, 2, 'selu'),
        DeconvLayer(128, 2, 'selu'),
        # Final layer maps back to a single channel in [0, 1].
        DeconvLayer(1, 2, 'sigmoid'),
    ], name='Decoder')
    return encoder, decoder
I am training and saving the encoder part in a separate file, autoencoder.py, with encoder.save("encoder_dim_50.h5")
Now in my main.py I want to load in my model and use it to reduce some dimensions.
Here starts my issue: I am importing the custom layers with `from autoencoder import _Conv1DTranspose, ConvLayer, DeconvLayer`,
and while importing, it starts to run the whole training sequence again?!
The code never even reaches the line that loads the model:
self.Encoder = tf.keras.models.load_model(model_path, custom_objects={'_Conv1DTranspose': _Conv1DTranspose,
'ConvLayer' : ConvLayer,
'DeconvLayer' : DeconvLayer})
Am I missing some glaring issue here, or should I implement my custom layers in the main.py as well?
Thank you for your time
CodePudding user response:
To stop your code from automatically running on import, restructure it into the following format; this prevents the code from auto-running.
def __main__():
#do whatever you want in this function as this will run if you run this file directly
print('hello world')
if __name__ == "__main__"
__main__()
This way, your code should never run on its own and you can use your function the way you want.
CodePudding user response:
Do you have TensorFlow 1 or 2? I think it has to do with running in eager mode.
By default, TensorFlow 1 builds a graph first and may therefore appear to run the code twice on startup.