I have an operation like the following:
import tensorflow as tf

def custom_operation(input):
    kernel = tf.constant([3, 4, 5], tf.float32)
    frames = tf.signal.frame(input, 3, 1)
    return tf.reduce_sum(tf.abs(frames - tf.reshape(kernel, (1, 3))), axis=-1)

custom_operation(tf.constant([1, 2, 3, 4, 5, 6, 7], tf.float32))
# <tf.Tensor: shape=(5,), dtype=float32, numpy=array([6., 3., 0., 3., 6.], dtype=float32)>
I'd like to use this in a Keras custom layer, where input is the input of the layer and kernel is a tensor with trainable values instead of the hardcoded [3, 4, 5].
It doesn't seem too hard to tweak the Conv1D layer from Keras to call custom_operation instead of tf.nn.conv1d, but I don't know how to make kernel trainable.
CodePudding user response:
How about this:
import tensorflow as tf
from tensorflow.keras.layers import Layer


class CustomLayer(Layer):
    """Layer applying the custom operation with a trainable (1, 3) kernel."""

    def __init__(self, name="custom", **kwargs):
        super().__init__(name=name, **kwargs)

    def build(self, input_shape):
        # Trainable replacement for the hardcoded [3, 4, 5] kernel.
        self.w = self.add_weight(
            shape=(1, 3),
            initializer="random_normal",
            trainable=True)

    def call(self, x):
        # Same computation as custom_operation, but against the trainable weight.
        frames = tf.signal.frame(x, 3, 1)
        return tf.math.reduce_sum(tf.math.abs(frames - self.w), axis=-1)
Test the layer.
x = tf.constant([1, 2, 3, 4, 5, 6, 7], tf.float32)
CustomLayer()(x)
# <tf.Tensor: shape=(5,), dtype=float32,
#  numpy=array([ 6.0877113,  9.087711 , 12.087711 , 15.087711 , 18.087711 ], dtype=float32)>
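Because self.w is created with add_weight(..., trainable=True), Keras registers it in the layer's trainable_variables, so an optimizer updates it like any other weight. Here's a minimal sketch to confirm that, assuming some synthetic data (the x_train / y_train arrays below are made up purely for illustration):

import numpy as np
import tensorflow as tf

layer = CustomLayer()
model = tf.keras.Sequential([tf.keras.Input(shape=(7,)), layer])
model.compile(optimizer="adam", loss="mse")

# Synthetic data: each length-7 signal produces 5 outputs because of the framing.
x_train = np.random.rand(32, 7).astype("float32")
y_train = np.random.rand(32, 5).astype("float32")

before = layer.w.numpy().copy()
model.fit(x_train, y_train, epochs=2, verbose=0)
print(np.allclose(before, layer.w.numpy()))  # False: the kernel was updated by training

If you'd rather start from the question's [3, 4, 5] instead of a random kernel, add_weight also accepts a callable initializer, for example initializer=lambda shape, dtype: tf.constant([[3., 4., 5.]], dtype=dtype).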