second derivative is None in tensorflow automatic differentiation

In the code below, I'm computing the second derivative (y_xx_lin) of a linear network, modelLinear, which has linear activation functions throughout, and the second derivative (y_xx_tanh) of a tanh network, modelTanh, which uses tanh activations in every layer except the last one, which is linear.

My question is: y_xx_lin comes back as None, while y_xx_tanh contains actual values. Following this Stack Overflow question, I'm guessing that y_xx_lin is None because the second derivative of a linear function is zero for all input values, so the first derivative is a constant that is in some sense not linked to the input. Is this the case?

Even if this is so, I would like TensorFlow to calculate the derivative and return it, instead of returning None. Is this possible?
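
A minimal sketch of what I believe is the same behaviour, using a plain scalar function instead of a Keras model (this repro is my own simplification; the tape calls are the same as in the full code below):

import tensorflow as tf

x = tf.constant([[1.0]])
with tf.GradientTape() as tape_outer:
    tape_outer.watch(x)
    with tf.GradientTape() as tape_inner:
        tape_inner.watch(x)
        y = 3.0 * x                      # purely linear in x
    y_x = tape_inner.gradient(y, x)      # constant tensor [[3.0]]

# None: y_x does not depend on x, so the outer tape has no path from y_x back to x
y_xx = tape_outer.gradient(y_x, x)
print(y_x, y_xx)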

# Second derivative of a linear network appears to be None

import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import MeanSquaredError
import tensorflow.keras.backend as K
import numpy as np
import matplotlib.pyplot as plt

def build_network(activation='linear'):
    input_layer  = Input(1)
    inner_layer  = Dense(6, activation=activation)(input_layer)
    inner_layer1 = Dense(6, activation=activation)(inner_layer)
    inner_layer2 = Dense(6, activation=activation)(inner_layer1)
    output_layer = Dense(1, activation='linear')(inner_layer2)
    model = Model(input_layer, output_layer)
    return model

def get_first_second_derivative(X_train,y_train,model):
    with tf.GradientTape(persistent=True) as tape_second:
        tape_second.watch(X_train)
        
        with tf.GradientTape(persistent=True) as tape_first:
            # Watch the tensor with respect to which we want to compute gradients
            tape_first.watch(X_train)
    
            # get the output of the NN
            output = model(X_train)
    
        y_x  = tape_first.gradient(output,X_train)

    y_xx = tape_second.gradient(y_x,X_train)
    
    return y_x,y_xx

modelLinear = build_network(activation='linear')
modelLinear.compile(optimizer=Adam(learning_rate=0.1),loss='mse')

modelTanh = build_network(activation='tanh')
modelTanh.compile(optimizer=Adam(learning_rate=0.1),loss='mse')

X_train = np.linspace(-1,1,10).reshape((-1,1))
y_train = X_train*X_train

X_train = tf.convert_to_tensor(X_train,dtype=tf.float64)
y_train = tf.convert_to_tensor(y_train,dtype=tf.float64)

y_x_lin,y_xx_lin   = get_first_second_derivative(X_train,y_train,modelLinear)
y_x_tanh,y_xx_tanh = get_first_second_derivative(X_train,y_train,modelTanh)

print('Type of y_xx_lin = ',type(y_xx_lin))

CodePudding user response:

It works if you use lambda x: x ** 1 as the activation instead of 'linear', like this:

...

id_func = lambda x: x ** 1

def build_network(activation=id_func):
    input_layer  = Input(1)
    inner_layer  = Dense(6, activation=activation)(input_layer)
    inner_layer1 = Dense(6, activation=activation)(inner_layer)
    inner_layer2 = Dense(6, activation=activation)(inner_layer1)
    output_layer = Dense(1, activation=id_func)(inner_layer2)
    model = Model(input_layer, output_layer)
    return model

...

modelLinear = build_network(activation=id_func)

...

The reason why this works, and why your code fails, is in the answer you already cited: with 'linear', the first derivative is a constant that is not connected to the input, so the second gradient is None. With this slightly odd implementation of the identity function, the backward pass of x ** 1 still references x, so TensorFlow backpropagation works correctly through both tapes.

Tested with TensorFlow version 2.9.2.
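
As a possible alternative (my own addition, not part of the original answer): tf.GradientTape.gradient takes an unconnected_gradients argument, so you can ask for a zero tensor instead of None whenever the target is not connected to the source. A sketch of the question's helper adapted this way:

import tensorflow as tf

def get_first_second_derivative(X_train, y_train, model):
    with tf.GradientTape() as tape_second:
        tape_second.watch(X_train)

        with tf.GradientTape() as tape_first:
            tape_first.watch(X_train)
            output = model(X_train)

        y_x = tape_first.gradient(output, X_train)

    # Returns zeros shaped like X_train instead of None when y_x
    # does not depend on X_train (e.g. for the purely linear model)
    y_xx = tape_second.gradient(y_x, X_train,
                                unconnected_gradients=tf.UnconnectedGradients.ZERO)

    return y_x, y_xx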

CodePudding user response:

If you want to calculate the derivative of the input as a series (I see your question and intention), you can also do it with a model layer, as in the code example below; you may need to adjust it a bit for convenience.

Sample: << How fast it grows is how fast they turn out >>

import tensorflow as tf

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class / Definition
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MyLSTMLayer( tf.keras.layers.LSTM ):
    def __init__(self, units, return_sequences, return_state):
        super(MyLSTMLayer, self).__init__( units, return_sequences=True, return_state=False )
        self.num_units = units

    def build(self, input_shape):
        # This kernel is created for illustration only; call() below does not use it
        self.kernel = self.add_weight("kernel",
                                      shape=[int(input_shape[-1]), self.num_units])

    def call(self, inputs):
        derivative_number = tf.constant([ 2.0 ])

        ZeroPadding1D_front = tf.keras.layers.ZeroPadding1D(padding=( 1, 0 ))
        ZeroPadding1D_back = tf.keras.layers.ZeroPadding1D(padding=( 0, 1 ))

        # Finite difference: shift the series against itself and divide by a constant
        reshape = tf.reshape( inputs, shape=(1, 1024, 1), name="Reshape" )
        subtract = tf.math.subtract( ZeroPadding1D_front( reshape ), ZeroPadding1D_back( reshape ), name="Subtract" )
        divide = tf.math.divide_no_nan( subtract, derivative_number, name="Divide" )

        # X = [ 1, 2, 3, 4, 5 ], Y = 2
        # front-padded minus back-padded = [ 0-1, 1-2, 2-3, 3-4, 4-5, 5-0 ]
        # result / Y = [ -0.5, -0.5, -0.5, -0.5, -0.5, 2.5 ]

        return divide
        
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
start = 3
limit = 3075
delta = 3
sample = tf.range( start, limit, delta )
sample = tf.cast( sample, dtype=tf.float32 )
sample = tf.constant( sample, shape=( 1, 1, 1024 ), dtype=tf.float32 )
layer = MyLSTMLayer( 1024, True, False )

model = tf.keras.Sequential([
    tf.keras.Input(shape=(1, 1024)),
    layer,
])

model.summary()

print( "Sample: " )
print( sample )
print( "Predict: " )
print( model.predict(sample) )

Output:

Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #
=================================================================
 my_lstm_layer (MyLSTMLayer)  (1, 1025, 1)             1048576

=================================================================
Total params: 1,048,576
Trainable params: 1,048,576
Non-trainable params: 0
_________________________________________________________________
Sample:
tf.Tensor([[[3.000e+00 6.000e+00 9.000e+00 ... 3.066e+03 3.069e+03 3.072e+03]]], shape=(1, 1, 1024), dtype=float32)
Predict:
1/1 [==============================] - 0s 69ms/step
[[[-1.500e+00]
  [-1.500e+00]
  [-1.500e+00]
  ...
  [-1.500e+00]
  [-1.500e+00]
  [ 1.536e+03]]]
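
A note on the output (my reading, not part of the original answer): the input is an arithmetic sequence with step 3, so every interior entry of the result is (x_{i-1} - x_i) / 2 = -1.5; the first entry, (0 - x_1) / 2 = -1.5, happens to coincide with that, and the last entry, x_n / 2 = 3072 / 2 = 1536, is a boundary artifact of the zero padding.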