Custom Loss Function returning - InvalidArgumentError: The second input must be a scalar, but it has shape [64]


I'm trying to use a modified version of this custom loss function, and I'm getting the error below:

InvalidArgumentError:  The second input must be a scalar, but it has shape [64] [[{{node gradient_tape/custom_loss/cond_1/StatelessIf/gradient_tape/custom_loss/weighted_loss/Mul/_30}}]] [Op:__inference_train_function_147002]
    
    Function call stack:
    train_function

This is the code:

import time
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import Loss
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout, LSTM, BatchNormalization, Flatten
from tensorflow.compat.v1.keras.layers import CuDNNLSTM
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint

def custom_loss(y_true, y_pred):

  mse = tf.keras.losses.MeanSquaredError()
  penalty = 10

  # penalize the loss heavily if the actual and the prediction are on different sides of zero
  loss = tf.cond( tf.logical_or( 
                    (tf.logical_and(tf.greater(y_true, 0.0), tf.less(y_pred, 0.0))), 
                    (tf.logical_and(tf.less(y_true, 0.0), tf.greater(y_pred, 0.0))) 
                    ),
                   lambda: mse(y_true, y_pred) * penalty,
                   lambda: mse(y_true, y_pred) * penalty / 4)
  
  print("starting second condition")
  # add slightly more penalty if prediction overshoots actual in any direction
  loss = tf.cond( tf.logical_or( 
                    (tf.logical_and(tf.greater(y_true, 0.0), tf.greater(y_pred, y_true))), 
                    (tf.logical_and(tf.less(y_true, 0.0), tf.less(y_pred, y_true))) 
                    ),
                   lambda: loss * penalty / 5,
                   lambda: loss * penalty / 10)
  return loss

EPOCHS = 25
BATCH_SIZE = 64
MODEL_NAME = f"MODEL 01-{str(int(time.time())}"

model = Sequential()
model.add(LSTM(128, input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, input_shape=(train_x.shape[1:])))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Flatten())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(1))

opt = tf.keras.optimizers.Adam(learning_rate=1e-3, decay=1e-6)

metric = tf.keras.metrics.MeanSquaredError()
model.compile(loss=custom_loss, optimizer=opt, metrics=[metric])
val_metric = 'val_' + metric.name

tensorboard = TensorBoard(log_dir=f'logs/{MODEL_NAME}')
filepath = base_path + "cryptodata/models/RNN_Final-{epoch:02d}-{val_mean_squared_error:.3f}-" + str(int(time.time())) + ".hd5"
checkpoint = ModelCheckpoint(filepath=filepath, monitor=val_metric, verbose=0, mode='max')

train_x = np.random.randn(1588, 60, 34)  
train_y = np.random.rand(1588,)
val_x = np.random.randn(85, 60, 34)
val_y = np.random.randn(85,)

history = model.fit(train_x, train_y, 
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS, 
                    validation_data=(val_x, val_y),
                    callbacks=[checkpoint, tensorboard])

I've tried casting y_true and y_pred in the custom loss function like so: y_pred = tf.convert_to_tensor(y_pred); y_true = tf.cast(y_true, y_pred.dtype), but that didn't work. Adding the print call also showed that the function was called twice successfully but failed after that.

I don't get the error when I use built-in loss functions.

CodePudding user response:

The problem is that tf.cond expects its predicate to be a single scalar boolean, but your condition (the tf.logical_or(...)) is a tensor holding one boolean per batch element, which is exactly the shape [64] the error message complains about. If you replace tf.cond with the element-wise tf.where, your code will work.
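
To see the difference in isolation, here is a minimal sketch (toy values, not from the original post) contrasting the two ops on a small batch:

import tensorflow as tf

y_true = tf.constant([1.0, -2.0, 3.0])
y_pred = tf.constant([-0.5, -1.0, 2.0])

# One boolean per batch element -- not a scalar.
cond = tf.logical_or(
    tf.logical_and(tf.greater(y_true, 0.0), tf.less(y_pred, 0.0)),
    tf.logical_and(tf.less(y_true, 0.0), tf.greater(y_pred, 0.0)))
print(cond)  # tf.Tensor([ True False False], shape=(3,), dtype=bool)

# tf.where selects element-wise, so a vector condition is fine.
print(tf.where(cond, y_true * 10.0, y_true))  # [10. -2.  3.]

# tf.cond, by contrast, requires a scalar predicate, so passing the
# vector condition above is what blows up inside your training step.
# tf.cond(cond, lambda: y_true, lambda: y_pred)  # would raise an error

With tf.where, the full corrected script looks like this: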

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, BatchNormalization, Flatten

def custom_loss(y_true, y_pred):

    mse = tf.keras.losses.MeanSquaredError()

    penalty = 10

    # penalize the loss heavily if the actual and the prediction are on different sides of zero
    loss = tf.where(
        condition=tf.logical_or((tf.logical_and(tf.greater(y_true, 0.0), tf.less(y_pred, 0.0))), (tf.logical_and(tf.less(y_true, 0.0), tf.greater(y_pred, 0.0)))),
        x=mse(y_true, y_pred) * penalty,
        y=mse(y_true, y_pred) * penalty / 4
    )

    # add slightly more penalty if prediction overshoots actual in any direction
    loss = tf.where(
        condition=tf.logical_or((tf.logical_and(tf.greater(y_true, 0.0), tf.greater(y_pred, y_true))), (tf.logical_and(tf.less(y_true, 0.0), tf.less(y_pred, y_true)))),
        x=loss * penalty / 5,
        y=loss * penalty / 10
    )

    return loss

train_x = np.random.randn(1588, 60, 34)
train_y = np.random.rand(1588, )
val_x = np.random.randn(85, 60, 34)
val_y = np.random.randn(85, )

model = Sequential()
model.add(LSTM(128, input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(LSTM(128, input_shape=(train_x.shape[1:])))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Flatten())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(1))

opt = tf.keras.optimizers.Adam(learning_rate=1e-3, decay=1e-6)

model.compile(loss=custom_loss, optimizer=opt, metrics=['mse'])

model.fit(train_x, train_y, batch_size=128, epochs=3, validation_data=(val_x, val_y))
# Epoch 1/3
# 13/13 [==============================] - 8s 321ms/step - loss: 11.3129 - mse: 1.6341 - val_loss: 6.9313 - val_mse: 1.1116
# Epoch 2/3
# 13/13 [==============================] - 3s 234ms/step - loss: 7.3409 - mse: 1.0789 - val_loss: 7.2055 - val_mse: 1.1238
# Epoch 3/3
# 13/13 [==============================] - 3s 231ms/step - loss: 5.3962 - mse: 0.8513 - val_loss: 7.4492 - val_mse: 1.1512

model.predict(train_x)
# array([[0.25150445],
#        [0.2647993 ],
#        [0.2405027 ],
#        ...,
#        [0.31251353],
#        [0.29376918],
#        [0.21620636]], dtype=float32)
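
As a quick sanity check (toy values, not part of the original answer), you can call custom_loss directly and confirm that a prediction on the wrong side of zero is penalized far more heavily:

y_true = tf.constant([[1.0], [1.0]])
same_side = tf.constant([[0.9], [0.9]])     # same sign as y_true
wrong_side = tf.constant([[-0.9], [-0.9]])  # opposite sign

print(float(tf.reduce_mean(custom_loss(y_true, same_side))))
# 0.025  (mse of 0.01, scaled by 10/4 and then 10/10)
print(float(tf.reduce_mean(custom_loss(y_true, wrong_side))))
# 36.1   (mse of 3.61, scaled by 10 and then 10/10)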