I would like to integrate a custom loss function for my LSTM in Python. The code below shows my approach so far. How would I best implement the loss function shown in the images, and how would I handle the < 0 constraint?
Thanks for any help!
Code
# Importing the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import gridspec

ep=25 #Epochs
bs=32 #Batch-Size
vs=0.2 #Validation-Split
r=ep+1 #Range
# Importing the training set
dataset_train = pd.read_csv(r'C:\Users\Name\Desktop\Recurrent Neural Networks\JPM_train.csv',delimiter =';')
training_set = dataset_train.iloc[:, 1:2].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Creating a data structure with 60 timesteps and 1 output
X_train = []
y_train = []
X_val=[]
y_val=[]
for i in range(60, 1516):
    X_train.append(training_set_scaled[i-60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train, X_val, y_val = np.array(X_train), np.array(y_train), np.array(X_val), np.array(y_val)
# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
def custom_loss(y_true, y_pred):
    if(#HERE):
        loss=(predicted_stock_price-real_stock_price)^2
    else:
        loss=0
    return loss
# Initialising the RNN
model = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
model.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50))
model.add(Dropout(0.2))
# Adding the output layer
model.add(Dense(units = 1))
# Compiling the RNN
model.compile(optimizer = 'adam', loss = custom_loss, metrics = ['accuracy'])
# Fitting the RNN to the Training set
history=model.fit(X_train, y_train, epochs = ep, batch_size = bs, validation_split=vs)
# Getting the real stock price of 2017
dataset_test = pd.read_csv(r'C:\Users\Name\Desktop\Recurrent Neural Networks\JPM_test.csv',delimiter =';')
real_stock_price = dataset_test.iloc[:, 1:2].values
dataset_total = pd.concat((dataset_train['Preis'], dataset_test['Preis']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, 80):
    X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = model.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
history_dict = history.history
print(history_dict.keys())
accuracy = history_dict['accuracy']
validation_accuracy = history_dict['val_accuracy']
loss = history_dict['loss']
validation_loss = history_dict['val_loss']
gs = gridspec.GridSpec(2, 2)
#plt.tight_layout()
#plt.subplots_adjust(hspace=1.0)
fig = plt.figure(figsize=(16,16))
# Visualising the results
ax = plt.subplot(gs[1, :]) # row 1, span all columns
plt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
Only the custom loss function:
def custom_loss(y_true, y_pred):
    if(#HERE):
        loss=(predicted_stock_price-real_stock_price)^2
    else:
        loss=0
    return loss
Pictures of the targeted loss function
Here is the link to the original text: https://www.researchgate.net/publication/342094242_Deep_Stock_Predictions
CodePudding user response:
You can use a loss function that compares the prediction at the current time step (t) with the previous real_stock_price (t-1): the squared error is only counted when the real price move and the predicted price move point in opposite directions, i.e. when their product is < 0; otherwise the loss is 0:
def custom_loss(y_true, y_pred):
    if (y_true[0] - y_true[1]) * (y_pred - y_true[1]) < 0:
        loss = (y_pred - y_true[0]) ** 2
    else:
        loss = 0
    return loss
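Note that a plain Python if/else on y_true and y_pred will not behave as intended once Keras builds the computation graph, because the tensors carry no concrete values at that point; the condition has to be written with tensor operations. Below is a minimal sketch using the Keras backend. It assumes y_true carries two columns per sample, [price at t, price at t-1], so that the previous real price is available inside the loss; this column layout and the name directional_loss are my own assumptions, not from the question.
from keras import backend as K
def directional_loss(y_true, y_pred):
    # Assumed layout: y_true[:, 0:1] is the real price at t, y_true[:, 1:2] the real price at t-1
    current_true = y_true[:, 0:1]
    previous_true = y_true[:, 1:2]
    real_move = current_true - previous_true     # actual price change
    predicted_move = y_pred - previous_true      # predicted price change
    squared_error = K.square(y_pred - current_true)
    # Penalize only where the two moves point in opposite directions (product < 0)
    wrong_direction = K.less(real_move * predicted_move, 0.0)
    return K.mean(K.switch(wrong_direction, squared_error, K.zeros_like(squared_error)))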
I think the derivatives in the backpropagation will not be affected by this shift in time.
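For the two-column y_true sketched above, the training targets would also need the previous scaled price (a hypothetical adjustment to the original loop, not code from the question):
for i in range(60, 1516):
    X_train.append(training_set_scaled[i-60:i, 0])
    y_train.append([training_set_scaled[i, 0], training_set_scaled[i - 1, 0]])
model.compile(optimizer = 'adam', loss = directional_loss)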