TensorFlow Shape Error After Model is Already Trained


I am training a TensorFlow RNN model with LSTM layers to determine whether sound is coming more from the right or the left in a stereo audio signal. Training goes smoothly, but once it finishes I get the InvalidArgumentError shown below. Does anyone know what could be causing this? I have tried fixing it using the solution to a similar question found here, but to no avail.

I do not understand why it is expecting a tensor of shape [32,2]. Did I define that somewhere I am unaware of?

Here is my code:

# -*- coding: utf-8 -*-
"""
Created on Tue Jan 18 15:51:56 2022

@author: andre
"""

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from datetime import datetime
from sklearn import metrics
from scipy.io import wavfile
import os
import glob


# Load in Right Side .WAV Data.
X1 = []
count1 = 0
database_path = "C:\\Users\\andre\\OneDrive\\Documents\\ESI2022\\MLDatabases\\Right\\"
for filename in glob.glob(os.path.join(database_path, '*.wav')):
    X1.append(wavfile.read(filename)[1])
    count1 = count1 + 1

# Load in Left side .WAV Data.
X2 = [] 
count2 = 0
database_path2 = "C:\\Users\\andre\\OneDrive\\Documents\\ESI2022\\MLDatabases\\Right\\"
for filename2 in glob.glob(os.path.join(database_path2, '*.wav')):
    X2.append(wavfile.read(filename2)[1])
    count2 = count2 + 1

# Get the smallest audio file length (this will be the sample size input to the model)
sample_size = len(X1[0])
for data in X1:
    if len(data) < sample_size:
        sample_size = len(data)

# Make audio data into equal size chunks
X1e = []
for i in X1:
    num_chunks = len(i)//sample_size
    for j in range(num_chunks):
        X1e.append(i[(j+1)*sample_size-sample_size:(j+1)*sample_size])
X1 = X1e
        
X2e = []
for i in X2:
    num_chunks = len(i)//sample_size
    for j in range(num_chunks):
        X2e.append(i[(j+1)*sample_size-sample_size:(j+1)*sample_size])
X2 = X2e

del X1e
del X2e   

# Create output labels with the same length as the input data.
Y1 = np.ones([len(X1)], dtype='float32').tolist()
Y2 = np.zeros([len(X2)], dtype='float32').tolist()


# Concatenate Left and Right .WAV data and output data as numpy arrays.
X1.extend(X2)
X = np.asarray(X1)
Y = np.asarray(Y1 + Y2).astype(np.int16)

Xnew = X[0:4900]
Ynew = Y[0:4900]

# Split data into training and test sets.
X_train,X_test,Y_train,Y_test=train_test_split(Xnew,Ynew,test_size=0.204,random_state=0,shuffle=True)



'''  
print(X[1])    
time = np.linspace(0.,33792, 33792)
plt.plot(time, X[1][:,1], label="Left channel")
plt.plot(time, X[1][:,0], label="Right channel")
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.show()
'''

# Create the Model
model = Sequential()

# Add stacked LSTM layers; batch_input_shape fixes the batch size at 100.
model.add(layers.LSTM(2,batch_input_shape=(100,sample_size,2),return_sequences=True))
model.add(layers.LSTM(2,return_sequences=False))

# Compile Model
#history = model.compile(loss='mean_absolute_error', metrics=['accuracy'],optimizer='adam',output='sparse_categorical_crossentropy')
optimizer = Adam(learning_rate=2*1e-4)

history = model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="adam",
    metrics=["accuracy"],
)
model.summary()

# Define Training Parameters
num_epochs = 5
num_batch_size = 100

# Save the best model to file (verbose=1 gives more information).
checkpointer = ModelCheckpoint(filepath="SavedModels/checkpointModel.hdf5", verbose=1,save_best_only=True)

# Start the timer
start = datetime.now()

# Train the model
model.fit(X_train,Y_train,batch_size=num_batch_size, epochs=num_epochs, validation_data=(X_test,Y_test), callbacks=[checkpointer],verbose=1)

# Get and Print Model Validation Accuracy
test_accuracy=model.evaluate(X_test,Y_test,verbose=0)
print(test_accuracy[1])

And here is the full error message:

runfile('C:/Users/andre/OneDrive/Documents/ESI2022/PythonScripts/BeltML/testML.py', wdir='C:/Users/andre/OneDrive/Documents/ESI2022/PythonScripts/BeltML')
Model: "sequential_14"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 lstm_28 (LSTM)              (100, 3072, 2)            40        
                                                                 
 lstm_29 (LSTM)              (100, 2)                  40        
                                                                 
=================================================================
Total params: 80
Trainable params: 80
Non-trainable params: 0
_________________________________________________________________
Epoch 1/5
39/39 [==============================] - ETA: 0s - loss: 0.7387 - accuracy: 0.4936  
Epoch 00001: val_loss improved from inf to 0.71954, saving model to SavedModels\checkpointModel.hdf5
39/39 [==============================] - 60s 1s/step - loss: 0.7387 - accuracy: 0.4936 - val_loss: 0.7195 - val_accuracy: 0.5100
Epoch 2/5
39/39 [==============================] - ETA: 0s - loss: 0.7217 - accuracy: 0.4995 
Epoch 00002: val_loss improved from 0.71954 to 0.70838, saving model to SavedModels\checkpointModel.hdf5
39/39 [==============================] - 57s 1s/step - loss: 0.7217 - accuracy: 0.4995 - val_loss: 0.7084 - val_accuracy: 0.5090
Epoch 3/5
39/39 [==============================] - ETA: 0s - loss: 0.7105 - accuracy: 0.4990 
Epoch 00003: val_loss improved from 0.70838 to 0.70385, saving model to SavedModels\checkpointModel.hdf5
39/39 [==============================] - 57s 1s/step - loss: 0.7105 - accuracy: 0.4990 - val_loss: 0.7038 - val_accuracy: 0.5070
Epoch 4/5
39/39 [==============================] - ETA: 0s - loss: 0.7043 - accuracy: 0.4995 
Epoch 00004: val_loss improved from 0.70385 to 0.70144, saving model to SavedModels\checkpointModel.hdf5
39/39 [==============================] - 57s 1s/step - loss: 0.7043 - accuracy: 0.4995 - val_loss: 0.7014 - val_accuracy: 0.5070
Epoch 5/5
39/39 [==============================] - ETA: 0s - loss: 0.7010 - accuracy: 0.5018 
Epoch 00005: val_loss improved from 0.70144 to 0.69969, saving model to SavedModels\checkpointModel.hdf5
39/39 [==============================] - 57s 1s/step - loss: 0.7010 - accuracy: 0.5018 - val_loss: 0.6997 - val_accuracy: 0.5130
Traceback (most recent call last):

  File "C:\Users\andre\OneDrive\Documents\ESI2022\PythonScripts\BeltML\testML.py", line 124, in <module>
    test_accuracy=model.evaluate(X_test,Y_test,verbose=0)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None

  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\execute.py", line 58, in quick_execute
    tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,

InvalidArgumentError:    Specified a list with shape [100,2] from a tensor with shape [32,2]
     [[{{node TensorArrayUnstack/TensorListFromTensor}}]]
     [[sequential_14/lstm_28/PartitionedCall]] [Op:__inference_test_function_121112]

Function call stack:
test_function -> test_function -> test_function

CodePudding user response:

You get this error because you hard-coded the batch size in the first LSTM layer: batch_input_shape=(100, sample_size, 2) tells the model that every batch will contain exactly 100 samples. Your training and validation sets happen to satisfy this (3900 and 1000 samples), but model.evaluate uses Keras's default batch size of 32 when you do not pass one, and that is where the tensor of shape [32,2] in the error comes from. I would recommend removing the batch size from the first layer and only setting it in model.fit. This way your model will also be able to handle a smaller remainder batch. Here is an example:

# (Uses the same imports as your script.)
# Dummy data standing in for the audio chunks: (samples, timesteps, channels).
X_train = tf.random.normal((2632, 5, 2))
Y_train = tf.random.uniform((2632, 1), maxval=2, dtype=tf.int32)

model = Sequential()

# Note: input_shape instead of batch_input_shape -- the batch size stays flexible.
model.add(layers.LSTM(2, input_shape=(5, 2), return_sequences=True))
model.add(layers.LSTM(2, return_sequences=False))

optimizer = Adam(learning_rate=2*1e-4)

model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=optimizer,
    metrics=["accuracy"],
)
model.summary()

num_epochs = 5
num_batch_size = 100

checkpointer = ModelCheckpoint(filepath="SavedModels/checkpointModel.hdf5", verbose=1, save_best_only=True)

start = datetime.now()

# The batch size is only set here, so Keras can handle a smaller final batch
# (2632 samples with batch_size=100 leaves a last batch of 32).
history = model.fit(X_train, Y_train, batch_size=num_batch_size, epochs=num_epochs, callbacks=[checkpointer], verbose=1)
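
Alternatively, if you keep batch_input_shape=(100, sample_size, 2) as in your original model, every batch fed to the model has to contain exactly 100 samples, including at evaluation time. A minimal sketch of that route, assuming the set sizes stay multiples of 100 as in your current split (3900 train / 1000 test):

# Keep the hard-coded batch size, but then pass the same batch size to every
# call that feeds data to the model; the default of 32 in evaluate() is what
# produced the [32,2] tensor in your error.
test_accuracy = model.evaluate(X_test, Y_test, batch_size=100, verbose=0)
print(test_accuracy[1])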

Also, the post you linked to should actually help you.
