Keras model not training. Accuracy always 0.5


My Keras CNN binary classification model always has a training and validation accuracy of 0.5. The first problem with my CNN is the low number of images (980 in total, with 180 in the test set), and I'm not able to get more images to build this CNN.

I would like to know what I can do with my model to change the validation accuracy. If the number of images is the issue, which model can I run to train on this set?

from google.colab import drive
drive.mount('/content/gdrive')

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_set = '/content/gdrive/MyDrive/Extra/ICs/ML/Martinez/train_set'
test_set = '/content/gdrive/MyDrive/Extra/ICs/ML/Martinez/test_set'

train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range = 50,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0,
    horizontal_flip=True,
    fill_mode='nearest')

test_datagen = ImageDataGenerator(rescale=1./255)


train_gen = train_datagen.flow_from_directory(
    train_set,
    target_size = (224,224),
    batch_size=100,
    class_mode='categorical')

test_gen = test_datagen.flow_from_directory(
    test_set,
    target_size=(224,224),
    batch_size=10,
    shuffle=False,
    class_mode='categorical')

model = tf.keras.Sequential([
    Conv2D(32, (3,3), activation='relu', input_shape=(224, 224, 3)),
    MaxPool2D(2,2),
    Conv2D(32, (3,3), activation='relu'),
    MaxPool2D(2,2),
    Conv2D(64, (3,3), activation='relu'),
    MaxPool2D(2,2),
    
    Flatten(),
    Dense(780, activation='relu'),
    Dense(1, activation='sigmoid')
])

print(len(train_gen))  # number of batches per epoch: 800 train images / batch_size 100 = 8

model.summary()

model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(),
    optimizer=tf.keras.optimizers.Adam(),
    metrics='accuracy'
)

model.fit(train_gen, steps_per_epoch=6, epochs=6, validation_data=test_gen, validation_steps=10)

CodePudding user response:

I think the number of images in the test set won't let you build any reliable classification model. Try lowering the number of images you use for training and adding more to the test set, but I'm not sure it will work.
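
If you do rearrange the split, it is worth keeping the class ratio equal in both sets, since with under 1,000 images an unlucky split can easily skew one class. Here is a minimal sketch of a stratified 80/20 split with scikit-learn; the folder names below are hypothetical placeholders, not the asker's actual paths:

import glob
from sklearn.model_selection import train_test_split

# Hypothetical class folders; substitute the real directory layout.
class_a = glob.glob('train_set/class_a/*.jpg')
class_b = glob.glob('train_set/class_b/*.jpg')
paths = class_a + class_b
labels = [0] * len(class_a) + [1] * len(class_b)

train_paths, test_paths, train_labels, test_labels = train_test_split(
    paths, labels,
    test_size=0.2,    # roughly 196 of 980 images held out for testing
    stratify=labels,  # preserve the class ratio in both splits
    random_state=42)  # reproducible split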

CodePudding user response:

Simply select the correct input format.

Sample: the expected sequence input is a series of label pairs, [0, 0] for one class and [1, 1] for the other.
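
Before the full sample, note what "correct input format" means for the question's code: flow_from_directory with class_mode='categorical' yields one-hot labels of shape (batch, 2), while Dense(1, activation='sigmoid') trained with BinaryCrossentropy expects labels of shape (batch, 1); that mismatch alone can pin accuracy at 0.5. A minimal sketch of the two consistent pairings, reusing train_datagen and train_set from the question (pick one, not both):

# Option A: binary labels matching the question's Dense(1, sigmoid) head.
train_gen = train_datagen.flow_from_directory(
    train_set,
    target_size=(224, 224),
    batch_size=100,
    class_mode='binary')   # labels: shape (batch,), values 0/1

# Option B: keep class_mode='categorical', but match the head to it:
#   Dense(2, activation='softmax') compiled with
#   tf.keras.losses.CategoricalCrossentropy()
#   (labels: shape (batch, 2), one-hot)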

import os
from os.path import exists

import tensorflow as tf
import tensorflow_io as tfio

import matplotlib.pyplot as plt

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)

list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append([0, 0])  # class 0, encoded as a repeated pair to match the (1, 2) label shape
    
for file in files_2.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append([1, 1])  # class 1, same repeated-pair encoding

checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"

if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
    
log_dir = checkpoint_dir

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((
    # Images: 10 samples, each a batch of one 32x32 RGBA image.
    tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),
    # Labels: 10 samples, each a repeated pair ([0, 0] or [1, 1]).
    tf.constant(list_label, shape=(10, 1, 2), dtype=tf.int64)
))

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Reshape((128, 225)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(192, activation='relu'),
    tf.keras.layers.Dense(2, activation='sigmoid'),
])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.SGD(
    learning_rate=0.001,
    momentum=0.0,
    nesterov=False,
    name='SGD',
)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""                               
lossfn = tf.keras.losses.BinaryCrossentropy(
    from_logits=False,
    label_smoothing=0.0,
    axis=-1,
    reduction=tf.keras.losses.Reduction.AUTO,
    name='binary_crossentropy'
)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Compile
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class CustomCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # Alternative criterion: stop on low training loss instead.
        # if logs['loss'] <= 0.2:
        #     self.model.stop_training = True
        if logs['accuracy'] >= 0.80:
            # Stop once training accuracy reaches 80%.
            self.model.stop_training = True

custom_callback = CustomCallback()

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Load Weights
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: "   checkpoint_path)
    input("Press Any Key!")

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(dataset, epochs=10000, callbacks=[custom_callback])  # no batch_size: the dataset already yields batched elements
model.save_weights(checkpoint_path)

plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    # Map the predicted class index (0 or 1) to its class name.
    class_names = ['Pikaploy', 'Candidt Kibt']
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(class_names[tf.math.argmax(score).numpy()]))
    
plt.show()

input('...')

Output:

Epoch 77/10000
10/10 [==============================] - 0s 20ms/step - loss: 0.4016 - accuracy: 0.7000
Epoch 78/10000
10/10 [==============================] - 0s 19ms/step - loss: 0.3973 - accuracy: 0.7000
Epoch 79/10000
10/10 [==============================] - 0s 19ms/step - loss: 0.3929 - accuracy: 0.8000