Home > OS >  UnimplementedError with 1 channel image classification
UnimplementedError with 1 channel image classification

Time:11-07

I am new to CNN and facing the error UnimplementedError: Fused conv implementation does not support grouped convolutions for now. [[node sequential_3/activation_15/Relu (defined at <ipython-input-37-0f4d43f688ae>:5) ]] [Op:__inference_test_function_5422]

I was following the code from this video; it worked for 3-channel images, but I need to work with 1-channel images, which is not working. Following is my code, where I changed the specific lines as follows:

model.add(Conv2D(32, (3, 3), input_shape = (256, 256, 1))) and also added color_mode = 'grayscale' in train_generator = train_datagen.flow_from_directory(train_dir, target_size=(img_width, img_height), color_mode = 'grayscale', batch_size=batch_size, class_mode='binary')

However, I am still not getting rid of the error. Could someone please help me figure it out? The full code is as follows:

##import libraries here

# Hyperparameters and data locations.
img_width, img_height = 256, 256
train_dir = './train'
valid_dir = './validation'
train_samples = 35
valid_samples = 15
epochs = 7
batch_size = 5

# Input shape depends on the backend's channel ordering; 1 channel = grayscale.
if K.image_data_format() == 'channels_first':
  shape = (1, img_width, img_height)
else:
  shape = (img_width, img_height, 1)

# Augment the training images; only rescale the validation images.
train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

# FIX: color_mode='grayscale' makes BOTH generators yield 1-channel batches.
# Without it flow_from_directory defaults to 'rgb' and yields (256, 256, 3)
# batches, which mismatch the model's (256, 256, 1) input and trigger
# "Fused conv implementation does not support grouped convolutions".
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(img_width, img_height),
                                                    color_mode='grayscale',
                                                    batch_size=batch_size, class_mode='binary')
valid_generator = test_datagen.flow_from_directory(valid_dir, target_size=(img_width, img_height),
                                                   color_mode='grayscale',
                                                   batch_size=batch_size, class_mode='binary')

# Small CNN: three conv/pool stages followed by a dense classifier head.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))  # regularization before the output layer
model.add(Dense(1))
model.add(Activation('sigmoid'))  # binary classification -> single sigmoid unit
model.summary()

model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

# model.fit accepts generators directly; fit_generator is deprecated in TF 2.x.
model.fit(train_generator,
          steps_per_epoch=train_samples // batch_size,
          epochs=epochs,
          validation_data=valid_generator,
          validation_steps=valid_samples // batch_size)

CodePudding user response:

The problem is probably caused by a discrepancy between the defined channels in your model and the actual number of channels in your dataset. I would recommend explicitly converting your images to grayscale before feeding them into your model:

import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt

# Fetch the demo flower dataset and resolve its local path.
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
archive_path = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(archive_path)

# Same augmenting generator configuration as the question's training pipeline.
img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)

# Wrap the Keras directory iterator as a tf.data pipeline.
train_ds = tf.data.Dataset.from_generator(
    lambda: img_gen.flow_from_directory(data_dir, batch_size=32, shuffle=True),
    output_types=(tf.float32, tf.float32))

def convert_to_grayscale(image, label):
  # rgb_to_grayscale collapses the trailing 3-channel axis to a single channel.
  return tf.image.rgb_to_grayscale(image), label

# Inspect one sample's shape before the conversion...
first_batch, _ = next(iter(train_ds.take(1)))
sample = first_batch[0]
print('Before conversion --> ', sample.shape)

# ...and again after mapping the grayscale conversion over the pipeline.
train_ds = train_ds.map(convert_to_grayscale)
first_batch, _ = next(iter(train_ds.take(1)))
sample = first_batch[0]
print('After conversion --> ', sample.shape)
Found 3670 files belonging to 5 classes.
Using 2936 files for training.
Before conversion -->  (256, 256, 3)
After conversion -->  (256, 256, 1)

Here is a complete working example:

import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt

# Download and unpack the demo flower dataset (5 classes).
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)

batch_size = 32
img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
train_generator = img_gen.flow_from_directory(data_dir, target_size=(256, 256),
    batch_size=batch_size,
    class_mode='sparse')

def convert_to_grayscale(images, labels):
  # Collapse the RGB channel axis to a single luminance channel -> (256, 256, 1).
  return tf.image.rgb_to_grayscale(images), labels

train_ds = tf.data.Dataset.from_generator(lambda: train_generator, output_types=(tf.float32, tf.float32))

train_ds = train_ds.map(convert_to_grayscale)

num_classes = 5

# The model expects 1-channel input, matching the mapped dataset above.
model = tf.keras.Sequential([
  tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu', input_shape=(256, 256, 1)),
  tf.keras.layers.MaxPooling2D(),
  tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu'),
  tf.keras.layers.MaxPooling2D(),
  tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
  tf.keras.layers.MaxPooling2D(),
  tf.keras.layers.Flatten(),
  tf.keras.layers.Dense(128, activation='relu'),
  tf.keras.layers.Dense(num_classes)  # logits; loss applies softmax internally
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# FIX: a Keras directory iterator loops forever, so the wrapped tf.data
# pipeline is infinite; without steps_per_epoch model.fit would never
# finish the first epoch.
steps_per_epoch = train_generator.samples // batch_size
model.fit(train_ds, epochs=10, steps_per_epoch=steps_per_epoch)
  • Related