TensorFlow "incompatible with the layer: expected min_ndim=3, found ndim=2" error


I have the dataset in the desired shape, but my TensorFlow CNN model isn't accepting it. I can't figure out what I need to change.

train_X.shape, train_Y_one_hot.shape, test_X.shape, test_Y_one_hot.shape
((1073965, 15), (1073965, 8), (268492, 15), (268492, 8))

train_X[0]
array([-3.75609105e-06,  7.77089567e-05,  4.41936374e-05, -6.78777110e-07,
        1.17593595e-04, -7.11814884e-05,  8.92277296e-05,  5.97469947e-05,
       -2.17992064e-05, -1.38704555e-05,  1.66007993e-05,  6.45235327e-05,
        4.69597312e-06,  5.03974802e-06,  4.71866744e-04])

CNN model:

def classifySupervised(train_X, train_Y_one_hot, test_X, test_Y_one_hot,valid_X, valid_Y_one_hot,num_classes):
#     train_data = tf.data.Dataset.from_tensor_slices((train_X,train_Y_one_hot))
#     valid_data = tf.data.Dataset.from_tensor_slices((valid_X,valid_Y_one_hot))
    batch_size = 21
    epochs = 30
    #Complete stock code
    model1 = Sequential()
    model1.add(Conv1D(32, kernel_size=3,activation='relu',input_shape=(15,),padding='same'))
    model1.add(LeakyReLU(alpha=0.1))
    model1.add(MaxPooling1D(2,padding='same'))
    model1.add(Dropout(0.25))
    model1.add(Conv1D(64, 3, activation='relu',padding='same'))
    model1.add(LeakyReLU(alpha=0.1))
    model1.add(MaxPooling1D(pool_size=2,padding='same'))
    model1.add(Dropout(0.25))
    model1.add(Conv1D(128, 3, activation='relu',padding='same'))
    model1.add(LeakyReLU(alpha=0.20))                  
    model1.add(MaxPooling1D(pool_size=2,padding='same'))
    model1.add(Dropout(0.4))
    model1.add(Dense(128, activation='relu'))
    model1.add(LeakyReLU(alpha=0.1))       
    model1.add(Dropout(0.3))
    model1.add(Dense(num_classes, activation='softmax'))
    model1.compile(loss=keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(),metrics=['accuracy'])
    model1_train_dropout = model1.fit(train_X,train_Y_one_hot, batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(valid_X,valid_Y_one_hot))
    model1.save("Blythe_gt_train.h5py")
    #loadedmodel = .load("sanjoukin_gt_train.h5py")
    test_eval = model1.evaluate(test_X, test_Y_one_hot, verbose=11)
        
    
    #Prediction - maybe a separate function
    predicted_classes = model1.predict(test_X)
    predicted_classes = np.argmax(np.round(predicted_classes),axis=1)
    predicted_classes.shape, test_Y.shape
    correct = np.where(predicted_classes==test_Y)[0]
    print("Found %d correct labels" % len(correct))
    for i, correct in enumerate(correct[:9]):
        plt.subplot(3,3,i+1)
        plt.imshow(test_X[correct].reshape(28,28), cmap='gray', interpolation='none')
        plt.title("Predicted {}, Class {}".format(predicted_classes[correct], test_Y[correct]))
        plt.tight_layout()
    incorrect = np.where(predicted_classes!=test_Y)[0]
    print("Found %d incorrect labels" % len(incorrect))
    for i, incorrect in enumerate(incorrect[:9]):
        plt.subplot(3,3,i+1)
        plt.imshow(test_X[incorrect].reshape(28,28), cmap='gray', interpolation='none')
        plt.title("Predicted {}, Class {}".format(predicted_classes[incorrect], test_Y[incorrect]))
    plt.tight_layout()
    
    #Classification Report
    
    target_names = ["Class {}".format(i) for i in range(num_classes)]
    print(classification_report(test_Y, predicted_classes, target_names=target_names))
    return test_eval
    pass
classifySupervised(train_X, train_Y_one_hot, test_X, test_Y_one_hot,valid_X,valid_Y_one_hot,nClasses) 

This is the error I get:

---------------------------------------------------------------------------

ValueError                                Traceback (most recent call last)
Input In [41], in <cell line: 56>()
     54     return test_eval
     55     pass
---> 56 classifySupervised(train_X, train_Y_one_hot, test_X, test_Y_one_hot,valid_X,valid_Y_one_hot,nClasses)

Input In [41], in classifySupervised(train_X, train_Y_one_hot, test_X, test_Y_one_hot, valid_X, valid_Y_one_hot, num_classes)
      6 #Complete stock code
      7 model1 = Sequential()
----> 8 model1.add(Conv1D(32, kernel_size=3,activation='relu',input_shape=(15,),padding='same'))
      9 model1.add(LeakyReLU(alpha=0.1))
     10 model1.add(MaxPooling1D(2,padding='same'))

File ~\.conda\envs\tf3\lib\site-packages\tensorflow\python\training\tracking\base.py:587, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
    585 self._self_setattr_tracking = False  # pylint: disable=protected-access
    586 try:
--> 587   result = method(self, *args, **kwargs)
    588 finally:
    589   self._self_setattr_tracking = previous_value  # pylint: disable=protected-access

File ~\.conda\envs\tf3\lib\site-packages\keras\utils\traceback_utils.py:67, in filter_traceback.<locals>.error_handler(*args, **kwargs)
     65 except Exception as e:  # pylint: disable=broad-except
     66   filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67   raise e.with_traceback(filtered_tb) from None
     68 finally:
     69   del filtered_tb

File ~\.conda\envs\tf3\lib\site-packages\keras\engine\input_spec.py:228, in assert_input_compatibility(input_spec, inputs, layer_name)
    226   ndim = x.shape.rank
    227   if ndim is not None and ndim < spec.min_ndim:
--> 228     raise ValueError(f'Input {input_index} of layer "{layer_name}" '
    229                      'is incompatible with the layer: '
    230                      f'expected min_ndim={spec.min_ndim}, '
    231                      f'found ndim={ndim}. '
    232                      f'Full shape received: {tuple(shape)}')
    233 # Check dtype.
    234 if spec.dtype is not None:

ValueError: Input 0 of layer "conv1d_8" is incompatible with the layer: expected min_ndim=3, found ndim=2. Full shape received: (None, 15)

CodePudding user response:

train_X and test_X are missing a dimension. A Conv1D layer needs data with the shape (samples, timesteps, features), so the easiest fix is to add a dimension before calling model.fit():

train_X = tf.expand_dims(train_X, axis=-1)
test_X = tf.expand_dims(test_X, axis=-1)
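
valid_X presumably needs the same treatment, since it is passed to fit() as validation_data. A quick shape check after expanding (just a sketch reusing the arrays from the question):

valid_X = tf.expand_dims(valid_X, axis=-1)           # same fix for the validation split
print(train_X.shape, valid_X.shape, test_X.shape)    # e.g. (1073965, 15, 1) ... (268492, 15, 1)
# each sample is now 15 timesteps with 1 feature, which is what Conv1D expects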

Also, replace the last MaxPooling1D layer in your model with a GlobalMaxPool1D layer.

A MaxPooling1D layer calculates the max value of a tensor over a given window size and stride, so it still outputs a 3D tensor (batch_size, downsampled_steps, features), whereas Dense layers usually work with 2D tensors (batch_size, features). The GlobalMaxPool1D layer downsamples the input representation by taking the maximum value over the whole time dimension and returns a 2D tensor.
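
Putting the two changes together, here is a minimal sketch of the corrected model (a sketch only: it assumes the first Conv1D's input_shape is also updated to (15, 1) to match the expanded data, reuses the layer sizes from the question, and drops the relu activations inside the Conv1D calls since each one is immediately followed by a LeakyReLU):

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GlobalMaxPool1D, Dense, Dropout, LeakyReLU

num_classes = 8  # train_Y_one_hot.shape[1]

model1 = Sequential()
# input_shape=(15, 1): 15 timesteps, 1 feature, matching the expanded train_X
model1.add(Conv1D(32, kernel_size=3, padding='same', input_shape=(15, 1)))
model1.add(LeakyReLU(alpha=0.1))
model1.add(MaxPooling1D(2, padding='same'))
model1.add(Dropout(0.25))
model1.add(Conv1D(64, 3, padding='same'))
model1.add(LeakyReLU(alpha=0.1))
model1.add(MaxPooling1D(pool_size=2, padding='same'))
model1.add(Dropout(0.25))
model1.add(Conv1D(128, 3, padding='same'))
model1.add(LeakyReLU(alpha=0.2))
# last MaxPooling1D replaced: GlobalMaxPool1D takes the max over the whole time axis,
# so the Dense layers below receive a 2D (batch_size, features) tensor
model1.add(GlobalMaxPool1D())
model1.add(Dropout(0.4))
model1.add(Dense(128, activation='relu'))
model1.add(LeakyReLU(alpha=0.1))
model1.add(Dropout(0.3))
model1.add(Dense(num_classes, activation='softmax'))
model1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model1.summary()  # builds without the min_ndim error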
