TensorFlow Keras multiple-input model


I need to adapt this model to take two text columns as input (instead of one column):

import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text  # registers the ops needed by the preprocessing model
from tensorflow.keras.optimizers import Adam

tfhub_handle_encoder = \
    "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1"
tfhub_handle_preprocess = \
    "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"

def build_classifier_model():
  text_input = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text')

  preprocessing_layer = hub.KerasLayer(
      tfhub_handle_preprocess, name='preprocessing')
  encoder_inputs = preprocessing_layer(text_input)

  encoder = hub.KerasLayer(
      tfhub_handle_encoder, trainable=True, name='BERT_encoder')
  outputs = encoder(encoder_inputs)

  net = outputs['pooled_output']
  net = tf.keras.layers.Dropout(0.1)(net)
  net = tf.keras.layers.Dense(
      6, activation='softmax', name='classifier')(net)
  model = tf.keras.Model(text_input, net)

  # the head ends in softmax, so the loss takes probabilities (from_logits=False)
  loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
  metric = tf.metrics.CategoricalAccuracy('accuracy')
  optimizer = Adam(
      learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
  model.compile(
      optimizer=optimizer, loss=loss, metrics=metric)
  model.summary()
  return model

classifier_model = build_classifier_model()
history = classifier_model.fit(
    x=X_train['f'].values,
    y=y_train_c,
    validation_data=(X_valid['f'].values, y_valid_c),
    epochs=15)
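
For context: CategoricalCrossentropy with from_logits=False expects one-hot targets, so y_train_c and y_valid_c are presumably one-hot encoded along these lines (a sketch; treating y_train/y_valid as integer class ids is an assumption):

from tensorflow.keras.utils import to_categorical

# hypothetical: integer class ids 0..5, one-hot encoded to match the 6-unit softmax head
y_train_c = to_categorical(y_train, num_classes=6)
y_valid_c = to_categorical(y_valid, num_classes=6)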

This looks like the model from this tutorial: https://www.tensorflow.org/text/tutorials/classify_text_with_bert

I have tried modifying the code for two input layers, but I get an error because the tensor dimensions are wrong after the concatenation:

def build_classifier_model():
  input1 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text')
  input2 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text1')
  text_input = tf.keras.layers.concatenate([input1, input2], axis=-1)

  preprocessing_layer = hub.KerasLayer(
      tfhub_handle_preprocess, name='preprocessing')
  encoder_inputs = preprocessing_layer(text_input)

  encoder = hub.KerasLayer(
      tfhub_handle_encoder, trainable=True, name='BERT_encoder')
  outputs = encoder(encoder_inputs)

  net = outputs['pooled_output']
  net = tf.keras.layers.Dropout(0.1)(net)
  net = tf.keras.layers.Dense(
      6, activation='softmax', name='classifier')(net)
  model = tf.keras.Model([input1, input2], net)

  loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
  metric = tf.metrics.CategoricalAccuracy('accuracy')
  optimizer = Adam(
      learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
  model.compile(
      optimizer=optimizer, loss=loss, metrics=metric)
  model.summary()
  return model

Error:

InvalidArgumentError:  logits and labels must be broadcastable: logits_size=[64,6] labels_size=[32,6]
     [[node categorical_crossentropy/softmax_cross_entropy_with_logits (defined at tmp/ipykernel_39/1837193519.py:5) ]] [Op:__inference_train_function_271676]

If I use concatenate along a different axis, the model doesn't compile.

CodePudding user response:

Weirdly enough, replacing your Concatenation layer with tf.strings.join inside your model seems to work:

def build_classifier_model():

  input1 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text')

  input2 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text1')
  # joins the two strings element-wise, preserving the batch dimension
  text_input = tf.strings.join([input1, input2])

  preprocessing_layer = hub.KerasLayer(
      tfhub_handle_preprocess, name='preprocessing')

  encoder_inputs = preprocessing_layer(text_input)
  encoder = hub.KerasLayer(
      tfhub_handle_encoder, trainable=True, name='BERT_encoder')

  outputs = encoder(encoder_inputs)
  net = outputs['pooled_output']
  net = tf.keras.layers.Dropout(0.1)(net)
  output = tf.keras.layers.Dense(
      6, activation='softmax', name='classifier')(net)
  model = tf.keras.Model([input1, input2], output)

  loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
  metric = tf.metrics.CategoricalAccuracy('accuracy')
  optimizer = Adam(
      learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
  model.compile(
      optimizer=optimizer, loss=loss, metrics=metric)
  model.summary()
  return model
With this change, training runs:

Epoch 1/5
 497/1094 [============>.................] - ETA: 2:14 - loss: 1.8664 - accuracy: 0.1641
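
The reason the original Concatenate fails: with shape=() string inputs, each tensor has shape (batch,), so concatenating along axis=-1 joins them on the only axis there is, the batch axis, and doubles the batch size. That is exactly what the error reports: logits_size=[64,6] vs labels_size=[32,6]. tf.strings.join instead merges the two tensors element-wise. A quick standalone check (a minimal sketch, independent of the model):

import tensorflow as tf

a = tf.constant(["foo", "bar"])  # a "batch" of 2 scalar strings, shape (2,)
b = tf.constant(["baz", "qux"])

# concat on rank-1 tensors joins along the only axis, i.e. the batch axis
print(tf.concat([a, b], axis=-1).shape)              # (4,)

# strings.join combines the tensors element-wise, preserving the batch size
print(tf.strings.join([a, b], separator=" ").shape)  # (2,)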

You could also consider simply doing text_input = input1 + input2, since the Concatenate layer seems to mess up the batch dimension. Or you could feed each input to your encoder and concatenate the results afterwards:

def build_classifier_model():

  input1 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text')

  input2 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text1')

  preprocessing_layer = hub.KerasLayer(
      tfhub_handle_preprocess, name='preprocessing')

  encoder_input1 = preprocessing_layer(input1)
  encoder_input2 = preprocessing_layer(input2)
  encoder = hub.KerasLayer(
      tfhub_handle_encoder, trainable=True, name='BERT_encoder')

  output1 = encoder(encoder_input1)
  output2 = encoder(encoder_input2)

  # concatenate the two pooled embeddings (512 + 512 -> 1024 features)
  net = tf.keras.layers.Concatenate(axis=-1)([output1['pooled_output'], output2['pooled_output']])
  net = tf.keras.layers.Dropout(0.1)(net)
  output = tf.keras.layers.Dense(
      6, activation='softmax', name='classifier')(net)
  model = tf.keras.Model([input1, input2], output)

  loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
  metric = tf.metrics.CategoricalAccuracy('accuracy')
  optimizer = Adam(
      learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
  model.compile(
      optimizer=optimizer, loss=loss, metrics=metric)
  model.summary()
  return model
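
Whichever variant you choose, the two-input model is fed one array per named input. A sketch of the matching fit call, assuming the two text columns are named f and f1 (placeholder names):

classifier_model = build_classifier_model()
history = classifier_model.fit(
    x=[X_train['f'].values, X_train['f1'].values],
    y=y_train_c,
    validation_data=(
        [X_valid['f'].values, X_valid['f1'].values], y_valid_c),
    epochs=15)

A dict keyed by the Input names (x={'text': ..., 'text1': ...}) works as well. One trade-off to keep in mind: the join variants run BERT once over a single combined sequence (the default preprocessing model truncates to 128 tokens), while this two-encoder variant runs BERT twice and doubles the pooled feature width from 512 to 1024.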