How to define a new Tensor with a dynamic shape to support batching in a custom layer


I am trying to implement a custom layer that preprocesses a tokenized sequence of words into a matrix with a predefined number of elements equal to the size of the vocabulary. Essentially, I'm trying to implement a 'bag of words' layer. This is the closest I could come up with:

    def get_encoder(vocab_size=args.vocab_size):
        encoder = TextVectorization(max_tokens=vocab_size)
        encoder.adapt(train_dataset.map(lambda text, label: text))
        return encoder

    class BagOfWords(tf.keras.layers.Layer):
        def __init__(self, vocab_size=args.small_vocab_size, batch_size=args.batch_size):
            super(BagOfWords, self).__init__()
            self.vocab_size = vocab_size
            self.batch_size = batch_size

        def build(self, input_shape):
            super().build(input_shape)

        def call(self, inputs):
            if inputs.shape[-1] is None:
                return tf.constant(np.zeros([self.batch_size, self.vocab_size])) # 32 is the batch size
            outputs = tf.zeros([self.batch_size, self.vocab_size])
            if inputs.shape[-1] is not None:
                for i in range(inputs.shape[0]):
                    for ii in range(inputs.shape[-1]):
                        output_idx = inputs[i][ii]
                        outputs[i][output_idx] = outputs[i][output_idx] + 1
            return outputs

    model = keras.models.Sequential()
    model.add(encoder)
    model.add(bag_of_words)
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dense(1, activation='sigmoid'))

No surprise that I get an error when calling fit() on the model: "Incompatible shapes: [8,1] vs. [32,1]". It happens on the last step of each epoch, when the final batch contains fewer than 32 examples.

My question is: putting performance aside, how do I define the outputs Tensor for my bag-of-words matrix so that its batch dimension is dynamic, and how do I get my code working?
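For reference, the shapes clash because batch_size is baked into the layer while the leading dimension of each batch varies. A minimal sketch of a dynamically-shaped variant (an illustration, not the original code: it assumes the layer receives integer token ids and swaps the Python loops for tf.math.bincount) would derive the batch dimension from the input itself:

    import tensorflow as tf

    class DynamicBagOfWords(tf.keras.layers.Layer):
        # Hypothetical sketch: the leading dimension simply follows
        # tf.shape(inputs)[0], so a smaller final batch no longer clashes
        # with a hard-coded size of 32.
        def __init__(self, vocab_size):
            super().__init__()
            self.vocab_size = vocab_size

        def call(self, inputs):
            # bincount with axis=-1 returns one count vector per batch row;
            # the cast is needed because bincount expects int32 ids, and
            # maxlength drops any id that falls outside the vocabulary
            counts = tf.math.bincount(
                tf.cast(inputs, tf.int32),
                minlength=self.vocab_size,
                maxlength=self.vocab_size,
                axis=-1,
            )
            return tf.cast(counts, tf.float32)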

Edit 1: After the comment, I realised that the code indeed doesn't work because it never reaches the 'else' branch. I edited it a bit so that it uses only tf functions:

    class BagOfWords(tf.keras.layers.Layer):
        def __init__(self, vocab_size=args.small_vocab_size, batch_size=args.batch_size):
            super(BagOfWords, self).__init__()
            self.vocab_size = vocab_size
            self.batch_size = batch_size
            self.outputs = tf.Variable(tf.zeros([batch_size, vocab_size]))

        def build(self, input_shape):
            super().build(input_shape)

        def call(self, inputs):
            if tf.shape(inputs)[-1] == None:
                return tf.zeros([self.batch_size, self.vocab_size])
            self.outputs.assign(tf.zeros([self.batch_size, self.vocab_size]))
            for i in range(tf.shape(inputs)[0]):
                for ii in range(tf.shape(inputs)[-1]):
                    output_idx = inputs[i][ii]
                    if output_idx >= tf.constant(self.vocab_size, dtype=tf.int64):
                        output_idx = tf.constant(1, dtype=tf.int64)
                    self.outputs[i][output_idx].assign(self.outputs[i][output_idx] + 1)
            return self.outputs

It didn't help though: AttributeError: 'Tensor' object has no attribute 'assign'.
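As an aside, the AttributeError arises because chained indexing on a tf.Variable (self.outputs[i][output_idx]) yields a plain Tensor, which has no assign method. A small illustrative sketch of the usual functional workaround, tf.tensor_scatter_nd_add, with toy values rather than the original data:

    import tensorflow as tf

    outputs = tf.zeros([4, 10])                      # toy batch of 4, vocab of 10
    indices = tf.constant([[0, 3], [1, 7], [2, 3]])  # (row, token_id) pairs to increment
    updates = tf.ones([3])                           # add 1.0 at each index pair
    outputs = tf.tensor_scatter_nd_add(outputs, indices, updates)
    # outputs now holds the counts without any in-place mutation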

CodePudding user response:

Correct me if I am wrong, but I think that using the output_mode="multi_hot" of the TextVectorization layer would be sufficient to do what you want to do. According to the docs, the multi_hot output mode:

Outputs a single int array per batch, of either vocab_size or max_tokens size, containing 1s in all elements where the token mapped to that index exists at least once in the batch item

So it could be as simple as this:

import tensorflow as tf

def get_encoder():
    # multi_hot emits one fixed-width 0/1 vector per sample, so no custom
    # bag-of-words layer is needed; adapt() builds the vocabulary.
    # train_dataset is defined below and only needs to exist at call time.
    encoder = tf.keras.layers.TextVectorization(output_mode="multi_hot")
    encoder.adapt(train_dataset.map(lambda text, label: text))
    return encoder

texts  = [
          'All my cats in a row',
          'When my cat sits down, she looks like a Furby toy!',
          'The cat from outer space',
          'Sunshine loves to sit like this for some reason.']

labels = [[1], [0], [1], [1]]
train_dataset = tf.data.Dataset.from_tensor_slices((texts, labels))

model = tf.keras.Sequential()
model.add(get_encoder())
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss = tf.keras.losses.BinaryCrossentropy())
model.fit(train_dataset.batch(2), epochs=2)

This is how your texts would be encoded:

import tensorflow as tf

texts  = ['All my cats in a row',
          'When my cat sits down, she looks like a Furby toy!',
          'The cat from outer space',
          'Sunshine loves to sit like this for some reason.']
encoder = get_encoder()  # get_encoder and train_dataset come from the snippet above
inputs = encoder(texts)
print(inputs)
tf.Tensor(
[[0. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 0. 0.
  0. 0. 1. 1.]
 [0. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 1. 0. 1. 0. 0. 0. 0. 1. 0. 1. 0.
  0. 1. 0. 0.]
 [0. 0. 0. 1. 0. 0. 0. 0. 0. 1. 0. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1.
  0. 0. 0. 0.]
 [0. 0. 1. 0. 0. 0. 0. 1. 1. 0. 1. 0. 1. 0. 1. 0. 0. 1. 0. 1. 0. 0. 0. 0.
  1. 0. 0. 0.]], shape=(4, 28), dtype=float32)

So, just as you tried in your custom layer, the presence of a word in a sequence is marked with 1 and its absence with 0.
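As a side note, multi_hot only records presence. If per-token counts are wanted instead (the classic bag-of-words), TextVectorization also accepts output_mode="count"; a quick sketch with made-up text:

import tensorflow as tf

# "count" mode emits occurrence counts instead of 0/1 presence flags
encoder = tf.keras.layers.TextVectorization(output_mode="count")
encoder.adapt(["the cat sat on the mat"])
print(encoder(["the cat sat on the mat"]))  # "the" contributes a count of 2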

CodePudding user response:

Here is an example of a Bag-of-Words custom Keras layer that does not use any additional preprocessing layers:

import tensorflow as tf

class BagOfWords(tf.keras.layers.Layer):
    def __init__(self, vocabulary):
        super(BagOfWords, self).__init__()
        # the layer holds the vocabulary itself (a 1-D string tensor),
        # since incoming tokens are compared against it directly
        self.vocabulary = vocabulary

    def call(self, inputs):
        batch_size = tf.shape(inputs)[0]
        outputs = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
        for i in range(batch_size):
            string = inputs[i]
            # drop the b'' padding added by to_tensor()
            string_length = tf.shape(tf.where(tf.math.not_equal(string, b'')))[0]
            string = string[:string_length]
            string_array = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
            for s in string:
                # one-hot row marking where this token matches the vocabulary
                string_array = string_array.write(string_array.size(), tf.where(tf.equal(s, self.vocabulary), 1.0, 0.0))
            # logical OR over the token rows gives the multi-hot encoding
            outputs = outputs.write(i, tf.cast(tf.reduce_any(tf.cast(string_array.stack(), dtype=tf.bool), axis=0), dtype=tf.float32))
        return outputs.stack()
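For what it's worth, the same multi-hot matrix can be computed without Python-level loops by broadcasting every token against the whole vocabulary; here is a sketch, under the assumption that the b'' padding token never occurs in the vocabulary:

import tensorflow as tf

class VectorizedBagOfWords(tf.keras.layers.Layer):
    def __init__(self, vocabulary):
        super().__init__()
        self.vocabulary = vocabulary  # 1-D string tensor of unique words

    def call(self, inputs):
        # inputs: (batch, time) padded string tensor; compare every token
        # against every vocabulary entry, giving (batch, time, vocab) booleans
        matches = tf.equal(inputs[:, :, tf.newaxis], self.vocabulary[tf.newaxis, tf.newaxis, :])
        # collapse the time axis: a word is present if any token matched it;
        # the b'' padding matches nothing, so it drops out automatically
        return tf.cast(tf.reduce_any(matches, axis=1), tf.float32)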

And here are the manual preprocessing steps and the model:

labels = [[1], [0], [1], [0]]

texts  = ['All my cats in a row',
          'When my cat sits down, she looks like a Furby toy!',
          'The cat from the outer space',
          'Sunshine loves to sit like this for some reason.']

DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'
tensor_of_strings = tf.constant(texts)
tensor_of_strings = tf.strings.lower(tensor_of_strings)
tensor_of_strings = tf.strings.regex_replace(tensor_of_strings, DEFAULT_STRIP_REGEX, "")
split_strings = tf.strings.split(tensor_of_strings).to_tensor()
flattened_split_strings = tf.reshape(split_strings, [-1])  # flatten (batch, time) into one token list
unique_words, _ = tf.unique(flattened_split_strings)
unique_words = tf.random.shuffle(unique_words)

bag_of_words = BagOfWords(vocabulary=unique_words)
train_dataset = tf.data.Dataset.from_tensor_slices((split_strings, labels))
model = tf.keras.Sequential()
model.add(bag_of_words)
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss = tf.keras.losses.BinaryCrossentropy())
model.fit(train_dataset.batch(2), epochs=2)
Epoch 1/2
4/4 [==============================] - 2s 7ms/step - loss: 0.7081
Epoch 2/2
4/4 [==============================] - 0s 6ms/step - loss: 0.7008
<keras.callbacks.History at 0x7f5ba844bad0>

And this is what the 4 encoded sentences look like:

print(bag_of_words(split_strings))
tf.Tensor(
[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0.
  1. 1. 1. 0.]
 [1. 1. 1. 0. 0. 1. 0. 0. 0. 0. 0. 1. 0. 1. 0. 0. 1. 1. 0. 0. 0. 1. 0. 0.
  0. 1. 1. 0.]
 [0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 0. 1. 0.
  0. 0. 0. 0.]
 [0. 1. 0. 1. 1. 0. 0. 1. 1. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.
  0. 0. 0. 1.]], shape=(4, 28), dtype=float32)