In a token labelling task I am using a transformers tokenizer, which outputs objects of the BatchEncoding class. I am tokenizing each text separately because I need to extract the labels from the text and re-arrange them after tokenizing (since words may be split into subtokens). However, I can't find a way either to create a TensorFlow Dataset from the list of BatchEncoding objects or to merge all the BatchEncoding objects into one in order to create the dataset.
Here are the main parts of the code:
tokenizer = BertTokenizerFast.from_pretrained('bert-base-multilingual-uncased')
def extract_labels(raw_text):
    # split text into words and extract label
    (...)
    return clean_words, labels

def tokenize_text(words, labels):
    # tokenize text
    tokens = tokenizer(words, is_split_into_words=True, padding='max_length', truncation=True, max_length=MAX_LENGTH)
    # since words might be split into subwords, labels need to be re-arranged:
    # only the first subword keeps the label
    (...)
    tokens['labels'] = label_ids
    return tokens
tokens = []
for raw_text in data:
    clean_text, l = extract_labels(raw_text)
    t = tokenize_text(clean_text, l)
    tokens.append(t)

type(tokens[0])
# transformers.tokenization_utils_base.BatchEncoding

tokens[0]
# {'input_ids': [101, 69887, 10112, ..., 0, 0, 0], 'attention_mask': [1, 1, 1, ..., 0, 0, 0], 'labels': [-100, 0, -100, ..., -100, -100, -100]}
Update: as requested, a minimal example to reproduce:
from transformers import BertTokenizerFast
import tensorflow as tf
tokenizer = BertTokenizerFast.from_pretrained('bert-base-multilingual-uncased')
tokens = []
for text in ["Hello there", "Good morning"]:
    t = tokenizer(text.split(), is_split_into_words=True, padding='max_length', truncation=True, max_length=10)
    t['labels'] = [1 for _ in t.word_ids()]  # fake labels to simplify the example
    tokens.append(t)

print(type(tokens[0]))  # tokens is now a list of BatchEncoding objects
print(tokens)
If I tokenized the whole dataset directly I'd get a single BatchEncoding comprising everything, but then I would not be able to handle the labels:
data = ["Hello there", "Good morning"]
tokens = tokenizer(data, padding='max_length', truncation=True, max_length=10)
# now tokens is a single BatchEncoding covering the whole dataset
print(type(tokens))
print(tokens)
# this way I can get a tf dataset directly:
tf.data.Dataset.from_tensor_slices(tokens)
Note that I need to iterate over the texts first in order to extract the labels, and that I need each text's word_ids() to re-arrange the labels afterwards.
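For context, the label re-arrangement elided in tokenize_text above follows the usual first-subword convention (only the first subword of each word keeps the label, everything else gets the -100 ignore index, as visible in the output above); roughly:

def align_labels(tokens, labels):
    label_ids = []
    previous_word_id = None
    for word_id in tokens.word_ids():
        if word_id is None:
            # special tokens ([CLS], [SEP]) and padding are ignored
            label_ids.append(-100)
        elif word_id != previous_word_id:
            # first subword of a word keeps the word's label
            label_ids.append(labels[word_id])
        else:
            # subsequent subwords of the same word are ignored
            label_ids.append(-100)
        previous_word_id = word_id
    return label_ids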
CodePudding user response:
You have a few options. You can use a defaultdict:
from collections import defaultdict
import tensorflow as tf
result = defaultdict(list)
for d in tokens:
    for k, v in d.items():
        result[k].append(v)

dataset = tf.data.Dataset.from_tensor_slices(dict(result))
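Note that from_tensor_slices stacks the per-text lists into dense tensors, so every sequence must have the same length; padding='max_length' with a fixed max_length already guarantees that here. A quick sanity check on the merged dataset (shapes shown for the toy example with max_length=10):

for batch in dataset.batch(2).take(1):
    print({k: v.shape for k, v in batch.items()})
# e.g. {'input_ids': TensorShape([2, 10]), 'attention_mask': TensorShape([2, 10]), ...}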
Or you can use pandas as shown here:
import pandas as pd
import tensorflow as tf
dataset = tf.data.Dataset.from_tensor_slices(pd.DataFrame.from_dict(tokens).to_dict(orient="list"))
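This works because each BatchEncoding is dict-like, so pandas turns the list into a frame with one row per text, and to_dict(orient="list") converts it back into a dict mapping each key to the list of per-text sequences, which is exactly the structure from_tensor_slices expects.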
Or just create the correct structure while preprocessing your data:
from transformers import BertTokenizerFast
from collections import defaultdict
import tensorflow as tf
tokenizer = BertTokenizerFast.from_pretrained('bert-base-multilingual-uncased')
tokens = defaultdict(list)
for text in ["Hello there", "Good morning"]:
    t = tokenizer(text.split(), is_split_into_words=True, padding='max_length', truncation=True, max_length=10)
    tokens['input_ids'].append(t['input_ids'])
    tokens['token_type_ids'].append(t['token_type_ids'])
    tokens['attention_mask'].append(t['attention_mask'])
    t['labels'] = [1 for _ in t.word_ids()]  # fake labels to simplify the example
    tokens['labels'].append(t['labels'])

dataset = tf.data.Dataset.from_tensor_slices(dict(tokens))

for x in dataset:
    print(x)
{'input_ids': <tf.Tensor: shape=(10,), dtype=int32, numpy=
array([ 101, 29155, 10768, 102, 0, 0, 0, 0, 0,
0], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(10,), dtype=int32, numpy=array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(10,), dtype=int32, numpy=array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0], dtype=int32)>, 'labels': <tf.Tensor: shape=(10,), dtype=int32, numpy=array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)>}
{'input_ids': <tf.Tensor: shape=(10,), dtype=int32, numpy=
array([ 101, 12050, 17577, 102, 0, 0, 0, 0, 0,
0], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(10,), dtype=int32, numpy=array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(10,), dtype=int32, numpy=array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0], dtype=int32)>, 'labels': <tf.Tensor: shape=(10,), dtype=int32, numpy=array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)>}
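If you later want to feed this into Keras' model.fit, it expects (inputs, labels) pairs rather than one flat dict; a minimal sketch on top of the dataset built above:

def to_xy(example):
    # separate the label tensor from the model inputs
    features = {k: v for k, v in example.items() if k != 'labels'}
    return features, example['labels']

train_ds = dataset.map(to_xy).batch(2)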