import torch
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
model_name = 'tuner007/pegasus_paraphrase'
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = PegasusTokenizer.from_pretrained(model_name)
model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
def get_response(input_text, num_return_sequences):
    batch = tokenizer.prepare_seq2seq_batch([input_text], truncation=True, padding='longest', max_length=60, return_tensors="pt").to(torch_device)
    translated = model.generate(**batch, max_length=60, num_beams=10, num_return_sequences=num_return_sequences, temperature=1.5)
    tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
    return tgt_text
text = "In this video, I will be showing you how to build a stock price web application in Python using the Streamlit and yfinance library."
get_response(text, 5)
Error:
FutureWarning: prepare_seq2seq_batch is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular __call__ method to prepare your inputs and the tokenizer under the as_target_tokenizer context manager to prepare your targets.
Kindly help me fix this warning.
CodePudding user response:
According to this issue, replace
batch = tokenizer.prepare_seq2seq_batch([input_text], truncation=True, padding='longest', max_length=60, return_tensors="pt").to(torch_device)
with
with tokenizer.as_target_tokenizer():
    tokenized_text = tokenizer(input_text, truncation=True, padding='longest', max_length=60, return_tensors="pt")
batch = tokenized_text.to(torch_device)
Note that, per the warning text itself, as_target_tokenizer is only needed when preparing targets (labels). Since input_text here is a model input, calling the tokenizer directly, without the context manager, also removes the warning.
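For completeness, here is a minimal sketch of the full function with the replacement applied (assuming the same model, tokenizer, and generation settings as in the question; because the text being tokenized is a model input, this version uses the tokenizer's regular __call__ without the context manager):
def get_response(input_text, num_return_sequences):
    # Tokenize the source sentence directly; __call__ replaces prepare_seq2seq_batch for inputs
    batch = tokenizer([input_text], truncation=True, padding='longest', max_length=60, return_tensors="pt").to(torch_device)
    # Generate paraphrases with beam search (10 beams), returning num_return_sequences candidates
    translated = model.generate(**batch, max_length=60, num_beams=10, num_return_sequences=num_return_sequences, temperature=1.5)
    # Decode the generated token ids back to strings, dropping special tokens
    tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
    return tgt_text
Calling get_response(text, 5) then returns a list of five paraphrased strings without the deprecation warning.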