import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")

def paraphrase_text(input_text, num_return_sequences=3, num_beams=5):
    # Gradio sliders may pass floats; generate() expects integers, and beam
    # search cannot return more sequences than it has beams
    num_beams = max(int(num_beams), 1)
    num_return_sequences = min(int(num_return_sequences), num_beams)

    # The model was fine-tuned with a "paraphrase: " task prefix
    input_ids = tokenizer.encode("paraphrase: " + input_text, return_tensors="pt", truncation=True)

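    # Beam search decoding: no_repeat_ngram_size=2 blocks repeated 2-grams
    # within a candidate, and early_stopping ends the search once all beams
    # have produced a complete sequence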
    outputs = model.generate(
        input_ids,
        max_length=256,
        num_beams=num_beams,
        num_return_sequences=num_return_sequences,
        no_repeat_ngram_size=2,
        early_stopping=True
    )

    paraphrased_texts = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
    # Join the candidates into one string so they display cleanly in the single Textbox output
    return "\n\n".join(paraphrased_texts)

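# Wire the function to a simple web UI: the two sliders map to
# num_return_sequences and num_beams, in the order of the function signature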
iface = gr.Interface(
    fn=paraphrase_text,
    inputs=[
        gr.Textbox(lines=5, placeholder="Enter text to paraphrase here..."),
        gr.Slider(1, 5, value=3, step=1, label="Number of paraphrases"),
        gr.Slider(1, 10, value=5, step=1, label="Beam search size")
    ],
    outputs=gr.Textbox(label="Paraphrased Outputs"),
    title="Paraphrasing with T5 Model",
    description="Enter text to see paraphrased versions.",
)

iface.launch()