nnmthuw committed
Commit 3cecaab · 1 Parent(s): 5bb2cb4

Add application file

Files changed (1)
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ import gradio as gr
+
+ def envit5_translation(text):
+     inputs = [f"en: {text}"]  # EnViT5 expects a language tag prefixed to the source text
+     outputs = model.generate(tokenizer(inputs, return_tensors="pt", padding=True).input_ids.to(device), max_length=512)
+     results = tokenizer.batch_decode(outputs, skip_special_tokens=True)
+     return results[0][4:]  # strip the leading "vi: " tag from the decoded output
+
+ def my_translation(text):
+     return "My Translation"  # placeholder for a custom translation model
+
+ def finetune_BERT(text):
+     return "BERT"  # placeholder for a fine-tuned BERT-based model
+
+ def translation(text):
+     output1 = my_translation(text)
+     output2 = envit5_translation(text)
+     output3 = finetune_BERT(text)
+
+     return (output1, output2, output3)
+
+ if __name__ == "__main__":
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     model_name = "VietAI/envit5-translation"
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
+
+     inputs = [
+         "textbox"
+     ]
+
+     with gr.Blocks() as app:
+         gr.Interface(
+             fn=translation,
+             inputs=inputs,
+             outputs=["textbox", "textbox", "textbox"]  # one output box per translation backend
+         )
+
+     app.launch(share=True)  # "share" creates a public Gradio link
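A minimal, untested sketch (not part of the commit) of how the EnViT5 path in app.py could be exercised outside the Gradio UI, assuming the VietAI/envit5-translation weights download successfully; the input sentence is a made-up example, and the final slice mirrors the results[0][4:] step in envit5_translation().

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Same model setup as the __main__ block of app.py
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = "VietAI/envit5-translation"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)

text = "Hello, how are you?"  # hypothetical input sentence
inputs = [f"en: {text}"]      # same language-tag convention used in app.py
batch = tokenizer(inputs, return_tensors="pt", padding=True)
outputs = model.generate(batch.input_ids.to(device), max_length=512)
decoded = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(decoded[0][4:])  # drop the 4-character "vi: " tag, as app.py does before returning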