TheTrigon commited on
Commit
dae59a8
·
verified ·
1 Parent(s): b8bafa3

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -0
app.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+
4
# Load model and tokenizer once at module import time (a heavy, blocking
# download/initialization — happens before the Gradio app starts serving).
# NOTE(review): trust_remote_code=True executes the model repo's custom Python
# code; required for Qwen's custom architecture, but only safe for trusted repos.
MODEL_NAME = "Qwen/Qwen-7B-Chat"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)
8
+
9
# Define the refactor function
def refactor_code(message, code):
    """Ask the model to refactor `code` according to the instruction `message`.

    Args:
        message: Natural-language instruction describing the desired change.
        code: The source code to refactor.

    Returns:
        The model's generated text only (the echoed prompt is stripped),
        decoded without special tokens.
    """
    input_text = f"{message}\n\nCode:\n{code}"
    # Truncate long prompts so the combined instruction + code fits the model.
    inputs = tokenizer(input_text, return_tensors="pt", max_length=1024, truncation=True)
    # Pass the full tokenizer output (input_ids AND attention_mask);
    # passing only input_ids drops the mask, which transformers warns about
    # and which can degrade generation when padding is involved.
    outputs = model.generate(**inputs, max_new_tokens=200)
    # Decode only the newly generated tokens — generate() returns the prompt
    # followed by the completion, and callers want just the completion.
    prompt_len = inputs["input_ids"].shape[1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
15
# Gradio Interface: wire the refactor function to a simple two-textbox UI.
# Build the input components up front, then assemble the interface.
_instruction_box = gr.Textbox(label="Message (Instruction)")
_code_box = gr.Textbox(label="Code", lines=15)

interface = gr.Interface(
    fn=refactor_code,
    inputs=[_instruction_box, _code_box],
    outputs="text",
    title="Code Refactor with Qwen Model",
    description="Provide an instruction and code to refactor. The model will return the updated code.",
)

# Launch the app
interface.launch()