Jenny991 committed
Commit 28e30fe · verified · 1 Parent(s): f987894

Update app.py

Files changed (1)
  1. app.py +6 -29
app.py CHANGED
@@ -1,33 +1,10 @@
+from transformers import pipeline
 import gradio as gr
-from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
-model_id = "MediaTek-Research/Breeze-7B-Instruct-v1_0"  # Breeze7 model name (verify the correct name on Huggingface)
+generator = pipeline("text-generation", model="distilgpt2")
 
-# Load the tokenizer and the model
-tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
+def chat_fn(message):
+    output = generator(message, max_new_tokens=50)[0]["generated_text"]
+    return output
 
-generator = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True)
-
-def chat_fn(user_input, chat_history):
-    chat_history = chat_history or []
-    chat_history.append(f"你: {user_input}")
-
-    # Take the most recent chat turns as the prompt, to keep it from getting too long
-    prompt = "\n".join(chat_history) + "\nAI:"
-
-    response = generator(prompt, max_new_tokens=100, temperature=0.7)
-    reply = response[0]['generated_text'][len(prompt):].strip()
-
-    chat_history.append(f"AI: {reply}")
-    return reply, chat_history
-
-with gr.Blocks() as demo:
-    chat_history = gr.State([])
-
-    chatbot = gr.Chatbot()
-    user_input = gr.Textbox(show_label=False, placeholder="說點什麼...")
-
-    user_input.submit(chat_fn, inputs=[user_input, chat_history], outputs=[chatbot, chat_history])
-
-demo.launch()
+gr.Interface(fn=chat_fn, inputs="text", outputs="text").launch()
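
For reference, a minimal sketch (not part of this commit) of how the new distilgpt2 pipeline call can be exercised on its own, assuming transformers and a backend such as PyTorch are installed; the prompt string is only an example:

    from transformers import pipeline

    # Same model and generation settings as the updated app.py
    generator = pipeline("text-generation", model="distilgpt2")

    # The pipeline returns a list of dicts; "generated_text" holds the prompt
    # followed by the model's continuation
    result = generator("Hello, how are you?", max_new_tokens=50)
    print(result[0]["generated_text"])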