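"""Chinese GPT-2 chatbot with conversation memory, served through a Gradio web UI."""
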
from transformers import pipeline
import gradio as gr

# Chinese GPT-2 dialogue model (CPU-friendly)
generator = pipeline(
    "text-generation",
    model="thu-coai/CDial-GPT2_LCCC-base",
    tokenizer="thu-coai/CDial-GPT2_LCCC-base",
    device=-1  # -1 = run on CPU
)
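
# Note: the first run downloads the model weights from the Hugging Face Hub and caches
# them locally (by default under ~/.cache/huggingface), so the first start-up is slow;
# later runs reuse the cache.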

# Chat handler: builds a prompt from the conversation history and generates the next reply
def chat_fn(message, history):
    history = history or []
    
    # Merge all previous turns into a single prompt
    prompt = ""
    for user_msg, bot_msg in history:
        prompt += f"你說:{user_msg}\nAI說:{bot_msg}\n"
    prompt += f"你說:{message}\nAI說:"

    # Generate a new response (the returned text includes the original prompt)
    output = generator(prompt, max_new_tokens=80, pad_token_id=0)[0]["generated_text"]

    # Extract the AI reply: take the text after the last "AI說:" and cut it off
    # where the model starts inventing the next "你說:" user turn
    response = output.split("AI說:")[-1].split("你說:")[0].strip()

    # Append the new turn to the history
    history.append((message, response))
    return history, history
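
# Optional tweak (an assumption, not part of the original script): do_sample / top_p /
# temperature are standard transformers generation kwargs; passing them through the pipeline
# usually gives less repetitive replies than greedy decoding, e.g.
#   generator(prompt, max_new_tokens=80, do_sample=True, top_p=0.9, temperature=0.7, pad_token_id=0)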

# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 中文聊天機器人(記住上下文)")

    chatbot = gr.Chatbot(label="GPT2 中文對話")
    msg = gr.Textbox(show_label=False, placeholder="請輸入訊息,Enter 送出")
    clear = gr.Button("🧹 清除對話")

    state = gr.State([])  # stores the conversation history

    msg.submit(chat_fn, inputs=[msg, state], outputs=[chatbot, state])
    clear.click(lambda: ([], []), outputs=[chatbot, state])
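
    # Optional (an assumption, standard Gradio event chaining): chaining
    # .then(lambda: "", None, msg) onto the msg.submit(...) call above clears
    # the textbox after each message is sent.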

demo.launch()
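
# Launch notes (standard Gradio options, not part of the original script):
#   demo.launch(share=True) creates a temporary public URL;
#   demo.launch(server_name="0.0.0.0", server_port=7860) serves the app on the local network.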