from transformers import pipeline
import gradio as gr

# Chinese GPT-2 dialogue model (LCCC-base); device=-1 forces CPU inference.
generator = pipeline(
    "text-generation",
    model="thu-coai/CDial-GPT2_LCCC-base",
    tokenizer="thu-coai/CDial-GPT2_LCCC-base",
    device=-1,  # -1 = CPU
)


def chat_fn(message, history):
    """Generate a bot reply to *message*, conditioned on the chat history.

    Args:
        message: The user's new utterance (str).
        history: List of (user_msg, bot_msg) tuples, or None/[] on the
            first turn.

    Returns:
        (history, history): the updated history twice, because it feeds
        both the Chatbot display and the State component.
    """
    history = history or []

    # Flatten all previous turns into a single text prompt, ending with an
    # open "AI說:" marker for the model to complete.
    prompt = ""
    for user_msg, bot_msg in history:
        prompt += f"你說:{user_msg}\nAI說:{bot_msg}\n"
    prompt += f"你說:{message}\nAI說:"

    # The pipeline returns prompt + continuation in "generated_text".
    output = generator(prompt, max_new_tokens=80, pad_token_id=0)[0]["generated_text"]

    # Strip the echoed prompt by length (splitting on "AI說:" is fragile:
    # the reply itself may contain that marker), then truncate at the point
    # where the model starts inventing the next user turn.
    # BUGFIX: the original used `.split("你說:")[-1]`, which kept the text
    # AFTER the hallucinated user marker — returning the fake user message
    # instead of the bot's reply.  `[0]` keeps the text BEFORE it.
    if output.startswith(prompt):
        continuation = output[len(prompt):]
    else:
        continuation = output.split("AI說:")[-1]
    response = continuation.split("你說:")[0].strip()

    history.append((message, response))
    return history, history


# ---- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 中文聊天機器人(記住上下文)")
    chatbot = gr.Chatbot(label="GPT2 中文對話")
    msg = gr.Textbox(show_label=False, placeholder="請輸入訊息,Enter 送出")
    clear = gr.Button("🧹 清除對話")
    state = gr.State([])  # holds the (user, bot) tuple history across turns

    # Enter in the textbox runs one chat turn; the clear button resets both
    # the visible chat log and the stored history.
    msg.submit(chat_fn, inputs=[msg, state], outputs=[chatbot, state])
    clear.click(lambda: ([], []), outputs=[chatbot, state])

demo.launch()