# app.py
"""Smart study assistant: a single-page Gradio app backed by a local RAG chain.

Pipeline: a persisted Chroma vector store (multilingual MPNet embeddings) feeds
a ``RetrievalQA`` chain over a local HuggingFace causal LM.  Only the Q&A tab
is functional; the other tabs are placeholders wired to ``placeholder_fn``.
"""

import logging

import gradio as gr
from langchain.chains import RetrievalQA
from langchain_community.embeddings import HuggingFaceEmbeddings
# Consistent with the other integrations above: the old ``langchain.llms``
# path is deprecated and removed in recent LangChain releases.
from langchain_community.llms import HuggingFacePipeline
from langchain_community.vectorstores import Chroma
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- 1) Load the local vector store ---
embedding_model = HuggingFaceEmbeddings(
    model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
)
vector_store = Chroma(
    persist_directory="vector_store",
    embedding_function=embedding_model,
)

# --- 2) Load a (relatively lightweight) LLM ---
# If a 7B model is too heavy for this machine, try a smaller one first.
model_id = "openchat/openchat-3.5-0106"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",   # spread layers across available devices automatically
    torch_dtype="auto",  # pick the checkpoint's native precision
)

gen_pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    temperature=0.7,
    top_p=0.9,
)
llm = HuggingFacePipeline(pipeline=gen_pipe)

# --- 3) Build the RAG question-answering chain ---
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",  # concatenate the top-k documents into one prompt
    retriever=vector_store.as_retriever(search_kwargs={"k": 3}),
)


def simple_qa(user_query):
    """Answer a study question with retrieval + generation (single-turn demo).

    Args:
        user_query: The question typed by the user; may be ``None`` or blank.

    Returns:
        The chain's answer string, or a friendly fallback message on empty
        input / backend failure.
    """
    # Guard against None as well as whitespace-only input: Gradio can pass
    # None when the textbox is cleared, and .strip() would raise on it.
    if not user_query or not user_query.strip():
        return "⚠️ 请输入学习问题,例如:什么是定积分?"
    try:
        return qa_chain.run(user_query)
    except Exception as e:
        # Broad catch is deliberate: any backend failure degrades to a
        # friendly message instead of crashing the UI.  logger.exception
        # preserves the traceback that logging.error(f"...") discarded.
        logger.exception("问答失败: %s", e)
        return "抱歉,暂时无法回答,请稍后再试。"


def placeholder_fn(*args, **kwargs):
    """Stub handler for tabs that are not implemented yet."""
    return "功能尚未实现,请等待后续更新。"


with gr.Blocks() as demo:
    gr.Markdown("# 智能学习助手 v2.0\n— 大学生专业课学习助手 —")

    with gr.Tabs():
        # ---------- Smart Q&A ----------
        with gr.TabItem("智能问答"):
            gr.Markdown("> **示例:** 什么是函数的定义域?")
            chatbot = gr.Chatbot()
            user_msg = gr.Textbox(placeholder="输入您的学习问题,然后按回车或点击发送")
            send_btn = gr.Button("发送")

            # Single-turn: render question + answer in the Chatbot widget.
            def update_chat(message, chat_history):
                reply = simple_qa(message)
                chat_history.append((message, reply))
                # First output clears the textbox; second refreshes the chat.
                return "", chat_history

            send_btn.click(
                fn=update_chat,
                inputs=[user_msg, chatbot],
                outputs=[user_msg, chatbot],
            )
            user_msg.submit(
                fn=update_chat,
                inputs=[user_msg, chatbot],
                outputs=[user_msg, chatbot],
            )

        # ---------- Study-outline generation ----------
        with gr.TabItem("生成学习大纲"):
            gr.Markdown("(学习大纲模块,待开发)")
            topic_input = gr.Textbox(label="主题/章节名称", placeholder="如:线性代数 第五章 特征值")
            gen_outline_btn = gr.Button("生成大纲")
            gen_outline_btn.click(placeholder_fn, inputs=topic_input, outputs=topic_input)

        # ---------- Automatic question generation ----------
        with gr.TabItem("自动出题"):
            gr.Markdown("(出题模块,待开发)")
            topic2 = gr.Textbox(label="知识点/主题", placeholder="如:高数 第三章 多元函数")
            difficulty2 = gr.Dropdown(choices=["简单", "中等", "困难"], label="难度")
            count2 = gr.Slider(1, 10, step=1, label="题目数量")
            gen_q_btn = gr.Button("开始出题")
            gen_q_btn.click(placeholder_fn, inputs=[topic2, difficulty2, count2], outputs=topic2)

        # ---------- Answer grading ----------
        with gr.TabItem("答案批改"):
            gr.Markdown("(批改模块,待开发)")
            std_ans = gr.Textbox(label="标准答案", lines=5)
            user_ans = gr.Textbox(label="您的作答", lines=5)
            grade_btn = gr.Button("开始批改")
            grade_btn.click(placeholder_fn, inputs=[user_ans, std_ans], outputs=user_ans)

    gr.Markdown("---\n由 HuggingFace 提供支持 • 版本 2.0")

if __name__ == "__main__":
    demo.launch()