pratikshahp commited on
Commit
9e5df6e
·
verified ·
1 Parent(s): 899c01f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -0
app.py CHANGED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py
"""Minimal Streamlit chat UI backed by a HuggingFace Inference endpoint.

Flow: load the API token, build the LLM client, render prior chat turns,
then on each user message assemble a plain-text transcript and ask the
model to continue it.
"""

import os

import streamlit as st
from dotenv import load_dotenv
from langchain_huggingface import HuggingFaceEndpoint

# Load the HuggingFace API token from a .env file (if present) into the
# process environment.
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")

# Fail fast with a readable message instead of crashing later with
# AttributeError on HF_TOKEN.strip() when the variable is unset
# (os.getenv returns None in that case).
if not HF_TOKEN:
    st.error("HF_TOKEN is not set. Add it to your environment or a .env file.")
    st.stop()

# Initialize the HuggingFace LLM client.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),  # strip stray whitespace/newlines
    temperature=0.7,
    task="text-generation",
    max_new_tokens=200,
)

# Streamlit UI setup.
st.set_page_config(page_title="🧠 HuggingFace Chatbot", page_icon="🤖")
st.title("🤖 HuggingFace Chatbot")
st.caption("Built with Streamlit + LangChain (No schema!)")

# Seed the chat history once per browser session; Streamlit reruns the
# whole script on every interaction, so session_state preserves it.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "Hi there! Ask me anything."}
    ]

# Replay the stored chat history on every rerun.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# Handle a new user message.
if prompt := st.chat_input("Type your message here..."):
    # Record and display the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Build the prompt as a plain transcript ("User: ..." / "Assistant: ..."
    # lines) ending with "Assistant:" so the model continues the dialogue.
    # join() avoids the quadratic += string concatenation of the original.
    context = "".join(
        f"{'User' if msg['role'] == 'user' else 'Assistant'}: {msg['content']}\n"
        for msg in st.session_state.messages
    ) + "Assistant:"

    # Generate, display, and persist the assistant's reply.
    with st.chat_message("assistant"):
        response = llm.invoke(context)
        st.markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})