pratikshahp committed on
Commit 6b0d470 · verified · 1 Parent(s): e5fa4c0

Update app.py

Files changed (1)
  1. app.py +7 -8
app.py CHANGED
@@ -20,7 +20,7 @@ llm = HuggingFaceEndpoint(
 # Streamlit UI setup
 st.set_page_config(page_title="🧠 HuggingFace Chatbot", page_icon="🤖")
 st.title("🤖 HuggingFace Chatbot")
-st.caption("Built with Streamlit + LangChain (No schema!)")
+st.caption("Built with Streamlit + LangChain (no schema, limited to 50 words per answer)")
 
 # Initialize session state for chat history
 if "messages" not in st.session_state:
@@ -35,20 +35,19 @@ for msg in st.session_state.messages:
 
 # Handle user input
 if prompt := st.chat_input("Type your message here..."):
-    # Add user message
     st.session_state.messages.append({"role": "user", "content": prompt})
     with st.chat_message("user"):
         st.markdown(prompt)
 
-    context = ""
+    # Build the conversation history with a system-like instruction
+    instruction = "You are a helpful and concise assistant. Always respond in 50 words or fewer.\n\n"
+    chat_history = ""
     for msg in st.session_state.messages:
         role = "User" if msg["role"] == "user" else "Assistant"
-        context += f"{role}: {msg['content']}\n"
+        chat_history += f"{role}: {msg['content']}\n"
+    context = instruction + chat_history + "Assistant:"
 
-    # Add explicit instruction to limit response
-    context += "Assistant: (Please answer in 50 words or fewer.)"
-
-    # Generate response from LLM
+    # Generate response
     with st.chat_message("assistant"):
        response = llm.invoke(context)
        st.markdown(response)
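
For readers who want the change in context, here is a minimal sketch of how the relevant part of app.py reads after this commit. Everything outside the diff hunks is an assumption: the import path, the repo_id and generation settings passed to HuggingFaceEndpoint, the body of the history-replay loop, and the final line that persists the assistant turn are illustrative, inferred from the visible hunk headers and common Streamlit chat patterns rather than taken from the file.

import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint  # assumed import path; older apps use langchain_community.llms

# Assumed endpoint setup -- the diff only shows "llm = HuggingFaceEndpoint(" in the hunk header
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",  # illustrative model id, not in the diff
    max_new_tokens=128,                            # illustrative generation settings
    temperature=0.7,
)

# Streamlit UI setup
st.set_page_config(page_title="🧠 HuggingFace Chatbot", page_icon="🤖")
st.title("🤖 HuggingFace Chatbot")
st.caption("Built with Streamlit + LangChain (no schema, limited to 50 words per answer)")

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay earlier turns so the conversation survives Streamlit reruns (loop body assumed)
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# Handle user input
if prompt := st.chat_input("Type your message here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Build the conversation history with a system-like instruction
    instruction = "You are a helpful and concise assistant. Always respond in 50 words or fewer.\n\n"
    chat_history = ""
    for msg in st.session_state.messages:
        role = "User" if msg["role"] == "user" else "Assistant"
        chat_history += f"{role}: {msg['content']}\n"
    context = instruction + chat_history + "Assistant:"

    # Generate response
    with st.chat_message("assistant"):
        response = llm.invoke(context)
        st.markdown(response)
        # Assumed: store the assistant reply so the replay loop shows it on the next rerun (outside the shown hunks)
        st.session_state.messages.append({"role": "assistant", "content": response})

The practical change is that the 50-word constraint now leads the prompt as a system-style instruction and the string ends with a bare "Assistant:" cue, which gives a plain text-completion endpoint a clearer signal to answer in role than the old approach of appending the constraint after the history.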