pratikshahp committed
Commit 420143c · verified · 1 Parent(s): 6b0d470

Update app.py

Files changed (1)
  1. app.py +15 -12
app.py CHANGED
@@ -20,35 +20,38 @@ llm = HuggingFaceEndpoint(
  # Streamlit UI setup
  st.set_page_config(page_title="🧠 HuggingFace Chatbot", page_icon="🤖")
  st.title("🤖 HuggingFace Chatbot")
- st.caption("Built with Streamlit + LangChain (no schema, limited to 50 words per answer)")
+ st.caption("Built with Streamlit + LangChain (50-word max answers)")
 
- # Initialize session state for chat history
+ # Initialize chat history
  if "messages" not in st.session_state:
      st.session_state.messages = [
          {"role": "assistant", "content": "Hi there! Ask me anything."}
      ]
 
- # Display chat history
+ # Display chat messages
  for msg in st.session_state.messages:
      with st.chat_message(msg["role"]):
          st.markdown(msg["content"])
 
- # Handle user input
+ # Chat input
  if prompt := st.chat_input("Type your message here..."):
+     # Add user message
      st.session_state.messages.append({"role": "user", "content": prompt})
      with st.chat_message("user"):
          st.markdown(prompt)
 
-     # Build the conversation history with a system-like instruction
-     instruction = "You are a helpful and concise assistant. Always respond in 50 words or fewer.\n\n"
-     chat_history = ""
+     # Construct prompt (only user + assistant, formatted)
+     conversation = "You are a helpful assistant. Keep replies within 50 words.\n\n"
      for msg in st.session_state.messages:
-         role = "User" if msg["role"] == "user" else "Assistant"
-         chat_history += f"{role}: {msg['content']}\n"
-     context = instruction + chat_history + "Assistant:"
+         if msg["role"] == "user":
+             conversation += f"User: {msg['content']}\n"
+         elif msg["role"] == "assistant":
+             continue  # Don't include previous assistant replies
 
-     # Generate response
+     conversation += "Assistant:"  # Prompt the model to continue
+
+     # Generate model response
      with st.chat_message("assistant"):
-         response = llm.invoke(context)
+         response = llm.invoke(conversation).strip().split("Assistant:")[-1].strip()
          st.markdown(response)
          st.session_state.messages.append({"role": "assistant", "content": response})
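
Note: the new prompt builder forwards only user turns plus a closing "Assistant:" cue and intentionally drops earlier assistant replies. A quick standalone check of that logic, copied from the diff; the sample history and the fake echoed completion are illustrative only, not part of the commit:

messages = [
    {"role": "assistant", "content": "Hi there! Ask me anything."},
    {"role": "user", "content": "What is LangChain?"},
]

conversation = "You are a helpful assistant. Keep replies within 50 words.\n\n"
for msg in messages:
    if msg["role"] == "user":
        conversation += f"User: {msg['content']}\n"
    elif msg["role"] == "assistant":
        continue  # previous assistant replies are intentionally skipped

conversation += "Assistant:"
print(conversation)
# You are a helpful assistant. Keep replies within 50 words.
#
# User: What is LangChain?
# Assistant:

# If the endpoint echoes the prompt, the split keeps only the final completion:
raw = conversation + " LangChain is a framework for building LLM applications."
print(raw.strip().split("Assistant:")[-1].strip())
# LangChain is a framework for building LLM applications.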
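
For context, the hunk header shows only the truncated llm = HuggingFaceEndpoint( call. A minimal sketch of what that setup might look like, assuming the langchain_huggingface package; the repo_id, generation parameters, and HF_TOKEN environment variable are assumptions, not taken from this commit:

import os

from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",          # hypothetical model choice
    task="text-generation",
    max_new_tokens=80,                                      # roughly enough for 50 words
    temperature=0.7,
    huggingfacehub_api_token=os.environ.get("HF_TOKEN"),    # assumed token env var
)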