Upload app.py with huggingface_hub
app.py CHANGED
@@ -580,16 +580,13 @@ class NutritionBot:
         #self.client = ChatOpenAI(
         #    model_name="gpt-4o",  # Used gpt-4o to get improved results; Specify the model to use (e.g., GPT-4 optimized version)
         #    #api_key=my_api_key,  # API key for authentication
-        #    #api_key="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",  # API key for authentication
-        #    #api_key="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",
-        #    api_key="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",
         #    temperature=0  # Controls randomness in responses; 0 ensures deterministic results
         #)

         self.client = ChatOpenAI(
             openai_api_base=endpoint,
             openai_api_key=api_key,
-            model="gpt-4o
+            model="gpt-4o",
             streaming=False,  # Explicitly disabling streaming
             temperature=0
         )
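
For reference, a minimal sketch of the pattern this hunk moves toward: the hardcoded keys in the commented-out block are removed, the missing closing quote on model="gpt-4o is fixed, and the client is built from values supplied at runtime. The import path and environment-variable names below are assumptions, not shown in the diff.

    import os
    from langchain_openai import ChatOpenAI  # assumed import; the diff does not show it

    # Assumed environment variables (e.g., Hugging Face Space secrets); the point is
    # that no key string lives in app.py, unlike the lines removed above.
    endpoint = os.environ.get("OPENAI_API_BASE")
    api_key = os.environ.get("OPENAI_API_KEY")

    client = ChatOpenAI(
        openai_api_base=endpoint,   # custom endpoint / gateway URL
        openai_api_key=api_key,     # key injected at runtime, never committed
        model="gpt-4o",             # closing quote and trailing comma fixed in this commit
        streaming=False,            # explicitly disable streaming
        temperature=0,              # deterministic responses
    )
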
@@ -691,10 +688,10 @@ class NutritionBot:

         # Build a context string from the relevant history
         context = "Previous relevant interactions:\n"
-        for memory in relevant_history:
-            context += f"Customer: {memory['memory']}\n"  # Customer's past messages
-            context += f"Support: {memory['memory']}\n"  # Chatbot's past responses
-            context += "---\n"
+        #for memory in relevant_history:
+        #    context += f"Customer: {memory['memory']}\n"  # Customer's past messages
+        #    context += f"Support: {memory['memory']}\n"  # Chatbot's past responses
+        #    context += "---\n"

         # Print context for debugging purposes
         #st.write("Context: ", context)
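
This hunk comments out the history loop rather than deleting it. A hedged sketch of what the loop does when enabled, assuming relevant_history is a list of dicts whose stored text lives under a 'memory' key (that field name comes from the commented-out lines; nothing else about the memory store is shown in the diff):

    def build_context(relevant_history):
        """Concatenate past interactions into a prompt prefix (sketch only)."""
        context = "Previous relevant interactions:\n"
        for memory in relevant_history:
            # Both roles read the same 'memory' field here, exactly as in the
            # commented-out code; a richer store would separate the two turns.
            context += f"Customer: {memory['memory']}\n"
            context += f"Support: {memory['memory']}\n"
            context += "---\n"
        return context
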
@@ -790,7 +787,7 @@ def nutrition_disorder_streamlit():
         else:
             st.write("In st.session_state...")

-            st.write(f"Getting response...\
+            st.write(f"Getting response...\nUser ID: {st.session_state.user_id}\nUser query: {user_query}")
             response = st.session_state.chatbot.handle_customer_query(st.session_state.user_id, user_query)
             st.write(f"Got response: {response}")
             # Blank #7: Fill in with the method to handle queries (e.g., handle_customer_query)
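
For context, a sketch of the branch this hunk touches: the commit only expands the debug message to echo the user ID and query before the chatbot is called. Wrapping it as a standalone function here is purely illustrative; in app.py it sits inline inside nutrition_disorder_streamlit().

    import streamlit as st

    def answer_query(user_query: str) -> None:
        """Illustrative wrapper around the query-handling branch shown above."""
        st.write("In st.session_state...")
        # Debug output expanded by this commit to include the user ID and the query
        st.write(f"Getting response...\nUser ID: {st.session_state.user_id}\nUser query: {user_query}")
        response = st.session_state.chatbot.handle_customer_query(
            st.session_state.user_id, user_query
        )
        st.write(f"Got response: {response}")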