jenngang committed
Commit 5adfece · verified · 1 Parent(s): c504329

Upload app.py with huggingface_hub

Files changed (1):
  1. app.py +614 -0
app.py ADDED
@@ -0,0 +1,614 @@
import streamlit as st
import json
import os
import uuid

import pandas as pd
from datetime import datetime
import sqlite3
import weave

from langchain.memory import ConversationSummaryBufferMemory
from openai import OpenAI
from langchain_openai import ChatOpenAI
# OpenAIEmbeddings is not used below (the vector store uses SentenceTransformerEmbeddings);
# the current import path is kept in case it is needed later.
from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.caches import BaseCache


from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_community.agent_toolkits import create_sql_agent
from langchain.agents import create_tool_calling_agent, AgentExecutor
from langchain_core.tools import tool


from huggingface_hub import CommitScheduler
from pathlib import Path


#BaseCache.register_cache_type("memory", lambda: None)
#ChatOpenAI.model_rebuild()
#====================================SETUP=====================================#
# Fetch secrets from Hugging Face Spaces

model_name = "gpt-4o"

# Extract the OpenAI key and endpoint from the configuration

api_key = os.environ["API_KEY"]
endpoint = os.environ["OPENAI_API_BASE"]


# Define the location of the SQLite database
db_loc = 'ecomm.db'

# Create a SQLDatabase instance from the SQLite database URI
db = SQLDatabase.from_uri(f"sqlite:///{db_loc}")

# Retrieve the schema information of the database tables
database_schema = db.get_table_info()


# Initialize W&B Weave with a project name - this automatically logs all LLM calls made via OpenAI or Gemini.
# Make sure to save your W&B API key in the Space secrets as WANDB_API_KEY.
weave.init('ecomm_support')


#=================================Setup Logging=====================================#


log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

log_scheduler = CommitScheduler(
    repo_id="chatbot-logs",     # Dataset repo where the action logs are saved.
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=5                     # Commit pending log files every 5 minutes.
)


history_file = Path("history/") / f"data_{uuid.uuid4()}.json"
history_folder = history_file.parent

history_scheduler = CommitScheduler(
    repo_id="chatbot-history",  # Dataset repo where the chat histories are saved.
    repo_type="dataset",
    folder_path=history_folder,
    path_in_repo="data",
    every=5                     # Commit pending history files every 5 minutes.
)

#=================================SQL_AGENT=====================================#

# Define the system message for the agent, including instructions and available tables
system_message = f"""You are a SQLite expert agent designed to interact with a SQLite database.
Given an input question, create a syntactically correct SQLite query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most 100 results using the LIMIT clause as per SQLite.
You can order the results by a relevant column to return the most informative examples in the database.
You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
You have access to tools for interacting with the database.
Only use the given tools. Only use the information returned by the tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
You are not allowed to make up dummy data.
If the question does not seem related to the database, just return "I don't know" as the answer.
Before you execute the query, briefly explain why you are executing it and what you expect to find.
Only use the following tables:
{database_schema}
"""

# Create a full prompt template for the agent using the system message and placeholders
full_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_message),
        ("human", '{input}'),
        MessagesPlaceholder("agent_scratchpad")
    ]
)

# Initialize the ChatOpenAI model with the extracted configuration
llm = ChatOpenAI(
    openai_api_base=endpoint,
    openai_api_key=api_key,
    model="gpt-4o",
    streaming=False  # Explicitly disable streaming
)

# Create the SQL agent using the ChatOpenAI model, database, and prompt template
sqlite_agent = create_sql_agent(
    llm=llm,
    db=db,
    prompt=full_prompt,
    agent_type="openai-tools",
    agent_executor_kwargs={'handle_parsing_errors': True},
    max_iterations=5,
    verbose=True
)
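
# Illustrative sketch: a quick way to exercise the SQL agent by itself before it is wrapped
# as a tool. The DEBUG_SQL_AGENT flag and the sample question are assumptions added for
# local testing only; they are not required by the app.
if os.environ.get("DEBUG_SQL_AGENT"):
    # AgentExecutor.invoke takes a dict keyed by "input" and returns a dict with "output".
    demo = sqlite_agent.invoke({"input": "How many orders were placed in the last 30 days?"})
    print(demo["output"])
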
#### Convert the SQL agent into a tool that the support agent can use.

@tool
def sql_tool(user_input):
    """
    Gathers information regarding purchases, transactions, returns, refunds, etc.
    Executes a SQL query using the sqlite_agent and returns the result.
    Args:
        user_input (str): A natural language query explaining what information is required, along with the details needed to retrieve it.
    Returns:
        str: The result of the SQL query execution. If an error occurs, the exception message is returned as a string.
    """
    try:
        # Invoke the sqlite_agent with the user input (a natural language question)
        response = sqlite_agent.invoke(user_input)

        # Extract the output from the response
        prediction = response['output']

    except Exception as e:
        # If an exception occurs, capture the exception message as a string
        prediction = str(e)

    # Return the result or the exception message
    return prediction
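
# Illustrative sketch: calling the wrapped tool directly. A @tool-decorated function is a
# LangChain tool object, so it is exercised via .invoke() rather than a plain call. The
# DEBUG_SQL_TOOL flag and the sample question are assumptions for local testing only.
if os.environ.get("DEBUG_SQL_TOOL"):
    print(sql_tool.invoke("What is the status of the most recent order for customer id 1?"))
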

#=================================== RAG TOOL======================================#
qna_system_message = """
You are an assistant to a support agent. Your task is to provide relevant information about the company's products and policies.
User input will include the necessary context for you to answer their questions. This context will begin with the token: ###Context.
The context contains references to specific portions of documents relevant to the user's query, along with source links.
The source for a context will begin with the token ###Source.
When crafting your response:
1. Select only context relevant to answer the question.
2. User questions will begin with the token: ###Question.
3. If the context provided doesn't answer the question, respond with - "I do not have sufficient information to answer that"
4. If the user asks about a product, list all the products relevant to their query. If you don't have that product, try to cross-sell one of the related products we do have.
You should get information about similar products in the context.
Please adhere to the following guidelines:
- Your response should only be about the question asked and nothing else.
- Answer only using the context provided.
- Do not mention anything about the context in your final answer.
- If the answer is not found in the context, it is very important for you to respond with "I don't know."
- Always quote the source when you use the context. Cite the relevant source at the end of your response under the section - Source:
- Do not make up sources. Use the links provided in the sources section of the context and nothing else. You are prohibited from providing other links/sources.
Here is an example of how to structure your response:
Answer:
[Answer]
Source:
[Source]
"""

qna_user_message_template = """
###Context
Here are some documents and their sources that may be relevant to the question mentioned below.
{context}
###Question
{question}
"""

# Location of the persisted vector DB
persisted_vectordb_location = 'policy_docs/policy_docs'
# Collection name
collection_name = 'policy_docs'

embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large')
# Load the persisted DB
vector_store = Chroma(
    collection_name=collection_name,
    persist_directory=persisted_vectordb_location,
    embedding_function=embedding_model
)

retriever = vector_store.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 5}
)
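
# Illustrative sketch: inspecting what the retriever returns for a sample policy question,
# which is useful when tuning k or checking that sources are populated. The DEBUG_RETRIEVER
# flag and the sample query are assumptions for local testing only.
if os.environ.get("DEBUG_RETRIEVER"):
    for doc in retriever.invoke("What is the return policy for electronics?"):
        print(doc.metadata.get("source"), "->", doc.page_content[:80])
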


client = OpenAI(
    api_key=api_key,
    base_url=endpoint
)


@tool
def rag(user_input: str) -> str:
    """
    Answers questions about products and policies using RAG over product descriptions, product policies, and the business's general policies.
    Args:
        user_input (str): The input question or query from the user.
    Returns:
        str: The generated response, or an error message if an exception occurs.
    """
    relevant_document_chunks = retriever.invoke(user_input)
    context_list = [d.page_content + "\n ###Source: " + d.metadata['source'] + "\n\n " for d in relevant_document_chunks]

    context_for_query = ". ".join(context_list)

    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
            )
        }
    ]
    try:
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=prompt
        )

        prediction = response.choices[0].message.content
    except Exception as e:
        prediction = f'Sorry, I encountered the following error: \n {e}'

    return prediction
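
# Illustrative sketch: an end-to-end check of the RAG tool (retrieval plus a grounded answer
# with a Source section). The DEBUG_RAG flag and the sample question are assumptions for
# local testing only.
if os.environ.get("DEBUG_RAG"):
    print(rag.invoke("Do you sell wireless headphones, and what is the warranty on them?"))
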


#=================================== Other TOOLS======================================#

# Function to log the chat history for a session
def log_history(email: str, chat_history: list) -> None:
    # Save the record to the history file
    with history_scheduler.lock:
        # Open the history file in append mode and write one JSON record per line
        with history_file.open("a") as f:
            f.write(json.dumps({
                "email": email,
                "chat_history": chat_history,
                "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            }) + "\n")

    #st.write("chat_recorded")


# Function to log tool actions (feedback, deferrals, etc.)
def log_action(customer_id: str, task: str, details: str) -> None:
    # Save the record to the log file
    with log_scheduler.lock:
        # Open the log file in append mode and write one JSON record per line
        with log_file.open("a") as f:
            f.write(json.dumps({
                "customer_id": customer_id,
                "task": task,
                "details": details
            }) + "\n")


@tool
def register_feedback(intent, customer_id, feedback, rating):
    """
    Logs customer feedback into the feedback log.
    Args:
        intent (str): The category of the support query (e.g., "cancel_order", "get_refund").
        customer_id (int): The unique ID of the customer.
        feedback (str): The feedback provided by the customer.
        rating (int): The rating provided by the customer, out of 5.
    Returns:
        str: Success message.
    """
    details = {
        "intent": intent,
        "customer_id": customer_id,
        "feedback": feedback,
        "rating": rating
    }
    log_action(customer_id, "register_feedback", details)
    return "Feedback registered successfully!"

@tool
def defer_to_human(customer_id, query, intent, reason):
    """
    Logs customer details and the reason for deferring to a human agent.
    Args:
        customer_id (int): The unique ID of the customer whose query is being deferred.
        query (str): The customer's query or issue that needs human intervention.
        intent (str): The category of the support query.
        reason (str): The reason why the query cannot be resolved by the chatbot.
    Returns:
        str: Success message indicating the deferral was logged.
    """

    details = {
        "customer_id": customer_id,
        "query": query,
        "reason": reason,
        "intent": intent
    }

    log_action(customer_id, "defer_to_human", details)
    return "Case deferred to a human agent and logged successfully!"


@tool
def days_since(delivered_date: str) -> str:
    """
    Calculates the number of days since the product was delivered. This helps in determining whether the product is within the return period or not.
    Args:
        delivered_date (str): The date when the product was delivered, in the format 'YYYY-MM-DD'.
    Returns:
        str: The number of days since delivery, or an error message if the date cannot be parsed.
    """
    try:
        # Convert the delivered_date string to a datetime object
        delivered_date = datetime.strptime(delivered_date, '%Y-%m-%d')
        today = datetime.today()

        # Calculate the difference in days
        days_difference = (today - delivered_date).days

        return str(days_difference)
    except ValueError as e:
        return f"Error: {e}"
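
# Illustrative sketch: the tool returns the elapsed days as a string, or an error message for
# a badly formatted date. The DEBUG_DAYS_SINCE flag and the sample dates are assumptions.
if os.environ.get("DEBUG_DAYS_SINCE"):
    print(days_since.invoke("2024-01-15"))  # number of days since that date, as a string
    print(days_since.invoke("15-01-2024"))  # roughly: "Error: time data '15-01-2024' does not match format '%Y-%m-%d'"
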

def build_prompt(df):
    # `df` is the prefetched customer/order summary string; curly braces have already been
    # stripped by fetch_details so it can be embedded safely in the prompt template.
    system_message = f"""
    You are an intelligent e-commerce chatbot designed to assist users with pre-order and post-order queries. Your job is to
    gather the necessary information from the user to help them with their query.
    If at any point you cannot determine the next steps, defer to a human. You do not have clearance to go beyond the scope of the following flow.
    Do not provide SQL inputs to the sql tool - you only need to ask in natural language for the information you need.
    You are only allowed to provide information relevant to the particular customer whose information is provided below; you can provide information about this customer only. The following is the information about the customer from the last 30 days:

    {df}

    If this information is not enough to answer the question, identify the customer from the data above and fetch the necessary information using the sql_tool or rag tool - do not fetch information about other customers.
    Use the details provided above to fetch information from the sql tool - like customer id, email and phone. Refrain from asking customers for details unless necessary.
    If the customer asks about a product, you should act as a sales representative, help them understand the product as well as possible, and provide all the necessary information. You should also provide them the link to the product, which you can get from the source of the information.
    If a customer asks about a policy, stay grounded in the context provided to you. If at any point you don't know the right thing to say, politely tell the customer that you are not the right person to answer this and defer it to a human.
    Any time you defer to a human, you should tell the customer why you did it in a polite manner.
    MANDATORY STEP:
    After helping the customer with their concern,
    - Ask if the customer needs help with anything else. If they ask for anything from the above list, help them and along with that,
    1. Ask for their feedback and a rating out of 5.
    2. Then use the `register_feedback` tool to log it. You MUST ask for customer feedback along with asking what else they need help with.
    3. After receiving the customer's feedback, exit the chat by responding with 'Bye'.

    ---
    ### **Handling Out-of-Scope Queries:**
    If the user's query, at any point, is not covered by the workflows above:
    - Respond:
    > "This is beyond my skill. Let me connect you to a customer service agent", get the necessary details from the customer, and use the defer_to_human tool.
    - Get customer feedback and a rating out of 5.
    - After getting feedback, end the conversation by saying 'Bye'.
    ---
    ### **IMPORTANT Notes for the Model:**
    - Always fetch additional required details from the database and do not blindly believe details provided by the customer, like customer id, email and phone number. You should get the customer id from the system prompt. Cross-check with the database and stay loyal to the database.
    - Be empathetic to the customer but loyal to the instructions provided to you. Try to de-escalate a situation before deferring it to a human, and defer to a human only once.
    - Always aim to minimize the number of questions asked by retrieving as much information as possible from the `sql_tool` and `rag` tools.
    - Follow the exact workflows for each query category.
    - Always confirm the order id, even if the customer has only one order, before you fetch any details.
    """

    #st.write(system_message)
    prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ])

    return prompt


#===============================================Streamlit=========================================#


def login_page():
    st.title("Login Page")

    email = st.text_input("Email")
    # The "Password" field is expected to contain the phone number on file for the customer.
    password = st.text_input("Password", type="password")

    login_button = st.button("Login")

    if login_button:
        if authenticate_user(email, password):
            st.session_state.logged_in = True
            st.session_state.email = email
            st.success("Login successful! Redirecting to Chatbot...")
            st.rerun()
        else:
            st.error("Invalid email or password.")


def authenticate_user(email, phone):
    connection = sqlite3.connect("ecomm.db")  # Replace with your .db file path
    cursor = connection.cursor()

    query = "SELECT first_name FROM customers WHERE email = ? AND phone = ?"
    cursor.execute(query, (email, phone))
    user = cursor.fetchone()
    connection.close()

    if user:
        return True   # Login successful
    return False      # Login failed

### Prefetch details

def fetch_details(email):
    connection = None
    try:
        # Connect to the SQLite database
        connection = sqlite3.connect("ecomm.db")  # Replace with your .db file path
        cursor = connection.cursor()

        query = """
        SELECT
            c.customer_id,
            c.first_name || ' ' || c.last_name AS customer_name,
            c.email,
            c.phone,
            c.address AS customer_address,
            o.order_id,
            o.order_date,
            o.status AS order_status,
            o.price AS order_price,
            p.name AS product_name,
            p.price AS product_price,
            i.invoice_date,
            i.amount AS invoice_amount,
            i.invoice_url,
            s.delivery_date,
            s.shipping_status,
            s.shipping_address,
            r.refund_amount,
            r.refund_status
        FROM Customers c
        LEFT JOIN Orders o ON c.customer_id = o.customer_id
        LEFT JOIN Products p ON o.product_id = p.product_id
        LEFT JOIN Invoices i ON o.order_id = i.order_id
        LEFT JOIN Shipping s ON o.order_id = s.order_id
        LEFT JOIN Refund r ON o.order_id = r.order_id
        WHERE o.order_date >= datetime('now', '-30 days')
        AND c.email = ?
        ORDER BY o.order_date DESC;
        """

        cursor.execute(query, (email,))
        columns = [description[0] for description in cursor.description]  # Extract column names
        results = cursor.fetchall()  # Fetch all rows
        #st.write(results)
        # Convert results into a list of dictionaries
        details = [dict(zip(columns, row)) for row in results]
        #st.write(details)
        # Replace curly braces so the string can be embedded in a prompt template
        # without being mistaken for template variables.
        return str(details).replace("{", "/").replace("}", "/")

    except Exception as e:
        st.write(f"Error: {e}")
    finally:
        # Close the connection if it was opened
        if connection:
            cursor.close()
            connection.close()
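
# Illustrative sketch: a quick local check of the prefetch query outside the chat flow. The
# DEBUG_FETCH flag and the sample email are assumptions; note the returned string has its
# braces replaced so it can be dropped into the prompt template safely.
if os.environ.get("DEBUG_FETCH"):
    print(fetch_details("customer@example.com")[:500])
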

# Function to process user input and generate a chatbot response

def chatbot_interface():
    st.title("E-Commerce Chatbot")

    if 'conversation_history' not in st.session_state:
        st.session_state.conversation_history = [{"role": "assistant", "content": "Welcome! I am Raha, how can I help you on this beautiful day?"}]

    details = fetch_details(st.session_state.email)
    # st.write(details)
    prompt = build_prompt(details)
    # Tools the support agent is allowed to call (the system prompt relies on these).
    tools = [sql_tool, defer_to_human, rag, register_feedback, days_since]

    chatbot = ChatOpenAI(
        openai_api_base=endpoint,
        openai_api_key=api_key,
        model="gpt-4o",
        streaming=False,  # Explicitly disable streaming
        temperature=0
    )

    try:
        st.write("Attempting direct LLM test...")
        test_response = chatbot.invoke("Hello, can you hear me?")
        st.success("Direct LLM Test OK: Received response.")
        # Optionally display part of the response if needed for confirmation
        # st.write(test_response.content[:100])
    except Exception as e:
        st.error(f"Direct LLM Test FAILED: {e}")
        st.error("The basic connection to the LLM endpoint might be failing. Check API Key, Endpoint URL, and Network.")
        # Stop execution here if the basic test fails
        st.stop()

    agent = create_tool_calling_agent(chatbot, tools, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

    # Display chat messages from history on app rerun
    for message in st.session_state.conversation_history:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # React to user input
    if user_input := st.chat_input("You: ", key="chat_input"):
        # Display user message in chat message container
        st.chat_message("user").markdown(user_input)
        with st.spinner("Processing..."):

            # Add user message to conversation history
            st.session_state.conversation_history.append({"role": "user", "content": user_input})

            conversation_input = "\n".join(
                [f"{turn['role'].capitalize()}: {turn['content']}" for turn in st.session_state.conversation_history]
            )

            try:
                # Pass the history to the agent
                response = agent_executor.invoke({"input": conversation_input})

                # Add the chatbot's response to the history
                chatbot_response = response['output']
                st.session_state.conversation_history.append({"role": "assistant", "content": chatbot_response})
                # If the assistant has said "bye", persist the conversation history
                if "bye" in chatbot_response.lower():
                    log_history(st.session_state.email, st.session_state.conversation_history)

                # Display the chatbot's response
                with st.chat_message("assistant"):
                    st.markdown(chatbot_response)

            except ValueError as ve:
                if "No generation chunks were returned" in str(ve):
                    st.error(f"Agent Error: Failed to get a response from the LLM during agent execution (ValueError: {ve}).")
                    st.error("This often indicates an issue with the custom API endpoint (compatibility, content filtering, or error) or network problems.")
                    st.error("Check the Hugging Face Space logs for more details, especially network errors or messages related to content policy.")
                    # Log the error for debugging
                    print(f"Caught ValueError: {ve}")  # This will print to HF Space logs
                    # Optionally add the error to chat history for visibility
                    st.session_state.conversation_history.append({"role": "assistant", "content": "Sorry, I encountered an internal error (ValueError: No generation chunks). Please check logs or try again later."})
                    with st.chat_message("assistant"):
                        st.markdown("Sorry, I encountered an internal error (ValueError: No generation chunks). Please check logs or try again later.")

                else:
                    # Handle other ValueErrors if necessary
                    st.error(f"Agent Error: An unexpected ValueError occurred: {ve}")
                    print(f"Caught other ValueError: {ve}")
                    st.session_state.conversation_history.append({"role": "assistant", "content": "Sorry, I encountered an unexpected internal error (ValueError)."})
                    with st.chat_message("assistant"):
                        st.markdown("Sorry, I encountered an unexpected internal error (ValueError).")

            except Exception as e:
                st.error(f"Agent Error: An unexpected error occurred: {e}")
                st.error("Check Hugging Face Space logs for the full traceback.")
                # Log the full error and traceback for debugging
                import traceback
                print(f"Caught Exception: {e}")
                print(traceback.format_exc())  # Print full traceback to logs
                # Check if it looks like a content policy error based on the message
                if "policy" in str(e).lower() or "safety" in str(e).lower() or "blocked" in str(e).lower():
                    st.warning("The error message suggests the request might have been blocked by a content policy.")
                    st.session_state.conversation_history.append({"role": "assistant", "content": f"Sorry, my response might have been blocked by a content policy. Error: {e}"})
                    with st.chat_message("assistant"):
                        st.markdown(f"Sorry, my response might have been blocked by a content policy. Error: {e}")
                else:
                    st.session_state.conversation_history.append({"role": "assistant", "content": f"Sorry, I encountered an unexpected error: {e}"})
                    with st.chat_message("assistant"):
                        st.markdown(f"Sorry, I encountered an unexpected error: {e}")

def main():
    # Check if the user is logged in
    if "logged_in" in st.session_state and st.session_state["logged_in"]:
        # Show chatbot page if logged in
        chatbot_interface()
    else:
        # Show login page if not logged in
        login_page()


if __name__ == "__main__":
    main()
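
# Note (assumption about the deployment): locally this app would typically be launched with
# `streamlit run app.py`, with API_KEY and OPENAI_API_BASE set in the environment; on
# Hugging Face Spaces these values come from the configured secrets.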