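"""Streamlit chatbot for exploring an uploaded CSV file.

Uses Google Gemini when the google-generativeai package and a GEMINI_API_KEY
environment variable are available; otherwise falls back to basic pandas
answers (column listing and summary statistics).
"""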
import streamlit as st
import pandas as pd
import os
from datetime import datetime

try:
    import google.generativeai as genai
    GEMINI_AVAILABLE = True
except ImportError:
    GEMINI_AVAILABLE = False


class ChatbotManager:
    def __init__(self):
        # Configure Gemini only when the library imports and an API key is set;
        # otherwise self.model stays None and the fallback analysis path is used.
        # Note: 'gemini-pro' may be retired in newer API versions; swap in a
        # current model name if requests start failing.
        if GEMINI_AVAILABLE and 'GEMINI_API_KEY' in os.environ:
            genai.configure(api_key=os.environ['GEMINI_API_KEY'])
            self.model = genai.GenerativeModel('gemini-pro')
        else:
            self.model = None
        # Session state survives Streamlit reruns, so initialize it only once
        if 'uploaded_df' not in st.session_state:
            st.session_state.uploaded_df = None
        if 'chat_history' not in st.session_state:
            st.session_state.chat_history = []

    def render_chat_interface(self):
        """Render the main chat interface"""
        st.header("Data Analysis Chatbot")
        # Warn whenever no model was configured (library missing or no API key)
        if self.model is None:
            st.warning("Gemini API not available - running in limited mode")

        # File upload section
        uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        if uploaded_file is not None:
            self._process_uploaded_file(uploaded_file)

        # Chat interface
        if st.session_state.uploaded_df is not None:
            self._render_chat_window()

    def _process_uploaded_file(self, uploaded_file):
        """Process the uploaded CSV file"""
        try:
            df = pd.read_csv(uploaded_file)
            st.session_state.uploaded_df = df
            st.success("Data successfully loaded!")
            with st.expander("View Data Preview"):
                st.dataframe(df.head())

            # Run the initial analysis only once per uploaded file: Streamlit
            # reruns this whole script on every interaction, so an unguarded
            # append would duplicate this message in the chat history each time
            if self.model and st.session_state.get('last_analyzed_file') != uploaded_file.name:
                st.session_state.last_analyzed_file = uploaded_file.name
                initial_prompt = f"Briefly describe this dataset with {len(df)} rows and {len(df.columns)} columns."
                response = self._generate_response(initial_prompt)
                st.session_state.chat_history.append({
                    "role": "assistant",
                    "content": response
                })
        except Exception as e:
            st.error(f"Error processing file: {str(e)}")

    def _render_chat_window(self):
        """Render the chat conversation window"""
        st.subheader("Chat About Your Data")

        # Display chat history
        for message in st.session_state.chat_history:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        # User input
        if prompt := st.chat_input("Ask about your data..."):
            self._handle_user_input(prompt)

    def _handle_user_input(self, prompt):
        """Handle user input and generate response"""
        # Add user message to chat history
        st.session_state.chat_history.append({"role": "user", "content": prompt})

        # Display user message
        with st.chat_message("user"):
            st.markdown(prompt)

        # Generate and display assistant response
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = self._generate_response(prompt)
                st.markdown(response)

        # Add assistant response to chat history
        st.session_state.chat_history.append({"role": "assistant", "content": response})

    def _generate_response(self, prompt: str) -> str:
        """Generate response using available backend"""
        df = st.session_state.uploaded_df
        if self.model:
            # Use Gemini if available, prefixing a short data summary as context
            try:
                data_summary = f"Data: {len(df)} rows, columns: {', '.join(df.columns)}"
                full_prompt = f"{data_summary}\n\nUser question: {prompt}"
                response = self.model.generate_content(full_prompt)
                return response.text
            except Exception as e:
                return f"Gemini error: {str(e)}"
        else:
            # Fallback basic analysis (to_markdown requires the tabulate package)
            if "summary" in prompt.lower():
                return f"Basic summary:\n{df.describe().to_markdown()}"
            elif "columns" in prompt.lower():
                return f"Columns: {', '.join(df.columns)}"
            else:
                return "I can provide basic info about columns and summary statistics."