Update chatbot.py
chatbot.py  +61 -98
CHANGED
@@ -1,123 +1,86 @@
 # chatbot.py
 import streamlit as st
-import …
-import …
-import …
-import google.generativeai as genai
-from typing import List, Dict, Optional
+from transformers import pipeline, BlenderbotTokenizer, BlenderbotForConditionalGeneration
+import torch
+from typing import List, Dict

 class ChatbotManager:
     def __init__(self):
-        …
-        self.…
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.model = None
+        self.tokenizer = None
+        self.load_model()
         self.initialize_chat()

-    def …
-        """…
+    def load_model(self):
+        """Load Blenderbot model locally"""
         try:
-            … (five removed lines lost in the page render)
-            api_key = st.secrets['GEMINI_API_KEY']
-        except:
-            pass
-
-        if api_key:
-            genai.configure(api_key=api_key)
-            # Updated model name to 'gemini-1.0-pro'
-            self.model = genai.GenerativeModel('gemini-1.0-pro')
-            self.chat = self.model.start_chat(history=[])
-        else:
-            self.model = None
-            self.chat = None
-            st.warning("Gemini API key not found. Using limited functionality mode.")
+            with st.spinner("Loading AI model (this may take a minute)..."):
+                model_name = "facebook/blenderbot-400M-distill"
+                self.tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
+                self.model = BlenderbotForConditionalGeneration.from_pretrained(model_name).to(self.device)
+                st.success("Model loaded successfully!")
         except Exception as e:
-            st.error(f"…
+            st.error(f"⚠️ Failed to load model: {str(e)}")
             self.model = None
-            self.chat = None

     def initialize_chat(self):
-        """Initialize chat session state…
-        if …
+        """Initialize chat session state"""
+        if "chat_history" not in st.session_state:
             st.session_state.chat_history = []
-
-        # Initialize Gemini chat if not already done
-        if self.model and not hasattr(self, 'chat'):
-            self.chat = self.model.start_chat(history=[])

     def clear_chat(self):
-        """…
+        """Reset chat history"""
         st.session_state.chat_history = []
-        if self.model:
-            self.chat = self.model.start_chat(history=[])
         st.success("Chat history cleared!")

     def add_message(self, role: str, content: str):
-        """Add a message to…
-        st.session_state.chat_history.append({
-            "role": role,
-            "content": content
-        })
+        """Add a message to chat history"""
+        st.session_state.chat_history.append({"role": role, "content": content})

     def get_chat_history(self) -> List[Dict]:
-        """…
+        """Retrieve chat history"""
         return st.session_state.chat_history

-    def …
-        """Generate…
+    def generate_response(self, prompt: str) -> str:
+        """Generate AI response using Blenderbot"""
+        if not self.model:
+            return "Model not loaded. Please try again later."
+
         try:
-            … (twelve removed lines lost in the page render)
+            # Format prompt with business context
+            business_prompt = f"""You are a professional business advisor. Provide helpful, concise advice on:
+- Business strategy
+- Marketing
+- Product development
+- Startup growth
+
+User Question: {prompt}
+
+Answer:"""
+
+            inputs = self.tokenizer([business_prompt], return_tensors="pt").to(self.device)
+            reply_ids = self.model.generate(**inputs, max_length=200)
+            response = self.tokenizer.decode(reply_ids[0], skip_special_tokens=True)
+
+            return response
         except Exception as e:
             return f"⚠️ Error generating response: {str(e)}"

-    def _create_business_prompt(self, user_input: str) -> str:
-        """Create a detailed prompt for business-related queries"""
-        return f"""You are an expert business advisor AI. Provide detailed, actionable advice in response to the following query.
-
-Rules:
-- Always maintain a professional tone
-- Break complex concepts into simple terms
-- Provide concrete examples when possible
-- Structure responses with clear sections when appropriate
-- Suggest next steps or additional considerations
-- Never provide financial, legal, or medical advice
-
-User query: {user_input}
-
-Please provide a comprehensive response that addresses the user's needs:"""
-
-    def _generate_fallback_response(self, prompt: str) -> str:
-        """Generate a fallback response when Gemini isn't available"""
-        business_topics = {
-            "strategy": "For business strategy, consider analyzing your market position, competitors, and unique value proposition.",
-            "marketing": "Marketing tips: Focus on your target audience, create valuable content, and measure campaign performance.",
-            "finance": "For financial questions, consult with a qualified financial advisor.",
-            "product": "Product development should start with customer needs validation before building."
-        }
-
-        prompt_lower = prompt.lower()
-        for topic, response in business_topics.items():
-            if topic in prompt_lower:
-                return response
-
-        return "I can provide advice on business strategy, marketing, and product development. Please ask a specific question about one of these areas."
-
     def render_chat_interface(self):
-        """Render the complete chat…
-        st.header("💬 AI Business Mentor")
+        """Render the complete chat UI"""
+        st.header("💬 AI Business Mentor (Blenderbot)")
+
+        # Sidebar controls
+        with st.sidebar:
+            st.subheader("Settings")
+            if st.button("Clear Chat"):
+                self.clear_chat()
+                st.rerun()
+
+            st.markdown("---")
+            st.caption("Model: facebook/blenderbot-400M-distill")
+            st.caption(f"Device: {self.device.upper()}")

         # Display chat history
         for message in self.get_chat_history():
@@ -125,21 +88,21 @@ Please provide a comprehensive response that addresses the user's needs:"""
                 st.markdown(message["content"])

         # User input
-        if prompt := st.chat_input("Ask about business…
+        if prompt := st.chat_input("Ask about business..."):
             self.add_message("user", prompt)

             # Display user message immediately
             with st.chat_message("user"):
                 st.markdown(prompt)

-            # Generate and display…
+            # Generate and display AI response
             with st.chat_message("assistant"):
-                with st.spinner("…
-                    response = self.…
+                with st.spinner("Thinking..."):
+                    response = self.generate_response(prompt)
                 st.markdown(response)

-            # Add…
+            # Add response to history
             self.add_message("assistant", response)

-            # Auto-…
+            # Auto-refresh to show new messages
             st.rerun()
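
Two small caveats in the new generate_response. First, the pipeline import added at the top is never used as committed. Second, blenderbot-400M-distill accepts only short inputs (its encoder has on the order of 128 position embeddings), so the multi-line advisor preamble plus the user's question can exceed what the model handles, and the committed tokenizer call does not truncate. A minimal defensive sketch of the encoding step, offered as a suggestion rather than part of the commit, with the rest of the method assumed unchanged:

    # Truncate at tokenization time so long prompts cannot overflow
    # Blenderbot's short input window (128 tokens assumed for this checkpoint).
    inputs = self.tokenizer(
        [business_prompt],
        return_tensors="pt",
        truncation=True,   # drop tokens past max_length
        max_length=128,
    ).to(self.device)
    reply_ids = self.model.generate(**inputs, max_length=200)

Blenderbot is also an open-domain chit-chat model rather than an instruction-tuned one, so the advisor preamble may steer its replies far less than the equivalent prompt did with Gemini.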
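
For completeness, a hypothetical app.py entry point (not part of this commit; the file name and wiring are assumptions) showing one way to mount ChatbotManager. Caching matters here because st.rerun() at the end of render_chat_interface restarts the script, and without a cache every message would trigger load_model again:

    # app.py: hypothetical wiring, assuming chatbot.py sits alongside it.
    import streamlit as st
    from chatbot import ChatbotManager

    @st.cache_resource           # keep one loaded model per server process
    def get_chatbot() -> ChatbotManager:
        return ChatbotManager()  # loads Blenderbot once, not on every rerun

    st.set_page_config(page_title="AI Business Mentor", page_icon="💬")
    get_chatbot().render_chat_interface()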