Spaces:
Running
Running
Update chatbot.py
Browse files- chatbot.py +48 -75
chatbot.py
CHANGED
@@ -1,70 +1,58 @@
|
|
1 |
-
|
|
|
2 |
import cohere
|
3 |
import requests
|
4 |
from io import BytesIO
|
5 |
-
import base64
|
6 |
-
|
7 |
-
|
8 |
-
# Configuration de la clé API
|
9 |
-
COHERE_API_KEY = "moZJbgxiW9cW8Wqo0ecce0pa84uf3eT6F2oL1whB" # Remplace par ta clé API
|
10 |
-
co = cohere.Client(COHERE_API_KEY)
|
11 |
-
|
12 |
-
# Initialisation du contexte de chat
|
13 |
-
if "chat_history" not in st.session_state:
|
14 |
-
st.session_state.chat_history = []
|
15 |
-
|
16 |
-
st.set_page_config(page_title="Chatbot Cohere", page_icon="💬")
|
17 |
-
st.title("💬 Chat avec Cohere (Command R+)")
|
18 |
-
|
19 |
-
# Boîte d'entrée utilisateur
|
20 |
-
user_input = st.chat_input("Pose ta question...")
|
21 |
-
|
22 |
-
# Affichage de l'historique visuel
|
23 |
-
for msg in st.session_state.chat_history:
|
24 |
-
with st.chat_message("user" if msg["role"] == "USER" else "assistant"):
|
25 |
-
st.markdown(msg["text"])
|
26 |
-
|
27 |
-
# Quand l'utilisateur envoie un message
|
28 |
-
if user_input:
|
29 |
-
with st.chat_message("user"):
|
30 |
-
st.markdown(user_input)
|
31 |
-
|
32 |
-
st.session_state.chat_history.append({"role": "USER", "text": user_input})
|
33 |
-
|
34 |
-
# Création de l'historique pour Cohere
|
35 |
-
cohere_history = [
|
36 |
-
{"role": msg["role"], "message": msg["text"]}
|
37 |
-
for msg in st.session_state.chat_history
|
38 |
-
if msg["role"] in ["USER", "CHATBOT"]
|
39 |
-
]
|
40 |
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
|
|
|
61 |
headers = {
|
62 |
-
"xi-api-key":
|
63 |
"Content-Type": "application/json"
|
64 |
}
|
65 |
|
66 |
data = {
|
67 |
-
"text":
|
68 |
"model_id": "eleven_multilingual_v2",
|
69 |
"voice_settings": {
|
70 |
"stability": 0.5,
|
@@ -72,25 +60,10 @@ if user_input:
|
|
72 |
}
|
73 |
}
|
74 |
|
75 |
-
|
76 |
-
f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}",
|
77 |
headers=headers,
|
78 |
json=data
|
79 |
)
|
80 |
|
81 |
-
|
82 |
-
with st.chat_message("assistant"):
|
83 |
-
st.markdown(reply)
|
84 |
-
st.markdown(f"🔊 **Résumé vocal :** _{summary}_")
|
85 |
-
|
86 |
-
if response_audio.status_code == 200:
|
87 |
-
audio_bytes = BytesIO(response_audio.content)
|
88 |
-
st.audio(audio_bytes, format="audio/mp3")
|
89 |
-
else:
|
90 |
-
st.warning("Erreur dans la synthèse vocale (ElevenLabs).")
|
91 |
-
|
92 |
-
st.session_state.chat_history.append({"role": "CHATBOT", "text": reply})
|
93 |
-
|
94 |
-
except Exception as e:
|
95 |
-
st.error(f"Erreur : {str(e)}")
|
96 |
-
|
|
|
1 |
+
# chatbot.py
|
2 |
+
|
3 |
import cohere
|
4 |
import requests
|
5 |
from io import BytesIO
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
+
class InflationChatbot:
|
8 |
+
def __init__(self, cohere_api_key, eleven_api_key, voice_id="TxGEqnHWrfWFTfGW9XjX"):
    """Create a chatbot backed by Cohere (chat + summary) and ElevenLabs (TTS).

    Args:
        cohere_api_key: API key used to build the Cohere client.
        eleven_api_key: API key sent to ElevenLabs in the ``xi-api-key`` header.
        voice_id: ElevenLabs voice to synthesize with. Defaults to
            "TxGEqnHWrfWFTfGW9XjX" ("Antoine", a French voice), which was
            previously hard-coded; exposing it keeps callers backward
            compatible while allowing other voices.
    """
    self.co = cohere.Client(cohere_api_key)
    self.eleven_api_key = eleven_api_key
    self.voice_id = voice_id
12 |
+
|
13 |
+
def ask(self, user_input, chat_history=None):
    """Answer *user_input* with Cohere, summarize the reply, and voice it.

    Args:
        user_input: The user's message.
        chat_history: Optional list of ``{"role", "text"}`` dicts. Only
            entries whose role is "USER" or "CHATBOT" are forwarded to
            Cohere; anything else is dropped.

    Returns:
        On success: ``{"reply", "summary", "audio"}`` where ``audio`` is a
        BytesIO of MP3 data or None when synthesis failed.
        On any exception: ``{"error": <message>}``.
    """
    # BUG FIX: the default used to be a mutable `[]`, which Python shares
    # across calls; use None as the sentinel instead.
    if chat_history is None:
        chat_history = []
    try:
        # Re-shape the stored history into Cohere's chat_history format.
        cohere_history = [
            {"role": msg["role"], "message": msg["text"]}
            for msg in chat_history
            if msg["role"] in ["USER", "CHATBOT"]
        ]

        # Main answer from the Command R+ chat model.
        response = self.co.chat(
            message=user_input,
            chat_history=cohere_history,
            model="command-r-plus",
            temperature=0.7,
            max_tokens=300
        )
        reply = response.text.strip()

        # One-sentence French summary of the reply, used for the voice-over.
        summary_prompt = f"Résume en une seule phrase simple et claire en français ce texte : {reply}"
        summary_response = self.co.generate(prompt=summary_prompt, max_tokens=100)
        summary = summary_response.generations[0].text.strip()

        # Text-to-speech via ElevenLabs (None when the HTTP call fails).
        audio = self.text_to_speech(summary)

        return {
            "reply": reply,
            "summary": summary,
            "audio": audio
        }
    except Exception as e:
        # Broad catch is deliberate: the UI caller renders the error string
        # instead of crashing.
        return {"error": str(e)}
|
47 |
|
48 |
+
def text_to_speech(self, text):
    """POST *text* to the ElevenLabs TTS endpoint and return the audio.

    Returns a BytesIO holding the MP3 response body on HTTP 200, or None on
    any other status (caller treats None as "synthesis failed").
    """
    headers = {
        "xi-api-key": self.eleven_api_key,
        "Content-Type": "application/json"
    }

    data = {
        "text": text,
        "model_id": "eleven_multilingual_v2",
        "voice_settings": {
            "stability": 0.5,
            # NOTE(review): one more voice_settings entry exists at file
            # line 59 but is not visible in this diff render (presumably
            # "similarity_boost") — confirm against the repository.
        }
    }

    response = requests.post(
        f"https://api.elevenlabs.io/v1/text-to-speech/{self.voice_id}",
        headers=headers,
        json=data
    )

    return BytesIO(response.content) if response.status_code == 200 else None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|