Update LLM.py
LLM.py (CHANGED)
@@ -1,8 +1,8 @@
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
 from langchain_groq import ChatGroq
-from …
-from …
+from langchain_openai import ChatOpenAI
+from langchain_community.chat_models import ChatAnthropic
 from langchain_google_genai import ChatGoogleGenerativeAI
 from dotenv import load_dotenv
 from huggingface_hub import login
@@ -15,28 +15,18 @@ os.environ['CURL_CA_BUNDLE'] = ''
 
 class Bot:
     def __init__(self):
-        # …
-        self.groq_models = [
-            …
-            'mixtral-8x22b'
-        ]
-        self.hf_models = [
-            "01-ai/Yi-1.5-34B-Chat",
-            "google/gemma-1.1-2b-it"
-        ]
-        # Use supported Google GenAI model names
-        self.google_models = [
-            "gemini-pro",
-            "gemini-pro-vision"
-        ]
-        # Master list for sampling (only include accessible models)
-        self.models = self.google_models + self.hf_models + self.groq_models
+        # Verified, stable models
+        self.openai_models = ["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"]
+        self.anthropic_models = ["claude-3-opus-20240229", "claude-3-sonnet-20240229"]
+        self.google_models = ["gemini-pro"]
+        self.groq_models = ["llama3-8b-8192", "llama3-70b-8192"]  # Keep only working ones
 
-    def …
+        # Final model list
+        self.models = self.openai_models + self.anthropic_models + self.google_models + self.groq_models
+
+    def call_openai(self, model, temp=0.7, given_prompt="Hi"):
         try:
-            llm = …
+            llm = ChatOpenAI(model=model, temperature=temp)
             prompt = ChatPromptTemplate.from_messages([
                 ("system", "You are a helpful assistant."),
                 ("human", "{text}")
@@ -44,47 +34,56 @@ class Bot:
             chain = prompt | llm | StrOutputParser()
             return chain.invoke({"text": given_prompt})
         except Exception as e:
-            return f"⚠️ […
+            return f"⚠️ [OpenAI:{model}] {str(e)}"
 
-    def …
+    def call_anthropic(self, model, temp=0.7, given_prompt="Hi"):
         try:
-            llm = …
-            …
-            Answer:
-            """
-            prompt = PromptTemplate(template=template, input_variables=["query"])
-            chain = prompt | chat | StrOutputParser()
-            return chain.invoke({"query": given_prompt})
+            llm = ChatAnthropic(model=model, temperature=temp)
+            prompt = ChatPromptTemplate.from_messages([
+                ("system", "You are a helpful assistant."),
+                ("human", "{text}")
+            ])
+            chain = prompt | llm | StrOutputParser()
+            return chain.invoke({"text": given_prompt})
         except Exception as e:
-            return f"⚠️ […
+            return f"⚠️ [Anthropic:{model}] {str(e)}"
 
     def call_google(self, model, temp=0.7, given_prompt="Hi"):
         try:
             gm = ChatGoogleGenerativeAI(model=model, temperature=temp)
-            prompt = ChatPromptTemplate.from_messages([…
+            prompt = ChatPromptTemplate.from_messages([
+                ("system", "You are a helpful assistant."),
+                ("human", "{text}")
+            ])
             chain = prompt | gm | StrOutputParser()
             return chain.invoke({"text": given_prompt})
         except Exception as e:
             return f"⚠️ [Google:{model}] {str(e)}"
 
+    def call_groq(self, model, temp=0.7, given_prompt="Hi"):
+        try:
+            llm = ChatGroq(model=model, temperature=temp)
+            prompt = ChatPromptTemplate.from_messages([
+                ("system", "You are a helpful assistant."),
+                ("human", "{text}")
+            ])
+            chain = prompt | llm | StrOutputParser()
+            return chain.invoke({"text": given_prompt})
+        except Exception as e:
+            return f"⚠️ [Groq:{model}] {str(e)}"
+
     def response(self, model, prompt="Hi", temperature=0.7):
-        # Route to the correct provider and catch errors
         try:
-            if model in self.…
-                return self.…
-            …
-                return self.…
-            …
+            if model in self.openai_models:
+                return self.call_openai(model, temp=temperature, given_prompt=prompt)
+            elif model in self.anthropic_models:
+                return self.call_anthropic(model, temp=temperature, given_prompt=prompt)
+            elif model in self.google_models:
                 return self.call_google(model, temp=temperature, given_prompt=prompt)
-            …
+            elif model in self.groq_models:
+                return self.call_groq(model, temp=temperature, given_prompt=prompt)
+            else:
+                return f"❌ Unsupported model: {model}"
         except Exception as e:
             return f"⚠️ Skipping `{model}` due to error: {str(e)}"
 
-
-
-
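Below is a minimal usage sketch for the updated class. It is hypothetical (not part of this commit) and assumes the provider credentials that these LangChain integrations read from the environment (OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY, GROQ_API_KEY) are present in the .env file loaded by load_dotenv.

# demo.py - hypothetical driver for the updated Bot class (not part of this commit).
# Assumes a .env with the usual provider keys (OPENAI_API_KEY, ANTHROPIC_API_KEY,
# GOOGLE_API_KEY, GROQ_API_KEY) sits next to LLM.py.
from LLM import Bot

bot = Bot()

# response() routes each model name to its provider method and returns either
# the model's answer or a "⚠️"/"❌" error string, so this loop never raises.
for model in bot.models:
    print(f"--- {model} ---")
    print(bot.response(model, prompt="Say hello in one sentence.", temperature=0.2))

Because every call method catches its own exceptions and response() falls back to an error string for unknown names, a missing API key degrades to a per-model warning rather than aborting the whole sweep.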