Update LLM.py
LLM.py CHANGED
@@ -2,102 +2,86 @@ from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
 from langchain_groq import ChatGroq
 from langchain_huggingface import ChatHuggingFace
 from langchain_huggingface import HuggingFaceEndpoint
-from dotenv import load_dotenv
-from langchain.schema.output_parser import StrOutputParser
-from langchain_huggingface import ChatHuggingFace
 from langchain_google_genai import ChatGoogleGenerativeAI
-import os
+from dotenv import load_dotenv
 from huggingface_hub import login
+import os
 
+# Load environment variables and authenticate
 load_dotenv()
-
-login(token=os.environ["HUGGING_FACE_API_KEY"])
+login(token=os.environ.get("HUGGING_FACE_API_KEY", ""))
 os.environ['CURL_CA_BUNDLE'] = ''
 
-
-
-class Bot():
+class Bot:
     def __init__(self):
-        self.
+        # Updated model lists: Remove gated or unsupported IDs
+        self.groq_models = [
+            'gemma-7b-it',
+            'llama3-70b-8192',
+            'llama3-8b-8192',
+            'mixtral-8x22b'
+        ]
+        self.hf_models = [
+            "01-ai/Yi-1.5-34B-Chat",
+            "google/gemma-1.1-2b-it"
+        ]
+        # Use supported Google GenAI model names
+        self.google_models = [
+            "gemini-pro",
+            "gemini-pro-vision"
+        ]
+        # Master list for sampling (only include accessible models)
+        self.models = self.google_models + self.hf_models + self.groq_models
+
+    def call_groq(self, model, temp=0.7, given_prompt="Hi"):
         try:
-            llm = ChatGroq(
-                model=model,
-                temperature=temp
-            )
-            system = "You are a helpful assistant."
-            human = "{text}"
-            prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])
-
+            llm = ChatGroq(model=model, temperature=temp)
+            prompt = ChatPromptTemplate.from_messages([
+                ("system", "You are a helpful assistant."),
+                ("human", "{text}")
+            ])
             chain = prompt | llm | StrOutputParser()
             return chain.invoke({"text": given_prompt})
-
         except Exception as e:
-            return f"
+            return f"⚠️ [Groq:{model}] {str(e)}"
 
-    def call_hf(self,model, temp=0.7, given_prompt="Hi"):
+    def call_hf(self, model, temp=0.7, given_prompt="Hi"):
         try:
-            llm = HuggingFaceEndpoint(
-                repo_id=model,
-                temperature=temp
-            )
-
+            llm = HuggingFaceEndpoint(repo_id=model, temperature=temp)
             chat = ChatHuggingFace(llm=llm, verbose=True)
-
             template = """
-
-            User: {query}
-
-            Answer:
-            """
-            prompt = PromptTemplate(
-                template=template,
-                input_variables=["query"]
-            )
-
-            chain =prompt | chat | StrOutputParser()
+            You are a helpful assistant.
+
+            User: {query}
+
+            Answer:
+            """
+            prompt = PromptTemplate(template=template, input_variables=["query"])
+            chain = prompt | chat | StrOutputParser()
             return chain.invoke({"query": given_prompt})
-
         except Exception as e:
-            return f"
+            return f"⚠️ [HF:{model}] {str(e)}"
 
-    def call_google(self,model, temp=0.7, given_prompt="Hi"):
+    def call_google(self, model, temp=0.7, given_prompt="Hi"):
         try:
-            human = "{text}"
-            prompt = ChatPromptTemplate.from_messages([("human", human)])
-            chain = prompt | model | StrOutputParser()
+            gm = ChatGoogleGenerativeAI(model=model, temperature=temp)
+            prompt = ChatPromptTemplate.from_messages([("system", "You are a helpful assistant."), ("human", "{text}")])
+            chain = prompt | gm | StrOutputParser()
             return chain.invoke({"text": given_prompt})
         except Exception as e:
-            return f"
+            return f"⚠️ [Google:{model}] {str(e)}"
-
-    def response(self, model, prompt="Hi", temprature = 0.7):
-        if model in self.groq_models:
-            res_show = self.call_groq(temp = temprature, given_prompt = prompt, model= model)
-        elif model in self.hf_models:
-            res_show = self.call_hf(given_prompt = prompt, temp = temprature, model = model)
-        elif model in self.google_models:
-            res_show = self.call_google(given_prompt = prompt, temp = temprature, model = model)
-        else:
-            return "Sorry! App not working properly"
-        return res_show
+
+    def response(self, model, prompt="Hi", temperature=0.7):
+        # Route to the correct provider and catch errors
+        try:
+            if model in self.groq_models:
+                return self.call_groq(model, temp=temperature, given_prompt=prompt)
+            if model in self.hf_models:
+                return self.call_hf(model, temp=temperature, given_prompt=prompt)
+            if model in self.google_models:
+                return self.call_google(model, temp=temperature, given_prompt=prompt)
+            return f"❌ Unsupported model: {model}"
+        except Exception as e:
+            return f"⚠️ Skipping `{model}` due to error: {str(e)}"
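As a minimal usage sketch (not part of the commit): the updated Bot can be driven as below. It assumes the .env file supplies GROQ_API_KEY, GOOGLE_API_KEY, and HUGGING_FACE_API_KEY (the key names ChatGroq, ChatGoogleGenerativeAI, and the explicit login() call read), and that the listed model IDs are still served by each provider.

    from LLM import Bot

    bot = Bot()

    # Groq-hosted model: routed through call_groq()
    print(bot.response("llama3-8b-8192",
                       prompt="Explain LCEL in one sentence.",
                       temperature=0.3))

    # Google model: routed through call_google()
    print(bot.response("gemini-pro", prompt="Hi"))

    # Unknown IDs return the error string instead of raising
    print(bot.response("not-a-real-model"))

Because every provider call catches its own exceptions and returns a string, the caller never has to wrap response() in try/except; a failed model simply yields its ⚠️-prefixed error message.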