Update LLM.py
LLM.py
CHANGED
@@ -1,101 +1,92 @@
--- LLM.py
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_groq import ChatGroq
-from langchain_huggingface import ChatHuggingFace
-from langchain_huggingface import HuggingFaceEndpoint
-from dotenv import load_dotenv
-from langchain.schema.output_parser import StrOutputParser
-from langchain_huggingface import ChatHuggingFace
from langchain_google_genai import ChatGoogleGenerativeAI
-import os
from huggingface_hub import login

load_dotenv()
-
login(token=os.environ["HUGGING_FACE_API_KEY"])
-os.environ['CURL_CA_BUNDLE'] = ''
-
-load_dotenv()

class Bot():
    def __init__(self):
-        self.groq_models = ['gemma-7b-it', 'llama3-70b-8192'
-                            'llama3-8b-8192', 'mixtral-8x7b-32768']
-        self.hf_models = ["01-ai/Yi-1.5-34B-Chat", "google/gemma-1.1-2b-it"
                          "google/gemma-1.1-7b-it"]
-        self.google_models = ["gemini-1.0-pro", "gemini-1.5-flash"
                          "gemini-1.5-pro"]
-        self.models = ["gemini-1.0-pro", "gemini-1.5-flash", "gemini-1.5-pro", "01-ai/Yi-1.5-34B-Chat", "google/gemma-1.1-2b-it",\
-                       "google/gemma-1.1-7b-it", 'gemma-7b-it', 'llama3-70b-8192', 'llama3-8b-8192', 'mixtral-8x7b-32768']

-
        try:
            llm = ChatGroq(
                temperature=temp,
-
            )
-
-
-
-
-
            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})

        except Exception as e:
-            return f"Error: {str(e)}"

-    def call_hf(self,model, temp
        try:
            llm = HuggingFaceEndpoint(
                repo_id=model,
                temperature=temp
-
-
            chat = ChatHuggingFace(llm=llm, verbose=True)

-
-            You are a helpful assistant
-
-            User: {query}
-
-            Answer:
-            """

-
-
                input_variables=["query"]
            )
-
-            chain =prompt | chat | StrOutputParser()
-
            return chain.invoke({"query": given_prompt})

        except Exception as e:
-            return f"Error: {str(e)}"

-    def call_google(self,model, temp=0.7, given_prompt
        try:
-
-
-
-
-
            return chain.invoke({"text": given_prompt})
        except Exception as e:
-            return f"Error: {str(e)}"

-    def response(self, model, prompt="Hi",
        if model in self.groq_models:
-
        elif model in self.hf_models:
-
        elif model in self.google_models:
-
        else:
-            return "Sorry! App not working properly"
-        return res_show
-
-
-
-
+++ LLM.py
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_groq import ChatGroq
+from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain.schema.output_parser import StrOutputParser
+from dotenv import load_dotenv
from huggingface_hub import login
+import os

+# Load environment variables
load_dotenv()
login(token=os.environ["HUGGING_FACE_API_KEY"])
+os.environ['CURL_CA_BUNDLE'] = ''  # Optional workaround for certificate issues

class Bot():
    def __init__(self):
+        self.groq_models = ['gemma-7b-it', 'llama3-70b-8192',
+                            'llama3-8b-8192', 'mixtral-8x22b']  # Updated here
+        self.hf_models = ["01-ai/Yi-1.5-34B-Chat", "google/gemma-1.1-2b-it",
                          "google/gemma-1.1-7b-it"]
+        self.google_models = ["gemini-1.0-pro", "gemini-1.5-flash",
                          "gemini-1.5-pro"]

+        # Main model pool
+        self.models = self.google_models + self.hf_models + self.groq_models
+
+    def call_groq(self, model, temp=0.7, given_prompt="Hi"):
        try:
            llm = ChatGroq(
+                model=model,
                temperature=temp,
+                groq_api_key=os.environ["GROQ_API_KEY"]
            )
+            prompt = ChatPromptTemplate.from_messages([
+                ("system", "You are a helpful assistant."),
+                ("human", "{text}")
+            ])
            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})

        except Exception as e:
+            return f"Error (Groq): {str(e)}"

+    def call_hf(self, model, temp=0.7, given_prompt="Hi"):
        try:
            llm = HuggingFaceEndpoint(
                repo_id=model,
                temperature=temp
+            )
            chat = ChatHuggingFace(llm=llm, verbose=True)
+            prompt = PromptTemplate(
+                template="""
+                You are a helpful assistant

+                User: {query}

+                Answer:
+                """,
                input_variables=["query"]
            )
+            chain = prompt | chat | StrOutputParser()
            return chain.invoke({"query": given_prompt})

        except Exception as e:
+            return f"Error (HF): {str(e)}"

+    def call_google(self, model, temp=0.7, given_prompt="Hi"):
        try:
+            llm = ChatGoogleGenerativeAI(
+                model=model,
+                temperature=temp,
+                google_api_key=os.environ["GOOGLE_API_KEY"]
+            )
+            prompt = ChatPromptTemplate.from_messages([
+                ("human", "{text}")
+            ])
+            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})
+
        except Exception as e:
+            return f"Error (Google): {str(e)}"

+    def response(self, model, prompt="Hi", temperature=0.7):
        if model in self.groq_models:
+            return self.call_groq(model=model, temp=temperature, given_prompt=prompt)
        elif model in self.hf_models:
+            return self.call_hf(model=model, temp=temperature, given_prompt=prompt)
        elif model in self.google_models:
+            return self.call_google(model=model, temp=temperature, given_prompt=prompt)
        else:
+            return "Sorry! App not working properly – unknown model"
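Calling the class is then a matter of picking any name from the pools defined in __init__; response() routes it to the matching backend. A hypothetical caller, assuming LLM.py is on the import path:

# Illustrative usage of the updated Bot class (not part of this commit).
from LLM import Bot

bot = Bot()

# 'llama3-8b-8192' is in self.groq_models, so this goes through call_groq().
print(bot.response(model="llama3-8b-8192", prompt="Say hello in one sentence.", temperature=0.2))

# Any name outside the three pools falls through to the error branch.
print(bot.response(model="unknown-model"))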