# NOTE: removed web-page extraction residue ("Spaces: / Sleeping / Sleeping" —
# a Hugging Face Spaces status banner captured by the scraper); it was not Python.
# Standard library
import os

# Third-party: LangChain integrations for the three model providers,
# plus .env loading and Hugging Face Hub authentication.
from dotenv import load_dotenv
from huggingface_hub import login
from langchain.schema.output_parser import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Load API keys from a local .env file into os.environ.
load_dotenv()
# Authenticate with the Hugging Face Hub up front.
# NOTE(review): raises KeyError if HUGGING_FACE_API_KEY is missing — the app
# fails fast at import time; confirm that is the desired behavior.
login(token=os.environ["HUGGING_FACE_API_KEY"])
# Optional workaround for certificate issues: an empty CURL_CA_BUNDLE disables
# TLS certificate verification in requests/curl-backed clients.
# NOTE(review): this weakens transport security — confirm it is still needed.
os.environ['CURL_CA_BUNDLE'] = ''
class Bot:
    """Router over three LLM providers: Groq, Hugging Face, and Google.

    ``response()`` dispatches a prompt to whichever provider owns the
    requested model name. Each ``call_*`` helper builds a small
    prompt -> llm -> string-parser chain and returns either the model's
    text reply or an ``"Error (<provider>): ..."`` string on any failure,
    so callers never see an exception.
    """

    def __init__(self):
        # Model identifiers grouped by provider; membership in these lists
        # drives the dispatch in response().
        self.groq_models = ['gemma-7b-it', 'llama3-70b-8192',
                            'llama3-8b-8192', 'mixtral-8x22b']
        self.hf_models = ["01-ai/Yi-1.5-34B-Chat", "google/gemma-1.1-2b-it",
                          "google/gemma-1.1-7b-it"]
        self.google_models = ["gemini-1.0-pro", "gemini-1.5-flash",
                              "gemini-1.5-pro"]
        # Main model pool exposed to callers (e.g. for a UI dropdown).
        self.models = self.google_models + self.hf_models + self.groq_models

    def call_groq(self, model, temp=0.7, given_prompt="Hi"):
        """Run *given_prompt* through a Groq-hosted chat model.

        Returns the model's text reply, or an "Error (Groq): ..." string
        on any failure (missing API key, network error, bad model id).
        """
        try:
            llm = ChatGroq(
                model=model,
                temperature=temp,
                groq_api_key=os.environ["GROQ_API_KEY"]
            )
            prompt = ChatPromptTemplate.from_messages([
                ("system", "You are a helpful assistant."),
                ("human", "{text}")
            ])
            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})
        except Exception as e:
            # Surface the failure as text instead of crashing the app.
            return f"Error (Groq): {str(e)}"

    def call_hf(self, model, temp=0.7, given_prompt="Hi"):
        """Run *given_prompt* through a Hugging Face Inference Endpoint model.

        Returns the model's text reply, or an "Error (HF): ..." string
        on any failure.
        """
        try:
            llm = HuggingFaceEndpoint(
                repo_id=model,
                temperature=temp
            )
            chat = ChatHuggingFace(llm=llm, verbose=True)
            prompt = PromptTemplate(
                template="""
                You are a helpful assistant
                User: {query}
                Answer:
                """,
                input_variables=["query"]
            )
            chain = prompt | chat | StrOutputParser()
            return chain.invoke({"query": given_prompt})
        except Exception as e:
            # Surface the failure as text instead of crashing the app.
            return f"Error (HF): {str(e)}"

    def call_google(self, model, temp=0.7, given_prompt="Hi"):
        """Run *given_prompt* through a Google Generative AI model.

        Returns the model's text reply, or an "Error (Google): ..." string
        on any failure.
        """
        try:
            llm = ChatGoogleGenerativeAI(
                model=model,
                temperature=temp,
                google_api_key=os.environ["GOOGLE_API_KEY"]
            )
            prompt = ChatPromptTemplate.from_messages([
                ("human", "{text}")
            ])
            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})
        except Exception as e:
            # Surface the failure as text instead of crashing the app.
            return f"Error (Google): {str(e)}"

    def response(self, model, prompt="Hi", temperature=0.7):
        """Dispatch *prompt* to the provider that owns *model*.

        Returns the provider's reply (or its error string), or a fallback
        message when *model* is not in any provider's list.
        """
        if model in self.groq_models:
            return self.call_groq(model=model, temp=temperature, given_prompt=prompt)
        elif model in self.hf_models:
            return self.call_hf(model=model, temp=temperature, given_prompt=prompt)
        elif model in self.google_models:
            return self.call_google(model=model, temp=temperature, given_prompt=prompt)
        else:
            # Mojibake repaired: the extracted source had "β" where an em dash
            # was intended.
            return "Sorry! App not working properly — unknown model"