# NOTE(review): the three lines here ("Spaces:" / "Sleeping" / "Sleeping") were
# Hugging Face Spaces page-status residue from a web scrape, not Python code;
# preserved as a comment so the module parses.
import os

from dotenv import load_dotenv
from huggingface_hub import login
from langchain_community.chat_models import ChatAnthropic
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI

# Load environment variables, then authenticate with the Hugging Face Hub.
load_dotenv()
_hf_token = os.environ.get("HUGGING_FACE_API_KEY", "")
if _hf_token:
    # Only attempt login when a token is actually configured —
    # login() rejects an empty token, which would crash module import.
    login(token=_hf_token)
# HACK: an empty CURL_CA_BUNDLE disables TLS certificate verification for
# curl/requests-based clients. This is a security risk (man-in-the-middle
# exposure) — presumably a workaround for a broken corporate CA bundle;
# prefer pointing this at a valid CA file instead. TODO(review): confirm.
os.environ['CURL_CA_BUNDLE'] = ''
class Bot:
    """Route a single prompt to one of several chat-LLM providers.

    Maintains per-provider model-id lists and dispatches ``response()`` to
    the matching provider client (OpenAI, Anthropic, Google, Groq). Any
    provider error is caught and returned as a ``⚠️``-prefixed string
    rather than raised, so callers can iterate over many models safely.
    """

    def __init__(self):
        # Verified, stable model ids per provider.
        self.openai_models = ["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"]
        self.anthropic_models = ["claude-3-opus-20240229", "claude-3-sonnet-20240229"]
        self.google_models = ["gemini-pro"]
        self.groq_models = ["llama3-8b-8192", "llama3-70b-8192"]  # keep only working ones
        # Flat list of every supported model id, used by callers to enumerate.
        self.models = (
            self.openai_models
            + self.anthropic_models
            + self.google_models
            + self.groq_models
        )

    def _invoke(self, make_llm, provider, model, given_prompt):
        """Build the LLM (lazily) and run one prompt through it.

        ``make_llm`` is a zero-arg factory evaluated *inside* the try block
        so that construction failures (missing API key, unresolved client
        class) are reported the same way as invocation failures.

        Returns the model's text reply, or ``"⚠️ [provider:model] <error>"``.
        """
        try:
            llm = make_llm()
            prompt = ChatPromptTemplate.from_messages([
                ("system", "You are a helpful assistant."),
                ("human", "{text}"),
            ])
            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})
        except Exception as e:
            return f"⚠️ [{provider}:{model}] {str(e)}"

    def call_openai(self, model, temp=0.7, given_prompt="Hi"):
        """Query an OpenAI chat model; never raises (returns ⚠️ string on error)."""
        return self._invoke(
            lambda: ChatOpenAI(model=model, temperature=temp),
            "OpenAI", model, given_prompt,
        )

    def call_anthropic(self, model, temp=0.7, given_prompt="Hi"):
        """Query an Anthropic chat model; never raises (returns ⚠️ string on error).

        NOTE(review): ``ChatAnthropic`` is imported from ``langchain_community``,
        which is the deprecated path — the maintained client lives in the
        ``langchain_anthropic`` package. Left unchanged to avoid a new dependency.
        """
        return self._invoke(
            lambda: ChatAnthropic(model=model, temperature=temp),
            "Anthropic", model, given_prompt,
        )

    def call_google(self, model, temp=0.7, given_prompt="Hi"):
        """Query a Google Gemini chat model; never raises (returns ⚠️ string on error)."""
        return self._invoke(
            lambda: ChatGoogleGenerativeAI(model=model, temperature=temp),
            "Google", model, given_prompt,
        )

    def call_groq(self, model, temp=0.7, given_prompt="Hi"):
        """Query a Groq-hosted chat model; never raises (returns ⚠️ string on error)."""
        return self._invoke(
            lambda: ChatGroq(model=model, temperature=temp),
            "Groq", model, given_prompt,
        )

    def response(self, model, prompt="Hi", temperature=0.7):
        """Dispatch ``prompt`` to whichever provider owns ``model``.

        Returns the reply text, a ``❌`` string for unknown models, or a
        ``⚠️`` string if dispatch itself fails unexpectedly.
        """
        try:
            if model in self.openai_models:
                return self.call_openai(model, temp=temperature, given_prompt=prompt)
            elif model in self.anthropic_models:
                return self.call_anthropic(model, temp=temperature, given_prompt=prompt)
            elif model in self.google_models:
                return self.call_google(model, temp=temperature, given_prompt=prompt)
            elif model in self.groq_models:
                return self.call_groq(model, temp=temperature, given_prompt=prompt)
            else:
                return f"❌ Unsupported model: {model}"
        except Exception as e:
            return f"⚠️ Skipping `{model}` due to error: {str(e)}"