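"""Multi-provider chat bot.

Routes a prompt to a Groq, Hugging Face, or Google Gemini model via
LangChain and returns the parsed string response (or an error message).
"""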
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_google_genai import ChatGoogleGenerativeAI
from dotenv import load_dotenv
from huggingface_hub import login
import os
# Load environment variables and log in to Hugging Face (only if a token is set)
load_dotenv()
hf_token = os.environ.get("HUGGING_FACE_API_KEY", "")
if hf_token:
    login(token=hf_token)
# NOTE: clearing CURL_CA_BUNDLE disables TLS certificate verification; this is
# a workaround for proxied environments and should not be used in production.
os.environ['CURL_CA_BUNDLE'] = ''
class Bot:
    def __init__(self):
        # Updated model lists: gated and unsupported IDs removed
        self.groq_models = [
            'gemma2-9b-it',
            'llama-3.3-70b-versatile',
            'llama-3.1-8b-instant',
            'meta-llama/llama-guard-4-12b'
        ]
        self.hf_models = [
            "01-ai/Yi-1.5-34B-Chat",
            # "google/gemma-1.1-2b-it"
        ]
        # Supported Google GenAI model names; "gemini-pro" and
        # "gemini-pro-vision" have been retired, so use 1.5-series IDs
        self.google_models = [
            "gemini-1.5-flash",
            "gemini-1.5-pro"
        ]
        # Master list for sampling (only include accessible models)
        self.models = self.google_models + self.hf_models + self.groq_models
    def call_groq(self, model, temp=0.7, given_prompt="Hi"):
        # Build a chat prompt -> Groq LLM -> string parser chain and run it
        try:
            llm = ChatGroq(model=model, temperature=temp)
            prompt = ChatPromptTemplate.from_messages([
                ("system", "You are a helpful assistant."),
                ("human", "{text}")
            ])
            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})
        except Exception as e:
            return f"⚠️ [Groq:{model}] {str(e)}"
    def call_hf(self, model, temp=0.7, given_prompt="Hi"):
        # Wrap a Hugging Face Inference endpoint in a chat interface
        try:
            llm = HuggingFaceEndpoint(repo_id=model, temperature=temp)
            chat = ChatHuggingFace(llm=llm, verbose=True)
            template = """
            You are a helpful assistant.
            User: {query}
            Answer:
            """
            prompt = PromptTemplate(template=template, input_variables=["query"])
            chain = prompt | chat | StrOutputParser()
            return chain.invoke({"query": given_prompt})
        except Exception as e:
            return f"⚠️ [HF:{model}] {str(e)}"
    def call_google(self, model, temp=0.7, given_prompt="Hi"):
        # Same chain pattern as call_groq, but against Google Gemini
        try:
            gm = ChatGoogleGenerativeAI(model=model, temperature=temp)
            prompt = ChatPromptTemplate.from_messages([
                ("system", "You are a helpful assistant."),
                ("human", "{text}")
            ])
            chain = prompt | gm | StrOutputParser()
            return chain.invoke({"text": given_prompt})
        except Exception as e:
            return f"⚠️ [Google:{model}] {str(e)}"
    def response(self, model, prompt="Hi", temperature=0.7):
        # Route to the correct provider and catch errors
        try:
            if model in self.groq_models:
                return self.call_groq(model, temp=temperature, given_prompt=prompt)
            if model in self.hf_models:
                return self.call_hf(model, temp=temperature, given_prompt=prompt)
            if model in self.google_models:
                return self.call_google(model, temp=temperature, given_prompt=prompt)
            return f"❌ Unsupported model: {model}"
        except Exception as e:
            return f"⚠️ Skipping {model} due to error: {str(e)}"
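
# Usage sketch (assumes GROQ_API_KEY, GOOGLE_API_KEY, and HUGGING_FACE_API_KEY
# are set in the environment; the model chosen below is illustrative):
if __name__ == "__main__":
    bot = Bot()
    print(bot.response("llama-3.1-8b-instant", prompt="Say hello in one line.", temperature=0.5))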