import sys
import os

# Make the project root importable so the local llm package resolves.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(current_dir)
if project_root not in sys.path:
    sys.path.append(project_root)

from llm.wenxin_llm import Wenxin_LLM
from llm.spark_llm import Spark_LLM
from llm.zhipuai_llm import ZhipuAILLM
from langchain.chat_models import ChatOpenAI
from llm.call_llm import parse_llm_api_key


def model_to_llm(model: str = None, temperature: float = 0.0, appid: str = None,
                 api_key: str = None, Spark_api_secret: str = None, Wenxin_secret_key: str = None):
    """
    Build and return an LLM instance for the given model name.

    Required parameters per provider:
        Spark:        model, temperature, appid, api_key, api_secret
        Baidu Wenxin: model, temperature, api_key, secret_key
        ZhipuAI:      model, temperature, api_key
        OpenAI:       model, temperature, api_key
    """
    if model in ["gpt-3.5-turbo", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0613", "gpt-4", "gpt-4-32k"]:
        if api_key is None:
            api_key = parse_llm_api_key("openai")
        llm = ChatOpenAI(model_name=model, temperature=temperature, openai_api_key=api_key)
    elif model in ["ERNIE-Bot", "ERNIE-Bot-4", "ERNIE-Bot-turbo"]:
        if api_key is None or Wenxin_secret_key is None:
            api_key, Wenxin_secret_key = parse_llm_api_key("wenxin")
        llm = Wenxin_LLM(model=model, temperature=temperature, api_key=api_key, secret_key=Wenxin_secret_key)
    elif model == "Spark-X1":
        # All three Spark credentials are required, so fall back to the
        # configured values whenever any one of them is missing.
        if api_key is None or appid is None or Spark_api_secret is None:
            api_key, appid, Spark_api_secret = parse_llm_api_key("spark")
        llm = Spark_LLM(model=model, temperature=temperature, appid=appid,
                        api_secret=Spark_api_secret, api_key=api_key)
    elif model in ["chatglm_pro", "chatglm_std", "chatglm_lite"]:
        if api_key is None:
            api_key = parse_llm_api_key("zhipuai")
        llm = ZhipuAILLM(model=model, zhipuai_api_key=api_key, temperature=temperature)
    elif model in ["qwen-turbo", "qwen-plus", "qwen-max"]:
        if api_key is None:
            api_key = parse_llm_api_key("ali")
        # Lazy import: Ali_LLM is only needed when a Qwen model is requested.
        from llm.ali_llm import Ali_LLM
        llm = Ali_LLM(model=model, temperature=temperature, api_key=api_key)
    else:
        raise ValueError(f"model {model} is not supported!")
    return llm
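

# Minimal usage sketch (assumes an OpenAI key is either passed in explicitly or
# resolvable via parse_llm_api_key("openai"); "gpt-3.5-turbo" is just an
# illustrative model name, not a requirement of this module):
if __name__ == "__main__":
    llm = model_to_llm(model="gpt-3.5-turbo", temperature=0.0)
    print(llm.predict("Hello!"))  # predict() sends a single-turn text prompt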