MuntasirHossain committed
Commit ba41222 · verified · 1 Parent(s): 1489d1c

Update app.py

Files changed (1): app.py (+74 -7)
app.py CHANGED
@@ -2,6 +2,11 @@ import gradio as gr
 import os
 api_token = os.getenv("HF_TOKEN")
 
+from langchain.llms.base import LLM
+from transformers import AutoTokenizer
+from huggingface_hub import HfApi
+import requests
+
 
 from langchain_community.vectorstores import FAISS
 from langchain_community.document_loaders import PyPDFLoader
@@ -15,9 +20,64 @@ from langchain.memory import ConversationBufferMemory
 from langchain_community.llms import HuggingFaceHub, HuggingFaceEndpoint
 import torch
 
-list_llm = ["meta-llama/Llama-3.1-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"] # meta-llama/Meta-Llama-3-8B-Instruct
+
+from langchain.llms.base import LLM
+from transformers import AutoTokenizer
+from huggingface_hub import HfApi
+import requests
+
+list_llm = ["HuggingFaceH4/zephyr-7b-beta", "meta-llama/Llama-3.1-8B-Instruct"] # "mistralai/Mistral-7B-Instruct-v0.2" # meta-llama/Meta-Llama-3-8B-Instruct
 list_llm_simple = [os.path.basename(llm) for llm in list_llm]
 
+class ZephyrLLM(LLM):
+    def __init__(self, repo_id, huggingfacehub_api_token, max_new_tokens=512, temperature=0.7, **kwargs):
+        super().__init__(**kwargs)
+        self.repo_id = repo_id
+        self.api_token = huggingfacehub_api_token
+        self.api_url = f"https://api-inference.huggingface.co/models/{repo_id}"
+        self.headers = {"Authorization": f"Bearer {huggingfacehub_api_token}"}
+        self.tokenizer = AutoTokenizer.from_pretrained(repo_id)
+        self.max_new_tokens = max_new_tokens
+        self.temperature = temperature
+
+    def _call(self, prompt, stop=None):
+        # Format as chat message
+        messages = [{"role": "user", "content": prompt}]
+
+        # Apply Zephyr's chat template
+        formatted_prompt = self.tokenizer.apply_chat_template(
+            messages, tokenize=False, add_generation_prompt=True
+        )
+
+        # Send request to Hugging Face Inference API
+        payload = {
+            "inputs": formatted_prompt,
+            "parameters": {
+                "max_new_tokens": self.max_new_tokens,
+                "temperature": self.temperature
+            }
+        }
+        response = requests.post(self.api_url, headers=self.headers, json=payload)
+
+        if response.status_code == 200:
+            full_response = response.json()[0]["generated_text"]
+
+            # Extract the assistant reply from the full response
+            # After <|assistant|>\n, everything is the model's answer
+            if "<|assistant|>" in full_response:
+                return full_response.split("<|assistant|>")[-1].strip()
+            else:
+                return full_response.strip()
+
+        else:
+            raise Exception(f"Failed call [{response.status_code}]: {response.text}")
+
+
+    @property
+    def _llm_type(self) -> str:
+        return "zephyr-custom"
+
+
 # Load and split PDF document
 def load_doc(list_file_path):
     # Processing for one document only
@@ -43,7 +103,14 @@ def create_db(splits):
 
 # Initialize langchain LLM chain
 def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
-    if llm_model == "meta-llama/Llama-3.1-8B-Instruct":
+    if llm_model == "HuggingFaceH4/zephyr-7b-beta":
+        llm = ZephyrLLM(
+            repo_id=llm_model,
+            huggingfacehub_api_token=api_token,
+            temperature=temperature,
+            max_new_tokens=max_tokens,
+        )
+    # if llm_model == "meta-llama/Llama-3.1-8B-Instruct":
         # llm = HuggingFaceEndpoint(
         #     repo_id=llm_model,
         #     huggingfacehub_api_token = api_token,
@@ -52,11 +119,11 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
         #     top_k = top_k,
         # )
 
-        llm = HuggingFaceHub(
-            repo_id="mistralai/Mistral-7B-Instruct-v0.2",
-            huggingfacehub_api_token=api_token,
-            model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens}
-        )
+        # llm = HuggingFaceHub(
+        #     repo_id="mistralai/Mistral-7B-Instruct-v0.2",
+        #     huggingfacehub_api_token=api_token,
+        #     model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens}
+        # )
 
     else:
         llm = HuggingFaceEndpoint(
 
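Note on the new class: the request flow that ZephyrLLM._call implements can be exercised standalone. The sketch below is not part of the commit; it assumes HF_TOKEN is set in the environment and mirrors the class's calls (chat-template formatting, a POST to the serverless Inference API, and trimming the echoed prompt).

# Standalone sketch of the same request flow ZephyrLLM._call performs.
# Not part of the commit; assumes HF_TOKEN is set in the environment.
import os
import requests
from transformers import AutoTokenizer

repo_id = "HuggingFaceH4/zephyr-7b-beta"
token = os.getenv("HF_TOKEN")

# Zephyr's chat template wraps the user turn in <|user|> ... <|assistant|> tags
tokenizer = AutoTokenizer.from_pretrained(repo_id)
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What is FAISS used for?"}],
    tokenize=False,
    add_generation_prompt=True,
)

response = requests.post(
    f"https://api-inference.huggingface.co/models/{repo_id}",
    headers={"Authorization": f"Bearer {token}"},
    json={"inputs": prompt, "parameters": {"max_new_tokens": 256, "temperature": 0.7}},
)
response.raise_for_status()

# The API echoes the prompt in generated_text; keep only the assistant turn,
# exactly as _call does with its split on <|assistant|>.
text = response.json()[0]["generated_text"]
print(text.split("<|assistant|>")[-1].strip())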
 
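The rest of initialize_llmchain sits outside this diff, so the chain construction is not shown. Based on the imports visible in the hunks above (FAISS, ConversationBufferMemory), a plausible wiring of the selected llm into the retrieval chain might look like the sketch below; it is an assumption, not the file's actual code, and the helper name build_qa_chain is hypothetical.

# Hypothetical wiring of the selected llm into a conversational RAG chain;
# the real code lives outside this diff. vector_db is the FAISS store
# built by create_db above.
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

def build_qa_chain(llm, vector_db):
    # Memory keyed the way ConversationalRetrievalChain expects
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        return_messages=True,
    )
    return ConversationalRetrievalChain.from_llm(
        llm,
        retriever=vector_db.as_retriever(),
        memory=memory,
        return_source_documents=True,
    )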
 
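Finally, a quick smoke test for the new default-model branch. This is hypothetical: it assumes app.py is importable with HF_TOKEN set, and llm.invoke requires a recent LangChain (older versions call llm(...) directly).

# Hypothetical smoke test for the ZephyrLLM branch; not part of the commit.
import os
from app import ZephyrLLM, list_llm

llm = ZephyrLLM(
    repo_id=list_llm[0],  # "HuggingFaceH4/zephyr-7b-beta", the new default
    huggingfacehub_api_token=os.getenv("HF_TOKEN"),
    temperature=0.7,
    max_new_tokens=128,
)
print(llm.invoke("Summarize retrieval-augmented generation in one sentence."))

One caveat: on LangChain versions where the LLM base class is a strict Pydantic model, the plain attribute assignments in ZephyrLLM.__init__ may be rejected, in which case the fields would need to be declared on the class instead.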