Amirizaniani committed on
Commit
1ec84da
·
1 Parent(s): 2292262

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -8
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import gradio as gr
2
- from langchain import PromptTemplate, LLMChain
3
  from langchain.llms import CTransformers
4
- from transformers import AutoModelForCausalLM, AutoTokenizer
5
 
6
  def generate_prompts(user_input):
7
  prompt_template = PromptTemplate(
@@ -10,13 +9,9 @@ def generate_prompts(user_input):
10
  )
11
  config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
12
 
 
 
13
 
14
- model_name = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"
15
- model = AutoModelForCausalLM.from_pretrained(model_name)
16
- tokenizer = AutoTokenizer.from_pretrained(model_name)
17
- llm = CTransformers(model, tokenizer,
18
- config=config,
19
- threads=os.cpu_count())
20
  hub_chain = LLMChain(prompt = prompt_template, llm = llm)
21
 
22
  input_data = {"Question": user_input}
 
1
  import gradio as gr
2
+ from langchain import PromptTemplate, LLMChain, HuggingFaceHub
3
  from langchain.llms import CTransformers
 
4
 
5
  def generate_prompts(user_input):
6
  prompt_template = PromptTemplate(
 
9
  )
10
  config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
11
 
12
+ llm = HuggingFaceHub(repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
13
+ model_kwargs={"temperature": 0, "max_length":200},
14
 
 
 
 
 
 
 
15
  hub_chain = LLMChain(prompt = prompt_template, llm = llm)
16
 
17
  input_data = {"Question": user_input}