Amirizaniani committed
Commit aa06b0e · Parent(s): d398349

Update app.py

Files changed (1)
app.py +7 -6
app.py CHANGED
```diff
@@ -2,6 +2,7 @@ import gradio as gr
 from langchain import PromptTemplate, LLMChain, HuggingFaceHub
 from langchain.llms import CTransformers
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import pipeline
 
 
 def generate_prompts(user_input):
@@ -11,13 +12,13 @@ def generate_prompts(user_input):
     )
     config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
 
-    model_name ="TheBloke/Mistral-7B-Instruct-v0.1-GGUF"
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    llm = CTransformers(model=model,
-                        config=config,
-                        threads=os.cpu_count())
+
+    pipe = pipeline("text-generation", model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF")
+
+    llm_classifier = HuggingFacePipeline(pipeline=pipe)
 
-    hub_chain = LLMChain(prompt = prompt_template, llm = llm)
+
+    hub_chain = LLMChain(prompt = prompt_template, llm = llm_classifier)
 
     input_data = {"Question": user_input}
 
```
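A note on the resulting code: the added lines reference `HuggingFacePipeline` without importing it, and `transformers.pipeline` will generally fail to load `TheBloke/Mistral-7B-Instruct-v0.1-GGUF`, since that repo ships bare `.gguf` quantized weights rather than a standard transformers checkpoint. (The removed code had its own problems: `os` was never imported, and `CTransformers` expects a model name or path, not an `AutoModelForCausalLM` object.) Below is a minimal sketch of a runnable version of `generate_prompts`, assuming the non-quantized checkpoint `mistralai/Mistral-7B-Instruct-v0.1` and a hypothetical stand-in prompt template, since the real template lies outside this diff:

```python
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline  # import missing from the committed code
from transformers import pipeline


def generate_prompts(user_input):
    # Hypothetical stand-in: the actual template in the repo sits outside
    # this diff hunk and is not shown by the commit.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Answer the following question: {Question}",
    )

    # Assumption: a standard full-precision checkpoint, because pipeline()
    # cannot load the bare .gguf files in TheBloke/Mistral-7B-Instruct-v0.1-GGUF.
    # Generation settings mirror the commit's config dict.
    pipe = pipeline(
        "text-generation",
        model="mistralai/Mistral-7B-Instruct-v0.1",
        max_new_tokens=2048,
        temperature=0.7,
        do_sample=True,
    )
    llm_classifier = HuggingFacePipeline(pipeline=pipe)

    hub_chain = LLMChain(prompt=prompt_template, llm=llm_classifier)
    input_data = {"Question": user_input}
    return hub_chain.run(input_data)
```

Alternatively, the removed `CTransformers` path could have been kept by passing the GGUF repo name directly as a string, e.g. `CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF", config=config)`, since CTransformers is the loader actually built for GGML/GGUF weights.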