Commit 383cecf
Parent(s): 874fe39
Update app.py

app.py CHANGED
@@ -1,6 +1,8 @@
 import gradio as gr
 from langchain import PromptTemplate, LLMChain, HuggingFaceHub
 from langchain.llms import CTransformers
+from transformers import AutoModelForCausalLM, AutoTokenizer
+

 def generate_prompts(user_input):
     prompt_template = PromptTemplate(
@@ -9,9 +11,12 @@ def generate_prompts(user_input):
     )
     config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}

-
-
-
+    model_name = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+    llm = CTransformers(model, tokenizer)
+
     hub_chain = LLMChain(prompt = prompt_template, llm = llm)
     input_data = {"Question": user_input}

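Note on the committed code: LangChain's CTransformers wrapper expects a model path or Hub repo id (optionally with model_file and model_type for GGUF weights), not in-memory transformers objects, so CTransformers(model, tokenizer) as committed is unlikely to run, and AutoModelForCausalLM.from_pretrained will generally fail on a GGUF-only repo such as TheBloke/Mistral-7B-Instruct-v0.1-GGUF. Below is a minimal sketch of the intended wiring, assuming that repo; the exact model_file name and the prompt template body (which the diff truncates) are assumptions, not taken from the commit.

from langchain import PromptTemplate, LLMChain
from langchain.llms import CTransformers

def generate_prompts(user_input):
    # Template body is an assumption; the diff does not show the original.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Answer the following question:\n{Question}",
    )
    config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}

    # CTransformers downloads and loads the GGUF weights itself,
    # so no AutoModelForCausalLM / AutoTokenizer objects are needed.
    llm = CTransformers(
        model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
        model_file="mistral-7b-instruct-v0.1.Q4_K_M.gguf",  # assumed quantization file
        model_type="mistral",
        config=config,
    )

    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
    input_data = {"Question": user_input}
    return hub_chain.run(input_data)

Passing run() a single-key dict works because the chain has exactly one input variable; the Gradio wiring in the rest of app.py can stay as is.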