Amirizaniani committed on
Commit e12f506 · 1 Parent(s): fe6c5d7

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -12,7 +12,7 @@ def generate_prompts(user_input):
 
     prompt_template = PromptTemplate(
         input_variables=["Question"],
-        template=f"Just list 10 quetion prompts for {user_input} and don't put number before each of the prompts."
+        template=f"list 10 quetion prompts for {user_input}"
     )
     config = {'max_new_tokens': 512, 'temperature': 0.7, 'context_length': 512}
     llm = CTransformers(model="TheBloke/zephyr-7B-alpha-GGUF",
@@ -31,7 +31,7 @@ def generate_prompts(user_input):
 def answer_question(prompt):
     prompt_template = PromptTemplate(
         input_variables=["Question"],
-        template=f"Answer '{prompt} ' and do not consider the number behind it."
+        template=f"Answer '{prompt} 'and do not consider the number behind it."
     )
     config = {'max_new_tokens': 512, 'temperature': 0.7, 'context_length': 512}
     llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
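For context, the hunks imply a common LangChain pattern: a PromptTemplate rendered through a CTransformers-backed chain. Below is a minimal runnable sketch of that pattern, not the repository's actual code: the imports, the LLMChain wiring, and the model_file argument are assumptions. The sketch also uses a real {question} placeholder, whereas the committed code interpolates user_input with an f-string before the template ever sees it, leaving the declared "Question" input variable unused.

    # Sketch of the pattern the diff implies; assumptions are marked inline.
    from langchain.prompts import PromptTemplate
    from langchain.chains import LLMChain
    from langchain_community.llms import CTransformers  # assumed import path

    def generate_prompts(user_input: str) -> str:
        # Declare a template variable and let LangChain substitute it, rather
        # than baking user_input in via an f-string as the committed code does.
        prompt_template = PromptTemplate(
            input_variables=["question"],
            template="List 10 question prompts for {question}.",
        )
        config = {"max_new_tokens": 512, "temperature": 0.7, "context_length": 512}
        llm = CTransformers(
            model="TheBloke/zephyr-7B-alpha-GGUF",     # repo id from the diff
            model_file="zephyr-7b-alpha.Q4_K_M.gguf",  # hypothetical quantization file
            config=config,
        )
        chain = LLMChain(llm=llm, prompt=prompt_template)  # wiring is an assumption
        return chain.run(question=user_input)

The answer_question hunk follows the same shape, swapping in TheBloke/Llama-2-7B-Chat-GGML as the model.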