Amirizaniani committed on
Commit
1de37c9
·
1 Parent(s): e672b0f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -13,12 +13,18 @@ def generate_prompts(user_input):
13
  config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
14
 
15
  llm = HuggingFaceHub(
16
- repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 0.5, "max_length": 64})
 
 
 
 
 
 
17
 
18
  hub_chain = LLMChain(prompt = prompt_template, llm = llm)
19
  input_data = {"Question": user_input}
20
 
21
- generated_prompts = hub_chain.run(input_data) # Modify this part based on how you run the model
22
  questions_list = generated_prompts.split('\n')
23
 
24
 
 
13
  config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
14
 
15
  llm = HuggingFaceHub(
16
+ repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 0.5, "max_length": 64})
17
+
18
+ model_name = "deepset/roberta-base-squad2"
19
+ model = AutoModelForQuestionAnswering.from_pretrained(model_name)
20
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
21
+
22
+ llm = pipeline("question-answering", model=model, tokenizer=tokenizer)
23
 
24
  hub_chain = LLMChain(prompt = prompt_template, llm = llm)
25
  input_data = {"Question": user_input}
26
 
27
+ generated_prompts = hub_chain.run(input_data)
28
  questions_list = generated_prompts.split('\n')
29
 
30