fCola committed on
Commit
99b4862
·
verified ·
1 Parent(s): abb9d2c

Final touches

Browse files
Files changed (1) hide show
  1. app.py +44 -8
app.py CHANGED
@@ -1,4 +1,6 @@
1
  import os
 
 
2
  from threading import Thread
3
 
4
 
@@ -51,10 +53,10 @@ class HfModelWrapper:
51
  "inputs": input_ids,
52
  "streamer": streamer,
53
  "pad_token_id": self.tokenizer.eos_token_id,
54
- "max_length": 8192,
55
  "temperature": 0.1,
56
  "top_p": 0.8,
57
- "repetition_penalty": 1.25,
58
  }
59
 
60
  thread = Thread(target=self.model.generate, kwargs=gen_kwargs)
@@ -486,20 +488,36 @@ paper_plane_svg = """<svg xmlns="http://www.w3.org/2000/svg" width="20" height="
486
  </svg>"""
487
 
488
  wrapper = HfModelWrapper()
 
 
 
489
 
490
 
491
  # Mock data function for chatbot
492
  def send_message(message, history):
493
  #if not message:
494
  # return history
495
- history.append({"role": "user", "content": message})
496
  #history.append({"role": "assistant", "content": f"This is a response about: {message}"})
497
  #return history
 
 
 
 
 
 
498
  response_generator = wrapper.generate(message, history)
499
  partial = ""
 
500
  for t in response_generator:
501
- partial += t
502
- yield history + [{"role": "assistant", "content": partial}]
 
 
 
 
 
 
 
503
 
504
 
505
  # Create the dashboard
@@ -542,18 +560,36 @@ with gr.Blocks(theme=ArtemisiaTheme(), css=custom_css) as demo:
542
  results_placeholder = gr.Markdown("Ask a question in the chat to analyze risk data and see insights here", elem_classes="placeholder-text")
543
 
544
  # Wire up the chat functionality
 
 
 
 
 
 
 
 
 
 
 
545
  send_button.click(
 
 
 
 
546
  fn=send_message,
547
  inputs=[chat_input, chatbot],
548
  outputs=[chatbot]
549
- ).then(lambda: "", None, chat_input)
550
 
551
  chat_input.submit(
 
 
 
 
552
  fn=send_message,
553
  inputs=[chat_input, chatbot],
554
  outputs=[chatbot]
555
- ).then(lambda: "", None, chat_input)
556
-
557
  # JavaScript for UI enhancements
558
  gr.HTML("""
559
  <script>
 
1
  import os
2
+ import json
3
+ import random
4
  from threading import Thread
5
 
6
 
 
53
  "inputs": input_ids,
54
  "streamer": streamer,
55
  "pad_token_id": self.tokenizer.eos_token_id,
56
+ "max_length": 32768,
57
  "temperature": 0.1,
58
  "top_p": 0.8,
59
+ "repetition_penalty": 1.1,
60
  }
61
 
62
  thread = Thread(target=self.model.generate, kwargs=gen_kwargs)
 
488
  </svg>"""
489
 
490
wrapper = HfModelWrapper()
# Lookup table of known questions -> list of extra-context snippets.
# Keys are lower-cased once here so lookups can be case-insensitive.
# Explicit UTF-8: json.load must not depend on the platform default encoding.
with open("./question_mapping.json", "r", encoding="utf-8") as f:
    database = json.load(f)
database = {k.lower(): v for k, v in database.items()}
494
 
495
 
496
# Chat handler: streams the model's reply into the Gradio chatbot.
def send_message(message, history):
    """Stream an assistant reply for *message*, yielding the growing history.

    The raw user message is appended to *history* first (so the user sees
    exactly what they typed); the prompt sent to the model may additionally
    be augmented with curated context from the question database.

    Yields ``history + [assistant message]`` once per streamed token.
    """
    history.append({"role": "user", "content": message})

    # Recover extra data: exact (case-insensitive) match against the
    # question database; one context snippet is sampled at random.
    key = message.lower()
    if key in database:
        message += " \n" + random.choice(database[key])

    partial = ""
    for idx, token in enumerate(wrapper.generate(message, history)):
        # Drop the first 6 streamed tokens (presumably the echoed prompt
        # header from the streamer — TODO confirm against streamer config)
        # and suppress the end-of-turn marker.
        if idx <= 5 or token == "<|eot_id|>":
            continue
        partial += token
        yield history + [{"role": "assistant", "content": partial}]
521
 
522
 
523
  # Create the dashboard
 
560
  results_placeholder = gr.Markdown("Ask a question in the chat to analyze risk data and see insights here", elem_classes="placeholder-text")
561
 
562
# Wire up the chat functionality.
# BUG FIX: the previous wiring cleared chat_input in step 1 and wrote the
# message into an inline, unused gr.State(); step 2 then read chat_input
# again — already empty — so send_message always received "". A named
# State now carries the message across the .then() boundary.
msg_state = gr.State("")

send_button.click(
    lambda msg: (gr.update(value=""), msg),  # clear textbox, keep the text
    inputs=[chat_input],
    outputs=[chat_input, msg_state],
).then(
    fn=send_message,
    inputs=[msg_state, chatbot],
    outputs=[chatbot],
)

chat_input.submit(
    lambda msg: (gr.update(value=""), msg),
    inputs=[chat_input],
    outputs=[chat_input, msg_state],
).then(
    fn=send_message,
    inputs=[msg_state, chatbot],
    outputs=[chatbot],
)
 
593
  # JavaScript for UI enhancements
594
  gr.HTML("""
595
  <script>