fCola committed on
Commit
41ed910
·
verified ·
1 Parent(s): 701819e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -32
app.py CHANGED
@@ -494,7 +494,7 @@ database = {k.lower(): v for k, v in database.items()}
494
 
495
 
496
  # Mock data function for chatbot
497
- def send_message(message, history):
498
  #if not message:
499
  # return history
500
  #history.append({"role": "assistant", "content": f"This is a response about: {message}"})
@@ -514,27 +514,33 @@ def send_message(message, history):
514
  continue
515
  else:
516
  partial += t
517
- yield history + [{"role": "assistant", "content": partial}]
518
 
519
-
520
- def start_generation(message, history):
521
  history.append({"role": "user", "content": message})
522
- return message, history # Pass message along with updated history
523
-
524
-
525
- def continue_generation(message, history):
526
  if message.lower() in database:
527
  context = random.choice(database[message.lower()])
528
  message += " \n" + context
 
529
  response_generator = wrapper.generate(message, history)
530
  partial = ""
531
  idx = 0
 
 
532
  for t in response_generator:
533
  if idx <= 3:
534
  idx += 1
535
  continue
536
- partial += t
537
- yield history + [{"role": "assistant", "content": partial}]
 
 
 
 
 
 
538
 
539
 
540
  # Create the dashboard
@@ -610,31 +616,15 @@ with gr.Blocks(theme=ArtemisiaTheme(), css=custom_css) as demo:
610
  )
611
  """
612
  send_button.click(
613
- fn=lambda msg: (msg, gr.update(value="")),
614
- inputs=[chat_input],
615
- outputs=[gr.State(), chat_input] # Store original msg in State
616
- ).then(
617
- fn=start_generation,
618
- inputs=[gr.State(), chatbot], # Use the stored message
619
- outputs=[gr.State(), chatbot] # Store again for streaming
620
- ).then(
621
- fn=continue_generation,
622
- inputs=[gr.State(), chatbot], # Stream using same message
623
- outputs=[chatbot]
624
  )
625
 
626
  chat_input.submit(
627
- fn=lambda msg: (msg, gr.update(value="")),
628
- inputs=[chat_input],
629
- outputs=[gr.State(), chat_input]
630
- ).then(
631
- fn=start_generation,
632
- inputs=[gr.State(), chatbot],
633
- outputs=[gr.State(), chatbot]
634
- ).then(
635
- fn=continue_generation,
636
- inputs=[gr.State(), chatbot],
637
- outputs=[chatbot]
638
  )
639
  # JavaScript for UI enhancements
640
  gr.HTML("""
 
494
 
495
 
496
  # Mock data function for chatbot
497
+ """def send_message(message, history):
498
  #if not message:
499
  # return history
500
  #history.append({"role": "assistant", "content": f"This is a response about: {message}"})
 
514
  continue
515
  else:
516
  partial += t
517
+ yield history + [{"role": "assistant", "content": partial}]"""
518
 
519
def send_message(message, history):
    """Stream an assistant reply for *message*, updating the chat UI.

    Appends the user turn to *history*, optionally augments the prompt with
    extra context looked up in the module-level ``database``, then streams
    tokens from ``wrapper.generate``.  The first 4 tokens are skipped
    (presumably model preamble/echo — TODO confirm against the wrapper).

    Args:
        message: Raw text from the input textbox.
        history: Chat history as a list of ``{"role", "content"}`` dicts;
            mutated in place by appending the user turn.

    Yields:
        ``(textbox_update, messages)`` pairs — one per streamed token —
        matching the event handlers' ``outputs=[chat_input, chatbot]``.
        The textbox is cleared on the first useful token.
    """
    history.append({"role": "user", "content": message})

    # Retrieve extra context for known prompts, if any.
    key = message.lower()
    if key in database:
        context = random.choice(database[key])
        message += " \n" + context

    response_generator = wrapper.generate(message, history)
    partial = ""
    idx = 0
    cleared = False

    for t in response_generator:
        if idx <= 3:
            # Skip the first few tokens of the stream.
            idx += 1
            continue
        partial += t
        if not cleared:
            # First useful token arrived: clear the input textbox too.
            cleared = True
            yield gr.update(value=""), history + [{"role": "assistant", "content": partial}]
        else:
            # BUG FIX: always yield a (textbox, chatbot) pair.  The handlers
            # bind outputs=[chat_input, chatbot]; yielding the bare messages
            # list here would be mis-unpacked across the two outputs.
            yield gr.update(), history + [{"role": "assistant", "content": partial}]

    # Edge case: the generator produced <= 4 tokens, so nothing was yielded
    # above — still clear the textbox and render the user's turn.
    if not cleared:
        yield gr.update(value=""), history
544
 
545
 
546
  # Create the dashboard
 
616
  )
617
  """
618
  send_button.click(
619
+ fn=send_message,
620
+ inputs=[chat_input, chatbot],
621
+ outputs=[chat_input, chatbot] # la textbox viene aggiornata qui dentro
 
 
 
 
 
 
 
 
622
  )
623
 
624
  chat_input.submit(
625
+ fn=send_message,
626
+ inputs=[chat_input, chatbot],
627
+ outputs=[chat_input, chatbot]
 
 
 
 
 
 
 
 
628
  )
629
  # JavaScript for UI enhancements
630
  gr.HTML("""