fCola committed
Commit d1d6df8 · verified · 1 Parent(s): a7f8386

Update app.py

Files changed (1)
  1. app.py +35 -5
app.py CHANGED
@@ -1,7 +1,13 @@
+from threading import Thread
+
+
 import gradio as gr
 from gradio.themes import Base
 from gradio.themes.utils import colors
 
+from transformers import pipeline, TextIteratorStreamer
+
+
 # Custom theme colors based on brand standards
 class ArtemisiaTheme(Base):
     def __init__(self, **kwargs):
@@ -425,13 +431,37 @@ paper_plane_svg = """<svg xmlns="http://www.w3.org/2000/svg" width="20" height="
     <path d="M22 2L15 22L11 13L2 9L22 2Z"/>
     </svg>"""
 
+
+# Pipeline loading
+generator = pipeline("text-generation", model="openai-community/gpt2")
+
 # Mock data function for chatbot
 def send_message(message, history):
-    if not message:
-        return history
-    history.append({"role": "user", "content": message})
-    history.append({"role": "assistant", "content": f"This is a response about: {message}"})
-    return history
+    if not message:
+        yield history
+        return
+    history = history + [{"role": "user", "content": message}]
+
+    # Tokenize the prompt and stream tokens from a background generation thread
+    tokenizer = generator.tokenizer
+    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
+    input_ids = tokenizer(message, return_tensors="pt").input_ids
+
+    gen_kwargs = {
+        "inputs": input_ids,
+        "streamer": streamer,
+        "pad_token_id": tokenizer.eos_token_id,
+        "max_length": 8192,
+        "temperature": 0.1,
+        "top_p": 0.8,
+        "repetition_penalty": 1.25,
+    }
+
+    thread = Thread(target=generator.model.generate, kwargs=gen_kwargs)
+    thread.start()
+
+    # Yield the growing assistant reply as new text arrives from the streamer
+    partial = ""
+    for new_text in streamer:
+        partial += new_text
+        yield history + [{"role": "assistant", "content": partial}]
 
 # Create the dashboard
 with gr.Blocks(theme=ArtemisiaTheme(), css=custom_css) as demo:
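
For context, a minimal sketch of how a streaming generator like the new send_message is typically wired into the Blocks UI. The dashboard's actual components are defined further down in app.py and are not part of this diff, so the names below (chatbot, msg_box, send_btn) are illustrative assumptions rather than the committed layout.

# Wiring sketch (not part of the commit): component names are assumptions.
with gr.Blocks(theme=ArtemisiaTheme(), css=custom_css) as demo:
    # The messages format matches the {"role": ..., "content": ...} dicts yielded above
    chatbot = gr.Chatbot(type="messages")
    msg_box = gr.Textbox(placeholder="Ask something...")
    send_btn = gr.Button("Send")

    # Because send_message is a generator, Gradio re-renders the chatbot on every
    # yield, which produces the token-by-token streaming effect in the UI.
    send_btn.click(send_message, inputs=[msg_box, chatbot], outputs=chatbot)
    msg_box.submit(send_message, inputs=[msg_box, chatbot], outputs=chatbot)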