fCola committed on
Commit c918044 · verified · 1 Parent(s): 05e7dc6

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -443,22 +443,22 @@ def send_message(message, history):
     #history.append({"role": "assistant", "content": f"This is a response about: {message}"})
     #return history
     tokenizer = generator.tokenizer
-    streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True)
+    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
 
     gen_kwargs = {
         "inputs": input_ids,
         "streamer": streamer,
-        "pad_token_id": self.tokenizer.eos_token_id,
+        "pad_token_id": tokenizer.eos_token_id,
         "max_length": 8192,
         "temperature": 0.1,
         "top_p": 0.8,
         "repetition_penalty": 1.25,
     }
-
-    thread = Thread(target=self.model.generate, kwargs=gen_kwargs)
+    partial = ""
+    thread = Thread(target=pipeline.generate, kwargs=gen_kwargs)
     thread.start()
     #for token in generator(message, max_new_tokens=200):
-    for t in response_generator:
+    for t in streamer:
        partial += t  #token["generated_text"][len(message):]
        yield history + [{"role": "assistant", "content": partial}]
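
For context: the hunk above removes stray self. references from what is a plain module-level function (where self is undefined and would raise a NameError), initializes partial before the streaming loop, and iterates the streamer directly instead of the undefined response_generator. Below is a minimal, self-contained sketch of the TextIteratorStreamer pattern the committed code converges on. It is an illustration under assumptions, not the Space's actual code: the model name and the do_sample flag do not appear in the diff, and model/tokenizer stand in for the Space's generator/pipeline objects, which this hunk never defines.

    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    # Placeholder checkpoint -- the Space's real model is not shown in this hunk.
    model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    def send_message(message, history):
        input_ids = tokenizer(message, return_tensors="pt").input_ids
        # skip_prompt=True keeps the echoed prompt out of the streamed text.
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        gen_kwargs = {
            "inputs": input_ids,
            "streamer": streamer,
            "pad_token_id": tokenizer.eos_token_id,
            "max_length": 8192,
            "do_sample": True,  # required for temperature/top_p to take effect
            "temperature": 0.1,
            "top_p": 0.8,
            "repetition_penalty": 1.25,
        }
        # generate() blocks until it finishes, so it runs on a worker thread
        # while this generator function drains the streamer chunk by chunk.
        thread = Thread(target=model.generate, kwargs=gen_kwargs)
        thread.start()
        partial = ""
        for t in streamer:  # yields decoded text pieces as they are produced
            partial += t
            yield history + [{"role": "assistant", "content": partial}]
        thread.join()

Each yielded value is the existing history plus the growing assistant message, which is the messages-format shape a Gradio chatbot configured with type="messages" re-renders on every step.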