surkovvv committed on
Commit
b261ed6
·
1 Parent(s): d8257a2

new message format3

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -19,17 +19,19 @@ class StopOnTokens(StoppingCriteria):
19
 
20
 
21
  def predict(message, history):
22
- history_transformer_format = history + [[message, ""]]
 
 
23
  stop = StopOnTokens()
24
 
25
  # messages = "".join(["".join(["<|start_header_id|>user<|end_header_id|>\n"+item[0],
26
  # "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"+item[1]])
27
  # for item in history_transformer_format])
28
- messages = [{"user": item[0], "content": item[1]} for item in history_transformer_format]
29
- print(messages)
30
 
31
  # model_inputs = tokenizer([messages], return_tensors="pt") # .to("cuda")
32
- model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
33
  streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
34
  generate_kwargs = dict(
35
  model_inputs,
 
19
 
20
 
21
  def predict(message, history):
22
+ print(history)
23
+ history_transformer_format = history + [{"role": "user", "content": message},
24
+ {"role": "assistant", "content": ""}]
25
  stop = StopOnTokens()
26
 
27
  # messages = "".join(["".join(["<|start_header_id|>user<|end_header_id|>\n"+item[0],
28
  # "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"+item[1]])
29
  # for item in history_transformer_format])
30
+ # messages = [{"role": "user", item[0], "content": item[1]} for item in history_transformer_format]
31
+ #print(messages)
32
 
33
  # model_inputs = tokenizer([messages], return_tensors="pt") # .to("cuda")
34
+ model_inputs = tokenizer.apply_chat_template(history_transformer_format, return_tensors="pt")
35
  streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
36
  generate_kwargs = dict(
37
  model_inputs,