tr3n1ttty committed on
Commit
6757728
·
1 Parent(s): abf4b40

changes in message encoding

Browse files
Files changed (1) hide show
  1. app.py +8 -7
app.py CHANGED
@@ -22,13 +22,14 @@ def predict(message, history):
22
  history_transformer_format = history + [[message, ""]]
23
  stop = StopOnTokens()
24
 
25
- messages = "".join(["".join(["<|start_header_id|>user<|end_header_id|>\n"+item[0],
26
- "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"+item[1]])
27
- for item in history_transformer_format])
28
- print("MESSAGES: ", messages)
29
-
30
- model_inputs = tokenizer([messages], return_tensors="pt") # .to("cuda")
31
- print("MODEL INPUT:\n", model_inputs)
 
32
  streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
33
  generate_kwargs = dict(
34
  model_inputs,
 
22
  history_transformer_format = history + [[message, ""]]
23
  stop = StopOnTokens()
24
 
25
+ # messages = "".join(["".join(["<|start_header_id|>user<|end_header_id|>\n"+item[0],
26
+ # "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"+item[1]])
27
+ # for item in history_transformer_format])
28
+ messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]])
29
+ for item in history_transformer_format])
30
+
31
+ # model_inputs = tokenizer([messages], return_tensors="pt") # .to("cuda")
32
+ model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
33
  streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
34
  generate_kwargs = dict(
35
  model_inputs,