changes in message encoding
app.py CHANGED
@@ -22,13 +22,14 @@ def predict(message, history):
     history_transformer_format = history + [[message, ""]]
     stop = StopOnTokens()
 
-    messages = "".join(["".join(["<|start_header_id|>user<|end_header_id|>\n"+item[0],
-                                 "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"+item[1]])
-                        for item in history_transformer_format])
-
-
-
-    model_inputs = tokenizer([messages], return_tensors="pt")  # .to("cuda")
+    # messages = "".join(["".join(["<|start_header_id|>user<|end_header_id|>\n"+item[0],
+    #                              "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"+item[1]])
+    #                     for item in history_transformer_format])
+    messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]])
+                        for item in history_transformer_format])
+
+    # model_inputs = tokenizer([messages], return_tensors="pt")  # .to("cuda")
+    model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
     streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
     generate_kwargs = dict(
         model_inputs,
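
Note on the new call: as committed, model_inputs = tokenizer.apply_chat_template(messages, ...) will likely fail, because transformers' apply_chat_template expects a list of {"role": ..., "content": ...} message dicts, while messages here is a single pre-joined "<human>:/<bot>:" string. A minimal sketch of the intended conversion, assuming a recent transformers release; the helper name build_model_inputs is ours, not the Space's:

# Sketch under the assumptions above, not the Space's actual code.
def build_model_inputs(tokenizer, history_transformer_format):
    # history_transformer_format is the [[user, assistant], ...] list built
    # earlier in predict(); the final turn carries an empty assistant slot.
    chat = []
    for user_text, bot_text in history_transformer_format:
        chat.append({"role": "user", "content": user_text})
        if bot_text:
            chat.append({"role": "assistant", "content": bot_text})
    # add_generation_prompt=True appends the assistant header so the model
    # continues the chat; return_dict=True returns a BatchEncoding-style
    # mapping instead of a bare tensor of input ids, which keeps the
    # later generate_kwargs = dict(model_inputs, ...) call working.
    return tokenizer.apply_chat_template(
        chat,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )

Without return_dict=True the call returns a plain tensor, which would also break the dict(model_inputs, ...) line that follows in the diff, since dict() needs a mapping as its first positional argument.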