keynes42 committed on
Commit
e758acb
·
verified ·
1 Parent(s): 804ec12

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -106,7 +106,7 @@ class BasicModel:
106
  # This serialization might need adjustment based on how CodeAgent sends prompts.
107
  # It needs to handle both initial strings and potential chat histories (as a list of ChatMessage objects).
108
  if isinstance(messages, str):
109
- return f"<|start_header_id|>user<|end_header_id|>\n\n{messages}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
110
 
111
  prompt = []
112
  for m in messages: ## <- For each ChatMessage object
@@ -120,12 +120,12 @@ class BasicModel:
120
  content_text = "".join([c.get('text', '') for c in content if c.get('type') == 'text'])
121
  elif isinstance(content, str): ## <- If m.content is simply a string
122
  content_text = content
123
- prompt.append(f"<|start_header_id|>{role}<|end_header_id|>\n\n{content_text}<|eot_id|>") # Llama 3.1 format
124
 
125
  print(f"{role}: {content_text}") ## <- Print the last message in log
126
 
127
  # Add the assistant prompt start
128
- prompt.append("<|start_header_id|>assistant<|end_header_id|>\n\n")
129
 
130
  return "".join(prompt)
131
 
@@ -137,7 +137,7 @@ class BasicModel:
137
  # 2. Get the response
138
  terminators = [
139
  self.pipe.tokenizer.eos_token_id,
140
- self.pipe.tokenizer.convert_tokens_to_ids("<|eot_id|>")
141
  ]
142
  prompt_str = self._serialize_messages(prompt)
143
  outputs = self.pipe(prompt_str, eos_token_id=terminators, **gen_kwargs)
 
106
  # This serialization might need adjustment based on how CodeAgent sends prompts.
107
  # It needs to handle both initial strings and potential chat histories (as a list of ChatMessage objects).
108
  if isinstance(messages, str):
109
+ return f"<|im_start|>user\n\n{messages}<|im_end|><|im_start|>assistant<|im_end|>\n\n"
110
 
111
  prompt = []
112
  for m in messages: ## <- For each ChatMessage object
 
120
  content_text = "".join([c.get('text', '') for c in content if c.get('type') == 'text'])
121
  elif isinstance(content, str): ## <- If m.content is simply a string
122
  content_text = content
123
+ prompt.append(f"<|im_start|>{role}\n\n{content_text}<|im_end|>") # Qwen3 format
124
 
125
  print(f"{role}: {content_text}") ## <- Print the last message in log
126
 
127
  # Add the assistant prompt start
128
+ prompt.append("<|im_start|>assistant\n\n")
129
 
130
  return "".join(prompt)
131
 
 
137
  # 2. Get the response
138
  terminators = [
139
  self.pipe.tokenizer.eos_token_id,
140
+ self.pipe.tokenizer.convert_tokens_to_ids("<|endoftext|>")
141
  ]
142
  prompt_str = self._serialize_messages(prompt)
143
  outputs = self.pipe(prompt_str, eos_token_id=terminators, **gen_kwargs)