雷娃 committed on
Commit 291372b · 1 Parent(s): eb16940

modify output length

Files changed (2):
  1. app.py +3 -3
  2. app_api.py +3 -3
app.py CHANGED
@@ -15,15 +15,15 @@ client = OpenAI(
 def chat(user_input, max_new_tokens=20480):
     # chat history
     messages_template = [
-        {"role": "system", "content": "You are Ling, an assistant created by inclusionAI"},
+        # {"role": "system", "content": "You are Ling, an assistant created by inclusionAI"},
         {"role": "user", "content": user_input}
     ]

     response = client.chat.completions.create(
         model="Ling-lite-1.5-250604",
         messages=messages_template,
-        max_tokens=max_new_tokens,
-        temperature=0.7,
+        max_tokens=11264,
+        temperature=0.01,
         top_p=1,
     )
     resp_text = response.choices[0].message.content
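For reference, a minimal sketch of chat() in app.py as it reads after this commit. The client setup is truncated in the hunk header, so the base_url and api_key below are placeholders, and the trailing return is assumed (the hunk ends at the assignment). Note that the hardcoded max_tokens=11264 leaves the max_new_tokens parameter unused.

from openai import OpenAI

# Placeholder credentials: the real client setup falls outside the hunk shown above.
client = OpenAI(base_url="https://example.com/v1", api_key="YOUR_API_KEY")

def chat(user_input, max_new_tokens=20480):
    # chat history; the system prompt is commented out by this commit
    messages_template = [
        # {"role": "system", "content": "You are Ling, an assistant created by inclusionAI"},
        {"role": "user", "content": user_input}
    ]

    response = client.chat.completions.create(
        model="Ling-lite-1.5-250604",
        messages=messages_template,
        max_tokens=11264,    # hardcoded by this commit; max_new_tokens is no longer used
        temperature=0.01,    # near-greedy decoding, lowered from 0.7
        top_p=1,
    )
    resp_text = response.choices[0].message.content
    return resp_text  # assumed: the original hunk ends at the assignment above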
app_api.py CHANGED
@@ -15,15 +15,15 @@ client = OpenAI(
 def chat(user_input, max_new_tokens=2048):
     # chat history
     messages_template = [
-        {"role": "system", "content": "You are Ling, an assistant created by inclusionAI"},
+        # {"role": "system", "content": "You are Ling, an assistant created by inclusionAI"},
         {"role": "user", "content": user_input}
     ]

     response = client.chat.completions.create(
         model="Ling-lite-1.5-250604",
         messages=messages_template,
-        max_tokens=max_new_tokens,
-        temperature=0.7,
+        max_tokens=11264,
+        temperature=0.01,
         top_p=1,
     )
     yield response.choices[0].message.content
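The app_api.py version differs only in that chat() yields the completion instead of returning it, so a caller iterates over the result; a usage sketch, assuming the same client setup:

for text in chat("Hello, Ling"):  # the generator yields the full completion once
    print(text)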