ginipick commited on
Commit
1a773fb
·
verified ·
1 Parent(s): 2f8b595

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -25
app.py CHANGED
@@ -125,7 +125,6 @@ def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
125
  formatted.append({"role": role, "parts": [m.content or ""]})
126
  return formatted
127
 
128
-
129
  import google.generativeai as genai
130
 
131
  def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
@@ -188,22 +187,14 @@ def explain_usage(app_content: str):
188
  except Exception as e:
189
  return f"사용법 설명 생성 중 오류 발생: {str(e)}"
190
 
191
-
192
  def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
193
  """
194
  Gemini에 스트리밍 요청.
195
- user_message가 빈 문자열이어도 예외 없이 처리.
196
  """
197
- # 빈 메시지 처리
198
  if not user_message.strip():
199
- conversation_state.append(
200
- ChatMessage(
201
- role="assistant",
202
- content="(Note: You sent an empty message. No LLM call was made.)"
203
- )
204
- )
205
- yield conversation_state
206
- return
207
 
208
  print(f"\n=== New Request ===\nUser message: {user_message}")
209
  chat_history = format_chat_history(conversation_state)
@@ -273,11 +264,9 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessa
273
  )
274
  yield conversation_state
275
 
276
-
277
  def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, str]]:
278
  """
279
- Chatbot에 type="messages"로 넘기기 위해,
280
- ChatMessage 리스트 -> [{"role": "...", "content": "..."}] 목록으로 변환
281
  """
282
  output = []
283
  for msg in messages:
@@ -286,18 +275,13 @@ def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, s
286
 
287
  def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
288
  conversation_state.append(ChatMessage(role="user", content=msg))
289
- # 입력창 클리어
290
  return "", conversation_state
291
 
292
  def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
293
- """
294
- Gemini 스트리밍 -> conversation_state 갱신 -> messages format에 맞춰 변환 후 반환
295
- """
296
  for updated_messages in stream_gemini_response(message, conversation_state):
297
- # ChatMessage -> [{"role":"user"/"assistant", "content": "..."}] 로 변환
298
  yield "", convert_for_messages_format(updated_messages)
299
 
300
-
301
  def create_ui():
302
  try:
303
  css = """
@@ -337,11 +321,10 @@ def create_ui():
337
  with gr.TabItem("AI ์ฝ”๋“œ์ฑ—"):
338
  gr.Markdown("## 예제를 입력 또는 소스 코드를 붙여넣고 질문하세요")
339
 
340
- # Chatbot: type="messages"
341
  chatbot = gr.Chatbot(
342
  label="대화",
343
  height=400,
344
- type="messages" # 이후엔 [{"role":..., "content":...}] 형식
345
  )
346
 
347
  msg = gr.Textbox(
@@ -375,7 +358,6 @@ def create_ui():
375
  ]
376
  gr.Examples(examples, inputs=msg)
377
 
378
- # 대화 상태
379
  conversation_state = gr.State([])
380
 
381
  msg.submit(
@@ -429,7 +411,6 @@ def create_ui():
429
  print(traceback.format_exc())
430
  raise
431
 
432
-
433
  if __name__ == "__main__":
434
  try:
435
  print("Starting HuggingFace Space Analyzer...")
 
125
  formatted.append({"role": role, "parts": [m.content or ""]})
126
  return formatted
127
 
 
128
  import google.generativeai as genai
129
 
130
  def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
 
187
  except Exception as e:
188
  return f"사용법 설명 생성 중 오류 발생: {str(e)}"
189
 
 
190
  def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
191
  """
192
  Gemini에 스트리밍 요청.
193
+ user_message가 비어 있으면, 아무것도 하지 않고 return.
194
  """
 
195
  if not user_message.strip():
196
+ # 사용자 메시지가 빈 경우: 아무 메시지도 추가 안 함, LLM 호출도 안 함.
197
+ return # yield도 없이 그냥 종료
 
 
 
 
 
 
198
 
199
  print(f"\n=== New Request ===\nUser message: {user_message}")
200
  chat_history = format_chat_history(conversation_state)
 
264
  )
265
  yield conversation_state
266
 
 
267
  def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, str]]:
268
  """
269
+ ChatMessage 리스트 -> [{"role":"...", "content":"..."}] 목록
 
270
  """
271
  output = []
272
  for msg in messages:
 
275
 
276
  def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
277
  conversation_state.append(ChatMessage(role="user", content=msg))
278
+ # 입력창 비움
279
  return "", conversation_state
280
 
281
  def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
 
 
 
282
  for updated_messages in stream_gemini_response(message, conversation_state):
 
283
  yield "", convert_for_messages_format(updated_messages)
284
 
 
285
  def create_ui():
286
  try:
287
  css = """
 
321
  with gr.TabItem("AI 코드챗"):
322
  gr.Markdown("## 예제를 입력 또는 소스 코드를 붙여넣고 질문하세요")
323
 
 
324
  chatbot = gr.Chatbot(
325
  label="대화",
326
  height=400,
327
+ type="messages"
328
  )
329
 
330
  msg = gr.Textbox(
 
358
  ]
359
  gr.Examples(examples, inputs=msg)
360
 
 
361
  conversation_state = gr.State([])
362
 
363
  msg.submit(
 
411
  print(traceback.format_exc())
412
  raise
413
 
 
414
  if __name__ == "__main__":
415
  try:
416
  print("Starting HuggingFace Space Analyzer...")