Update app.py
Browse files
app.py
CHANGED
@@ -1,238 +1,72 @@
|
|
1 |
-
import
|
2 |
-
import typing
|
3 |
-
if sys.version_info < (3, 11):
|
4 |
-
typing.Self = object # 3.10μμ μμ μ°ν
|
5 |
-
from PySide6.QtWidgets import (QApplication, QMainWindow, QVBoxLayout,
|
6 |
-
QTextEdit, QLineEdit, QPushButton, QWidget,
|
7 |
-
QScrollArea, QHBoxLayout)
|
8 |
-
from PySide6.QtCore import Qt, QThread, Signal
|
9 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer
|
10 |
import torch
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
self.model = model
|
19 |
-
self.tokenizer = tokenizer
|
20 |
-
self.chat_history = chat_history
|
21 |
-
self.user_input = ""
|
22 |
-
|
23 |
-
def run(self):
|
24 |
-
try:
|
25 |
-
# μ¬μ©μ μ
λ ₯ μΆκ°
|
26 |
-
self.chat_history.append({"role": "user", "content": self.user_input})
|
27 |
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
|
|
|
|
|
|
35 |
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
**inputs,
|
40 |
-
max_length=512,
|
41 |
-
repetition_penalty=1.1,
|
42 |
-
temperature=0.7,
|
43 |
-
top_p=0.9,
|
44 |
-
stop_strings=["<|endofturn|>", "<|stop|>"],
|
45 |
-
tokenizer=self.tokenizer
|
46 |
-
)
|
47 |
|
48 |
-
|
49 |
-
|
50 |
|
51 |
-
|
52 |
-
|
|
|
|
|
|
|
|
|
53 |
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
|
|
|
|
58 |
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
# 리μμ€ μ 리
|
64 |
-
torch.cuda.empty_cache()
|
65 |
|
66 |
-
|
67 |
-
|
68 |
-
super().__init__()
|
69 |
-
self.setWindowTitle("CLOVA X μ±λ΄")
|
70 |
-
self.resize(800, 600)
|
71 |
-
|
72 |
-
# λͺ¨λΈ μ΄κΈ°ν
|
73 |
-
self.init_model()
|
74 |
-
|
75 |
-
# UI μ€μ
|
76 |
-
self.init_ui()
|
77 |
-
|
78 |
-
# μ±ν
κΈ°λ‘ μ΄κΈ°ν
|
79 |
-
self.chat_history = [
|
80 |
-
{"role": "tool_list", "content": ""},
|
81 |
-
{"role": "system", "content": "- AI μΈμ΄λͺ¨λΈμ μ΄λ¦μ \"CLOVA X\" μ΄λ©° λ€μ΄λ²μμ λ§λ€μλ€.\n- μ€λμ 2025λ
04μ 24μΌ(λͺ©)μ΄λ€."},
|
82 |
-
]
|
83 |
-
|
84 |
-
def init_model(self):
|
85 |
-
"""λͺ¨λΈκ³Ό ν ν¬λμ΄μ λ‘λ"""
|
86 |
-
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
87 |
-
model_dir = "./clova_model_full"
|
88 |
-
|
89 |
-
print("λͺ¨λΈ λ‘λ μ€...")
|
90 |
-
self.model = AutoModelForCausalLM.from_pretrained(model_dir).to(self.device)
|
91 |
-
self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
|
92 |
-
print("λͺ¨λΈ λ‘λ μλ£!")
|
93 |
-
|
94 |
-
def init_ui(self):
|
95 |
-
"""μ¬μ©μ μΈν°νμ΄μ€ μ€μ """
|
96 |
-
central_widget = QWidget()
|
97 |
-
self.setCentralWidget(central_widget)
|
98 |
-
|
99 |
-
layout = QVBoxLayout()
|
100 |
-
central_widget.setLayout(layout)
|
101 |
-
|
102 |
-
# λν λ΄μ© νμ μμ
|
103 |
-
self.chat_display = QTextEdit()
|
104 |
-
self.chat_display.setReadOnly(True)
|
105 |
-
self.chat_display.setStyleSheet("""
|
106 |
-
QTextEdit {
|
107 |
-
background-color: #1a1a1a;
|
108 |
-
border: 1px solid #333333;
|
109 |
-
border-radius: 5px;
|
110 |
-
padding: 10px;
|
111 |
-
font-size: 14px;
|
112 |
-
color: #ffffff;
|
113 |
-
}
|
114 |
-
""")
|
115 |
-
|
116 |
-
# μ€ν¬λ‘€ κ°λ₯νλλ‘ μ€μ
|
117 |
-
scroll_area = QScrollArea()
|
118 |
-
scroll_area.setWidgetResizable(True)
|
119 |
-
scroll_area.setWidget(self.chat_display)
|
120 |
-
layout.addWidget(scroll_area)
|
121 |
-
|
122 |
-
# μ
λ ₯ μμ
|
123 |
-
input_layout = QHBoxLayout()
|
124 |
-
|
125 |
-
self.user_input = QLineEdit()
|
126 |
-
self.user_input.setPlaceholderText("λ©μμ§λ₯Ό μ
λ ₯νμΈμ...")
|
127 |
-
self.user_input.setStyleSheet("""
|
128 |
-
QLineEdit {
|
129 |
-
padding: 8px;
|
130 |
-
border: 1px solid #444444;
|
131 |
-
border-radius: 4px;
|
132 |
-
font-size: 14px;
|
133 |
-
color: #ffffff;
|
134 |
-
background-color: #2d2d2d;
|
135 |
-
}
|
136 |
-
""")
|
137 |
-
self.user_input.returnPressed.connect(self.send_message)
|
138 |
-
|
139 |
-
self.send_button = QPushButton("μ μ‘")
|
140 |
-
self.send_button.setStyleSheet("""
|
141 |
-
QPushButton {
|
142 |
-
background-color: #4a90e2;
|
143 |
-
color: white;
|
144 |
-
border: none;
|
145 |
-
border-radius: 4px;
|
146 |
-
padding: 8px 16px;
|
147 |
-
font-size: 14px;
|
148 |
-
}
|
149 |
-
QPushButton:hover {
|
150 |
-
background-color: #357abd;
|
151 |
-
}
|
152 |
-
""")
|
153 |
-
self.send_button.clicked.connect(self.send_message)
|
154 |
-
|
155 |
-
input_layout.addWidget(self.user_input)
|
156 |
-
input_layout.addWidget(self.send_button)
|
157 |
-
|
158 |
-
layout.addLayout(input_layout)
|
159 |
-
|
160 |
-
# μν νμμ€
|
161 |
-
self.statusBar().setStyleSheet("QStatusBar { color: #ffffff; }")
|
162 |
-
self.statusBar().showMessage(f"μ¬μ© μ€μΈ λλ°μ΄μ€: {self.device.upper()}")
|
163 |
-
|
164 |
-
def send_message(self):
|
165 |
-
"""μ¬μ©μ λ©μμ§ μ²λ¦¬"""
|
166 |
-
message = self.user_input.text().strip()
|
167 |
-
if not message:
|
168 |
-
return
|
169 |
-
|
170 |
-
# μ¬μ©μ λ©μμ§ νμ
|
171 |
-
self.append_message("λΉμ ", message)
|
172 |
-
self.user_input.clear()
|
173 |
-
|
174 |
-
# μ΄μ μ€λ λ μ 리
|
175 |
-
if hasattr(self, 'ai_worker') and hasattr(self.ai_worker, 'is_running') and self.ai_worker.is_running:
|
176 |
-
self.ai_worker.is_running = False
|
177 |
-
self.ai_worker.wait()
|
178 |
-
torch.cuda.empty_cache()
|
179 |
-
|
180 |
-
# AI μμ
μ€λ λ μμ
|
181 |
-
self.ai_worker = AIWorker(self.model, self.tokenizer, self.chat_history)
|
182 |
-
self.ai_worker.user_input = message
|
183 |
-
self.ai_worker.response_ready.connect(self.show_ai_response)
|
184 |
-
self.ai_worker.start()
|
185 |
-
|
186 |
-
# λ²νΌ λΉνμ±ν (μλ΅ λκΈ° μ€)
|
187 |
-
self.send_button.setEnabled(False)
|
188 |
-
self.user_input.setEnabled(False)
|
189 |
-
self.statusBar().showMessage("CLOVA Xκ° λ΅λ³μ μμ± μ€μ
λλ€...")
|
190 |
-
|
191 |
-
def show_ai_response(self, response):
|
192 |
-
"""AI μλ΅ νμ"""
|
193 |
-
self.append_message("CLOVA X", response)
|
194 |
-
|
195 |
-
# μ
λ ₯μ°½ λ€μ νμ±ν
|
196 |
-
self.send_button.setEnabled(True)
|
197 |
-
self.user_input.setEnabled(True)
|
198 |
-
self.user_input.setFocus()
|
199 |
-
self.statusBar().showMessage(f"μ¬μ© μ€μΈ λλ°μ΄μ€: {self.device.upper()}")
|
200 |
-
|
201 |
-
# λ©λͺ¨λ¦¬ μ 리
|
202 |
-
torch.cuda.empty_cache()
|
203 |
-
|
204 |
-
def append_message(self, sender, message):
|
205 |
-
"""λνμ°½μ λ©μμ§ μΆκ°"""
|
206 |
-
if sender == "λΉμ ":
|
207 |
-
prefix = "μ¬μ©μ: "
|
208 |
-
color = "#fff" # νλμ
|
209 |
-
else:
|
210 |
-
prefix = "AI: "
|
211 |
-
color = "#ffe31e" # λΉ¨κ°μ
|
212 |
-
|
213 |
-
html = f'''
|
214 |
-
<div style="margin: 5px;">
|
215 |
-
<span style="color: {color}; display: inline-block;">
|
216 |
-
{prefix}{message}
|
217 |
-
</span>
|
218 |
-
</div>'''
|
219 |
-
|
220 |
-
self.chat_display.append(html)
|
221 |
-
self.chat_display.verticalScrollBar().setValue(self.chat_display.verticalScrollBar().maximum())
|
222 |
|
223 |
-
|
224 |
-
app = QApplication(sys.argv)
|
225 |
-
|
226 |
-
# ν°νΈ λ° κΈ°λ³Έ μ€νμΌ μ€μ
|
227 |
-
font = app.font()
|
228 |
-
font.setPointSize(12)
|
229 |
-
app.setFont(font)
|
230 |
-
app.setStyleSheet("""
|
231 |
-
QWidget {
|
232 |
-
color: #000000;
|
233 |
-
}
|
234 |
-
""")
|
235 |
-
|
236 |
-
window = ChatWindow()
|
237 |
-
window.show()
|
238 |
-
sys.exit(app.exec())
|
|
|
1 |
+
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
import torch
|
3 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
4 |
+
|
5 |
+
# Pick the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the causal language model and its tokenizer from the Hugging Face hub.
# NOTE(review): this downloads weights on first run and happens at import time.
model_name = "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B"
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Seed messages for every conversation. The "tool_list"/"system" roles are the
# chat-template roles this model family expects; the system text (Korean,
# garbled here by extraction) names the assistant and pins the current date.
initial_chat = [
    {"role": "tool_list", "content": ""},
    {"role": "system", "content": "- AI μΈμ΄λͺ¨λΈμ μ΄λ¦μ \"CLOVA X\" μ΄λ©° λ€μ΄λ²μμ λ§λ€μλ€.\n- μ€λμ 2025λ 04μ 24μΌ(λͺ©)μ΄λ€."},
]

# Mutable running history; a shallow copy suffices because the seed dicts are
# never mutated in place, only appended after.
chat_history = initial_chat.copy()
|
20 |
+
|
21 |
+
def generate_response(user_input):
    """Append *user_input* to the running conversation, run the model, and
    return the assistant's reply (which is also appended to the history).

    Relies on the module-level ``model``, ``tokenizer``, ``chat_history``
    and ``device`` set up at import time.
    """
    global chat_history
    chat_history.append({"role": "user", "content": user_input})

    # Build the prompt tensor from the whole conversation so far.
    inputs = tokenizer.apply_chat_template(
        chat_history,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(device)

    try:
        with torch.no_grad():
            output_ids = model.generate(
                **inputs,
                # max_new_tokens bounds only the reply; max_length (the old
                # setting) also counts the growing prompt, so long chats
                # would eventually be truncated to an empty reply.
                max_new_tokens=1024,
                stop_strings=["<|endofturn|>", "<|stop|>"],
                repetition_penalty=1.2,
                tokenizer=tokenizer,
                # do_sample is required for temperature/top_p to take effect;
                # without it generation is greedy and both args are ignored.
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
            )
    except Exception:
        # Don't leave the failed user turn polluting the history.
        chat_history.pop()
        raise

    # Decode only the newly generated tokens. The old approach — splitting
    # the fully decoded text on user_input — breaks whenever the user text
    # also occurs in the system prompt or in the model's reply.
    prompt_len = inputs["input_ids"].shape[-1]
    model_reply = tokenizer.decode(
        output_ids[0][prompt_len:], skip_special_tokens=True
    ).strip()

    chat_history.append({"role": "assistant", "content": model_reply})
    return model_reply
|
49 |
|
50 |
+
# Gradio UI: a chat transcript, a text input, and a reset button.
with gr.Blocks() as demo:
    gr.Markdown("# CLOVA X μ±λ΄")
    chat_view = gr.Chatbot()
    prompt_box = gr.Textbox(placeholder="λ©μμ§λ₯Ό μλ ₯νμΈμ...")
    reset_btn = gr.Button("μ΄κΈ°ν")

    def _handle_submit(message, pairs):
        """Run the model on *message*, append the (user, reply) pair to the
        displayed transcript, and clear the input box."""
        pairs = pairs or []
        pairs.append((message, None))
        pairs[-1] = (message, generate_response(message))
        return pairs, ""

    def _reset():
        """Drop the model-side conversation back to its seed messages and
        empty the displayed transcript."""
        global chat_history
        chat_history = initial_chat.copy()
        return []

    prompt_box.submit(_handle_submit, [prompt_box, chat_view], [chat_view, prompt_box])
    reset_btn.click(_reset, None, chat_view)

demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|