donjun committed on
Commit
b30eda1
·
verified ·
1 Parent(s): a2d7df2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -228
app.py CHANGED
@@ -1,238 +1,72 @@
1
- import sys
2
- import typing
3
- if sys.version_info < (3, 11):
4
- typing.Self = object # 3.10μ—μ„œ μž„μ‹œ 우회
5
- from PySide6.QtWidgets import (QApplication, QMainWindow, QVBoxLayout,
6
- QTextEdit, QLineEdit, QPushButton, QWidget,
7
- QScrollArea, QHBoxLayout)
8
- from PySide6.QtCore import Qt, QThread, Signal
9
- from transformers import AutoModelForCausalLM, AutoTokenizer
10
  import torch
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
class AIWorker(QThread):
    """Background thread that runs model generation so the Qt UI stays responsive.

    The caller sets ``user_input`` before ``start()``; the reply is delivered
    via the ``response_ready`` signal.
    """

    response_ready = Signal(str)   # emits the model's reply (or an error message)
    progress_update = Signal(int)  # progress-percentage signal (declared, never emitted here)

    def __init__(self, model, tokenizer, chat_history):
        super().__init__()
        self.model = model
        self.tokenizer = tokenizer
        # NOTE: shared list owned by the window; run() appends to it in place.
        self.chat_history = chat_history
        self.user_input = ""  # must be assigned by the caller before start()

    def run(self):
        try:
            # Record the user's message in the shared history
            self.chat_history.append({"role": "user", "content": self.user_input})

            # Build input tensors from the chat template
            inputs = self.tokenizer.apply_chat_template(
                self.chat_history,
                add_generation_prompt=True,
                return_dict=True,
                return_tensors="pt"
            ).to(self.model.device)

            # Generate the response
            with torch.no_grad():
                output_ids = self.model.generate(
                    **inputs,
                    max_length=512,
                    repetition_penalty=1.1,
                    temperature=0.7,
                    top_p=0.9,
                    stop_strings=["<|endofturn|>", "<|stop|>"],
                    tokenizer=self.tokenizer
                )

            # Decode the full output (prompt + reply)
            output_text = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)

            # Keep only the text after the user's input.
            # NOTE(review): splitting on the raw user string is fragile — it
            # breaks if the reply (or earlier history) repeats the user's words;
            # slicing off the prompt tokens before decoding would be safer.
            model_reply = output_text.split(self.user_input)[-1].strip()

            # Record the model reply in the shared history
            self.chat_history.append({"role": "assistant", "content": model_reply})

            self.response_ready.emit(model_reply)

        except Exception as e:
            print(f"Error in AIWorker: {str(e)}")
            self.response_ready.emit("μ£„μ†‘ν•©λ‹ˆλ‹€. 응닡 생성 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€.")
        finally:
            # Release cached GPU memory after each generation
            torch.cuda.empty_cache()
65
 
66
class ChatWindow(QMainWindow):
    """Main chat window: loads the model, builds the UI, and dispatches
    user messages to an ``AIWorker`` thread."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("CLOVA X 챗봇")
        self.resize(800, 600)

        # Load model and tokenizer
        self.init_model()

        # Build the UI
        self.init_ui()

        # Seed the chat history with the tool-list / system preamble
        self.chat_history = [
            {"role": "tool_list", "content": ""},
            {"role": "system", "content": "- AI μ–Έμ–΄λͺ¨λΈμ˜ 이름은 \"CLOVA X\" 이며 λ„€μ΄λ²„μ—μ„œ λ§Œλ“€μ—ˆλ‹€.\n- μ˜€λŠ˜μ€ 2025λ…„ 04μ›” 24일(λͺ©)이닀."},
        ]

    def init_model(self):
        """Load the model and tokenizer from the local checkpoint directory."""
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        model_dir = "./clova_model_full"

        print("λͺ¨λΈ λ‘œλ“œ 쀑...")
        self.model = AutoModelForCausalLM.from_pretrained(model_dir).to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
        print("λͺ¨λΈ λ‘œλ“œ μ™„λ£Œ!")

    def init_ui(self):
        """Build the user interface (transcript view, input row, status bar)."""
        central_widget = QWidget()
        self.setCentralWidget(central_widget)

        layout = QVBoxLayout()
        central_widget.setLayout(layout)

        # Conversation display area
        self.chat_display = QTextEdit()
        self.chat_display.setReadOnly(True)
        self.chat_display.setStyleSheet("""
            QTextEdit {
                background-color: #1a1a1a;
                border: 1px solid #333333;
                border-radius: 5px;
                padding: 10px;
                font-size: 14px;
                color: #ffffff;
            }
        """)

        # Make the transcript scrollable
        scroll_area = QScrollArea()
        scroll_area.setWidgetResizable(True)
        scroll_area.setWidget(self.chat_display)
        layout.addWidget(scroll_area)

        # Input row
        input_layout = QHBoxLayout()

        self.user_input = QLineEdit()
        self.user_input.setPlaceholderText("λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜μ„Έμš”...")
        self.user_input.setStyleSheet("""
            QLineEdit {
                padding: 8px;
                border: 1px solid #444444;
                border-radius: 4px;
                font-size: 14px;
                color: #ffffff;
                background-color: #2d2d2d;
            }
        """)
        self.user_input.returnPressed.connect(self.send_message)

        self.send_button = QPushButton("전솑")
        self.send_button.setStyleSheet("""
            QPushButton {
                background-color: #4a90e2;
                color: white;
                border: none;
                border-radius: 4px;
                padding: 8px 16px;
                font-size: 14px;
            }
            QPushButton:hover {
                background-color: #357abd;
            }
        """)
        self.send_button.clicked.connect(self.send_message)

        input_layout.addWidget(self.user_input)
        input_layout.addWidget(self.send_button)

        layout.addLayout(input_layout)

        # Status bar
        self.statusBar().setStyleSheet("QStatusBar { color: #ffffff; }")
        self.statusBar().showMessage(f"μ‚¬μš© 쀑인 λ””λ°”μ΄μŠ€: {self.device.upper()}")

    def send_message(self):
        """Handle a submitted user message: display it and start a worker."""
        message = self.user_input.text().strip()
        if not message:
            return

        # Show the user's message
        self.append_message("λ‹Ήμ‹ ", message)
        self.user_input.clear()

        # Stop a previous worker, if one is still running.
        # NOTE(review): AIWorker never sets `is_running`, so this branch
        # appears to be dead code — confirm before relying on it.
        if hasattr(self, 'ai_worker') and hasattr(self.ai_worker, 'is_running') and self.ai_worker.is_running:
            self.ai_worker.is_running = False
            self.ai_worker.wait()
            torch.cuda.empty_cache()

        # Start the AI worker thread
        self.ai_worker = AIWorker(self.model, self.tokenizer, self.chat_history)
        self.ai_worker.user_input = message
        self.ai_worker.response_ready.connect(self.show_ai_response)
        self.ai_worker.start()

        # Disable input while waiting for the response
        self.send_button.setEnabled(False)
        self.user_input.setEnabled(False)
        self.statusBar().showMessage("CLOVA Xκ°€ 닡변을 생성 μ€‘μž…λ‹ˆλ‹€...")

    def show_ai_response(self, response):
        """Display the AI reply and re-enable the input widgets."""
        self.append_message("CLOVA X", response)

        # Re-enable input
        self.send_button.setEnabled(True)
        self.user_input.setEnabled(True)
        self.user_input.setFocus()
        self.statusBar().showMessage(f"μ‚¬μš© 쀑인 λ””λ°”μ΄μŠ€: {self.device.upper()}")

        # Release cached GPU memory
        torch.cuda.empty_cache()

    def append_message(self, sender, message):
        """Append a sender-prefixed message to the transcript and scroll down.

        NOTE(review): `message` is interpolated into HTML unescaped — markup
        in user input will be rendered by the QTextEdit.
        """
        if sender == "λ‹Ήμ‹ ":
            prefix = "μ‚¬μš©μž: "
            color = "#fff"  # white (original comment incorrectly said "blue")
        else:
            prefix = "AI: "
            color = "#ffe31e"  # yellow (original comment incorrectly said "red")

        html = f'''
        <div style="margin: 5px;">
            <span style="color: {color}; display: inline-block;">
                {prefix}{message}
            </span>
        </div>'''

        self.chat_display.append(html)
        self.chat_display.verticalScrollBar().setValue(self.chat_display.verticalScrollBar().maximum())
222
 
223
if __name__ == "__main__":
    app = QApplication(sys.argv)

    # Font and base application style
    font = app.font()
    font.setPointSize(12)
    app.setFont(font)
    app.setStyleSheet("""
        QWidget {
            color: #000000;
        }
    """)

    window = ChatWindow()
    window.show()
    sys.exit(app.exec())
 
1
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Device selection
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load model and tokenizer from the Hub
model_name = "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B"
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Initial system preamble used to (re)seed every conversation
initial_chat = [
    {"role": "tool_list", "content": ""},
    {"role": "system", "content": "- AI μ–Έμ–΄λͺ¨λΈμ˜ 이름은 \"CLOVA X\" 이며 λ„€μ΄λ²„μ—μ„œ λ§Œλ“€μ—ˆλ‹€.\n- μ˜€λŠ˜μ€ 2025λ…„ 04μ›” 24일(λͺ©)이닀."},
]

# NOTE(review): single module-level history — shared by every visitor to the
# Gradio app, so concurrent users see each other's conversation.
chat_history = initial_chat.copy()
20
+
21
def generate_response(user_input):
    """Generate a CLOVA X reply for ``user_input`` and record the exchange
    in the module-level ``chat_history``.

    Parameters
    ----------
    user_input : str
        The user's new chat message.

    Returns
    -------
    str
        The model's reply, with the prompt text and any residual stop
        markers removed.
    """
    global chat_history
    chat_history.append({"role": "user", "content": user_input})

    inputs = tokenizer.apply_chat_template(
        chat_history,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(device)

    with torch.no_grad():
        # NOTE(review): temperature/top_p have no effect under the default
        # greedy decoding unless do_sample=True is passed — confirm intent.
        output_ids = model.generate(
            **inputs,
            max_length=1024,
            stop_strings=["<|endofturn|>", "<|stop|>"],
            repetition_penalty=1.2,
            tokenizer=tokenizer,
            temperature=0.7,
            top_p=0.9,
        )

    # Decode only the newly generated tokens. The previous approach —
    # decoding the whole sequence and splitting on the raw user string —
    # broke whenever the reply or the prompt repeated the user's words.
    prompt_len = inputs["input_ids"].shape[1]
    model_reply = tokenizer.decode(
        output_ids[0][prompt_len:], skip_special_tokens=True
    ).strip()

    # Trim stop markers the tokenizer did not treat as special tokens.
    for stop in ("<|endofturn|>", "<|stop|>"):
        model_reply = model_reply.split(stop)[0].strip()

    chat_history.append({"role": "assistant", "content": model_reply})
    return model_reply
49
 
50
# Assemble the Gradio front end.
with gr.Blocks() as demo:
    gr.Markdown("# CLOVA X 챗봇")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜μ„Έμš”...")
    clear = gr.Button("μ΄ˆκΈ°ν™”")

    def user(user_message, chat_history_gr):
        """Handle a submitted message: query the model, append the
        (question, answer) pair to the transcript, and clear the textbox."""
        transcript = list(chat_history_gr) if chat_history_gr else []
        transcript.append((user_message, None))
        reply = generate_response(user_message)
        transcript[-1] = (user_message, reply)
        return transcript, ""

    def reset_chat():
        """Reset the model-side history to the initial preamble and
        empty the on-screen transcript."""
        global chat_history
        chat_history = list(initial_chat)
        return []

    msg.submit(user, [msg, chatbot], [chatbot, msg])
    clear.click(reset_chat, None, chatbot)

demo.launch()