Update app.py
app.py
CHANGED
@@ -1,426 +1,281 @@
- import spaces
- import math
- import gradio as gr
- import numpy as np
- import torch
[old lines 6-11 were also removed; their content is not captured in this view]
- from transformers import CLIPTextModel, CLIPTokenizer
- from briarmbg import BriaRMBG
- from enum import Enum
-
- # Library for using the Cohere model
- from huggingface_hub import InferenceClient
[old lines 18-57 were also removed; not captured in this view, but from context they loaded the tokenizer, text encoder, VAE, UNet, and BriaRMBG weights]
-     sd_merged = {k: sd_origin[k] + sd_offset[k] for k in sd_origin.keys()}
-     unet.load_state_dict(sd_merged, strict=True)
-     del sd_offset, sd_origin, sd_merged
- except FileNotFoundError:
-     print(f"Error: Model file not found at {model_path}")
-     # Appropriate exception handling could be added here.
-
- # Device setup
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- text_encoder = text_encoder.to(device=device, dtype=torch.float16)
- vae = vae.to(device=device, dtype=torch.float16)
- unet = unet.to(device=device, dtype=torch.float16)
- rmbg = rmbg.to(device=device, dtype=torch.float32)
-
- # Scaled dot-product attention (SDP) setup
- unet.set_attn_processor(AttnProcessor2_0())
- vae.set_attn_processor(AttnProcessor2_0())
-
- # Scheduler setup
- ddim_scheduler = DDIMScheduler(
-     num_train_timesteps=1000,
-     beta_start=0.00085,
-     beta_end=0.012,
-     beta_schedule="scaled_linear",
-     clip_sample=False,
-     set_alpha_to_one=False,
-     steps_offset=1,
- )
-
- euler_a_scheduler = EulerAncestralDiscreteScheduler(
-     num_train_timesteps=1000,
-     beta_start=0.00085,
-     beta_end=0.012,
-     steps_offset=1
- )
-
- dpmpp_2m_sde_karras_scheduler = DPMSolverMultistepScheduler(
-     num_train_timesteps=1000,
-     beta_start=0.00085,
-     beta_end=0.012,
-     algorithm_type="sde-dpmsolver++",
-     use_karras_sigmas=True,
-     steps_offset=1
- )
-
- # Pipeline setup
- t2i_pipe = StableDiffusionPipeline(
-     vae=vae,
-     text_encoder=text_encoder,
-     tokenizer=tokenizer,
-     unet=unet,
-     scheduler=dpmpp_2m_sde_karras_scheduler,
-     safety_checker=None,
-     requires_safety_checker=False,
-     feature_extractor=None,
-     image_encoder=None
- )
-
- i2i_pipe = StableDiffusionImg2ImgPipeline(
-     vae=vae,
-     text_encoder=text_encoder,
-     tokenizer=tokenizer,
-     unet=unet,
-     scheduler=dpmpp_2m_sde_karras_scheduler,
-     safety_checker=None,
-     requires_safety_checker=False,
-     feature_extractor=None,
-     image_encoder=None
- )
-
- @torch.inference_mode()
- def encode_prompt_inner(txt: str):
-     max_length = tokenizer.model_max_length
-     chunk_length = tokenizer.model_max_length - 2
-     id_start = tokenizer.bos_token_id
-     id_end = tokenizer.eos_token_id
-     id_pad = id_end
-
-     def pad(x, p, i):
-         return x[:i] if len(x) >= i else x + [p] * (i - len(x))
-
-     tokens = tokenizer(txt, truncation=False, add_special_tokens=False)["input_ids"]
-     chunks = [[id_start] + tokens[i: i + chunk_length] + [id_end] for i in range(0, len(tokens), chunk_length)]
-     chunks = [pad(ck, id_pad, max_length) for ck in chunks]
-
-     token_ids = torch.tensor(chunks).to(device=device, dtype=torch.int64)
-     conds = text_encoder(token_ids).last_hidden_state
-
-     return conds
-
- @torch.inference_mode()
- def encode_prompt_pair(positive_prompt, negative_prompt):
-     c = encode_prompt_inner(positive_prompt)
-     uc = encode_prompt_inner(negative_prompt)
-
-     c_len = float(len(c))
-     uc_len = float(len(uc))
-     max_count = max(c_len, uc_len)
-     c_repeat = int(math.ceil(max_count / c_len))
-     uc_repeat = int(math.ceil(max_count / uc_len))
-     max_chunk = max(len(c), len(uc))
-
-     c = torch.cat([c] * c_repeat, dim=0)[:max_chunk]
-     uc = torch.cat([uc] * uc_repeat, dim=0)[:max_chunk]
-
-     c = torch.cat([p[None, ...] for p in c], dim=1)
-     uc = torch.cat([p[None, ...] for p in uc], dim=1)
-
-     return c, uc
-
- @torch.inference_mode()
- def pytorch2numpy(imgs, quant=True):
-     results = []
-     for x in imgs:
-         y = x.movedim(0, -1)
-
-         if quant:
-             y = y * 127.5 + 127.5
-             y = y.detach().float().cpu().numpy().clip(0, 255).astype(np.uint8)
-         else:
[old lines 178-363 were also removed; their content is not captured in this view]
- )
-     return response.choices[0].message.content.strip()
-
- quick_prompts = [
-     'sunshine from window',
-     'neon light, city',
-     'sunset over sea',
-     'golden time',
-     'sci-fi RGB glowing, cyberpunk',
-     'natural lighting',
-     'warm atmosphere, at home, bedroom',
-     'magic lit',
-     'evil, gothic, Yharnam',
-     'light and shadow',
-     'shadow from window',
-     'soft studio lighting',
-     'home atmosphere, cozy bedroom illumination',
-     'neon, Wong Kar-wai, warm'
- ]
- quick_prompts = [[x] for x in quick_prompts]
-
- quick_subjects = [
-     'beautiful woman, detailed face',
-     'handsome man, detailed face',
- ]
- quick_subjects = [[x] for x in quick_subjects]
-
- class BGSource(Enum):
-     NONE = "None"
-     LEFT = "Left Light"
-     RIGHT = "Right Light"
-     TOP = "Top Light"
-     BOTTOM = "Bottom Light"
-
- block = gr.Blocks().queue()
- with block:
-     with gr.Row():
-         gr.Markdown("## IC-Light (Relighting with Foreground Condition)")
-     with gr.Row():
-         gr.Markdown("See also https://github.com/lllyasviel/IC-Light for background-conditioned model and normal estimation")
-     with gr.Row():
-         with gr.Column():
-             with gr.Row():
-                 input_fg = gr.Image(sources='upload', type="numpy", label="Image", height=480)
-                 output_bg = gr.Image(type="numpy", label="Preprocessed Foreground", height=480, visible=False)  # hidden from the UI
-             prompt = gr.Textbox(label="Prompt")
-             bg_source = gr.Radio(choices=[e.value for e in BGSource],
-                                  value=BGSource.NONE.value,
-                                  label="Lighting Preference (Initial Latent)", type='value')
-             example_quick_subjects = gr.Dataset(samples=quick_subjects, label='Subject Quick List', samples_per_page=1000, components=[prompt])
-             example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Lighting Quick List', samples_per_page=1000, components=[prompt])
-             relight_button = gr.Button(value="Relight")
-
-         with gr.Column():
-             result_gallery = gr.Gallery(height=832, object_fit='contain', label='Outputs')
-             download_button = gr.File(label="Download Processed Image")
-
-     relight_button.click(fn=process_relight, inputs=ips, outputs=[output_bg, result_gallery, download_button])
-     example_quick_prompts.click(lambda x, y: ', '.join(y.split(', ')[:2] + [x[0]]), inputs=[example_quick_prompts, prompt], outputs=prompt, show_progress=False, queue=False)
-     example_quick_subjects.click(lambda x: x[0], inputs=example_quick_subjects, outputs=prompt, show_progress=False, queue=False)
+ import gradio as gr
+ import requests
+ from bs4 import BeautifulSoup
+ from requests.adapters import HTTPAdapter
+ from requests.packages.urllib3.util.retry import Retry
+ import re
+ import time
+ import random
+ import os
+ from huggingface_hub import InferenceClient
+
+ def setup_session():
+     try:
+         session = requests.Session()
+         retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
+         session.mount('https://', HTTPAdapter(max_retries=retries))
+         return session
+     except Exception as e:
+         return None
+
+ def generate_naver_search_url(query):
+     base_url = "https://search.naver.com/search.naver?"
+     params = {"ssc": "tab.blog.all", "sm": "tab_jum", "query": query}
+     url = base_url + "&".join(f"{key}={value}" for key, value in params.items())
+     return url
+
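A note on the URL builder above: the query is interpolated into the URL without percent-encoding, so multi-word or Korean queries depend on requests escaping them at send time. A safer variant would delegate to the standard library; the sketch below is hypothetical and not part of this commit (generate_naver_search_url_encoded is an illustrative name):

    import urllib.parse

    def generate_naver_search_url_encoded(query):
        # urlencode percent-encodes each value, so Korean or multi-word
        # queries reach Naver intact
        params = {"ssc": "tab.blog.all", "sm": "tab_jum", "query": query}
        return "https://search.naver.com/search.naver?" + urllib.parse.urlencode(params)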
+ def crawl_blog_content(url, session):
+     try:
+         headers = {
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
+             "Accept-Language": "ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7",
+             "Accept-Encoding": "gzip, deflate, br",
+             "Connection": "keep-alive",
+             "Referer": "https://search.naver.com/search.naver",
+         }
+
+         # Add a random delay so requests are not fired in quick succession
+         delay = random.uniform(1, 2)
+         time.sleep(delay)
+
+         response = session.get(url, headers=headers)
+         if response.status_code != 200:
+             return ""
+
+         soup = BeautifulSoup(response.content, "html.parser")
+         content = soup.find("div", attrs={'class': 'se-main-container'})
+
+         if content:
+             return clean_text(content.get_text())
+         else:
+             return ""
+     except Exception as e:
+         return ""
+
+ def crawl_naver_search_results(url, session):
+     try:
+         headers = {
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
+             "Accept-Language": "ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7",
+             "Accept-Encoding": "gzip, deflate, br",
+             "Connection": "keep-alive",
+             "Referer": "https://search.naver.com/search.naver",
+         }
+         response = session.get(url, headers=headers)
+         if response.status_code != 200:
+             return []
+
+         soup = BeautifulSoup(response.content, "html.parser")
+         results = []
+         count = 0
+         for li in soup.find_all("li", class_=re.compile("bx.*")):
+             if count >= 10:
+                 break
+             for div in li.find_all("div", class_="detail_box"):
+                 for div2 in div.find_all("div", class_="title_area"):
+                     title = div2.text.strip()
+                     for a in div2.find_all("a", href=True):
+                         link = a["href"]
+                         if "blog.naver" in link:
+                             # use the mobile page (m.blog.naver.com), which is simpler to parse
+                             link = link.replace("https://", "https://m.")
+                         results.append({"title": title, "link": link})
+                         count += 1
+                         if count >= 10:
+                             break
+                     if count >= 10:
+                         break
+                 if count >= 10:
+                     break
+
+         return results
+     except Exception as e:
+         return []
+
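The nested loops above, with a count check at every level, are a direct way to cap the crawl at ten results. An equivalent, flatter traversal using bs4's CSS selectors is sketched below (a hypothetical rewrite, not part of the commit; request headers omitted for brevity):

    def crawl_naver_search_results_flat(url, session):
        # Same contract as above: up to 10 {"title", "link"} dicts per search page
        response = session.get(url)
        if response.status_code != 200:
            return []
        soup = BeautifulSoup(response.content, "html.parser")
        results = []
        for a in soup.select('li[class*="bx"] div.detail_box div.title_area a[href]'):
            link = a["href"]
            if "blog.naver" in link:
                link = link.replace("https://", "https://m.")
            results.append({"title": a.get_text(strip=True), "link": link})
            if len(results) >= 10:
                break
        return results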
+ def clean_text(text):
+     text = re.sub(r'\s+', ' ', text).strip()
+     return text
+
+ def create_client(model_name):
+     return InferenceClient(model_name, token=os.getenv("HF_TOKEN"))
+
+ client = create_client("CohereForAI/c4ai-command-r-plus")
+
+ def call_api(content, system_message, max_tokens, temperature, top_p):
+     messages = [{"role": "system", "content": system_message}, {"role": "user", "content": content}]
+     random_seed = random.randint(0, 1000000)
+     response = client.chat_completion(messages=messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p, seed=random_seed)
+     modified_text = response.choices[0].message.content
+     input_tokens = response.usage.prompt_tokens
+     output_tokens = response.usage.completion_tokens
+     total_tokens = response.usage.total_tokens
+     return modified_text, input_tokens, output_tokens, total_tokens
+
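For orientation, a usage sketch of call_api (assumes HF_TOKEN is set in the environment and the model is reachable through the Inference API; the prompt text is illustrative):

    text, n_in, n_out, n_total = call_api(
        content="Suggest three titles for a post about August domestic travel destinations.",
        system_message="You are a blog-writing assistant.",
        max_tokens=512,
        temperature=0.8,
        top_p=0.95,
    )
    print(text)
    print(f"tokens: {n_in} in, {n_out} out, {n_total} total")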
+ def analyze_info(category, topic, references1, references2, references3):
+     return f"Selected category: {category}\nBlog topic: {topic}\nReference 1: {references1}\nReference 2: {references2}\nReference 3: {references3}"
+
+ def suggest_title(category, topic, references1, references2, references3, system_message, max_tokens, temperature, top_p):
+     full_content = analyze_info(category, topic, references1, references2, references3)
+     modified_text, input_tokens, output_tokens, total_tokens = call_api(full_content, system_message, max_tokens, temperature, top_p)
+     token_usage_message = f"[Input tokens: {input_tokens}]\n[Output tokens: {output_tokens}]\n[Total tokens: {total_tokens}]"
+     return modified_text, token_usage_message
+
+ def generate_outline(category, topic, references1, references2, references3, title, system_message, max_tokens, temperature, top_p):
+     full_content = analyze_info(category, topic, references1, references2, references3)
+     content = f"{full_content}\nTitle: {title}"
+     modified_text, input_tokens, output_tokens, total_tokens = call_api(content, system_message, max_tokens, temperature, top_p)
+     token_usage_message = f"[Input tokens: {input_tokens}]\n[Output tokens: {output_tokens}]\n[Total tokens: {total_tokens}]"
+     return modified_text, token_usage_message
+
+ def generate_blog_post(category, topic, references1, references2, references3, title, outline, system_message, max_tokens, temperature, top_p):
+     full_content = analyze_info(category, topic, references1, references2, references3)
+     content = f"{full_content}\nTitle: {title}\nOutline: {outline}"
+     modified_text, input_tokens, output_tokens, total_tokens = call_api(content, system_message, max_tokens, temperature, top_p)
+     formatted_text = modified_text.replace('\n', '\n\n')
+     token_usage_message = f"[Input tokens: {input_tokens}]\n[Output tokens: {output_tokens}]\n[Total tokens: {total_tokens}]"
+     return formatted_text, token_usage_message
+
+ def fetch_references(topic):
+     search_url = generate_naver_search_url(topic)
+     session = setup_session()
+     if session is None:
+         return "Failed to set up session.", "", "", ""
+     results = crawl_naver_search_results(search_url, session)
+     if len(results) < 3:  # random.sample(results, 3) below needs at least three hits
+         return "Not enough search results found.", "", "", ""
+
+     selected_results = random.sample(results, 3)
+     references1_content = f"Title: {selected_results[0]['title']}\nContent: {crawl_blog_content(selected_results[0]['link'], session)}"
+     references2_content = f"Title: {selected_results[1]['title']}\nContent: {crawl_blog_content(selected_results[1]['link'], session)}"
+     references3_content = f"Title: {selected_results[2]['title']}\nContent: {crawl_blog_content(selected_results[2]['link'], session)}"
+
+     return "References fetched successfully.", references1_content, references2_content, references3_content
+
+ def fetch_references_and_generate_all_steps(category, topic, blog_title, system_message_outline, max_tokens_outline, temperature_outline, top_p_outline, system_message_blog_post, max_tokens_blog_post, temperature_blog_post, top_p_blog_post):
+     search_url = generate_naver_search_url(topic)
+     session = setup_session()
+     if session is None:
+         # one empty string per Gradio output component (seven in total)
+         return "", "", "", "", "", "", ""
+
+     results = crawl_naver_search_results(search_url, session)
+     if len(results) < 3:  # need at least three posts to sample references from
+         return "", "", "", "", "", "", ""
+
+     selected_results = random.sample(results, 3)
+     references1_content = f"Title: {selected_results[0]['title']}\nContent: {crawl_blog_content(selected_results[0]['link'], session)}"
+     references2_content = f"Title: {selected_results[1]['title']}\nContent: {crawl_blog_content(selected_results[1]['link'], session)}"
+     references3_content = f"Title: {selected_results[2]['title']}\nContent: {crawl_blog_content(selected_results[2]['link'], session)}"
+
+     # Generate the outline
+     outline_result, outline_token_usage = generate_outline(category, topic, references1_content, references2_content, references3_content, blog_title, system_message_outline, max_tokens_outline, temperature_outline, top_p_outline)
+
+     # Generate the blog post
+     blog_post_result, blog_post_token_usage = generate_blog_post(category, topic, references1_content, references2_content, references3_content, blog_title, outline_result, system_message_blog_post, max_tokens_blog_post, temperature_blog_post, top_p_blog_post)
+
+     return references1_content, references2_content, references3_content, outline_result, outline_token_usage, blog_post_result, blog_post_token_usage
+
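A usage sketch of fetch_references (assumes network access and at least three blog hits for the topic; the topic string is illustrative):

    status, ref1, ref2, ref3 = fetch_references("August domestic travel destinations")
    print(status)        # "References fetched successfully." on success
    print(ref1[:200])    # each reference is formatted as "Title: ...\nContent: ..."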
+ def get_title_prompt(category):
+     if category == "General":
+         return """
+ # Blog title generation rules (General)
+ """
+     elif category == "Health Info":
+         return """
+ # Blog title generation rules (Health Info)
+ """
+
+ def get_outline_prompt(category):
+     if category == "General":
+         return """
+ # Blog subtopic (Subtopic) generation rules (General)
+ """
+     elif category == "Health Info":
+         return """
+ # Blog subtopic (Subtopic) generation rules (Health Info)
+ """
+
+ def get_blog_post_prompt(category):
+     if category == "General":
+         return """
+ # Blog text generation rules (General)
+ """
+     elif category == "Health Info":
+         return """
+ # Blog text generation rules (Health Info)
+ """
+
+ # Build the Gradio interface
+ title = "Informational Post Auto-Generator (runs automatically after title recommendation)"
+
+ def update_prompts(category):
+     title_prompt = get_title_prompt(category)
+     outline_prompt = get_outline_prompt(category)
+     blog_post_prompt = get_blog_post_prompt(category)
+     return title_prompt, outline_prompt, blog_post_prompt
+
+
with gr.Blocks() as demo:
|
218 |
+
gr.Markdown(f"# {title}")
|
219 |
+
|
220 |
+
# 1๋จ๊ณ
|
221 |
+
gr.Markdown("### 1๋จ๊ณ : ํฌ์คํ
์นดํ
๊ณ ๋ฆฌ๋ฅผ ์ง์ ํด์ฃผ์ธ์")
|
222 |
+
category = gr.Radio(choices=["์ผ๋ฐ", "๊ฑด๊ฐ์ ๋ณด"], label="ํฌ์คํ
์นดํ
๊ณ ๋ฆฌ", value="์ผ๋ฐ")
|
223 |
+
|
224 |
+
# 2๋จ๊ณ
|
225 |
+
gr.Markdown("### 2๋จ๊ณ : ๋ธ๋ก๊ทธ ์ฃผ์ , ๋๋ ํค์๋๋ฅผ ์์ธํ ์
๋ ฅํ์ธ์")
|
226 |
+
topic = gr.Textbox(label="๋ธ๋ก๊ทธ ์ฃผ์ (์์: ์ค์ง์ด ๋ฌด์นจํ(X), ์ค์ง์ด ๋ฌด์นจํ ๋ ์ํผ(O))", placeholder="์์: ์ฌํ์ง ์ถ์ฒ(X), 8์ ๊ตญ๋ด ์ฌํ์ง ์ถ์ฒ(O)")
|
227 |
+
|
228 |
+
# 3๋จ๊ณ: ์ฐธ๊ณ ๊ธ์ ์ํ ๋ณ์๋ค ๋ฏธ๋ฆฌ ์ ์
|
229 |
+
references1 = gr.Textbox(label="์ฐธ๊ณ ๊ธ 1", placeholder="์ฐธ๊ณ ํ ๋ธ๋ก๊ทธ ํฌ์คํ
๊ธ์ ๋ณต์ฌํ์ฌ ๋ถ์ฌ๋ฃ์ผ์ธ์", lines=10, visible=False)
|
230 |
+
references2 = gr.Textbox(label="์ฐธ๊ณ ๊ธ 2", placeholder="์ฐธ๊ณ ํ ๋ธ๋ก๊ทธ ํฌ์คํ
๊ธ์ ๋ณต์ฌํ์ฌ ๋ถ์ฌ๋ฃ์ผ์ธ์", lines=10, visible=False)
|
231 |
+
references3 = gr.Textbox(label="์ฐธ๊ณ ๊ธ 3", placeholder="์ฐธ๊ณ ํ ๋ธ๋ก๊ทธ ํฌ์คํ
๊ธ์ ๋ณต์ฌํ์ฌ ๋ถ์ฌ๋ฃ์ผ์ธ์", lines=10, visible=False)
|
232 |
+
|
233 |
+
# ์ ๋ชฉ ์ถ์ฒ
|
234 |
+
gr.Markdown("### 4๋จ๊ณ : ์ ๋ชฉ ์ถ์ฒํ๊ธฐ")
|
235 |
+
|
236 |
+
with gr.Accordion("์ ๋ชฉ ์ค์ ", open=True):
|
237 |
+
title_system_message = gr.Textbox(label="์์คํ
๋ฉ์์ง", value=get_title_prompt("์ผ๋ฐ"), lines=15)
|
238 |
+
title_max_tokens = gr.Slider(label="Max Tokens", minimum=1000, maximum=8000, value=5000, step=1000)
|
239 |
+
title_temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, value=0.8, step=0.1)
|
240 |
+
title_top_p = gr.Slider(label="Top P", minimum=0.1, maximum=1.0, value=0.95, step=0.05)
|
241 |
+
|
242 |
+
title_suggestions = gr.Textbox(label="์ ๋ชฉ ์ถ์ฒ", lines=10)
|
243 |
+
title_token_output = gr.Markdown(label="์ฌ์ฉ๋ ํ ํฐ ์")
|
244 |
+
|
245 |
+
# ์ ๋ชฉ ์ถ์ฒ ๋ฒํผ
|
246 |
+
title_btn = gr.Button("์ ๋ชฉ ์ถ์ฒํ๊ธฐ")
|
247 |
+
title_btn.click(fn=suggest_title, inputs=[category, topic, references1, references2, references3, title_system_message, title_max_tokens, title_temperature, title_top_p], outputs=[title_suggestions, title_token_output])
|
248 |
+
|
249 |
+
blog_title = gr.Textbox(label="๋ธ๋ก๊ทธ ์ ๋ชฉ", placeholder="๋ธ๋ก๊ทธ ์ ๋ชฉ์ ์
๋ ฅํด์ฃผ์ธ์")
|
250 |
+
|
251 |
+
# ๋ธ๋ก๊ทธ ๊ธ ์์ฑ
|
252 |
+
gr.Markdown("### 5๋จ๊ณ : ๋ธ๋ก๊ทธ ๊ธ ์์ฑํ๊ธฐ")
|
253 |
+
gr.HTML("<span style='color: grey;'>[๋ธ๋ก๊ทธ ๊ธ ์์ฑํ๊ธฐ ๋ฒํผ์ ํด๋ฆญํ๋ฉด ์์๋ผ์ธ ์์ฑ ๋ฐ ๋ธ๋ก๊ทธ ๊ธ ์์ฑ์ด ์๋์ผ๋ก ์งํ๋ฉ๋๋ค.]</span>")
|
254 |
+
|
255 |
+
with gr.Accordion("๋ธ๋ก๊ทธ ๊ธ ์ค์ ", open=True):
|
256 |
+
outline_system_message = gr.Textbox(label="์์คํ
๋ฉ์์ง", value=get_outline_prompt("์ผ๋ฐ"), lines=20)
|
257 |
+
outline_max_tokens = gr.Slider(label="Max Tokens", minimum=1000, maximum=8000, value=6000, step=1000)
|
258 |
+
outline_temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, value=0.8, step=0.1)
|
259 |
+
outline_top_p = gr.Slider(label="Top P", minimum=0.1, maximum=1.0, value=0.95, step=0.05)
|
260 |
+
|
261 |
+
blog_system_message = gr.Textbox(label="์์คํ
๋ฉ์์ง", value=get_blog_post_prompt("์ผ๋ฐ"), lines=20)
|
262 |
+
blog_max_tokens = gr.Slider(label="Max Tokens", minimum=1000, maximum=12000, value=8000, step=1000)
|
263 |
+
blog_temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, value=0.8, step=0.1)
|
264 |
+
blog_top_p = gr.Slider(label="Top P", minimum=0.1, maximum=1.0, value=0.95, step=0.05)
|
265 |
+
|
266 |
+
outline_result = gr.Textbox(label="์์๋ผ์ธ ๊ฒฐ๊ณผ", lines=15, visible=False)
|
267 |
+
outline_token_output = gr.Markdown(label="์ฌ์ฉ๋ ํ ํฐ ์", visible=False)
|
268 |
+
output = gr.Textbox(label="์์ฑ๋ ๋ธ๋ก๊ทธ ๊ธ", lines=30)
|
269 |
+
token_output = gr.Markdown(label="์ฌ์ฉ๋ ํ ํฐ ์")
|
270 |
+
|
271 |
+
# ๋ธ๋ก๊ทธ ๊ธ ์์ฑ ๋ฒํผ
|
272 |
+
generate_post_btn = gr.Button("๋ธ๋ก๊ทธ ๊ธ ์์ฑํ๊ธฐ")
|
273 |
+
generate_post_btn.click(
|
274 |
+
fn=fetch_references_and_generate_all_steps,
|
275 |
+
inputs=[category, topic, blog_title, outline_system_message, outline_max_tokens, outline_temperature, outline_top_p, blog_system_message, blog_max_tokens, blog_temperature, blog_top_p],
|
276 |
+
outputs=[references1, references2, references3, outline_result, outline_token_output, output, token_output]
|
277 |
)
|
+
+     category.change(fn=update_prompts, inputs=category, outputs=[title_system_message, outline_system_message, blog_system_message])
+
+ demo.launch()