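"""Minimal CPU-only Gradio chat demo for the HyperCLOVAX-SEED 0.5B instruct model."""
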
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch

# Force CPU execution
device = "cpu"
torch.set_num_threads(4)  # cap the CPU thread count so the host stays responsive

# Load the lightweight model
model = AutoModelForCausalLM.from_pretrained(
    "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B",
    torch_dtype=torch.float32,  # float32 is recommended on CPU
    low_cpu_mem_usage=True
).to(device)
tokenizer = AutoTokenizer.from_pretrained(
    "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B"
)
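
# Assumption (not in the original): if the checkpoint ships without a pad token,
# generate() emits warnings; reusing the EOS token is a common, safe fallback.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Disabling autograd during generation saves memory and a little time on CPU;
# a standard inference optimization, added here as a sketch.
@torch.inference_mode()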
def predict(message, history):
    # Simplified chat construction to save memory;
    # the conversation history is intentionally not replayed
    chat = [
        {"role": "system", "content": "Please answer concisely."},
        {"role": "user", "content": message}
    ]
    # CPU-friendly tokenization settings
    inputs = tokenizer.apply_chat_template(
        chat,
        add_generation_prompt=True,  # append the assistant turn marker
        return_tensors="pt",
        max_length=512,  # cap the prompt length
        truncation=True
    ).to(device)
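    # Note: apply_chat_template here returns only input_ids, so generate() may
    # warn about a missing attention_mask; return_dict=True (supported in recent
    # transformers releases) would also yield one.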
    outputs = model.generate(
        inputs,
        max_new_tokens=200,  # keep responses short
        do_sample=False  # greedy decoding for deterministic responses
    )
    # Decode only the newly generated tokens, not the echoed prompt
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

# Lightweight interface
demo = gr.ChatInterface(
    predict,
    title="CLOVA X (CPU Mode)",
    description="CPU-only lightweight version",
    theme="soft",
    examples=["Hello", "Tell me the weather"]
)

if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860
    )
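    # Usage (a sketch; the filename is an assumption): run `python app.py`,
    # then open http://localhost:7860 in a browser.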