Spaces: Running on Zero

update
README.md
CHANGED
```diff
@@ -11,24 +11,29 @@ license: mit
 short_description: Chinese input method accelerator
 ---
 
-#
+# Taiwan Chinese Input Method Accelerator (ZeroGPU + Gradio v5)
 
 ## 1. Project Overview
 This demo combines several small Chinese language models and uses Hugging Face's **ZeroGPU** (H200) to run text generation in real time, simulating the candidate-word suggestions of a Chinese input method.
 
 ## 2. Main Features
-…
-   - **M (number of suggestions)**: controls how many candidate suggestions are generated at once.
-4. **Generate suggestions on GPU**: clicking starts inference on the H200 and releases the resource automatically afterwards.
-5. **Suggestion list**: clicking any candidate appends that text fragment to the input area.
+…
+4. **Generate suggestions on GPU**:
+   - Uses **Beam Search** (`num_beams=M`) to produce the M most likely continuations at once, running inference on the H200.
+…
 
 ## 3. How It Works
-…
+- Clicking the "使用 GPU 生成建議" (Generate suggestions on GPU) button calls the model in **Beam Search** mode:
+  ```python
+  outs = gen_pipe(
+      text,
+      max_new_tokens=K,
+      num_beams=M,
+      num_return_sequences=M,
+      do_sample=False,
+      early_stopping=True
+  )
+  ```
 
 ## 4. Deployment Steps
 1. Create a new Space on Hugging Face Spaces and choose the **Gradio SDK** framework.
```
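To make the beam-search call in "How It Works" concrete, here is a minimal, self-contained sketch that runs the same generation on CPU, outside the Space. The prompt string and the K/M values are illustrative placeholders, not taken from the commit; the model name is the first entry of the app's `MODEL_LIST`:

```python
# Minimal local sketch of the README's beam-search call (CPU, no ZeroGPU).
# The prompt and the K/M values below are placeholders, not from the app.
from transformers import pipeline

gen_pipe = pipeline("text-generation", model="ckiplab/gpt2-tiny-chinese")

text = "今天天氣"  # example prompt ("today's weather")
K, M = 5, 3        # K new tokens per candidate, M candidates

outs = gen_pipe(
    text,
    max_new_tokens=K,        # each candidate adds at most K tokens
    num_beams=M,             # beam width
    num_return_sequences=M,  # return every beam as a candidate
    do_sample=False,         # deterministic beam search, no sampling
    early_stopping=True,
)

# Each result includes the prompt, so slice it off, as app.py does.
for out in outs:
    print(out["generated_text"][len(text):])
```

Because `do_sample=False`, the M candidates come back as the highest-scoring beams in order, which is what makes them usable as ranked input-method suggestions.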
app.py
CHANGED
```diff
@@ -4,6 +4,7 @@ import gradio as gr
 from functools import lru_cache
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
+# List of selectable models
 MODEL_LIST = [
     "ckiplab/gpt2-tiny-chinese",
     "ckiplab/gpt2-base-chinese",
@@ -17,31 +18,50 @@ MODEL_LIST = [
 @lru_cache(maxsize=None)
 def get_pipeline(model_name):
     tok = AutoTokenizer.from_pretrained(model_name)
-    # By setting weights_only=False we bypass the torch.load(weights_only=True)
-    # path that is disallowed for torch<2.6 due to CVE-2025-32434.
     mdl = AutoModelForCausalLM.from_pretrained(model_name, weights_only=False)
     mdl.to("cuda")
     return pipeline("text-generation", model=mdl, tokenizer=tok, device=0)
 
 @spaces.GPU
 def suggest_next(text, model_name, k, m):
-…
+    """
+    Use Beam Search to generate the M most likely continuations,
+    each at most K new tokens long.
+    """
+    gen_pipe = get_pipeline(model_name)
+    outs = gen_pipe(
+        text,
+        max_new_tokens=k,
+        num_beams=m,
+        num_return_sequences=m,
+        do_sample=False,
+        early_stopping=True
     )
+    # Keep only the newly generated part, without the prompt
     return [out["generated_text"][len(text):] for out in outs]
 
 def append_suggestion(current, choice):
     return current + choice
 
 with gr.Blocks() as demo:
-    gr.Markdown(
-…
+    gr.Markdown(
+        "## 🇹🇼 台灣中文下段預測\n"
+        "結合小型語言模型與 ZeroGPU,提供 Beam Search 風格的多條下段建議。"
+    )
 
-    input_text = gr.TextArea(
-…
+    input_text = gr.TextArea(
+        label="輸入文字", lines=4, placeholder="請在此輸入起始片段…"
+    )
 
     with gr.Row():
-        model_selector = gr.Dropdown(
-…
+        model_selector = gr.Dropdown(
+            MODEL_LIST, value=MODEL_LIST[0], label="選擇模型"
+        )
+        k_slider = gr.Slider(
+            minimum=1, maximum=50, step=1, value=5, label="K(最大新生成詞元)"
+        )
+        m_slider = gr.Slider(
+            minimum=1, maximum=10, step=1, value=5, label="M(建議數量 / Beam 數)"
+        )
 
     suggestions = gr.Dropdown([], label="建議清單", interactive=True)
     gpu_button = gr.Button("使用 GPU 生成建議")
```
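The hunk ends at the component definitions, so the commit does not show how `gpu_button` and `suggestions` are wired to `suggest_next` and `append_suggestion`. The sketch below is one plausible wiring under Gradio's event API, not the file's actual code; `update_suggestions` is a hypothetical wrapper that turns the returned list of strings into dropdown choices:

```python
# Hypothetical wiring sketch (would sit inside `with gr.Blocks() as demo:`).
# None of these lines appear in the commit; they show one way the
# components defined above could be connected.
def update_suggestions(text, model_name, k, m):
    # suggest_next returns a list of continuation strings; expose them
    # as the dropdown's choices and clear any previous selection.
    return gr.update(choices=suggest_next(text, model_name, k, m), value=None)

gpu_button.click(
    update_suggestions,
    inputs=[input_text, model_selector, k_slider, m_slider],
    outputs=suggestions,
)

# When the user picks a candidate, append it to the input area.
suggestions.input(
    append_suggestion,
    inputs=[input_text, suggestions],
    outputs=input_text,
)
```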
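One note on the unchanged `@spaces.GPU` decorator: on ZeroGPU it requests the H200 only for the duration of each `suggest_next` call and releases it when the function returns, which is what the README means by the resource being freed automatically. If beam search with large K and M ever outgrows the default window, the decorator also accepts an explicit duration:

```python
import spaces

# Illustrative only -- the commit uses the default allocation window.
@spaces.GPU(duration=60)
def suggest_next(text, model_name, k, m):
    ...
```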