# app.py
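#
# Gradio streaming-chat demo for inclusionAI/Ling-lite-1.5.
# Rough dependency sketch (assuming a recent transformers release with
# remote-code support):
#   pip install torch transformers gradio accelerate
# (accelerate is required by device_map="auto")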
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
from threading import Thread
import gradio as gr
import re
import torch

# load model and tokenizer
model_name = "inclusionAI/Ling-lite-1.5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
    trust_remote_code=True
).eval()
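# note: trust_remote_code=True executes custom modeling code downloaded from
# the Hub, so only enable it for repositories you trust; torch_dtype="auto"
# loads the weights in the precision stored in the checkpoint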

# define chat function
def chat(user_input, max_new_tokens=2048):
    # build the single-turn message list (no multi-turn history is kept)
    messages = [
        {"role": "system", "content": "You are Ling, an assistant created by inclusionAI"},
        {"role": "user", "content": user_input}
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    
    # encode the input prompt
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # create a streamer; skip_prompt=True drops the echoed prompt so only
    # newly generated tokens are yielded
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # run generation in a background thread so tokens can be streamed as they arrive
    def generate():
        model.generate(**inputs, max_new_tokens=int(max_new_tokens), streamer=streamer)

    thread = Thread(target=generate)
    thread.start()

    # accumulate streamed chunks and yield the growing response for live display
    generated_text = ""
    for new_text in streamer:
        generated_text += new_text
        yield generated_text

    thread.join()

# Create a custom layout using Blocks
with gr.Blocks(css="""
    #markdown-output {
        height: 300px;
        overflow-y: auto;
        border: 1px solid #ddd;
        padding: 10px;
    }
""") as demo:
    gr.Markdown(
        "## Ling-lite-1.5 AI Assistant\n"
        "A conversational text-generation demo based on "
        "[inclusionAI/Ling-lite-1.5](https://huggingface.co/inclusionAI/Ling-lite-1.5)."
    )

    with gr.Row():
        max_tokens_slider = gr.Slider(minimum=128, maximum=2048, value=2048, step=16, label="Max new tokens")

    output_box = gr.Markdown(label="Response", elem_id="markdown-output")
    input_box = gr.Textbox(lines=8, label="Enter your question")

    examples = gr.Examples(
        examples=[
            ["Introducing the basic concepts of large language models"],
            ["How to solve long context dependencies in math problems?"]
        ],
        inputs=input_box
    )

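    # gr.Interface can render inside a Blocks context; it wires chat to the
    # components created above and adds the Submit / Clear controls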
    interface = gr.Interface(
        fn=chat,
        inputs=[input_box, max_tokens_slider],
        outputs=output_box,
        live=False  # disable auto-triggering on input change
    )

# launch the Gradio service
demo.queue()
demo.launch()
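
# to try it locally (assuming default Gradio settings):
#   python app.py
# then open http://localhost:7860; pass share=True to demo.launch() for a
# temporary public link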
