File size: 807 Bytes
c2d246d
 
 
 
5fd90b5
 
 
 
 
 
 
 
 
 
c2d246d
 
 
5fd90b5
 
c2d246d
 
 
5fd90b5
 
 
 
c2d246d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import torch

# Use a lighter model (1.8B parameters) so it fits on modest hardware.
model_name = "Qwen/Qwen-1_8B"

# NOTE(review): trust_remote_code=True executes model-repo code on load —
# acceptable only because the model source (Qwen official repo) is trusted.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",          # place layers automatically across available devices
    torch_dtype=torch.float16,  # half precision to halve memory use
    trust_remote_code=True
)

def respond(message):
    """Generate a model reply for the given user message.

    Args:
        message: The user's prompt as plain text.

    Returns:
        The generated continuation as a string, *without* echoing the
        prompt back (decoding ``outputs[0]`` whole would prepend the
        input text to every reply).
    """
    inputs = tokenizer(message, return_tensors="pt").to(model.device)
    # Inference only — disable gradient tracking to save memory.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=150)
    # generate() returns prompt + new tokens; slice off the prompt so the
    # user sees only the model's answer.
    prompt_len = inputs["input_ids"].shape[-1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

# Build the web UI and start serving it.
demo = gr.Interface(
    fn=respond,
    inputs="text",
    outputs="text",
    title="Qwen Прокси для Janotaro.ai",
    description="Работает на Qwen-1_8B",
)
demo.launch()