from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import torch

model_name = "Qwen/Qwen-1_8B"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",          # spread layers across available devices
    torch_dtype=torch.float16,  # half precision to cut memory usage
    trust_remote_code=True,     # Qwen ships custom modeling code
)

def respond(message):
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(message, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=150)
    # Decode only the newly generated tokens, skipping the echoed prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
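
# Note (assumption, not from the original script): greedy decoding can get
# repetitive on a base model like Qwen-1_8B; transformers' generate() also
# accepts sampling parameters, e.g.:
#   model.generate(**inputs, max_new_tokens=150, do_sample=True,
#                  temperature=0.7, top_p=0.9)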

gr.Interface(
    fn=respond,
    inputs="text",
    outputs="text",
    title="Qwen Proxy for Janotaro.ai",
    description="Powered by Qwen-1_8B",
).launch()
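
# Deployment note (assumption): launch() also accepts server_name="0.0.0.0",
# server_port=7860, or share=True for a temporary public link, if the app
# needs to be reachable beyond localhost.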