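"""Gradio demo: complete code snippets with the JetBrains/Mellum-4b-base causal LM."""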
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM


def predict_code(prompt):
    # Load the tokenizer and model (fetched from the Hugging Face Hub on first use).
    tokenizer = AutoTokenizer.from_pretrained('JetBrains/Mellum-4b-base')
    model = AutoModelForCausalLM.from_pretrained('JetBrains/Mellum-4b-base')

    # Tokenize the prompt; token_type_ids are not needed for causal generation.
    encoded_input = tokenizer(prompt, return_tensors='pt', return_token_type_ids=False)
    input_len = len(encoded_input["input_ids"][0])

    # Generate up to 100 new tokens as the completion.
    out = model.generate(
        **encoded_input,
        max_new_tokens=100,
    )

    # Decode only the newly generated tokens, skipping the prompt itself.
    prediction = tokenizer.decode(out[0][input_len:])
    return prediction


def run(prompt):
    return predict_code(prompt)


app = gr.Interface(
    fn=run,
    inputs=["text"],
    outputs=["text"],
)

app.launch()
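
# Usage sketch (an assumption, not part of the original file): once the app is
# running, the same endpoint can also be called programmatically via gradio_client,
# which targets the "/predict" route that gr.Interface exposes by default:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("def fibonacci(n):", api_name="/predict"))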