import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Model repo: a GGUF-quantized distribution of MythoMax-L2-13B.
# A plain from_pretrained(model_id) fails on GGUF-only repos (they ship no
# standard config/safetensors layout); transformers >= 4.41 can load a single
# GGUF file from the repo via the `gguf_file` keyword, dequantizing to torch.
model_id = "TheBloke/MythoMax-L2-13B-GGUF"
# NOTE(review): the repo ships several quant levels — confirm this filename
# exists in the repo before deploying.
gguf_file = "mythomax-l2-13b.Q4_K_M.gguf"

tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=gguf_file)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chat(message, history=None):
    """Generate one chat reply for the Gradio UI.

    Args:
        message: The user's latest message (the prompt for this turn).
        history: Prior turns as supplied by ``gr.ChatInterface``. Accepted
            for compatibility but unused — each turn is answered without
            conversational context.

    Returns:
        The model's generated continuation as a string, without the prompt
        echoed back.
    """
    # gr.ChatInterface always invokes fn(message, history); the original
    # one-parameter signature raised TypeError on every request. The default
    # keeps direct single-argument calls working too.
    # return_full_text=False makes the pipeline return only the newly
    # generated text instead of prompt + continuation.
    result = pipe(
        message,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,
    )
    return result[0]["generated_text"]

gr.ChatInterface(chat).launch()