- You can chat with it ⚡ — best for running on mobile!
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Inference script: load the GPT2-137M chat model, read one message from
# stdin, generate a reply, and print it.

# Load model and tokenizer from the HuggingFace Hub (downloads on first run).
tokenizer = AutoTokenizer.from_pretrained("NuclearAi/GPT2-137M-Chat-v1.0")
model = AutoModelForCausalLM.from_pretrained("NuclearAi/GPT2-137M-Chat-v1.0")
model.eval()  # inference only — disable dropout etc.

# User input
prompt = input("Enter your message: ")

# Tokenize
inputs = tokenizer(prompt, return_tensors="pt")

# Generate under no_grad: autograd bookkeeping is pure overhead here
# (GPT-2 has no dedicated pad token, so EOS doubles as padding).
with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=512,
        pad_token_id=tokenizer.eos_token_id,
    )

# generate() returns prompt + completion; slice off the prompt tokens so
# only the model's reply is printed instead of echoing the user's input.
prompt_len = inputs["input_ids"].shape[1]
print(tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True))
- Downloads last month
- 71
Inference Providers
NEW
This model isn't deployed by any Inference Provider.
🙋
Ask for provider support