Text Generation · Transformers · PyTorch · Safetensors · English
Tags: i3 · i3-architecture · hybrid-model · rwkv-mamba · custom_code
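
Because the repository carries the Transformers custom_code tag, the model should also be loadable through the standard Auto classes instead of importing modeling_i3 directly. A minimal sketch, assuming the repo's auto_map is wired up for its custom i3 classes (not verified here); the direct-import path shown in example_run.py below is the one the repo itself demonstrates:

# alt_loading.py (hypothetical sketch)
from transformers import AutoModelForCausalLM

# trust_remote_code lets transformers execute the repo's custom
# modeling code (modeling_i3.py); required for custom architectures.
model = AutoModelForCausalLM.from_pretrained(".", trust_remote_code=True)
model.eval()
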
# example_run.py
import torch

from modeling_i3 import I3ForCausalLM
from tokenizer_i3 import I3Tokenizer

# Path to local model files (current folder)
model_path = "."

# Load tokenizer
tokenizer = I3Tokenizer(vocab_file=f"{model_path}/chunk_vocab_combined.json")

# Load HF-style model
model = I3ForCausalLM.from_pretrained(model_path)
model.eval()

# Example prompt
prompt = "hello, how are you"

# Encode text
input_ids = torch.tensor([tokenizer.encode(prompt)], dtype=torch.long)

# Optional: move to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
input_ids = input_ids.to(device)

# Generate tokens (generation is implemented on the wrapped i3 module)
with torch.no_grad():
    generated_ids = model.i3.generate(
        input_ids,
        max_new_tokens=50,
        temperature=0.8,
        top_k=40
    )

# Decode generated text
generated_text = tokenizer.decode(generated_ids[0].cpu().tolist())
print("Generated text:", generated_text)