|
|
|
|
|
import os

import torch
from huggingface_hub import create_repo, login, upload_folder
from transformers import (
    AutoTokenizer,
    GPTNeoXConfig,
    GPTNeoXForCausalLM,
    PreTrainedTokenizerFast,
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hyper-parameters for the model (GPT-2-style key names, applied to GPT-NeoX below).
model_config = dict(
    vocab_size=50257,      # BPE vocabulary size (matches GPT-2's vocabulary)
    n_embd=512,            # hidden / embedding width
    n_layer=12,            # number of transformer layers
    n_head=8,              # attention heads (512 / 8 = 64-dim per head)
    block_size=128,        # maximum sequence length (position embeddings)
    dropout=0.1,           # dropout probability
    model_type="gpt_neox",
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Local output directories for the model weights and the tokenizer files.
paths = {
    "model_save": "./agent_model",
    "tokenizer_save": "./agent_tokenizer",
}

# Ensure both target directories exist before anything tries to save into them.
for directory in paths.values():
    os.makedirs(directory, exist_ok=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Tokenizer --------------------------------------------------------------
# BUG FIX: the original called PreTrainedTokenizerFast(tokenizer_file=None, ...),
# which raises ValueError at construction time — a fast tokenizer needs a real
# tokenizer backend (tokenizer_file or tokenizer_object), and even if it built,
# it would have no vocabulary to encode with.
# Since model_config["vocab_size"] (50257) matches GPT-2's vocabulary exactly,
# reuse the pretrained GPT-2 tokenizer (fetched from the Hub or local cache —
# requires network on first run).
tokenizer = AutoTokenizer.from_pretrained("gpt2")
# GPT-2 ships without a pad token; reuse EOS so padding/generation have a valid
# id without growing the vocabulary past the model's 50257 embeddings.
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained(paths["tokenizer_save"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Model ------------------------------------------------------------------
# Build the GPT-NeoX architecture config from the hyper-parameter dict.
# BUG FIX: GPTNeoXConfig has no `dropout_rate` parameter — the original kwarg
# was silently stored on the config but never used by the model, so dropout
# was effectively 0. The actual knobs are `hidden_dropout` and
# `attention_dropout`.
config = GPTNeoXConfig(
    vocab_size=model_config["vocab_size"],
    hidden_size=model_config["n_embd"],              # must be divisible by n_head
    num_hidden_layers=model_config["n_layer"],
    num_attention_heads=model_config["n_head"],
    max_position_embeddings=model_config["block_size"],
    hidden_dropout=model_config["dropout"],
    attention_dropout=model_config["dropout"],
)

# Instantiate a randomly-initialised (untrained) model from the config.
model = GPTNeoXForCausalLM(config)

# Persist the weights in the safetensors format (safe_serialization=True).
model.save_pretrained(paths["model_save"], safe_serialization=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Quick smoke test -------------------------------------------------------
# The model is freshly initialised (untrained), so the generated text is
# gibberish — this only verifies the tokenize/generate/decode path runs.
prompt = "Bonjour Agent-AI, que peux-tu faire ?"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():  # inference only — skip autograd bookkeeping
    output = model.generate(
        **inputs,
        # `max_length=50` counted the prompt tokens too; an explicit budget of
        # new tokens is the clearer, non-deprecated form.
        max_new_tokens=50,
        # Passing the pad id explicitly avoids the "no pad token" warning.
        pad_token_id=tokenizer.pad_token_id,
    )
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Publish to the Hugging Face Hub ----------------------------------------
# The token comes from the environment (CI secret / local export) and is never
# hard-coded in source.
HF_TOKEN = os.environ.get("HF_TOKEN")
REPO_ID = "Mauricio-100/agent-ai"

if HF_TOKEN:
    print("🔐 Connexion à Hugging Face Hub...")
    login(token=HF_TOKEN)

    # ROBUSTNESS FIX: upload_folder() fails if the target repo does not exist
    # yet; create it first (exist_ok=True makes this call idempotent).
    create_repo(REPO_ID, repo_type="model", exist_ok=True)

    print("📤 Upload du modèle...")
    upload_folder(folder_path=paths["model_save"], repo_id=REPO_ID, repo_type="model")
    upload_folder(folder_path=paths["tokenizer_save"], repo_id=REPO_ID, repo_type="model")
    print(f"✅ Modèle poussé sur https://huggingface.co/{REPO_ID}")
    print("🚀 Tu peux maintenant activer Friendli Endpoints depuis l’onglet [Deploy] du modèle.")
else:
    print("⚠️ Aucun HF_TOKEN trouvé. Ajoute-le dans les Secrets ou les variables d’environnement.")
|
|
|
|
|
|