Low quality QLoRA on Nemo base.
The model was trained with anonymized names, without trips, and without dates*. The dataset still carries the original post info.
*I had one failed run with dates. It could somewhat base its discussions around the topics of the time, but it was underbaked; I deleted it and never retrained with new settings. With lower ranks like 8 or 16, it seemed not to care about the date when generating discussions for a given post date.
Dataset here:
https://huggingface.co/datasets/quasar-of-mikus/lmg-neo-lora-v0.3
See also:
https://huggingface.co/llama-anon/lmg-lora by the original lmg lora anon
https://huggingface.co/llama-anon/lmg-lora-2 by the original lmg lora anon (>>97868993)
Recommended samplers:
Temp 1
rep pen 1.15-1.2 # Model is fucking fried. Good luck.
rep pen range 2048 # Same reason.
top-k 20
minp 0.01 # Try without first.
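If you run the model through a llama.cpp server, those samplers map onto a /completion request roughly like this. A minimal sketch, not from the original card; the URL, prompt, and token count are placeholders:

import requests

# Hypothetical local llama.cpp server; point this at your own instance.
LLAMA_SERVER = "http://127.0.0.1:8080/completion"

payload = {
    "prompt": "/lmg/ - Local Models General\n",  # placeholder, see the template below
    "n_predict": 512,
    "temperature": 1.0,
    "top_k": 20,
    "min_p": 0.01,           # try without first
    "repeat_penalty": 1.18,  # somewhere in the 1.15-1.2 range
    "repeat_last_n": 2048,   # rep pen range
}

resp = requests.post(LLAMA_SERVER, json=payload, timeout=300)
print(resp.json()["content"])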
Prompt template (text completion only, Mikupad recommended):
---
/lmg/ - Local Models General
---
Anonymous No.61958
/lmg/ - a general dedicated to the discussion and development of local language models.
Previous threads:
Misc:
►News
►Recent Highlights
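It's plain text completion, so the "template" is just the thread opener above pasted verbatim and the model continues with posts. A quick sketch of assembling it in Python (nothing here beyond the lines above):

# Build the thread-opener prompt for text completion.
template_lines = [
    "---",
    "/lmg/ - Local Models General",
    "---",
    "Anonymous No.61958",
    "/lmg/ - a general dedicated to the discussion and development of local language models.",
    "Previous threads:",
    "Misc:",
    "►News",
    "►Recent Highlights",
]
prompt = "\n".join(template_lines) + "\n"
# Send `prompt` through the /completion sketch above, or paste it into Mikupad as-is.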
Training config, unsloth:
import os
from unsloth import FastLanguageModel
from unsloth import is_bfloat16_supported
import torch
from unsloth import UnslothTrainer, UnslothTrainingArguments
from datasets import load_dataset
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
max_seq_length = 4096 # Supports RoPE Scaling internally, so choose any!
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = r"T:\models\Mistral-Nemo-Base-2407",  # raw string so the backslashes aren't treated as escapes
    max_seq_length = max_seq_length,
    dtype = None,
    load_in_4bit = True,
)
# Do model patching and add fast LoRA weights
model = FastLanguageModel.get_peft_model(
    model,
    r = 64,
    lora_alpha = 64,
    lora_dropout = 0.0, # Supports any, but = 0 is optimized
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj", "embed_tokens", "lm_head",],
    bias = "none", # Supports any, but = "none" is optimized
    # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
    use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
    random_state = 1337,
    max_seq_length = max_seq_length,
    use_rslora = False, # We support rank stabilized LoRA
    loftq_config = None, # And LoftQ
)
train_dataset = load_dataset(path="datasets", data_files = {"train" : "lmg-threads-cleaned-2-nodate-train-shuffled.json"}, split="train")
eval_dataset = load_dataset(path="datasets", data_files = {"train" : "lmg-threads-cleaned-2-nodate-eval.json"}, split="train")
# print out 5 rows of the dataset
for row in train_dataset[:5]["text"]:
    print("=========================")
    print(row)
trainer = UnslothTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = train_dataset,
    eval_dataset = eval_dataset,
    dataset_text_field = "text",
    max_seq_length = max_seq_length,
    dataset_num_proc = 1,
    args = UnslothTrainingArguments(
        per_device_train_batch_size = 1,
        gradient_accumulation_steps = 1,
        per_device_eval_batch_size = 1,
        eval_accumulation_steps = 4,
        fp16_full_eval = True,
        warmup_ratio = 0,
        #max_steps=20,
        num_train_epochs = 1,
        learning_rate = 1e-4,
        embedding_learning_rate = 1e-5,
        eval_strategy = "steps",
        eval_steps = 50,
        do_eval = True,
        fp16 = False,
        bf16 = True,
        logging_steps = 1,
        optim = "adamw_8bit",
        weight_decay = 0.01,
        lr_scheduler_type = "constant",
        seed = 3407,
        save_strategy = "epoch",
        output_dir = "outputs",
        report_to = "tensorboard", # wandb, tensorboard, whatever
    ),
)
# Show current memory stats
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")
trainer_stats = trainer.train()
# Flip these to export a GGUF instead of (or alongside) the merged 16-bit HF model.
if False: model.save_pretrained_gguf("model", tokenizer, quantization_method = "f16")
if True: model.save_pretrained_merged("model", tokenizer, save_method = "merged_16bit",)
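Once trained, the merged 16-bit model in the "model" dir loads like any other HF checkpoint. A rough sketch for sanity-checking the output (generation settings approximate the samplers above; transformers has no rep pen range or min-p here, use llama.cpp for those):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# "model" is the output dir from save_pretrained_merged above.
tok = AutoTokenizer.from_pretrained("model")
lm = AutoModelForCausalLM.from_pretrained("model", torch_dtype=torch.bfloat16, device_map="auto")

inputs = tok(prompt, return_tensors="pt").to(lm.device)  # `prompt` = the thread-opener string from earlier
out = lm.generate(**inputs, max_new_tokens=512, do_sample=True,
                  temperature=1.0, top_k=20, repetition_penalty=1.15)
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))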