kcz358 committed
Commit e55ba99 · verified · 1 Parent(s): 08c82c8

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1 @@
+ {"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 4096, "k": 256, "multi_topk": false}, "batch_size": 1, "grad_acc_steps": 1, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["model.layers.7"], "layers": [7], "layer_stride": 1, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "llava-hf_llama3-llava-next-8b-hf-lmms-lab_LLaVA-NeXT-Data-sae-4096-7", "wandb_log_frequency": 1, "mm_data": true, "model": "llava-hf/llama3-llava-next-8b-hf", "dataset": "lmms-lab/LLaVA-NeXT-Data", "split": "train", "ctx_len": 2048, "hf_token": null, "load_in_8bit": false, "max_examples": null, "resume": false, "seed": 42, "data_preprocessing_num_proc": 32}
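config.json records the full training run: a TopK sparse autoencoder (4096 latents, k=256) trained on activations from hook point model.layers.7 of llava-hf/llama3-llava-next-8b-hf, streamed from the train split of lmms-lab/LLaVA-NeXT-Data with a context length of 2048. A minimal sketch of reading it back, with field meanings inferred from the values above rather than from library documentation:

```python
import json

# Sketch: inspect the training configuration shipped with this checkpoint.
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["sae"]["num_latents"], cfg["sae"]["k"])  # 4096 latents, 256 kept per token
print(cfg["hookpoints"])                           # ["model.layers.7"]
print(cfg["model"], cfg["dataset"], cfg["split"])  # backbone, data source, split
```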
lr_scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68f033735a64103118887b6625875f432aeed45631b2b99cc9142a79c2a383d0
+ size 1012
model.layers.7/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 4096, "k": 256, "multi_topk": false, "d_in": 4096}
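model.layers.7/cfg.json pins the architecture stored in the adjacent sae.safetensors: input width d_in=4096 (the hidden size of the Llama-3-8B backbone), 4096 latents, and k=256 active features per token, with a normalized decoder and no multi-TopK auxiliary loss. The sketch below is an illustrative TopK-SAE forward pass consistent with those dimensions, not the exact module used by the training code:

```python
import torch
import torch.nn as nn

class TopKSAE(nn.Module):
    """Illustrative TopK sparse autoencoder matching cfg.json's dimensions."""
    def __init__(self, d_in=4096, num_latents=4096, k=256):
        super().__init__()
        self.k = k
        self.encoder = nn.Linear(d_in, num_latents)
        self.decoder = nn.Linear(num_latents, d_in)

    def forward(self, x):
        pre = self.encoder(x)                  # project into the dictionary
        top = torch.topk(pre, self.k, dim=-1)  # keep the k largest pre-activations
        latents = torch.zeros_like(pre).scatter_(-1, top.indices, torch.relu(top.values))
        return self.decoder(latents), latents  # reconstruction + sparse code

sae = TopKSAE()
x = torch.randn(2, 4096)                   # e.g. residual-stream vectors from model.layers.7
recon, latents = sae(x)
print(recon.shape, (latents != 0).sum(-1)) # torch.Size([2, 4096]), at most 256 active each
```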
model.layers.7/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e024a7b1534ee32c58df00f0e1e4cc28f065c33f6f3472249df7aa3375951f6e
+ size 134250824
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9287180515d73d11c8d9527a14f92db36a5e46cddb8b6c30af68fe3778512d1
+ size 268505298
state.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06a79d9d0bd5cef13374d41e3a6b6d7995eb9cfac97143e1f663e15e41ac5f55
+ size 33938