edwko committed
Commit 51a920c · verified · 1 parent: ef521aa

Upload 7 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
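
Note: this is exactly the rule that "git lfs track tokenizer.json" would append to .gitattributes. A minimal Python sketch of the same edit, assuming it runs from the repository root (illustrative only, not part of this commit):

# Append the LFS tracking rule that "git lfs track tokenizer.json" would add.
rule = "tokenizer.json filter=lfs diff=lfs merge=lfs -text\n"
with open(".gitattributes", "a", encoding="utf-8") as f:
    f.write(rule)
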
config.json ADDED
@@ -0,0 +1,77 @@
+{
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 133309,
+  "eos_token_id": 133310,
+  "head_dim": 64,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 16,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "quantization_config": {
+    "config_groups": {
+      "group_0": {
+        "input_activations": {
+          "actorder": null,
+          "block_structure": null,
+          "dynamic": true,
+          "group_size": null,
+          "num_bits": 8,
+          "observer": null,
+          "observer_kwargs": {},
+          "strategy": "token",
+          "symmetric": true,
+          "type": "float"
+        },
+        "output_activations": null,
+        "targets": [
+          "Linear"
+        ],
+        "weights": {
+          "actorder": null,
+          "block_structure": null,
+          "dynamic": false,
+          "group_size": null,
+          "num_bits": 8,
+          "observer": "minmax",
+          "observer_kwargs": {},
+          "strategy": "channel",
+          "symmetric": true,
+          "type": "float"
+        }
+      }
+    },
+    "format": "float-quantized",
+    "global_compression_ratio": null,
+    "ignore": [
+      "lm_head"
+    ],
+    "kv_cache_scheme": null,
+    "quant_method": "compressed-tensors",
+    "quantization_status": "compressed"
+  },
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 32.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.51.3",
+  "use_cache": true,
+  "vocab_size": 134400
+}
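
The "quant_method": "compressed-tensors" block marks this as an FP8 checkpoint (static per-channel weight scales, dynamic per-token activation scales), aimed at runtimes that understand the compressed-tensors format, such as vLLM. A minimal loading sketch; "REPO_ID" is a placeholder, not a name taken from this commit:

# Serve the FP8 checkpoint with vLLM; the quantization scheme is detected
# automatically from quantization_config in config.json.
from vllm import LLM

llm = LLM(model="REPO_ID")
print(llm.generate(["Hello"])[0].outputs[0].text)
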
generation_config.json ADDED
@@ -0,0 +1,13 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 133309,
+  "do_sample": true,
+  "eos_token_id": 133310,
+  "min_p": 0.05,
+  "pad_token_id": 128001,
+  "repetition_penalty": 1.1,
+  "temperature": 0.4,
+  "top_k": 40,
+  "top_p": 0.9,
+  "transformers_version": "4.51.3"
+}
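
These defaults map one-to-one onto a transformers GenerationConfig. A sketch of the equivalent explicit configuration (the pinned transformers 4.51.3 supports min_p sampling):

# Rebuild the same sampling defaults explicitly; once a model and tokenizer
# are loaded, pass it as model.generate(**inputs, generation_config=gen_cfg).
from transformers import GenerationConfig

gen_cfg = GenerationConfig(
    do_sample=True,
    temperature=0.4,
    top_k=40,
    top_p=0.9,
    min_p=0.05,
    repetition_penalty=1.1,
    bos_token_id=133309,
    eos_token_id=133310,
    pad_token_id=128001,
)
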
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59161b65d339da3f31cb7451e5432c015a6cdddfa6b9523bb88766b06e8ebd5e
+size 2075002744
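
What is committed here is a Git LFS pointer, not the weights themselves: three lines giving the pointer spec version, the SHA-256 of the real blob, and its size in bytes (about 2.07 GB). A small sketch that parses such a pointer and verifies a downloaded blob against it (file paths are hypothetical):

# Parse a Git LFS pointer file and check a downloaded blob against its
# recorded sha256 and size, hashing in 1 MiB chunks to bound memory use.
import hashlib

def parse_pointer(path):
    with open(path, encoding="utf-8") as f:
        fields = dict(line.split(" ", 1) for line in f.read().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def verify(blob_path, pointer_path):
    oid, size = parse_pointer(pointer_path)
    h, n = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            n += len(chunk)
    return n == size and h.hexdigest() == oid
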
recipe.yaml ADDED
@@ -0,0 +1,6 @@
+default_stage:
+  default_modifiers:
+    QuantizationModifier:
+      ignore: [lm_head]
+      targets: [Linear]
+      scheme: FP8_DYNAMIC
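
This is an llm-compressor recipe: quantize every Linear layer to FP8 with dynamic per-token activation scales, skipping lm_head, which matches the quantization_config in config.json above. A sketch of the equivalent one-shot run; "BASE_MODEL" is a placeholder for the unquantized checkpoint, and recent llm-compressor releases export oneshot at the top level (older ones under llmcompressor.transformers):

# Reproduce the FP8_DYNAMIC recipe with llm-compressor. The dynamic scheme
# computes activation scales at runtime, so no calibration dataset is needed.
from transformers import AutoModelForCausalLM
from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier

model = AutoModelForCausalLM.from_pretrained("BASE_MODEL", torch_dtype="auto")
recipe = QuantizationModifier(targets="Linear", scheme="FP8_DYNAMIC", ignore=["lm_head"])
oneshot(model=model, recipe=recipe)
model.save_pretrained("model-FP8-Dynamic", save_compressed=True)
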
special_tokens_map.json ADDED
The diff for this file is too large to render.
 
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec54a55e3c6fc7318ea02cbd1a6eb1fb180bff1b58acbf068504a25f1b407b7b
+size 18366636
tokenizer_config.json ADDED
The diff for this file is too large to render.