Bencode92 committed · Commit f666f0e · 1 Parent(s): 05fd1f7

🔄 Incremental label | Acc: 0.995, F1: 0.993

README.md CHANGED
@@ -21,19 +21,19 @@ Fine-tuned FinBERT model for financial sentiment analysis in TradePulse.
 
 ## Performance
 
- *Last training: 2025-07-30 10:33*
- *Dataset: `base_reference.csv` (637 samples)*
+ *Last training: 2025-07-30 15:51*
+ *Dataset: `base_reference.csv` (1797 samples)*
 
 | Metric | Value |
 |--------|-------|
- | Loss | 1.2841 |
- | Accuracy | 0.8313 |
- | F1 Score | 0.8290 |
+ | Loss | 0.0141 |
+ | Accuracy | 0.9978 |
+ | F1 Score | 0.9978 |
 
- | F1 Macro | 0.8290 |
+ | F1 Macro | 0.9978 |
 
- | Precision | 0.8333 |
- | Recall | 0.8313 |
+ | Precision | 0.9978 |
+ | Recall | 0.9978 |
 
 ## Training Details
 
@@ -65,4 +65,4 @@ predictions = outputs.logits.softmax(dim=-1)
 ## Model Card Authors
 
 - TradePulse ML Team
- - Auto-generated on 2025-07-30 10:33:35
+ - Auto-generated on 2025-07-30 15:51:35
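For context, the second hunk header above shows that the README's usage snippet ends with `predictions = outputs.logits.softmax(dim=-1)`. Below is a minimal sketch of that inference flow; the repository id `Bencode92/hf-sentiment-production` and the example sentence are assumptions, not part of the commit.

```python
# Sketch only: load the fine-tuned FinBERT checkpoint and score one sentence.
# The repo id below is an assumption -- substitute the actual Hub repository.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "Bencode92/hf-sentiment-production"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer("Quarterly revenue beat expectations.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Same final step as the README snippet referenced in the hunk header.
predictions = outputs.logits.softmax(dim=-1)
label = model.config.id2label[int(predictions.argmax(dim=-1))]
print(label, predictions.squeeze().tolist())
```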
checkpoint-240/trainer_state.json DELETED
@@ -1,240 +0,0 @@
- {
- "best_metric": 0.829041087388282,
- "best_model_checkpoint": "hf-sentiment-production/checkpoint-240",
- "epoch": 2.0,
- "eval_steps": 500,
- "global_step": 240,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.08333333333333333,
- "grad_norm": 0.06056511402130127,
- "learning_rate": 8.403361344537816e-07,
- "loss": 0.1772,
- "step": 10
- },
- {
- "epoch": 0.16666666666666666,
- "grad_norm": 0.029755454510450363,
- "learning_rate": 1.6806722689075632e-06,
- "loss": 0.1311,
- "step": 20
- },
- {
- "epoch": 0.25,
- "grad_norm": 0.014640219509601593,
- "learning_rate": 2.521008403361345e-06,
- "loss": 0.0002,
- "step": 30
- },
- {
- "epoch": 0.3333333333333333,
- "grad_norm": 0.11976221948862076,
- "learning_rate": 3.3613445378151265e-06,
- "loss": 0.0001,
- "step": 40
- },
- {
- "epoch": 0.4166666666666667,
- "grad_norm": 0.3207981586456299,
- "learning_rate": 4.201680672268908e-06,
- "loss": 0.0008,
- "step": 50
- },
- {
- "epoch": 0.5,
- "grad_norm": 0.010227964259684086,
- "learning_rate": 5.04201680672269e-06,
- "loss": 0.0001,
- "step": 60
- },
- {
- "epoch": 0.5833333333333334,
- "grad_norm": 0.003333959961310029,
- "learning_rate": 5.882352941176471e-06,
- "loss": 0.0001,
- "step": 70
- },
- {
- "epoch": 0.6666666666666666,
- "grad_norm": 0.0037250840105116367,
- "learning_rate": 6.722689075630253e-06,
- "loss": 0.0178,
- "step": 80
- },
- {
- "epoch": 0.75,
- "grad_norm": 0.0019598035141825676,
- "learning_rate": 7.563025210084034e-06,
- "loss": 0.0004,
- "step": 90
- },
- {
- "epoch": 0.8333333333333334,
- "grad_norm": 0.005815756041556597,
- "learning_rate": 8.403361344537815e-06,
- "loss": 0.0002,
- "step": 100
- },
- {
- "epoch": 0.9166666666666666,
- "grad_norm": 0.2174970656633377,
- "learning_rate": 9.243697478991598e-06,
- "loss": 0.0001,
- "step": 110
- },
- {
- "epoch": 1.0,
- "grad_norm": 1.864820660557598e-05,
- "learning_rate": 9.917355371900828e-06,
- "loss": 0.0001,
- "step": 120
- },
- {
- "epoch": 1.0,
- "eval_accuracy": 0.8125,
- "eval_f1": 0.8079910714285715,
- "eval_f1_macro": 0.7952380952380952,
- "eval_loss": 1.5992441177368164,
- "eval_precision": 0.8254665124476757,
- "eval_precision_macro": 0.8422633472864801,
- "eval_recall": 0.8125,
- "eval_recall_macro": 0.7739716990467395,
- "eval_runtime": 88.2636,
- "eval_samples_per_second": 1.813,
- "eval_steps_per_second": 0.453,
- "step": 120
- },
- {
- "epoch": 1.0833333333333333,
- "grad_norm": 0.0010791183449327946,
- "learning_rate": 9.090909090909091e-06,
- "loss": 0.0001,
- "step": 130
- },
- {
- "epoch": 1.1666666666666667,
- "grad_norm": 0.0033589524682611227,
- "learning_rate": 8.264462809917356e-06,
- "loss": 0.0002,
- "step": 140
- },
- {
- "epoch": 1.25,
- "grad_norm": 0.07826946675777435,
- "learning_rate": 7.43801652892562e-06,
- "loss": 0.0001,
- "step": 150
- },
- {
- "epoch": 1.3333333333333333,
- "grad_norm": 0.008779647760093212,
- "learning_rate": 6.611570247933885e-06,
- "loss": 0.0003,
- "step": 160
- },
- {
- "epoch": 1.4166666666666667,
- "grad_norm": 0.007557153236120939,
- "learning_rate": 5.785123966942149e-06,
- "loss": 0.0001,
- "step": 170
- },
- {
- "epoch": 1.5,
- "grad_norm": 0.10376619547605515,
- "learning_rate": 4.958677685950414e-06,
- "loss": 0.0005,
- "step": 180
- },
- {
- "epoch": 1.5833333333333335,
- "grad_norm": 0.016951242461800575,
- "learning_rate": 4.132231404958678e-06,
- "loss": 0.0044,
- "step": 190
- },
- {
- "epoch": 1.6666666666666665,
- "grad_norm": 0.026832090690732002,
- "learning_rate": 3.3057851239669424e-06,
- "loss": 0.0001,
- "step": 200
- },
- {
- "epoch": 1.75,
- "grad_norm": 0.0011464687995612621,
- "learning_rate": 2.479338842975207e-06,
- "loss": 0.0006,
- "step": 210
- },
- {
- "epoch": 1.8333333333333335,
- "grad_norm": 0.15988384187221527,
- "learning_rate": 1.6528925619834712e-06,
- "loss": 0.0002,
- "step": 220
- },
- {
- "epoch": 1.9166666666666665,
- "grad_norm": 0.2989813983440399,
- "learning_rate": 8.264462809917356e-07,
- "loss": 0.1214,
- "step": 230
- },
- {
- "epoch": 2.0,
- "grad_norm": 5.207050435274141e-06,
- "learning_rate": 0.0,
- "loss": 0.3908,
- "step": 240
- },
- {
- "epoch": 2.0,
- "eval_accuracy": 0.83125,
- "eval_f1": 0.829041087388282,
- "eval_f1_macro": 0.8202912942734194,
- "eval_loss": 1.2841339111328125,
- "eval_precision": 0.8333425925925926,
- "eval_precision_macro": 0.8355555555555556,
- "eval_recall": 0.83125,
- "eval_recall_macro": 0.8112252751481567,
- "eval_runtime": 88.1774,
- "eval_samples_per_second": 1.815,
- "eval_steps_per_second": 0.454,
- "step": 240
- }
- ],
- "logging_steps": 10,
- "max_steps": 240,
- "num_input_tokens_seen": 0,
- "num_train_epochs": 2,
- "save_steps": 500,
- "stateful_callbacks": {
- "EarlyStoppingCallback": {
- "args": {
- "early_stopping_patience": 1,
- "early_stopping_threshold": 0.0
- },
- "attributes": {
- "early_stopping_patience_counter": 0
- }
- },
- "TrainerControl": {
- "args": {
- "should_epoch_stop": false,
- "should_evaluate": false,
- "should_log": false,
- "should_save": true,
- "should_training_stop": true
- },
- "attributes": {}
- }
- },
- "total_flos": 251010200512512.0,
- "train_batch_size": 4,
- "trial_name": null,
- "trial_params": null
- }
{checkpoint-240 → checkpoint-674}/config.json RENAMED
File without changes
{checkpoint-240 → checkpoint-674}/model.safetensors RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:aa2bc7c9841dfe824d6c70bfd3916308d9a44b7693233a545b97104d85ddd8ae
+ oid sha256:65c86576e3490028c001970eeed1a443b627250e862d0a08874ada27469ca34a
 size 439039996
{checkpoint-240 → checkpoint-674}/special_tokens_map.json RENAMED
File without changes
{checkpoint-240 → checkpoint-674}/tokenizer.json RENAMED
File without changes
{checkpoint-240 → checkpoint-674}/tokenizer_config.json RENAMED
File without changes
checkpoint-674/trainer_state.json ADDED
@@ -0,0 +1,541 @@
+ {
+ "best_metric": 0.9977731894655975,
+ "best_model_checkpoint": "hf-sentiment-production/checkpoint-674",
+ "epoch": 2.0,
+ "eval_steps": 500,
+ "global_step": 674,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02967359050445104,
+ "grad_norm": 0.33146166801452637,
+ "learning_rate": 2.9761904761904765e-07,
+ "loss": 1.2128,
+ "step": 10
+ },
+ {
+ "epoch": 0.05934718100890208,
+ "grad_norm": 12.761545181274414,
+ "learning_rate": 5.952380952380953e-07,
+ "loss": 1.3721,
+ "step": 20
+ },
+ {
+ "epoch": 0.08902077151335312,
+ "grad_norm": 9.993690490722656,
+ "learning_rate": 8.928571428571429e-07,
+ "loss": 0.7583,
+ "step": 30
+ },
+ {
+ "epoch": 0.11869436201780416,
+ "grad_norm": 48.96151351928711,
+ "learning_rate": 1.1904761904761906e-06,
+ "loss": 0.2415,
+ "step": 40
+ },
+ {
+ "epoch": 0.14836795252225518,
+ "grad_norm": 158.08262634277344,
+ "learning_rate": 1.4880952380952381e-06,
+ "loss": 0.7109,
+ "step": 50
+ },
+ {
+ "epoch": 0.17804154302670624,
+ "grad_norm": 11.123541831970215,
+ "learning_rate": 1.7857142857142859e-06,
+ "loss": 0.2779,
+ "step": 60
+ },
+ {
+ "epoch": 0.20771513353115728,
+ "grad_norm": 5.884549617767334,
+ "learning_rate": 2.0833333333333334e-06,
+ "loss": 0.2621,
+ "step": 70
+ },
+ {
+ "epoch": 0.23738872403560832,
+ "grad_norm": 87.84249877929688,
+ "learning_rate": 2.380952380952381e-06,
+ "loss": 0.7296,
+ "step": 80
+ },
+ {
+ "epoch": 0.26706231454005935,
+ "grad_norm": 0.09888105094432831,
+ "learning_rate": 2.6785714285714285e-06,
+ "loss": 0.6088,
+ "step": 90
+ },
+ {
+ "epoch": 0.29673590504451036,
+ "grad_norm": 5.604600429534912,
+ "learning_rate": 2.9761904761904763e-06,
+ "loss": 0.5594,
+ "step": 100
+ },
+ {
+ "epoch": 0.3264094955489614,
+ "grad_norm": 0.04671299830079079,
+ "learning_rate": 3.273809523809524e-06,
+ "loss": 0.2191,
+ "step": 110
+ },
+ {
+ "epoch": 0.3560830860534125,
+ "grad_norm": 2.0412070751190186,
+ "learning_rate": 3.5714285714285718e-06,
+ "loss": 0.0897,
+ "step": 120
+ },
+ {
+ "epoch": 0.3857566765578635,
+ "grad_norm": 1.162245273590088,
+ "learning_rate": 3.869047619047619e-06,
+ "loss": 0.0154,
+ "step": 130
+ },
+ {
+ "epoch": 0.41543026706231456,
+ "grad_norm": 4.64531946182251,
+ "learning_rate": 4.166666666666667e-06,
+ "loss": 0.1424,
+ "step": 140
+ },
+ {
+ "epoch": 0.44510385756676557,
+ "grad_norm": 0.0003644149692263454,
+ "learning_rate": 4.464285714285715e-06,
+ "loss": 0.0236,
+ "step": 150
+ },
+ {
+ "epoch": 0.47477744807121663,
+ "grad_norm": 0.0010449385736137629,
+ "learning_rate": 4.761904761904762e-06,
+ "loss": 0.0082,
+ "step": 160
+ },
+ {
+ "epoch": 0.5044510385756676,
+ "grad_norm": 0.0023269294761121273,
+ "learning_rate": 5.05952380952381e-06,
+ "loss": 0.0005,
+ "step": 170
+ },
+ {
+ "epoch": 0.5341246290801187,
+ "grad_norm": 19.690420150756836,
+ "learning_rate": 5.357142857142857e-06,
+ "loss": 0.0044,
+ "step": 180
+ },
+ {
+ "epoch": 0.5637982195845698,
+ "grad_norm": 5.873505592346191,
+ "learning_rate": 5.654761904761905e-06,
+ "loss": 0.002,
+ "step": 190
+ },
+ {
+ "epoch": 0.5934718100890207,
+ "grad_norm": 0.2877367436885834,
+ "learning_rate": 5.9523809523809525e-06,
+ "loss": 0.1023,
+ "step": 200
+ },
+ {
+ "epoch": 0.6231454005934718,
+ "grad_norm": 0.008003341034054756,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0003,
+ "step": 210
+ },
+ {
+ "epoch": 0.6528189910979229,
+ "grad_norm": 79.11434936523438,
+ "learning_rate": 6.547619047619048e-06,
+ "loss": 0.0245,
+ "step": 220
+ },
+ {
+ "epoch": 0.6824925816023739,
+ "grad_norm": 0.00577218271791935,
+ "learning_rate": 6.845238095238096e-06,
+ "loss": 0.0001,
+ "step": 230
+ },
+ {
+ "epoch": 0.712166172106825,
+ "grad_norm": 0.003522080834954977,
+ "learning_rate": 7.1428571428571436e-06,
+ "loss": 0.0,
+ "step": 240
+ },
+ {
+ "epoch": 0.7418397626112759,
+ "grad_norm": 0.0008736816816963255,
+ "learning_rate": 7.440476190476191e-06,
+ "loss": 0.0001,
+ "step": 250
+ },
+ {
+ "epoch": 0.771513353115727,
+ "grad_norm": 5.390814476413652e-05,
+ "learning_rate": 7.738095238095238e-06,
+ "loss": 0.0001,
+ "step": 260
+ },
+ {
+ "epoch": 0.8011869436201781,
+ "grad_norm": 0.00016854175191838294,
+ "learning_rate": 8.035714285714286e-06,
+ "loss": 0.0,
+ "step": 270
+ },
+ {
+ "epoch": 0.8308605341246291,
+ "grad_norm": 9.450274956179783e-05,
+ "learning_rate": 8.333333333333334e-06,
+ "loss": 0.0001,
+ "step": 280
+ },
+ {
+ "epoch": 0.8605341246290801,
+ "grad_norm": 7.811003888491541e-05,
+ "learning_rate": 8.630952380952381e-06,
+ "loss": 0.0001,
+ "step": 290
+ },
+ {
+ "epoch": 0.8902077151335311,
+ "grad_norm": 0.0032586443703621626,
+ "learning_rate": 8.92857142857143e-06,
+ "loss": 0.0,
+ "step": 300
+ },
+ {
+ "epoch": 0.9198813056379822,
+ "grad_norm": 0.0009132844279520214,
+ "learning_rate": 9.226190476190477e-06,
+ "loss": 0.0044,
+ "step": 310
+ },
+ {
+ "epoch": 0.9495548961424333,
+ "grad_norm": 0.0014514722861349583,
+ "learning_rate": 9.523809523809525e-06,
+ "loss": 0.0001,
+ "step": 320
+ },
+ {
+ "epoch": 0.9792284866468842,
+ "grad_norm": 0.00012150395195931196,
+ "learning_rate": 9.821428571428573e-06,
+ "loss": 0.0006,
+ "step": 330
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.9933333333333333,
+ "eval_f1": 0.9932911444421615,
+ "eval_f1_macro": 0.991480035613999,
+ "eval_loss": 0.05405780300498009,
+ "eval_precision": 0.9934188034188034,
+ "eval_precision_macro": 0.9957264957264957,
+ "eval_recall": 0.9933333333333333,
+ "eval_recall_macro": 0.9874999999999999,
+ "eval_runtime": 259.5646,
+ "eval_samples_per_second": 1.734,
+ "eval_steps_per_second": 0.435,
+ "step": 337
+ },
+ {
+ "epoch": 1.0089020771513353,
+ "grad_norm": 0.00010033950820798054,
+ "learning_rate": 9.88165680473373e-06,
+ "loss": 0.1403,
+ "step": 340
+ },
+ {
+ "epoch": 1.0385756676557865,
+ "grad_norm": 0.013481284491717815,
+ "learning_rate": 9.585798816568049e-06,
+ "loss": 0.0,
+ "step": 350
+ },
+ {
+ "epoch": 1.0682492581602374,
+ "grad_norm": 0.0009898018324747682,
+ "learning_rate": 9.289940828402368e-06,
+ "loss": 0.0,
+ "step": 360
+ },
+ {
+ "epoch": 1.0979228486646884,
+ "grad_norm": 0.00012659125786740333,
+ "learning_rate": 8.994082840236687e-06,
+ "loss": 0.0,
+ "step": 370
+ },
+ {
+ "epoch": 1.1275964391691395,
+ "grad_norm": 0.00047182640992105007,
+ "learning_rate": 8.698224852071006e-06,
+ "loss": 0.0,
+ "step": 380
+ },
+ {
+ "epoch": 1.1572700296735905,
+ "grad_norm": 0.0001335637061856687,
+ "learning_rate": 8.402366863905327e-06,
+ "loss": 0.0,
+ "step": 390
+ },
+ {
+ "epoch": 1.1869436201780414,
+ "grad_norm": 0.0002707823005039245,
+ "learning_rate": 8.106508875739646e-06,
+ "loss": 0.0,
+ "step": 400
+ },
+ {
+ "epoch": 1.2166172106824926,
+ "grad_norm": 0.00016450774273835123,
+ "learning_rate": 7.810650887573965e-06,
+ "loss": 0.0,
+ "step": 410
+ },
+ {
+ "epoch": 1.2462908011869436,
+ "grad_norm": 0.0005354972090572119,
+ "learning_rate": 7.5147928994082845e-06,
+ "loss": 0.0,
+ "step": 420
+ },
+ {
+ "epoch": 1.2759643916913945,
+ "grad_norm": 0.00011406593694118783,
+ "learning_rate": 7.218934911242604e-06,
+ "loss": 0.0,
+ "step": 430
+ },
+ {
+ "epoch": 1.3056379821958457,
+ "grad_norm": 0.16808006167411804,
+ "learning_rate": 6.923076923076923e-06,
+ "loss": 0.0001,
+ "step": 440
+ },
+ {
+ "epoch": 1.3353115727002967,
+ "grad_norm": 8.87056376086548e-05,
+ "learning_rate": 6.627218934911244e-06,
+ "loss": 0.0,
+ "step": 450
+ },
+ {
+ "epoch": 1.3649851632047478,
+ "grad_norm": 0.0005222823820076883,
+ "learning_rate": 6.331360946745563e-06,
+ "loss": 0.0,
+ "step": 460
+ },
+ {
+ "epoch": 1.3946587537091988,
+ "grad_norm": 1.3695029338123277e-05,
+ "learning_rate": 6.035502958579882e-06,
+ "loss": 0.0,
+ "step": 470
+ },
+ {
+ "epoch": 1.4243323442136497,
+ "grad_norm": 5.737605533795431e-05,
+ "learning_rate": 5.739644970414202e-06,
+ "loss": 0.0,
+ "step": 480
+ },
+ {
+ "epoch": 1.454005934718101,
+ "grad_norm": 0.005723549518734217,
+ "learning_rate": 5.443786982248521e-06,
+ "loss": 0.0,
+ "step": 490
+ },
+ {
+ "epoch": 1.4836795252225519,
+ "grad_norm": 1.2021506336168386e-05,
+ "learning_rate": 5.14792899408284e-06,
+ "loss": 0.0,
+ "step": 500
+ },
+ {
+ "epoch": 1.513353115727003,
+ "grad_norm": 4.697950862464495e-05,
+ "learning_rate": 4.85207100591716e-06,
+ "loss": 0.0,
+ "step": 510
+ },
+ {
+ "epoch": 1.543026706231454,
+ "grad_norm": 0.000708841485902667,
+ "learning_rate": 4.55621301775148e-06,
+ "loss": 0.0,
+ "step": 520
+ },
+ {
+ "epoch": 1.572700296735905,
+ "grad_norm": 1.7618485799175687e-05,
+ "learning_rate": 4.2603550295858e-06,
+ "loss": 0.0,
+ "step": 530
+ },
+ {
+ "epoch": 1.6023738872403561,
+ "grad_norm": 0.00013081534416414797,
+ "learning_rate": 3.964497041420119e-06,
+ "loss": 0.0,
+ "step": 540
+ },
+ {
+ "epoch": 1.632047477744807,
+ "grad_norm": 0.00028243096312507987,
+ "learning_rate": 3.668639053254438e-06,
+ "loss": 0.0,
+ "step": 550
+ },
+ {
+ "epoch": 1.6617210682492582,
+ "grad_norm": 0.0002703359059523791,
+ "learning_rate": 3.3727810650887576e-06,
+ "loss": 0.0,
+ "step": 560
+ },
+ {
+ "epoch": 1.6913946587537092,
+ "grad_norm": 2.5464911232120357e-05,
+ "learning_rate": 3.0769230769230774e-06,
+ "loss": 0.0,
+ "step": 570
+ },
+ {
+ "epoch": 1.7210682492581602,
+ "grad_norm": 2.6416844775667414e-05,
+ "learning_rate": 2.7810650887573965e-06,
+ "loss": 0.0,
+ "step": 580
+ },
+ {
+ "epoch": 1.7507418397626113,
+ "grad_norm": 0.012607904151082039,
+ "learning_rate": 2.485207100591716e-06,
+ "loss": 0.051,
+ "step": 590
+ },
+ {
+ "epoch": 1.7804154302670623,
+ "grad_norm": 0.0064520263113081455,
+ "learning_rate": 2.1893491124260358e-06,
+ "loss": 0.0001,
+ "step": 600
+ },
+ {
+ "epoch": 1.8100890207715135,
+ "grad_norm": 0.00013779209984932095,
+ "learning_rate": 1.8934911242603552e-06,
+ "loss": 0.0,
+ "step": 610
+ },
+ {
+ "epoch": 1.8397626112759644,
+ "grad_norm": 0.00019228862947784364,
+ "learning_rate": 1.5976331360946749e-06,
+ "loss": 0.0,
+ "step": 620
+ },
+ {
+ "epoch": 1.8694362017804154,
+ "grad_norm": 3.4337310353294015e-05,
+ "learning_rate": 1.301775147928994e-06,
+ "loss": 0.0,
+ "step": 630
+ },
+ {
+ "epoch": 1.8991097922848663,
+ "grad_norm": 0.000257375038927421,
+ "learning_rate": 1.0059171597633138e-06,
+ "loss": 0.0,
+ "step": 640
+ },
+ {
+ "epoch": 1.9287833827893175,
+ "grad_norm": 0.0015161657938733697,
+ "learning_rate": 7.100591715976332e-07,
+ "loss": 0.001,
+ "step": 650
+ },
+ {
+ "epoch": 1.9584569732937687,
+ "grad_norm": 0.0051199602894485,
+ "learning_rate": 4.1420118343195276e-07,
+ "loss": 0.0,
+ "step": 660
+ },
+ {
+ "epoch": 1.9881305637982196,
+ "grad_norm": 0.0002789547143038362,
+ "learning_rate": 1.183431952662722e-07,
+ "loss": 0.0,
+ "step": 670
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.9977777777777778,
+ "eval_f1": 0.9977731894655975,
+ "eval_f1_macro": 0.9971836215366922,
+ "eval_loss": 0.01406792365014553,
+ "eval_precision": 0.997787356321839,
+ "eval_precision_macro": 0.9985632183908045,
+ "eval_recall": 0.9977777777777778,
+ "eval_recall_macro": 0.9958333333333332,
+ "eval_runtime": 251.8116,
+ "eval_samples_per_second": 1.787,
+ "eval_steps_per_second": 0.449,
+ "step": 674
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 674,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 1,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 708827547359232.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
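The README metrics in the first diff above (Loss 0.0141, Accuracy 0.9978, F1 0.9978) appear to be rounded from the final `eval_*` entry of this `trainer_state.json`. A minimal sketch of pulling those numbers out of the state file, assuming it has been downloaded locally to `checkpoint-674/trainer_state.json`:

```python
# Sketch only: read a Hugging Face Trainer state file and print the tracked
# best metric plus the last evaluation record. The local path is an assumption.
import json
from pathlib import Path

state = json.loads(Path("checkpoint-674/trainer_state.json").read_text())

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# log_history mixes training and evaluation logs; eval entries carry eval_* keys.
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]
final_eval = eval_logs[-1]
for key in ("eval_loss", "eval_accuracy", "eval_f1", "eval_f1_macro",
            "eval_precision", "eval_recall"):
    print(f"{key}: {final_eval[key]:.4f}")
```

Rounded to four decimals, these values match the table written into the README by the auto-generation step.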
{checkpoint-240 → checkpoint-674}/vocab.txt RENAMED
File without changes
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:aa2bc7c9841dfe824d6c70bfd3916308d9a44b7693233a545b97104d85ddd8ae
+ oid sha256:65c86576e3490028c001970eeed1a443b627250e862d0a08874ada27469ca34a
 size 439039996
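The `model.safetensors` and `training_args.bin` entries are Git LFS pointer files: only the `oid`/`size` lines change when the underlying binary changes. A minimal sketch, assuming a locally downloaded `model.safetensors`, of checking the file against the new pointer:

```python
# Sketch only: verify a downloaded file against the oid/size recorded in its
# Git LFS pointer. The local filename is an assumption.
import hashlib

EXPECTED_OID = "65c86576e3490028c001970eeed1a443b627250e862d0a08874ada27469ca34a"
EXPECTED_SIZE = 439039996

digest = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        digest.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```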
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:76b8cf3932f11a5314ed3d8475b3e07f076b574cf6ed23ea9ee7b8a1833b39a3
+ oid sha256:ee8d368ce496fb886ffa4923127a0a81a3a9f151c0978b2be2437bacdbc4f72a
 size 5521