{
"best_metric": 1.0,
"best_model_checkpoint": "hf-sentiment-production/checkpoint-337",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 337,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02967359050445104,
"grad_norm": 0.0003316526999697089,
"learning_rate": 2.9761904761904765e-07,
"loss": 0.0003,
"step": 10
},
{
"epoch": 0.05934718100890208,
"grad_norm": 0.0005845068953931332,
"learning_rate": 5.952380952380953e-07,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.08902077151335312,
"grad_norm": 6.946044595679268e-05,
"learning_rate": 8.928571428571429e-07,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.11869436201780416,
"grad_norm": 0.00013723627489525825,
"learning_rate": 1.1904761904761906e-06,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.14836795252225518,
"grad_norm": 5.739806510973722e-05,
"learning_rate": 1.4880952380952381e-06,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.17804154302670624,
"grad_norm": 7.3791602517303545e-06,
"learning_rate": 1.7857142857142859e-06,
"loss": 0.0004,
"step": 60
},
{
"epoch": 0.20771513353115728,
"grad_norm": 1.4184925021254458e-05,
"learning_rate": 2.0833333333333334e-06,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.23738872403560832,
"grad_norm": 1.2190222378194449e-06,
"learning_rate": 2.380952380952381e-06,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.26706231454005935,
"grad_norm": 1.4691991964355111e-05,
"learning_rate": 2.6785714285714285e-06,
"loss": 0.0,
"step": 90
},
{
"epoch": 0.29673590504451036,
"grad_norm": 6.091187606216408e-05,
"learning_rate": 2.9761904761904763e-06,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.3264094955489614,
"grad_norm": 1.0290184945915826e-05,
"learning_rate": 3.273809523809524e-06,
"loss": 0.0,
"step": 110
},
{
"epoch": 0.3560830860534125,
"grad_norm": 5.887858060305007e-05,
"learning_rate": 3.5714285714285718e-06,
"loss": 0.0,
"step": 120
},
{
"epoch": 0.3857566765578635,
"grad_norm": 0.00019515887834131718,
"learning_rate": 3.869047619047619e-06,
"loss": 0.0,
"step": 130
},
{
"epoch": 0.41543026706231456,
"grad_norm": 7.377419478871161e-06,
"learning_rate": 4.166666666666667e-06,
"loss": 0.0,
"step": 140
},
{
"epoch": 0.44510385756676557,
"grad_norm": 7.4168701758026145e-06,
"learning_rate": 4.464285714285715e-06,
"loss": 0.0,
"step": 150
},
{
"epoch": 0.47477744807121663,
"grad_norm": 0.00012300463276915252,
"learning_rate": 4.761904761904762e-06,
"loss": 0.0,
"step": 160
},
{
"epoch": 0.5044510385756676,
"grad_norm": 1.722447996144183e-05,
"learning_rate": 5.05952380952381e-06,
"loss": 0.0,
"step": 170
},
{
"epoch": 0.5341246290801187,
"grad_norm": 0.0006671595619991422,
"learning_rate": 5.357142857142857e-06,
"loss": 0.0,
"step": 180
},
{
"epoch": 0.5637982195845698,
"grad_norm": 2.224778290838003e-05,
"learning_rate": 5.654761904761905e-06,
"loss": 0.0,
"step": 190
},
{
"epoch": 0.5934718100890207,
"grad_norm": 0.00016540104115847498,
"learning_rate": 5.9523809523809525e-06,
"loss": 0.0,
"step": 200
},
{
"epoch": 0.6231454005934718,
"grad_norm": 0.00019113092275802046,
"learning_rate": 6.25e-06,
"loss": 0.0,
"step": 210
},
{
"epoch": 0.6528189910979229,
"grad_norm": 0.2549625039100647,
"learning_rate": 6.547619047619048e-06,
"loss": 0.0,
"step": 220
},
{
"epoch": 0.6824925816023739,
"grad_norm": 5.711175253964029e-05,
"learning_rate": 6.845238095238096e-06,
"loss": 0.0,
"step": 230
},
{
"epoch": 0.712166172106825,
"grad_norm": 6.6464672272559255e-06,
"learning_rate": 7.1428571428571436e-06,
"loss": 0.0,
"step": 240
},
{
"epoch": 0.7418397626112759,
"grad_norm": 1.1021610589523334e-05,
"learning_rate": 7.440476190476191e-06,
"loss": 0.0,
"step": 250
},
{
"epoch": 0.771513353115727,
"grad_norm": 4.050310451475525e-07,
"learning_rate": 7.738095238095238e-06,
"loss": 0.0,
"step": 260
},
{
"epoch": 0.8011869436201781,
"grad_norm": 5.8233334129909053e-05,
"learning_rate": 8.035714285714286e-06,
"loss": 0.0,
"step": 270
},
{
"epoch": 0.8308605341246291,
"grad_norm": 1.0179860510106664e-05,
"learning_rate": 8.333333333333334e-06,
"loss": 0.0,
"step": 280
},
{
"epoch": 0.8605341246290801,
"grad_norm": 7.523889962612884e-06,
"learning_rate": 8.630952380952381e-06,
"loss": 0.0,
"step": 290
},
{
"epoch": 0.8902077151335311,
"grad_norm": 9.157076419796795e-06,
"learning_rate": 8.92857142857143e-06,
"loss": 0.0,
"step": 300
},
{
"epoch": 0.9198813056379822,
"grad_norm": 8.59304127516225e-05,
"learning_rate": 9.226190476190477e-06,
"loss": 0.0,
"step": 310
},
{
"epoch": 0.9495548961424333,
"grad_norm": 4.281292262930947e-07,
"learning_rate": 9.523809523809525e-06,
"loss": 0.0,
"step": 320
},
{
"epoch": 0.9792284866468842,
"grad_norm": 7.248609108501114e-06,
"learning_rate": 9.821428571428573e-06,
"loss": 0.0,
"step": 330
},
{
"epoch": 1.0,
"eval_accuracy": 1.0,
"eval_f1": 1.0,
"eval_f1_macro": 1.0,
"eval_loss": 0.00040641959640197456,
"eval_precision": 1.0,
"eval_precision_macro": 1.0,
"eval_recall": 1.0,
"eval_recall_macro": 1.0,
"eval_runtime": 248.5056,
"eval_samples_per_second": 1.811,
"eval_steps_per_second": 0.455,
"step": 337
}
],
"logging_steps": 10,
"max_steps": 674,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 354413773679616.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}