{
  "best_metric": 1.5942710638046265,
  "best_model_checkpoint": "./output/training_results/C013_llama3-8b-base_pretrain_20240428_005832/checkpoint-65",
  "epoch": 4.0,
  "eval_steps": 5,
  "global_step": 268,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014925373134328358,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 1.7594,
      "step": 1
    },
    {
      "epoch": 0.014925373134328358,
      "eval_loss": 1.7162834405899048,
      "eval_runtime": 5.9233,
      "eval_samples_per_second": 80.36,
      "eval_steps_per_second": 0.675,
      "step": 1
    },
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 3.233517003168196,
      "learning_rate": 2.25e-06,
      "loss": 1.7333,
      "step": 5
    },
    {
      "epoch": 0.07462686567164178,
      "eval_loss": 1.7008264064788818,
      "eval_runtime": 5.9309,
      "eval_samples_per_second": 80.257,
      "eval_steps_per_second": 0.674,
      "step": 5
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 3.423591914627149,
      "learning_rate": 6e-06,
      "loss": 1.6854,
      "step": 10
    },
    {
      "epoch": 0.14925373134328357,
      "eval_loss": 1.682459831237793,
      "eval_runtime": 5.9184,
      "eval_samples_per_second": 80.427,
      "eval_steps_per_second": 0.676,
      "step": 10
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 2.75139227215463,
      "learning_rate": 9.75e-06,
      "loss": 1.6897,
      "step": 15
    },
    {
      "epoch": 0.22388059701492538,
      "eval_loss": 1.670121192932129,
      "eval_runtime": 5.9473,
      "eval_samples_per_second": 80.036,
      "eval_steps_per_second": 0.673,
      "step": 15
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 2.55059243594346,
      "learning_rate": 1.3500000000000001e-05,
      "loss": 1.6656,
      "step": 20
    },
    {
      "epoch": 0.29850746268656714,
      "eval_loss": 1.6651127338409424,
      "eval_runtime": 5.942,
      "eval_samples_per_second": 80.108,
      "eval_steps_per_second": 0.673,
      "step": 20
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 2.7488276843880652,
      "learning_rate": 1.3725993373990923e-05,
      "loss": 1.7254,
      "step": 25
    },
    {
      "epoch": 0.373134328358209,
      "eval_loss": 1.6679344177246094,
      "eval_runtime": 5.9726,
      "eval_samples_per_second": 79.697,
      "eval_steps_per_second": 0.67,
      "step": 25
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 3.497477848836299,
      "learning_rate": 1.096088986215196e-05,
      "loss": 1.7178,
      "step": 30
    },
    {
      "epoch": 0.44776119402985076,
      "eval_loss": 1.6541944742202759,
      "eval_runtime": 5.953,
      "eval_samples_per_second": 79.96,
      "eval_steps_per_second": 0.672,
      "step": 30
    },
    {
      "epoch": 0.5223880597014925,
      "grad_norm": 2.7958991196951666,
      "learning_rate": 8.713726547724396e-06,
      "loss": 1.6656,
      "step": 35
    },
    {
      "epoch": 0.5223880597014925,
      "eval_loss": 1.6459277868270874,
      "eval_runtime": 5.9488,
      "eval_samples_per_second": 80.016,
      "eval_steps_per_second": 0.672,
      "step": 35
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 2.2395806127723907,
      "learning_rate": 6.8954890860223175e-06,
      "loss": 1.6647,
      "step": 40
    },
    {
      "epoch": 0.5970149253731343,
      "eval_loss": 1.6308344602584839,
      "eval_runtime": 5.9441,
      "eval_samples_per_second": 80.08,
      "eval_steps_per_second": 0.673,
      "step": 40
    },
    {
      "epoch": 0.6716417910447762,
      "grad_norm": 2.542267170825853,
      "learning_rate": 5.431031497828169e-06,
      "loss": 1.6645,
      "step": 45
    },
    {
      "epoch": 0.6716417910447762,
      "eval_loss": 1.6204748153686523,
      "eval_runtime": 5.9555,
      "eval_samples_per_second": 79.927,
      "eval_steps_per_second": 0.672,
      "step": 45
    },
    {
      "epoch": 0.746268656716418,
      "grad_norm": 2.796577480411167,
      "learning_rate": 4.257147264863097e-06,
      "loss": 1.6151,
      "step": 50
    },
    {
      "epoch": 0.746268656716418,
      "eval_loss": 1.6129424571990967,
      "eval_runtime": 5.9792,
      "eval_samples_per_second": 79.609,
      "eval_steps_per_second": 0.669,
      "step": 50
    },
    {
      "epoch": 0.8208955223880597,
      "grad_norm": 2.8952316281119757,
      "learning_rate": 3.320875141743125e-06,
      "loss": 1.6359,
      "step": 55
    },
    {
      "epoch": 0.8208955223880597,
      "eval_loss": 1.6052401065826416,
      "eval_runtime": 5.9501,
      "eval_samples_per_second": 79.998,
      "eval_steps_per_second": 0.672,
      "step": 55
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 2.455450911279608,
      "learning_rate": 2.57801538033309e-06,
      "loss": 1.5885,
      "step": 60
    },
    {
      "epoch": 0.8955223880597015,
      "eval_loss": 1.5995320081710815,
      "eval_runtime": 5.9574,
      "eval_samples_per_second": 79.9,
      "eval_steps_per_second": 0.671,
      "step": 60
    },
    {
      "epoch": 0.9701492537313433,
      "grad_norm": 1.8236330059926131,
      "learning_rate": 1.9918341191886553e-06,
      "loss": 1.6142,
      "step": 65
    },
    {
      "epoch": 0.9701492537313433,
      "eval_loss": 1.5942710638046265,
      "eval_runtime": 5.9491,
      "eval_samples_per_second": 80.013,
      "eval_steps_per_second": 0.672,
      "step": 65
    },
    {
      "epoch": 1.044776119402985,
      "grad_norm": 2.5122250154998893,
      "learning_rate": 1.5319356141270557e-06,
      "loss": 1.4875,
      "step": 70
    },
    {
      "epoch": 1.044776119402985,
      "eval_loss": 1.5963352918624878,
      "eval_runtime": 5.9465,
      "eval_samples_per_second": 80.047,
      "eval_steps_per_second": 0.673,
      "step": 70
    },
    {
      "epoch": 1.1194029850746268,
      "grad_norm": 2.2374216929111017,
      "learning_rate": 1.173283781790582e-06,
      "loss": 1.3844,
      "step": 75
    },
    {
      "epoch": 1.1194029850746268,
      "eval_loss": 1.611823558807373,
      "eval_runtime": 5.9845,
      "eval_samples_per_second": 79.538,
      "eval_steps_per_second": 0.668,
      "step": 75
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 2.061088772790073,
      "learning_rate": 8.95356202130823e-07,
      "loss": 1.3555,
      "step": 80
    },
    {
      "epoch": 1.1940298507462686,
      "eval_loss": 1.6069068908691406,
      "eval_runtime": 5.9508,
      "eval_samples_per_second": 79.99,
      "eval_steps_per_second": 0.672,
      "step": 80
    },
    {
      "epoch": 1.2686567164179103,
      "grad_norm": 2.0165800449063878,
      "learning_rate": 6.814152836799623e-07,
      "loss": 1.3597,
      "step": 85
    },
    {
      "epoch": 1.2686567164179103,
      "eval_loss": 1.6040061712265015,
      "eval_runtime": 5.9381,
      "eval_samples_per_second": 80.161,
      "eval_steps_per_second": 0.674,
      "step": 85
    },
    {
      "epoch": 1.3432835820895521,
      "grad_norm": 2.0046777857294593,
      "learning_rate": 5.178827427887253e-07,
      "loss": 1.3737,
      "step": 90
    },
    {
      "epoch": 1.3432835820895521,
      "eval_loss": 1.607108235359192,
      "eval_runtime": 5.938,
      "eval_samples_per_second": 80.161,
      "eval_steps_per_second": 0.674,
      "step": 90
    },
    {
      "epoch": 1.417910447761194,
      "grad_norm": 1.9599395887640554,
      "learning_rate": 3.938048900654646e-07,
      "loss": 1.3492,
      "step": 95
    },
    {
      "epoch": 1.417910447761194,
      "eval_loss": 1.607422113418579,
      "eval_runtime": 5.9586,
      "eval_samples_per_second": 79.884,
      "eval_steps_per_second": 0.671,
      "step": 95
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 2.090141489085601,
      "learning_rate": 3.0039745928104856e-07,
      "loss": 1.3826,
      "step": 100
    },
    {
      "epoch": 1.4925373134328357,
      "eval_loss": 1.6055359840393066,
      "eval_runtime": 5.9265,
      "eval_samples_per_second": 80.317,
      "eval_steps_per_second": 0.675,
      "step": 100
    },
    {
      "epoch": 1.5671641791044775,
      "grad_norm": 1.9332970593875183,
      "learning_rate": 2.3065986110942366e-07,
      "loss": 1.3533,
      "step": 105
    },
    {
      "epoch": 1.5671641791044775,
      "eval_loss": 1.6034619808197021,
      "eval_runtime": 5.9297,
      "eval_samples_per_second": 80.273,
      "eval_steps_per_second": 0.675,
      "step": 105
    },
    {
      "epoch": 1.6417910447761193,
      "grad_norm": 1.942043821463108,
      "learning_rate": 1.790498012203728e-07,
      "loss": 1.3611,
      "step": 110
    },
    {
      "epoch": 1.6417910447761193,
      "eval_loss": 1.6023228168487549,
      "eval_runtime": 5.9407,
      "eval_samples_per_second": 80.126,
      "eval_steps_per_second": 0.673,
      "step": 110
    },
    {
      "epoch": 1.716417910447761,
      "grad_norm": 1.9221561505361804,
      "learning_rate": 1.412101742620585e-07,
      "loss": 1.328,
      "step": 115
    },
    {
      "epoch": 1.716417910447761,
      "eval_loss": 1.602164387702942,
      "eval_runtime": 5.9392,
      "eval_samples_per_second": 80.145,
      "eval_steps_per_second": 0.673,
      "step": 115
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 1.8559815029587312,
      "learning_rate": 1.1374103686567517e-07,
      "loss": 1.3443,
      "step": 120
    },
    {
      "epoch": 1.7910447761194028,
      "eval_loss": 1.602574348449707,
      "eval_runtime": 5.9713,
      "eval_samples_per_second": 79.715,
      "eval_steps_per_second": 0.67,
      "step": 120
    },
    {
      "epoch": 1.8656716417910446,
      "grad_norm": 2.031347067054233,
      "learning_rate": 9.401027853897119e-08,
      "loss": 1.3386,
      "step": 125
    },
    {
      "epoch": 1.8656716417910446,
      "eval_loss": 1.602864146232605,
      "eval_runtime": 5.956,
      "eval_samples_per_second": 79.919,
      "eval_steps_per_second": 0.672,
      "step": 125
    },
    {
      "epoch": 1.9402985074626866,
      "grad_norm": 1.9772043505318835,
      "learning_rate": 7.999735362178058e-08,
      "loss": 1.3396,
      "step": 130
    },
    {
      "epoch": 1.9402985074626866,
      "eval_loss": 1.6029260158538818,
      "eval_runtime": 5.955,
      "eval_samples_per_second": 79.933,
      "eval_steps_per_second": 0.672,
      "step": 130
    },
    {
      "epoch": 2.014925373134328,
      "grad_norm": 2.0413931319541203,
      "learning_rate": 7.016511465398424e-08,
      "loss": 1.3573,
      "step": 135
    },
    {
      "epoch": 2.014925373134328,
      "eval_loss": 1.6028627157211304,
      "eval_runtime": 5.974,
      "eval_samples_per_second": 79.679,
      "eval_steps_per_second": 0.67,
      "step": 135
    },
    {
      "epoch": 2.08955223880597,
      "grad_norm": 1.9174789145055136,
      "learning_rate": 6.335540171983123e-08,
      "loss": 1.3754,
      "step": 140
    },
    {
      "epoch": 2.08955223880597,
      "eval_loss": 1.6034482717514038,
      "eval_runtime": 5.9455,
      "eval_samples_per_second": 80.061,
      "eval_steps_per_second": 0.673,
      "step": 140
    },
    {
      "epoch": 2.1641791044776117,
      "grad_norm": 1.8365955877771794,
      "learning_rate": 5.870459761414777e-08,
      "loss": 1.3229,
      "step": 145
    },
    {
      "epoch": 2.1641791044776117,
      "eval_loss": 1.6044023036956787,
      "eval_runtime": 5.9418,
      "eval_samples_per_second": 80.11,
      "eval_steps_per_second": 0.673,
      "step": 145
    },
    {
      "epoch": 2.2388059701492535,
      "grad_norm": 1.957661723231933,
      "learning_rate": 5.5575858924021805e-08,
      "loss": 1.3194,
      "step": 150
    },
    {
      "epoch": 2.2388059701492535,
      "eval_loss": 1.60551917552948,
      "eval_runtime": 5.9349,
      "eval_samples_per_second": 80.204,
      "eval_steps_per_second": 0.674,
      "step": 150
    },
    {
      "epoch": 2.3134328358208958,
      "grad_norm": 1.910367302653755,
      "learning_rate": 5.3505182098899865e-08,
      "loss": 1.3361,
      "step": 155
    },
    {
      "epoch": 2.3134328358208958,
      "eval_loss": 1.6064879894256592,
      "eval_runtime": 5.9543,
      "eval_samples_per_second": 79.942,
      "eval_steps_per_second": 0.672,
      "step": 155
    },
    {
      "epoch": 2.388059701492537,
      "grad_norm": 1.978379692085088,
      "learning_rate": 5.2158864924030916e-08,
      "loss": 1.3231,
      "step": 160
    },
    {
      "epoch": 2.388059701492537,
      "eval_loss": 1.60716712474823,
      "eval_runtime": 5.959,
      "eval_samples_per_second": 79.879,
      "eval_steps_per_second": 0.671,
      "step": 160
    },
    {
      "epoch": 2.4626865671641793,
      "grad_norm": 1.8179876847012644,
      "learning_rate": 5.130028101466846e-08,
      "loss": 1.32,
      "step": 165
    },
    {
      "epoch": 2.4626865671641793,
      "eval_loss": 1.6076022386550903,
      "eval_runtime": 5.918,
      "eval_samples_per_second": 80.433,
      "eval_steps_per_second": 0.676,
      "step": 165
    },
    {
      "epoch": 2.5373134328358207,
      "grad_norm": 1.947999010708536,
      "learning_rate": 5.076420137573488e-08,
      "loss": 1.3406,
      "step": 170
    },
    {
      "epoch": 2.5373134328358207,
      "eval_loss": 1.6078119277954102,
      "eval_runtime": 5.9402,
      "eval_samples_per_second": 80.132,
      "eval_steps_per_second": 0.673,
      "step": 170
    },
    {
      "epoch": 2.611940298507463,
      "grad_norm": 1.9301332893200993,
      "learning_rate": 5.0437175954586924e-08,
      "loss": 1.3184,
      "step": 175
    },
    {
      "epoch": 2.611940298507463,
      "eval_loss": 1.6078969240188599,
      "eval_runtime": 5.9794,
      "eval_samples_per_second": 79.607,
      "eval_steps_per_second": 0.669,
      "step": 175
    },
    {
      "epoch": 2.6865671641791042,
      "grad_norm": 1.8025714970137283,
      "learning_rate": 5.024273255042209e-08,
      "loss": 1.2745,
      "step": 180
    },
    {
      "epoch": 2.6865671641791042,
      "eval_loss": 1.607950210571289,
      "eval_runtime": 5.9775,
      "eval_samples_per_second": 79.632,
      "eval_steps_per_second": 0.669,
      "step": 180
    },
    {
      "epoch": 2.7611940298507465,
      "grad_norm": 1.8246857333350315,
      "learning_rate": 5.013036339654868e-08,
      "loss": 1.3024,
      "step": 185
    },
    {
      "epoch": 2.7611940298507465,
      "eval_loss": 1.607932448387146,
      "eval_runtime": 5.9386,
      "eval_samples_per_second": 80.154,
      "eval_steps_per_second": 0.674,
      "step": 185
    },
    {
      "epoch": 2.835820895522388,
      "grad_norm": 1.8544541968240376,
      "learning_rate": 5.006745403152126e-08,
      "loss": 1.3243,
      "step": 190
    },
    {
      "epoch": 2.835820895522388,
      "eval_loss": 1.6079127788543701,
      "eval_runtime": 5.9362,
      "eval_samples_per_second": 80.186,
      "eval_steps_per_second": 0.674,
      "step": 190
    },
    {
      "epoch": 2.91044776119403,
      "grad_norm": 1.9345137440066773,
      "learning_rate": 5.0033467418823265e-08,
      "loss": 1.3239,
      "step": 195
    },
    {
      "epoch": 2.91044776119403,
      "eval_loss": 1.6080163717269897,
      "eval_runtime": 5.9581,
      "eval_samples_per_second": 79.892,
      "eval_steps_per_second": 0.671,
      "step": 195
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 2.033807758057437,
      "learning_rate": 5.001583122566221e-08,
      "loss": 1.3349,
      "step": 200
    },
    {
      "epoch": 2.9850746268656714,
      "eval_loss": 1.6080641746520996,
      "eval_runtime": 5.9287,
      "eval_samples_per_second": 80.287,
      "eval_steps_per_second": 0.675,
      "step": 200
    },
    {
      "epoch": 3.0597014925373136,
      "grad_norm": 1.923714106710108,
      "learning_rate": 5.0007090159326376e-08,
      "loss": 1.337,
      "step": 205
    },
    {
      "epoch": 3.0597014925373136,
      "eval_loss": 1.6079014539718628,
      "eval_runtime": 5.9355,
      "eval_samples_per_second": 80.195,
      "eval_steps_per_second": 0.674,
      "step": 205
    },
    {
      "epoch": 3.1343283582089554,
      "grad_norm": 1.8059505650037335,
      "learning_rate": 5.000298058072559e-08,
      "loss": 1.3091,
      "step": 210
    },
    {
      "epoch": 3.1343283582089554,
      "eval_loss": 1.6077641248703003,
      "eval_runtime": 5.9641,
      "eval_samples_per_second": 79.811,
      "eval_steps_per_second": 0.671,
      "step": 210
    },
    {
      "epoch": 3.208955223880597,
      "grad_norm": 1.894523950936326,
      "learning_rate": 5.0001163431988796e-08,
      "loss": 1.3266,
      "step": 215
    },
    {
      "epoch": 3.208955223880597,
      "eval_loss": 1.60787832736969,
      "eval_runtime": 5.9618,
      "eval_samples_per_second": 79.841,
      "eval_steps_per_second": 0.671,
      "step": 215
    },
    {
      "epoch": 3.283582089552239,
      "grad_norm": 1.8919183390013388,
      "learning_rate": 5.0000415857597904e-08,
      "loss": 1.3014,
      "step": 220
    },
    {
      "epoch": 3.283582089552239,
      "eval_loss": 1.6083022356033325,
      "eval_runtime": 5.9802,
      "eval_samples_per_second": 79.596,
      "eval_steps_per_second": 0.669,
      "step": 220
    },
    {
      "epoch": 3.3582089552238807,
      "grad_norm": 1.8867080087928592,
      "learning_rate": 5.0000133662276014e-08,
      "loss": 1.3153,
      "step": 225
    },
    {
      "epoch": 3.3582089552238807,
      "eval_loss": 1.608628273010254,
      "eval_runtime": 5.9587,
      "eval_samples_per_second": 79.883,
      "eval_steps_per_second": 0.671,
      "step": 225
    },
    {
      "epoch": 3.4328358208955225,
      "grad_norm": 2.000197758434265,
      "learning_rate": 5.0000037695987735e-08,
      "loss": 1.3192,
      "step": 230
    },
    {
      "epoch": 3.4328358208955225,
      "eval_loss": 1.6089787483215332,
      "eval_runtime": 5.931,
      "eval_samples_per_second": 80.256,
      "eval_steps_per_second": 0.674,
      "step": 230
    },
    {
      "epoch": 3.5074626865671643,
      "grad_norm": 1.8236033935396483,
      "learning_rate": 5.0000009015715774e-08,
      "loss": 1.315,
      "step": 235
    },
    {
      "epoch": 3.5074626865671643,
      "eval_loss": 1.6092561483383179,
      "eval_runtime": 5.9999,
      "eval_samples_per_second": 79.335,
      "eval_steps_per_second": 0.667,
      "step": 235
    },
    {
      "epoch": 3.582089552238806,
      "grad_norm": 1.9175089422860454,
      "learning_rate": 5.0000001740409057e-08,
      "loss": 1.3047,
      "step": 240
    },
    {
      "epoch": 3.582089552238806,
      "eval_loss": 1.6093214750289917,
      "eval_runtime": 5.9889,
      "eval_samples_per_second": 79.481,
      "eval_steps_per_second": 0.668,
      "step": 240
    },
    {
      "epoch": 3.656716417910448,
      "grad_norm": 1.8702465201892255,
      "learning_rate": 5.000000025140825e-08,
      "loss": 1.3208,
      "step": 245
    },
    {
      "epoch": 3.656716417910448,
      "eval_loss": 1.609329342842102,
      "eval_runtime": 5.9315,
      "eval_samples_per_second": 80.25,
      "eval_steps_per_second": 0.674,
      "step": 245
    },
    {
      "epoch": 3.7313432835820897,
      "grad_norm": 1.9567098448807434,
      "learning_rate": 5.000000002399302e-08,
      "loss": 1.362,
      "step": 250
    },
    {
      "epoch": 3.7313432835820897,
      "eval_loss": 1.609269380569458,
      "eval_runtime": 5.9582,
      "eval_samples_per_second": 79.89,
      "eval_steps_per_second": 0.671,
      "step": 250
    },
    {
      "epoch": 3.8059701492537314,
      "grad_norm": 1.8742551595721921,
      "learning_rate": 5.0000000001205015e-08,
      "loss": 1.3255,
      "step": 255
    },
    {
      "epoch": 3.8059701492537314,
      "eval_loss": 1.609143853187561,
      "eval_runtime": 5.9335,
      "eval_samples_per_second": 80.223,
      "eval_steps_per_second": 0.674,
      "step": 255
    },
    {
      "epoch": 3.8805970149253732,
      "grad_norm": 1.9100964620707728,
      "learning_rate": 5.000000000001954e-08,
      "loss": 1.2941,
      "step": 260
    },
    {
      "epoch": 3.8805970149253732,
      "eval_loss": 1.608902096748352,
      "eval_runtime": 5.9228,
      "eval_samples_per_second": 80.368,
      "eval_steps_per_second": 0.675,
      "step": 260
    },
    {
      "epoch": 3.955223880597015,
      "grad_norm": 1.959684298255238,
      "learning_rate": 5.0000000000000024e-08,
      "loss": 1.3254,
      "step": 265
    },
    {
      "epoch": 3.955223880597015,
      "eval_loss": 1.6086210012435913,
      "eval_runtime": 5.9667,
      "eval_samples_per_second": 79.776,
      "eval_steps_per_second": 0.67,
      "step": 265
    },
    {
      "epoch": 4.0,
      "step": 268,
      "total_flos": 27742804377600.0,
      "train_loss": 1.416380336925165,
      "train_runtime": 8905.9803,
      "train_samples_per_second": 1.923,
      "train_steps_per_second": 0.03
    }
  ],
  "logging_steps": 5,
  "max_steps": 268,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 5,
  "total_flos": 27742804377600.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}