{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9083181542197936,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.8e-07,
      "loss": 0.692,
      "step": 10
    },
    {
      "epoch": 0.01,
      "learning_rate": 3.8e-07,
      "loss": 0.6931,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.800000000000001e-07,
      "loss": 0.6938,
      "step": 30
    },
    {
      "epoch": 0.02,
      "learning_rate": 7.8e-07,
      "loss": 0.6928,
      "step": 40
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.800000000000001e-07,
      "loss": 0.6936,
      "step": 50
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.1800000000000001e-06,
      "loss": 0.6944,
      "step": 60
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.3800000000000001e-06,
      "loss": 0.6936,
      "step": 70
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.5800000000000001e-06,
      "loss": 0.6926,
      "step": 80
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.7800000000000001e-06,
      "loss": 0.6921,
      "step": 90
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.98e-06,
      "loss": 0.6921,
      "step": 100
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.1800000000000003e-06,
      "loss": 0.6924,
      "step": 110
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.38e-06,
      "loss": 0.6924,
      "step": 120
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.5800000000000003e-06,
      "loss": 0.6902,
      "step": 130
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.7800000000000005e-06,
      "loss": 0.6914,
      "step": 140
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.9800000000000003e-06,
      "loss": 0.6907,
      "step": 150
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.1800000000000005e-06,
      "loss": 0.6893,
      "step": 160
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.3800000000000007e-06,
      "loss": 0.6867,
      "step": 170
    },
    {
      "epoch": 0.09,
      "learning_rate": 3.58e-06,
      "loss": 0.683,
      "step": 180
    },
    {
      "epoch": 0.09,
      "learning_rate": 3.7800000000000002e-06,
      "loss": 0.677,
      "step": 190
    },
    {
      "epoch": 0.1,
      "learning_rate": 3.980000000000001e-06,
      "loss": 0.6627,
      "step": 200
    },
    {
      "epoch": 0.1,
      "eval_webgpt_accuracy": 0.5160878447395302,
      "eval_webgpt_loss": 0.69077467918396,
      "eval_webgpt_runtime": 62.8216,
      "eval_webgpt_samples_per_second": 62.335,
      "eval_webgpt_steps_per_second": 6.24,
      "step": 200
    },
    {
      "epoch": 0.1,
      "eval_hfsummary_accuracy": 0.6221624399238279,
      "eval_hfsummary_loss": 0.6861704587936401,
      "eval_hfsummary_runtime": 1239.8418,
      "eval_hfsummary_samples_per_second": 26.683,
      "eval_hfsummary_steps_per_second": 2.669,
      "step": 200
    },
    {
      "epoch": 0.1,
      "eval_gptsynthetic_accuracy": 0.9957767722473605,
      "eval_gptsynthetic_loss": 0.3938244879245758,
      "eval_gptsynthetic_runtime": 45.6545,
      "eval_gptsynthetic_samples_per_second": 72.611,
      "eval_gptsynthetic_steps_per_second": 7.272,
      "step": 200
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.18e-06,
      "loss": 0.6481,
      "step": 210
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.38e-06,
      "loss": 0.624,
      "step": 220
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.58e-06,
      "loss": 0.6219,
      "step": 230
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.78e-06,
      "loss": 0.6079,
      "step": 240
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.980000000000001e-06,
      "loss": 0.5815,
      "step": 250
    },
    {
      "epoch": 0.12,
      "learning_rate": 5.18e-06,
      "loss": 0.5714,
      "step": 260
    },
    {
      "epoch": 0.13,
      "learning_rate": 5.380000000000001e-06,
      "loss": 0.5904,
      "step": 270
    },
    {
      "epoch": 0.13,
      "learning_rate": 5.580000000000001e-06,
      "loss": 0.5796,
      "step": 280
    },
    {
      "epoch": 0.14,
      "learning_rate": 5.78e-06,
      "loss": 0.5649,
      "step": 290
    },
    {
      "epoch": 0.14,
      "learning_rate": 5.98e-06,
      "loss": 0.581,
      "step": 300
    },
    {
      "epoch": 0.15,
      "learning_rate": 6.18e-06,
      "loss": 0.5481,
      "step": 310
    },
    {
      "epoch": 0.15,
      "learning_rate": 6.380000000000001e-06,
      "loss": 0.5839,
      "step": 320
    },
    {
      "epoch": 0.16,
      "learning_rate": 6.5800000000000005e-06,
      "loss": 0.5552,
      "step": 330
    },
    {
      "epoch": 0.16,
      "learning_rate": 6.780000000000001e-06,
      "loss": 0.5654,
      "step": 340
    },
    {
      "epoch": 0.17,
      "learning_rate": 6.98e-06,
      "loss": 0.5618,
      "step": 350
    },
    {
      "epoch": 0.17,
      "learning_rate": 7.180000000000001e-06,
      "loss": 0.5528,
      "step": 360
    },
    {
      "epoch": 0.18,
      "learning_rate": 7.3800000000000005e-06,
      "loss": 0.5341,
      "step": 370
    },
    {
      "epoch": 0.18,
      "learning_rate": 7.58e-06,
      "loss": 0.5458,
      "step": 380
    },
    {
      "epoch": 0.19,
      "learning_rate": 7.78e-06,
      "loss": 0.5335,
      "step": 390
    },
    {
      "epoch": 0.19,
      "learning_rate": 7.980000000000002e-06,
      "loss": 0.5285,
      "step": 400
    },
    {
      "epoch": 0.19,
      "eval_webgpt_accuracy": 0.5617977528089888,
      "eval_webgpt_loss": 0.7310989499092102,
      "eval_webgpt_runtime": 63.0205,
      "eval_webgpt_samples_per_second": 62.139,
      "eval_webgpt_steps_per_second": 6.22,
      "step": 400
    },
    {
      "epoch": 0.19,
      "eval_hfsummary_accuracy": 0.6381525254662516,
      "eval_hfsummary_loss": 0.6372680068016052,
      "eval_hfsummary_runtime": 1238.3216,
      "eval_hfsummary_samples_per_second": 26.716,
      "eval_hfsummary_steps_per_second": 2.672,
      "step": 400
    },
    {
      "epoch": 0.19,
      "eval_gptsynthetic_accuracy": 0.9981900452488688,
      "eval_gptsynthetic_loss": 0.026522908359766006,
      "eval_gptsynthetic_runtime": 45.6333,
      "eval_gptsynthetic_samples_per_second": 72.644,
      "eval_gptsynthetic_steps_per_second": 7.275,
      "step": 400
    },
    {
      "epoch": 0.2,
      "learning_rate": 8.18e-06,
      "loss": 0.5663,
      "step": 410
    },
    {
      "epoch": 0.2,
      "learning_rate": 8.380000000000001e-06,
      "loss": 0.55,
      "step": 420
    },
    {
      "epoch": 0.21,
      "learning_rate": 8.580000000000001e-06,
      "loss": 0.5465,
      "step": 430
    },
    {
      "epoch": 0.21,
      "learning_rate": 8.78e-06,
      "loss": 0.5409,
      "step": 440
    },
    {
      "epoch": 0.21,
      "learning_rate": 8.98e-06,
      "loss": 0.5496,
      "step": 450
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.180000000000002e-06,
      "loss": 0.5334,
      "step": 460
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.38e-06,
      "loss": 0.5108,
      "step": 470
    },
    {
      "epoch": 0.23,
      "learning_rate": 9.58e-06,
      "loss": 0.5306,
      "step": 480
    },
    {
      "epoch": 0.23,
      "learning_rate": 9.780000000000001e-06,
      "loss": 0.5379,
      "step": 490
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.980000000000001e-06,
      "loss": 0.5288,
      "step": 500
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.999967852222371e-06,
      "loss": 0.5362,
      "step": 510
    },
    {
      "epoch": 0.25,
      "learning_rate": 9.999856724632902e-06,
      "loss": 0.5245,
      "step": 520
    },
    {
      "epoch": 0.25,
      "learning_rate": 9.99966622210923e-06,
      "loss": 0.5424,
      "step": 530
    },
    {
      "epoch": 0.26,
      "learning_rate": 9.999396347675668e-06,
      "loss": 0.5161,
      "step": 540
    },
    {
      "epoch": 0.26,
      "learning_rate": 9.99904710561659e-06,
      "loss": 0.5295,
      "step": 550
    },
    {
      "epoch": 0.27,
      "learning_rate": 9.998618501476367e-06,
      "loss": 0.5049,
      "step": 560
    },
    {
      "epoch": 0.27,
      "learning_rate": 9.998110542059283e-06,
      "loss": 0.5136,
      "step": 570
    },
    {
      "epoch": 0.28,
      "learning_rate": 9.997523235429414e-06,
      "loss": 0.5059,
      "step": 580
    },
    {
      "epoch": 0.28,
      "learning_rate": 9.996856590910513e-06,
      "loss": 0.5268,
      "step": 590
    },
    {
      "epoch": 0.29,
      "learning_rate": 9.996110619085859e-06,
      "loss": 0.515,
      "step": 600
    },
    {
      "epoch": 0.29,
      "eval_webgpt_accuracy": 0.5640960163432074,
      "eval_webgpt_loss": 0.7018715143203735,
      "eval_webgpt_runtime": 63.203,
      "eval_webgpt_samples_per_second": 61.959,
      "eval_webgpt_steps_per_second": 6.202,
      "step": 600
    },
    {
      "epoch": 0.29,
      "eval_hfsummary_accuracy": 0.6488831121724148,
      "eval_hfsummary_loss": 0.6238470077514648,
      "eval_hfsummary_runtime": 1239.6473,
      "eval_hfsummary_samples_per_second": 26.687,
      "eval_hfsummary_steps_per_second": 2.669,
      "step": 600
    },
    {
      "epoch": 0.29,
      "eval_gptsynthetic_accuracy": 0.9978883861236802,
      "eval_gptsynthetic_loss": 0.031191527843475342,
      "eval_gptsynthetic_runtime": 45.7943,
      "eval_gptsynthetic_samples_per_second": 72.389,
      "eval_gptsynthetic_steps_per_second": 7.25,
      "step": 600
    },
    {
      "epoch": 0.29,
      "learning_rate": 9.995285331798082e-06,
      "loss": 0.4913,
      "step": 610
    },
    {
      "epoch": 0.3,
      "learning_rate": 9.994380742148981e-06,
      "loss": 0.4957,
      "step": 620
    },
    {
      "epoch": 0.3,
      "learning_rate": 9.993396864499321e-06,
      "loss": 0.5041,
      "step": 630
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.992333714468589e-06,
      "loss": 0.5204,
      "step": 640
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.991191308934765e-06,
      "loss": 0.4964,
      "step": 650
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.989969666034039e-06,
      "loss": 0.5054,
      "step": 660
    },
    {
      "epoch": 0.32,
      "learning_rate": 9.988668805160533e-06,
      "loss": 0.5156,
      "step": 670
    },
    {
      "epoch": 0.32,
      "learning_rate": 9.987288746965987e-06,
      "loss": 0.4875,
      "step": 680
    },
    {
      "epoch": 0.33,
      "learning_rate": 9.985829513359437e-06,
      "loss": 0.5002,
      "step": 690
    },
    {
      "epoch": 0.33,
      "learning_rate": 9.984291127506857e-06,
      "loss": 0.5103,
      "step": 700
    },
    {
      "epoch": 0.34,
      "learning_rate": 9.982673613830807e-06,
      "loss": 0.488,
      "step": 710
    },
    {
      "epoch": 0.34,
      "learning_rate": 9.980976998010028e-06,
      "loss": 0.5091,
      "step": 720
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.979201306979047e-06,
      "loss": 0.5008,
      "step": 730
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.977346568927742e-06,
      "loss": 0.4933,
      "step": 740
    },
    {
      "epoch": 0.36,
      "learning_rate": 9.9754128133009e-06,
      "loss": 0.5208,
      "step": 750
    },
    {
      "epoch": 0.36,
      "learning_rate": 9.973400070797745e-06,
      "loss": 0.5036,
      "step": 760
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.971308373371453e-06,
      "loss": 0.4935,
      "step": 770
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.969137754228648e-06,
      "loss": 0.5026,
      "step": 780
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.966888247828864e-06,
      "loss": 0.5047,
      "step": 790
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.964559889884011e-06,
      "loss": 0.4785,
      "step": 800
    },
    {
      "epoch": 0.38,
      "eval_webgpt_accuracy": 0.5684371807967313,
      "eval_webgpt_loss": 0.6995264887809753,
      "eval_webgpt_runtime": 63.093,
      "eval_webgpt_samples_per_second": 62.067,
      "eval_webgpt_steps_per_second": 6.213,
      "step": 800
    },
    {
      "epoch": 0.38,
      "eval_hfsummary_accuracy": 0.6444397424659191,
      "eval_hfsummary_loss": 0.6311432719230652,
      "eval_hfsummary_runtime": 1241.3962,
      "eval_hfsummary_samples_per_second": 26.65,
      "eval_hfsummary_steps_per_second": 2.666,
      "step": 800
    },
    {
      "epoch": 0.38,
      "eval_gptsynthetic_accuracy": 0.9978883861236802,
      "eval_gptsynthetic_loss": 0.016028160229325294,
      "eval_gptsynthetic_runtime": 45.7378,
      "eval_gptsynthetic_samples_per_second": 72.478,
      "eval_gptsynthetic_steps_per_second": 7.259,
      "step": 800
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.962152717357796e-06,
      "loss": 0.4752,
      "step": 810
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.959666768465153e-06,
      "loss": 0.4657,
      "step": 820
    },
    {
      "epoch": 0.4,
      "learning_rate": 9.957102082671612e-06,
      "loss": 0.4935,
      "step": 830
    },
    {
      "epoch": 0.4,
      "learning_rate": 9.954458700692699e-06,
      "loss": 0.4821,
      "step": 840
    },
    {
      "epoch": 0.41,
      "learning_rate": 9.951736664493266e-06,
      "loss": 0.4949,
      "step": 850
    },
    {
      "epoch": 0.41,
      "learning_rate": 9.948936017286844e-06,
      "loss": 0.4894,
      "step": 860
    },
    {
      "epoch": 0.42,
      "learning_rate": 9.94605680353494e-06,
      "loss": 0.5032,
      "step": 870
    },
    {
      "epoch": 0.42,
      "learning_rate": 9.943099068946348e-06,
      "loss": 0.4724,
      "step": 880
    },
    {
      "epoch": 0.42,
      "learning_rate": 9.940062860476406e-06,
      "loss": 0.4871,
      "step": 890
    },
    {
      "epoch": 0.43,
      "learning_rate": 9.936948226326273e-06,
      "loss": 0.4919,
      "step": 900
    },
    {
      "epoch": 0.43,
      "learning_rate": 9.933755215942137e-06,
      "loss": 0.4763,
      "step": 910
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.930483880014448e-06,
      "loss": 0.4928,
      "step": 920
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.92713427047711e-06,
      "loss": 0.4608,
      "step": 930
    },
    {
      "epoch": 0.45,
      "learning_rate": 9.923706440506654e-06,
      "loss": 0.491,
      "step": 940
    },
    {
      "epoch": 0.45,
      "learning_rate": 9.920200444521395e-06,
      "loss": 0.4624,
      "step": 950
    },
    {
      "epoch": 0.46,
      "learning_rate": 9.916616338180569e-06,
      "loss": 0.5028,
      "step": 960
    },
    {
      "epoch": 0.46,
      "learning_rate": 9.912954178383445e-06,
      "loss": 0.4814,
      "step": 970
    },
    {
      "epoch": 0.47,
      "learning_rate": 9.90921402326843e-06,
      "loss": 0.4901,
      "step": 980
    },
    {
      "epoch": 0.47,
      "learning_rate": 9.905395932212139e-06,
      "loss": 0.459,
      "step": 990
    },
    {
      "epoch": 0.48,
      "learning_rate": 9.901499965828451e-06,
      "loss": 0.4624,
      "step": 1000
    },
    {
      "epoch": 0.48,
      "eval_webgpt_accuracy": 0.5661389172625128,
      "eval_webgpt_loss": 0.6934247016906738,
      "eval_webgpt_runtime": 63.1076,
      "eval_webgpt_samples_per_second": 62.053,
      "eval_webgpt_steps_per_second": 6.212,
      "step": 1000
    },
    {
      "epoch": 0.48,
      "eval_hfsummary_accuracy": 0.6628177613880241,
      "eval_hfsummary_loss": 0.6276853680610657,
      "eval_hfsummary_runtime": 1241.5998,
      "eval_hfsummary_samples_per_second": 26.645,
      "eval_hfsummary_steps_per_second": 2.665,
      "step": 1000
    },
    {
      "epoch": 0.48,
      "eval_gptsynthetic_accuracy": 0.9975867269984917,
      "eval_gptsynthetic_loss": 0.015120746567845345,
      "eval_gptsynthetic_runtime": 45.6555,
      "eval_gptsynthetic_samples_per_second": 72.609,
      "eval_gptsynthetic_steps_per_second": 7.272,
      "step": 1000
    },
    {
      "epoch": 0.48,
      "learning_rate": 9.897526185967557e-06,
      "loss": 0.4729,
      "step": 1010
    },
    {
      "epoch": 0.49,
      "learning_rate": 9.893474655714962e-06,
      "loss": 0.4767,
      "step": 1020
    },
    {
      "epoch": 0.49,
      "learning_rate": 9.889345439390508e-06,
      "loss": 0.4737,
      "step": 1030
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.88513860254732e-06,
      "loss": 0.4795,
      "step": 1040
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.880854211970794e-06,
      "loss": 0.4674,
      "step": 1050
    },
    {
      "epoch": 0.51,
      "learning_rate": 9.876492335677523e-06,
      "loss": 0.4605,
      "step": 1060
    },
    {
      "epoch": 0.51,
      "learning_rate": 9.872053042914223e-06,
      "loss": 0.4347,
      "step": 1070
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.867536404156621e-06,
      "loss": 0.4614,
      "step": 1080
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.862942491108358e-06,
      "loss": 0.4849,
      "step": 1090
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.858271376699832e-06,
      "loss": 0.4715,
      "step": 1100
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.853523135087047e-06,
      "loss": 0.4768,
      "step": 1110
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.848697841650435e-06,
      "loss": 0.4686,
      "step": 1120
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.843795572993664e-06,
      "loss": 0.4521,
      "step": 1130
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.83881640694241e-06,
      "loss": 0.4765,
      "step": 1140
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.833760422543137e-06,
      "loss": 0.4829,
      "step": 1150
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.828627700061833e-06,
      "loss": 0.4587,
      "step": 1160
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.82341832098273e-06,
      "loss": 0.4684,
      "step": 1170
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.818132368007022e-06,
      "loss": 0.4443,
      "step": 1180
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.812769925051553e-06,
      "loss": 0.4898,
      "step": 1190
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.807331077247468e-06,
      "loss": 0.44,
      "step": 1200
    },
    {
      "epoch": 0.57,
      "eval_webgpt_accuracy": 0.5605209397344229,
      "eval_webgpt_loss": 0.6905099749565125,
      "eval_webgpt_runtime": 63.2036,
      "eval_webgpt_samples_per_second": 61.959,
      "eval_webgpt_steps_per_second": 6.202,
      "step": 1200
    },
    {
      "epoch": 0.57,
      "eval_hfsummary_accuracy": 0.664419792642747,
      "eval_hfsummary_loss": 0.6330422163009644,
      "eval_hfsummary_runtime": 1242.5416,
      "eval_hfsummary_samples_per_second": 26.625,
      "eval_hfsummary_steps_per_second": 2.663,
      "step": 1200
    },
    {
      "epoch": 0.57,
      "eval_gptsynthetic_accuracy": 0.9981900452488688,
      "eval_gptsynthetic_loss": 0.011154625564813614,
      "eval_gptsynthetic_runtime": 45.6845,
      "eval_gptsynthetic_samples_per_second": 72.563,
      "eval_gptsynthetic_steps_per_second": 7.267,
      "step": 1200
    },
    {
      "epoch": 0.58,
      "learning_rate": 9.80181591093888e-06,
      "loss": 0.4648,
      "step": 1210
    },
    {
      "epoch": 0.58,
      "learning_rate": 9.796224513681495e-06,
      "loss": 0.4483,
      "step": 1220
    },
    {
      "epoch": 0.59,
      "learning_rate": 9.79055697424121e-06,
      "loss": 0.4707,
      "step": 1230
    },
    {
      "epoch": 0.59,
      "learning_rate": 9.784813382592716e-06,
      "loss": 0.4638,
      "step": 1240
    },
    {
      "epoch": 0.6,
      "learning_rate": 9.77899382991807e-06,
      "loss": 0.4366,
      "step": 1250
    },
    {
      "epoch": 0.6,
      "learning_rate": 9.773098408605242e-06,
      "loss": 0.4457,
      "step": 1260
    },
    {
      "epoch": 0.61,
      "learning_rate": 9.76712721224665e-06,
      "loss": 0.4363,
      "step": 1270
    },
    {
      "epoch": 0.61,
      "learning_rate": 9.761080335637672e-06,
      "loss": 0.4577,
      "step": 1280
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.754957874775146e-06,
      "loss": 0.4538,
      "step": 1290
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.748759926855845e-06,
      "loss": 0.4499,
      "step": 1300
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.742486590274927e-06,
      "loss": 0.4466,
      "step": 1310
    },
    {
      "epoch": 0.63,
      "learning_rate": 9.736137964624381e-06,
      "loss": 0.4459,
      "step": 1320
    },
    {
      "epoch": 0.63,
      "learning_rate": 9.729714150691448e-06,
      "loss": 0.4544,
      "step": 1330
    },
    {
      "epoch": 0.64,
      "learning_rate": 9.723215250457009e-06,
      "loss": 0.4453,
      "step": 1340
    },
    {
      "epoch": 0.64,
      "learning_rate": 9.716641367093976e-06,
      "loss": 0.4614,
      "step": 1350
    },
    {
      "epoch": 0.65,
      "learning_rate": 9.709992604965652e-06,
      "loss": 0.4863,
      "step": 1360
    },
    {
      "epoch": 0.65,
      "learning_rate": 9.703269069624075e-06,
      "loss": 0.4432,
      "step": 1370
    },
    {
      "epoch": 0.66,
      "learning_rate": 9.696470867808333e-06,
      "loss": 0.438,
      "step": 1380
    },
    {
      "epoch": 0.66,
      "learning_rate": 9.68959810744289e-06,
      "loss": 0.4593,
      "step": 1390
    },
    {
      "epoch": 0.67,
      "learning_rate": 9.682650897635845e-06,
      "loss": 0.4463,
      "step": 1400
    },
    {
      "epoch": 0.67,
      "eval_webgpt_accuracy": 0.5617977528089888,
      "eval_webgpt_loss": 0.6875464916229248,
      "eval_webgpt_runtime": 63.2336,
      "eval_webgpt_samples_per_second": 61.929,
      "eval_webgpt_steps_per_second": 6.199,
      "step": 1400
    },
    {
      "epoch": 0.67,
      "eval_hfsummary_accuracy": 0.6741528881903093,
      "eval_hfsummary_loss": 0.6355233788490295,
      "eval_hfsummary_runtime": 1241.0977,
      "eval_hfsummary_samples_per_second": 26.656,
      "eval_hfsummary_steps_per_second": 2.666,
      "step": 1400
    },
    {
      "epoch": 0.67,
      "eval_gptsynthetic_accuracy": 0.9975867269984917,
      "eval_gptsynthetic_loss": 0.010252352803945541,
      "eval_gptsynthetic_runtime": 45.7898,
      "eval_gptsynthetic_samples_per_second": 72.396,
      "eval_gptsynthetic_steps_per_second": 7.251,
      "step": 1400
    },
    {
      "epoch": 0.67,
      "learning_rate": 9.675629348677232e-06,
      "loss": 0.4454,
      "step": 1410
    },
    {
      "epoch": 0.68,
      "learning_rate": 9.668533572037236e-06,
      "loss": 0.4444,
      "step": 1420
    },
    {
      "epoch": 0.68,
      "learning_rate": 9.661363680364445e-06,
      "loss": 0.4622,
      "step": 1430
    },
    {
      "epoch": 0.69,
      "learning_rate": 9.65411978748406e-06,
      "loss": 0.4591,
      "step": 1440
    },
    {
      "epoch": 0.69,
      "learning_rate": 9.64680200839608e-06,
      "loss": 0.4462,
      "step": 1450
    },
    {
      "epoch": 0.7,
      "learning_rate": 9.63941045927348e-06,
      "loss": 0.4423,
      "step": 1460
    },
    {
      "epoch": 0.7,
      "learning_rate": 9.631945257460369e-06,
      "loss": 0.4302,
      "step": 1470
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.62440652147012e-06,
      "loss": 0.4668,
      "step": 1480
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.616794370983498e-06,
      "loss": 0.4252,
      "step": 1490
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.609108926846759e-06,
      "loss": 0.4563,
      "step": 1500
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.60135031106972e-06,
      "loss": 0.4396,
      "step": 1510
    },
    {
      "epoch": 0.73,
      "learning_rate": 9.593518646823833e-06,
      "loss": 0.463,
      "step": 1520
    },
    {
      "epoch": 0.73,
      "learning_rate": 9.58561405844023e-06,
      "loss": 0.4292,
      "step": 1530
    },
    {
      "epoch": 0.73,
      "learning_rate": 9.577636671407743e-06,
      "loss": 0.4669,
      "step": 1540
    },
    {
      "epoch": 0.74,
      "learning_rate": 9.569586612370913e-06,
      "loss": 0.4648,
      "step": 1550
    },
    {
      "epoch": 0.74,
      "learning_rate": 9.561464009127988e-06,
      "loss": 0.4318,
      "step": 1560
    },
    {
      "epoch": 0.75,
      "learning_rate": 9.553268990628877e-06,
      "loss": 0.4274,
      "step": 1570
    },
    {
      "epoch": 0.75,
      "learning_rate": 9.545001686973123e-06,
      "loss": 0.4532,
      "step": 1580
    },
    {
      "epoch": 0.76,
      "learning_rate": 9.53666222940782e-06,
      "loss": 0.4476,
      "step": 1590
    },
    {
      "epoch": 0.76,
      "learning_rate": 9.528250750325545e-06,
      "loss": 0.4477,
      "step": 1600
    },
    {
      "epoch": 0.76,
      "eval_webgpt_accuracy": 0.5801838610827375,
      "eval_webgpt_loss": 0.6804662346839905,
      "eval_webgpt_runtime": 63.2449,
      "eval_webgpt_samples_per_second": 61.918,
      "eval_webgpt_steps_per_second": 6.198,
      "step": 1600
    },
    {
      "epoch": 0.76,
      "eval_hfsummary_accuracy": 0.6604298280083426,
      "eval_hfsummary_loss": 0.6250379085540771,
      "eval_hfsummary_runtime": 1240.5251,
      "eval_hfsummary_samples_per_second": 26.669,
      "eval_hfsummary_steps_per_second": 2.667,
      "step": 1600
    },
    {
      "epoch": 0.76,
      "eval_gptsynthetic_accuracy": 0.9975867269984917,
      "eval_gptsynthetic_loss": 0.01225399598479271,
      "eval_gptsynthetic_runtime": 45.6829,
      "eval_gptsynthetic_samples_per_second": 72.565,
      "eval_gptsynthetic_steps_per_second": 7.267,
      "step": 1600
    },
    {
      "epoch": 0.77,
      "learning_rate": 9.519767383262244e-06,
      "loss": 0.4265,
      "step": 1610
    },
    {
      "epoch": 0.77,
      "learning_rate": 9.511212262895113e-06,
      "loss": 0.4509,
      "step": 1620
    },
    {
      "epoch": 0.78,
      "learning_rate": 9.502585525040469e-06,
      "loss": 0.4159,
      "step": 1630
    },
    {
      "epoch": 0.78,
      "learning_rate": 9.493887306651585e-06,
      "loss": 0.4241,
      "step": 1640
    },
    {
      "epoch": 0.79,
      "learning_rate": 9.485117745816523e-06,
      "loss": 0.4186,
      "step": 1650
    },
    {
      "epoch": 0.79,
      "learning_rate": 9.476276981755933e-06,
      "loss": 0.4334,
      "step": 1660
    },
    {
      "epoch": 0.8,
      "learning_rate": 9.467365154820853e-06,
      "loss": 0.4508,
      "step": 1670
    },
    {
      "epoch": 0.8,
      "learning_rate": 9.45838240649047e-06,
      "loss": 0.4227,
      "step": 1680
    },
    {
      "epoch": 0.81,
      "learning_rate": 9.44932887936988e-06,
      "loss": 0.4391,
      "step": 1690
    },
    {
      "epoch": 0.81,
      "learning_rate": 9.440204717187829e-06,
      "loss": 0.4461,
      "step": 1700
    },
    {
      "epoch": 0.82,
      "learning_rate": 9.43101006479442e-06,
      "loss": 0.4334,
      "step": 1710
    },
    {
      "epoch": 0.82,
      "learning_rate": 9.421745068158823e-06,
      "loss": 0.4095,
      "step": 1720
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.41240987436695e-06,
      "loss": 0.4629,
      "step": 1730
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.40300463161913e-06,
      "loss": 0.433,
      "step": 1740
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.393529489227745e-06,
      "loss": 0.4323,
      "step": 1750
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.383984597614867e-06,
      "loss": 0.4339,
      "step": 1760
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.374370108309862e-06,
      "loss": 0.4263,
      "step": 1770
    },
    {
      "epoch": 0.85,
      "learning_rate": 9.364686173947e-06,
      "loss": 0.4095,
      "step": 1780
    },
    {
      "epoch": 0.85,
      "learning_rate": 9.354932948263014e-06,
      "loss": 0.4284,
      "step": 1790
    },
    {
      "epoch": 0.86,
      "learning_rate": 9.345110586094672e-06,
      "loss": 0.4398,
      "step": 1800
    },
    {
      "epoch": 0.86,
      "eval_webgpt_accuracy": 0.5873340143003064,
      "eval_webgpt_loss": 0.6802140474319458,
      "eval_webgpt_runtime": 63.2638,
      "eval_webgpt_samples_per_second": 61.9,
      "eval_webgpt_steps_per_second": 6.196,
      "step": 1800
    },
    {
      "epoch": 0.86,
      "eval_hfsummary_accuracy": 0.6609739140948523,
      "eval_hfsummary_loss": 0.6455074548721313,
      "eval_hfsummary_runtime": 1241.9993,
      "eval_hfsummary_samples_per_second": 26.637,
      "eval_hfsummary_steps_per_second": 2.664,
      "step": 1800
    },
    {
      "epoch": 0.86,
      "eval_gptsynthetic_accuracy": 0.9975867269984917,
      "eval_gptsynthetic_loss": 0.007792285177856684,
      "eval_gptsynthetic_runtime": 45.7802,
      "eval_gptsynthetic_samples_per_second": 72.411,
      "eval_gptsynthetic_steps_per_second": 7.252,
      "step": 1800
    },
    {
      "epoch": 0.86,
      "learning_rate": 9.335219243376313e-06,
      "loss": 0.4483,
      "step": 1810
    },
    {
      "epoch": 0.87,
      "learning_rate": 9.325259077137371e-06,
      "loss": 0.4317,
      "step": 1820
    },
    {
      "epoch": 0.87,
      "learning_rate": 9.315230245499886e-06,
      "loss": 0.4285,
      "step": 1830
    },
    {
      "epoch": 0.88,
      "learning_rate": 9.305132907675987e-06,
      "loss": 0.4044,
      "step": 1840
    },
    {
      "epoch": 0.88,
      "learning_rate": 9.294967223965377e-06,
      "loss": 0.4418,
      "step": 1850
    },
    {
      "epoch": 0.89,
      "learning_rate": 9.284733355752775e-06,
      "loss": 0.4336,
      "step": 1860
    },
    {
      "epoch": 0.89,
      "learning_rate": 9.274431465505357e-06,
      "loss": 0.4481,
      "step": 1870
    },
    {
      "epoch": 0.9,
      "learning_rate": 9.264061716770183e-06,
      "loss": 0.4294,
      "step": 1880
    },
    {
      "epoch": 0.9,
      "learning_rate": 9.253624274171595e-06,
      "loss": 0.4442,
      "step": 1890
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.243119303408606e-06,
      "loss": 0.4618,
      "step": 1900
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.232546971252265e-06,
      "loss": 0.4286,
      "step": 1910
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.22190744554302e-06,
      "loss": 0.4247,
      "step": 1920
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.211200895188035e-06,
      "loss": 0.4145,
      "step": 1930
    },
    {
      "epoch": 0.93,
      "learning_rate": 9.200427490158532e-06,
      "loss": 0.4019,
      "step": 1940
    },
    {
      "epoch": 0.93,
      "learning_rate": 9.189587401487073e-06,
      "loss": 0.4154,
      "step": 1950
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.178680801264855e-06,
      "loss": 0.4267,
      "step": 1960
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.167707862638973e-06,
      "loss": 0.4344,
      "step": 1970
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.156668759809678e-06,
      "loss": 0.4095,
      "step": 1980
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.145563668027599e-06,
      "loss": 0.4053,
      "step": 1990
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.134392763590978e-06,
      "loss": 0.4244,
      "step": 2000
    },
    {
      "epoch": 0.95,
      "eval_webgpt_accuracy": 0.585291113381001,
      "eval_webgpt_loss": 0.6761194467544556,
      "eval_webgpt_runtime": 63.2755,
      "eval_webgpt_samples_per_second": 61.888,
      "eval_webgpt_steps_per_second": 6.195,
      "step": 2000
    },
    {
      "epoch": 0.95,
      "eval_hfsummary_accuracy": 0.6690142973732733,
      "eval_hfsummary_loss": 0.6501129269599915,
      "eval_hfsummary_runtime": 1243.8141,
      "eval_hfsummary_samples_per_second": 26.598,
      "eval_hfsummary_steps_per_second": 2.66,
      "step": 2000
    },
    {
      "epoch": 0.95,
      "eval_gptsynthetic_accuracy": 0.9978883861236802,
      "eval_gptsynthetic_loss": 0.0076271467842161655,
      "eval_gptsynthetic_runtime": 45.8118,
      "eval_gptsynthetic_samples_per_second": 72.361,
      "eval_gptsynthetic_steps_per_second": 7.247,
      "step": 2000
    },
    {
      "epoch": 0.96,
      "learning_rate": 9.124282826330867e-06,
      "loss": 0.4132,
      "step": 2010
    },
    {
      "epoch": 0.96,
      "learning_rate": 9.112987367291958e-06,
      "loss": 0.4089,
      "step": 2020
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.101626612761673e-06,
      "loss": 0.42,
      "step": 2030
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.090200743097004e-06,
      "loss": 0.3961,
      "step": 2040
    },
    {
      "epoch": 0.98,
      "learning_rate": 9.078709939688683e-06,
      "loss": 0.4139,
      "step": 2050
    },
    {
      "epoch": 0.98,
      "learning_rate": 9.06715438495829e-06,
      "loss": 0.4021,
      "step": 2060
    },
    {
      "epoch": 0.99,
      "learning_rate": 9.055534262355358e-06,
      "loss": 0.4246,
      "step": 2070
    },
    {
      "epoch": 0.99,
      "learning_rate": 9.043849756354469e-06,
      "loss": 0.4233,
      "step": 2080
    },
    {
      "epoch": 1.0,
      "learning_rate": 9.032101052452318e-06,
      "loss": 0.4121,
      "step": 2090
    },
    {
      "epoch": 1.0,
      "learning_rate": 9.020288337164766e-06,
      "loss": 0.4299,
      "step": 2100
    },
    {
      "epoch": 1.01,
      "learning_rate": 9.00841179802389e-06,
      "loss": 0.3955,
      "step": 2110
    },
    {
      "epoch": 1.01,
      "learning_rate": 8.996471623574997e-06,
      "loss": 0.4185,
      "step": 2120
    },
    {
      "epoch": 1.02,
      "learning_rate": 8.984468003373625e-06,
      "loss": 0.4086,
      "step": 2130
    },
    {
      "epoch": 1.02,
      "learning_rate": 8.97240112798255e-06,
      "loss": 0.4061,
      "step": 2140
    },
    {
      "epoch": 1.03,
      "learning_rate": 8.960271188968753e-06,
      "loss": 0.3859,
      "step": 2150
    },
    {
      "epoch": 1.03,
      "learning_rate": 8.948078378900368e-06,
      "loss": 0.3951,
      "step": 2160
    },
    {
      "epoch": 1.04,
      "learning_rate": 8.935822891343647e-06,
      "loss": 0.3914,
      "step": 2170
    },
    {
      "epoch": 1.04,
      "learning_rate": 8.923504920859864e-06,
      "loss": 0.3672,
      "step": 2180
    },
    {
      "epoch": 1.04,
      "learning_rate": 8.911124663002243e-06,
      "loss": 0.385,
      "step": 2190
    },
    {
      "epoch": 1.05,
      "learning_rate": 8.898682314312852e-06,
      "loss": 0.3932,
      "step": 2200
    },
    {
      "epoch": 1.05,
      "eval_webgpt_accuracy": 0.5681818181818182,
      "eval_webgpt_loss": 0.6895765662193298,
      "eval_webgpt_runtime": 63.5181,
      "eval_webgpt_samples_per_second": 61.652,
      "eval_webgpt_steps_per_second": 6.171,
      "step": 2200
    },
    {
      "epoch": 1.05,
      "eval_hfsummary_accuracy": 0.6638757065562373,
      "eval_hfsummary_loss": 0.6736522912979126,
      "eval_hfsummary_runtime": 1274.8625,
      "eval_hfsummary_samples_per_second": 25.95,
      "eval_hfsummary_steps_per_second": 2.596,
      "step": 2200
    },
    {
      "epoch": 1.05,
      "eval_gptsynthetic_accuracy": 0.9984917043740573,
      "eval_gptsynthetic_loss": 0.006684631574898958,
      "eval_gptsynthetic_runtime": 46.0527,
      "eval_gptsynthetic_samples_per_second": 71.983,
      "eval_gptsynthetic_steps_per_second": 7.209,
      "step": 2200
    },
    {
      "epoch": 1.05,
      "learning_rate": 8.886178072319464e-06,
      "loss": 0.3868,
      "step": 2210
    },
    {
      "epoch": 1.06,
      "learning_rate": 8.873612135532451e-06,
      "loss": 0.3712,
      "step": 2220
    },
    {
      "epoch": 1.06,
      "learning_rate": 8.860984703441608e-06,
      "loss": 0.4153,
      "step": 2230
    },
    {
      "epoch": 1.07,
      "learning_rate": 8.848295976512996e-06,
      "loss": 0.4364,
      "step": 2240
    },
    {
      "epoch": 1.07,
      "learning_rate": 8.835546156185765e-06,
      "loss": 0.3733,
      "step": 2250
    },
    {
      "epoch": 1.08,
      "learning_rate": 8.822735444868941e-06,
      "loss": 0.389,
      "step": 2260
    },
    {
      "epoch": 1.08,
      "learning_rate": 8.809864045938227e-06,
      "loss": 0.3869,
      "step": 2270
    },
    {
      "epoch": 1.09,
      "learning_rate": 8.796932163732768e-06,
      "loss": 0.3782,
      "step": 2280
    },
    {
      "epoch": 1.09,
      "learning_rate": 8.783940003551902e-06,
      "loss": 0.4087,
      "step": 2290
    },
    {
      "epoch": 1.1,
      "learning_rate": 8.770887771651916e-06,
      "loss": 0.3849,
      "step": 2300
    },
    {
      "epoch": 1.1,
      "learning_rate": 8.757775675242757e-06,
      "loss": 0.3848,
      "step": 2310
    },
    {
      "epoch": 1.11,
      "learning_rate": 8.744603922484746e-06,
      "loss": 0.4036,
      "step": 2320
    },
    {
      "epoch": 1.11,
      "learning_rate": 8.731372722485276e-06,
      "loss": 0.3931,
      "step": 2330
    },
    {
      "epoch": 1.12,
      "learning_rate": 8.718082285295496e-06,
      "loss": 0.3909,
      "step": 2340
    },
    {
      "epoch": 1.12,
      "learning_rate": 8.70473282190696e-06,
      "loss": 0.3912,
      "step": 2350
    },
    {
      "epoch": 1.13,
      "learning_rate": 8.691324544248303e-06,
      "loss": 0.3874,
      "step": 2360
    },
    {
      "epoch": 1.13,
      "learning_rate": 8.677857665181854e-06,
      "loss": 0.3878,
      "step": 2370
    },
    {
      "epoch": 1.14,
      "learning_rate": 8.664332398500269e-06,
      "loss": 0.4063,
      "step": 2380
    },
    {
      "epoch": 1.14,
      "learning_rate": 8.650748958923128e-06,
      "loss": 0.3794,
      "step": 2390
    },
    {
      "epoch": 1.15,
      "learning_rate": 8.637107562093537e-06,
      "loss": 0.3514,
      "step": 2400
    },
    {
      "epoch": 1.15,
      "eval_webgpt_accuracy": 0.5840143003064351,
      "eval_webgpt_loss": 0.6855098605155945,
      "eval_webgpt_runtime": 63.4383,
      "eval_webgpt_samples_per_second": 61.729,
      "eval_webgpt_steps_per_second": 6.179,
      "step": 2400
    },
    {
      "epoch": 1.15,
      "eval_hfsummary_accuracy": 0.6732158510413203,
      "eval_hfsummary_loss": 0.6792841553688049,
      "eval_hfsummary_runtime": 1256.3167,
      "eval_hfsummary_samples_per_second": 26.333,
      "eval_hfsummary_steps_per_second": 2.634,
      "step": 2400
    },
    {
      "epoch": 1.15,
      "eval_gptsynthetic_accuracy": 0.9978883861236802,
      "eval_gptsynthetic_loss": 0.00653717340901494,
      "eval_gptsynthetic_runtime": 45.9265,
      "eval_gptsynthetic_samples_per_second": 72.181,
      "eval_gptsynthetic_steps_per_second": 7.229,
      "step": 2400
    },
    {
      "epoch": 1.15,
      "learning_rate": 8.623408424574695e-06,
      "loss": 0.3742,
      "step": 2410
    },
    {
      "epoch": 1.15,
      "learning_rate": 8.609651763846462e-06,
      "loss": 0.3696,
      "step": 2420
    },
    {
      "epoch": 1.16,
      "learning_rate": 8.595837798301908e-06,
      "loss": 0.3943,
      "step": 2430
    },
    {
      "epoch": 1.16,
      "learning_rate": 8.581966747243835e-06,
      "loss": 0.3928,
      "step": 2440
    },
    {
      "epoch": 1.17,
      "learning_rate": 8.568038830881305e-06,
      "loss": 0.3753,
      "step": 2450
    },
    {
      "epoch": 1.17,
      "learning_rate": 8.554054270326144e-06,
      "loss": 0.3594,
      "step": 2460
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.54001328758943e-06,
      "loss": 0.3674,
      "step": 2470
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.525916105577967e-06,
      "loss": 0.3714,
      "step": 2480
    },
    {
      "epoch": 1.19,
      "learning_rate": 8.511762948090745e-06,
      "loss": 0.3937,
      "step": 2490
    },
    {
      "epoch": 1.19,
      "learning_rate": 8.49755403981539e-06,
      "loss": 0.3848,
      "step": 2500
    },
    {
      "epoch": 1.2,
      "learning_rate": 8.4832896063246e-06,
      "loss": 0.3709,
      "step": 2510
    },
    {
      "epoch": 1.2,
      "learning_rate": 8.46896987407256e-06,
      "loss": 0.4056,
      "step": 2520
    },
    {
      "epoch": 1.21,
      "learning_rate": 8.45459507039134e-06,
      "loss": 0.3794,
      "step": 2530
    },
    {
      "epoch": 1.21,
      "learning_rate": 8.440165423487302e-06,
      "loss": 0.3597,
      "step": 2540
    },
    {
      "epoch": 1.22,
      "learning_rate": 8.425681162437464e-06,
      "loss": 0.3773,
      "step": 2550
    },
    {
      "epoch": 1.22,
      "learning_rate": 8.41114251718587e-06,
      "loss": 0.3678,
      "step": 2560
    },
    {
      "epoch": 1.23,
      "learning_rate": 8.396549718539934e-06,
      "loss": 0.3831,
      "step": 2570
    },
    {
      "epoch": 1.23,
      "learning_rate": 8.381902998166782e-06,
      "loss": 0.3873,
      "step": 2580
    },
    {
      "epoch": 1.24,
      "learning_rate": 8.367202588589569e-06,
      "loss": 0.3775,
      "step": 2590
    },
    {
      "epoch": 1.24,
      "learning_rate": 8.352448723183792e-06,
      "loss": 0.3936,
      "step": 2600
    },
    {
      "epoch": 1.24,
      "eval_webgpt_accuracy": 0.5760980592441267,
      "eval_webgpt_loss": 0.6807132959365845,
      "eval_webgpt_runtime": 68.3724,
      "eval_webgpt_samples_per_second": 57.275,
      "eval_webgpt_steps_per_second": 5.733,
      "step": 2600
    },
    {
      "epoch": 1.24,
      "eval_hfsummary_accuracy": 0.6699815615270683,
      "eval_hfsummary_loss": 0.6664829254150391,
      "eval_hfsummary_runtime": 1323.7739,
      "eval_hfsummary_samples_per_second": 24.991,
      "eval_hfsummary_steps_per_second": 2.5,
      "step": 2600
    },
    {
      "epoch": 1.24,
      "eval_gptsynthetic_accuracy": 0.9984917043740573,
      "eval_gptsynthetic_loss": 0.006850704550743103,
      "eval_gptsynthetic_runtime": 48.9916,
      "eval_gptsynthetic_samples_per_second": 67.665,
      "eval_gptsynthetic_steps_per_second": 6.777,
      "step": 2600
    },
    {
      "epoch": 1.25,
      "learning_rate": 8.337641636173582e-06,
      "loss": 0.3647,
      "step": 2610
    },
    {
      "epoch": 1.25,
      "learning_rate": 8.322781562627987e-06,
      "loss": 0.3815,
      "step": 2620
    },
    {
      "epoch": 1.25,
      "learning_rate": 8.307868738457236e-06,
      "loss": 0.3979,
      "step": 2630
    },
    {
      "epoch": 1.26,
      "learning_rate": 8.292903400409005e-06,
      "loss": 0.3685,
      "step": 2640
    },
    {
      "epoch": 1.26,
      "learning_rate": 8.27788578606464e-06,
      "loss": 0.3498,
      "step": 2650
    },
    {
      "epoch": 1.27,
      "learning_rate": 8.262816133835412e-06,
      "loss": 0.3641,
      "step": 2660
    },
    {
      "epoch": 1.27,
      "learning_rate": 8.247694682958699e-06,
      "loss": 0.4006,
      "step": 2670
    },
    {
      "epoch": 1.28,
      "learning_rate": 8.232521673494216e-06,
      "loss": 0.3857,
      "step": 2680
    },
    {
      "epoch": 1.28,
      "learning_rate": 8.217297346320192e-06,
      "loss": 0.3838,
      "step": 2690
    },
    {
      "epoch": 1.29,
      "learning_rate": 8.202021943129548e-06,
      "loss": 0.3979,
      "step": 2700
    },
    {
      "epoch": 1.29,
      "learning_rate": 8.186695706426053e-06,
      "loss": 0.3589,
      "step": 2710
    },
    {
      "epoch": 1.3,
      "learning_rate": 8.17131887952049e-06,
      "loss": 0.3853,
      "step": 2720
    },
    {
      "epoch": 1.3,
      "learning_rate": 8.155891706526773e-06,
      "loss": 0.3698,
      "step": 2730
    },
    {
      "epoch": 1.31,
      "learning_rate": 8.140414432358094e-06,
      "loss": 0.3637,
      "step": 2740
    },
    {
      "epoch": 1.31,
      "learning_rate": 8.124887302723014e-06,
      "loss": 0.3742,
      "step": 2750
    },
    {
      "epoch": 1.32,
      "learning_rate": 8.109310564121577e-06,
      "loss": 0.3849,
      "step": 2760
    },
    {
      "epoch": 1.32,
      "learning_rate": 8.09368446384139e-06,
      "loss": 0.3721,
      "step": 2770
    },
    {
      "epoch": 1.33,
      "learning_rate": 8.078009249953693e-06,
      "loss": 0.3818,
      "step": 2780
    },
    {
      "epoch": 1.33,
      "learning_rate": 8.062285171309437e-06,
      "loss": 0.384,
      "step": 2790
    },
    {
      "epoch": 1.34,
      "learning_rate": 8.046512477535315e-06,
      "loss": 0.3567,
      "step": 2800
    },
    {
      "epoch": 1.34,
      "eval_webgpt_accuracy": 0.5822267620020429,
      "eval_webgpt_loss": 0.6815086007118225,
      "eval_webgpt_runtime": 68.1221,
      "eval_webgpt_samples_per_second": 57.485,
      "eval_webgpt_steps_per_second": 5.754,
      "step": 2800
    },
    {
      "epoch": 1.34,
      "eval_hfsummary_accuracy": 0.6710697337000877,
      "eval_hfsummary_loss": 0.6821005940437317,
      "eval_hfsummary_runtime": 1319.5773,
      "eval_hfsummary_samples_per_second": 25.071,
      "eval_hfsummary_steps_per_second": 2.508,
      "step": 2800
    },
    {
      "epoch": 1.34,
      "eval_gptsynthetic_accuracy": 0.9978883861236802,
      "eval_gptsynthetic_loss": 0.005701866466552019,
      "eval_gptsynthetic_runtime": 48.8957,
      "eval_gptsynthetic_samples_per_second": 67.797,
      "eval_gptsynthetic_steps_per_second": 6.79,
      "step": 2800
    },
    {
      "epoch": 1.34,
      "learning_rate": 8.030691419029806e-06,
      "loss": 0.3813,
      "step": 2810
    },
    {
      "epoch": 1.35,
      "learning_rate": 8.014822246959203e-06,
      "loss": 0.3645,
      "step": 2820
    },
    {
      "epoch": 1.35,
      "learning_rate": 7.998905213253627e-06,
      "loss": 0.3752,
      "step": 2830
    },
    {
      "epoch": 1.35,
      "learning_rate": 7.982940570603014e-06,
      "loss": 0.4075,
      "step": 2840
    },
    {
      "epoch": 1.36,
      "learning_rate": 7.966928572453123e-06,
      "loss": 0.3823,
      "step": 2850
    },
    {
      "epoch": 1.36,
      "learning_rate": 7.950869473001494e-06,
      "loss": 0.3776,
      "step": 2860
    },
    {
      "epoch": 1.37,
      "learning_rate": 7.93476352719343e-06,
      "loss": 0.3689,
      "step": 2870
    },
    {
      "epoch": 1.37,
      "learning_rate": 7.91861099071793e-06,
      "loss": 0.3883,
      "step": 2880
    },
    {
      "epoch": 1.38,
      "learning_rate": 7.902412120003649e-06,
      "loss": 0.3736,
      "step": 2890
    },
    {
      "epoch": 1.38,
      "learning_rate": 7.886167172214814e-06,
      "loss": 0.3363,
      "step": 2900
    },
    {
      "epoch": 1.39,
      "learning_rate": 7.86987640524715e-06,
      "loss": 0.3866,
      "step": 2910
    },
    {
      "epoch": 1.39,
      "learning_rate": 7.853540077723779e-06,
      "loss": 0.3717,
      "step": 2920
    },
    {
      "epoch": 1.4,
      "learning_rate": 7.837158448991112e-06,
      "loss": 0.3665,
      "step": 2930
    },
    {
      "epoch": 1.4,
      "learning_rate": 7.820731779114746e-06,
      "loss": 0.3814,
      "step": 2940
    },
    {
      "epoch": 1.41,
      "learning_rate": 7.804260328875322e-06,
      "loss": 0.3703,
      "step": 2950
    },
    {
      "epoch": 1.41,
      "learning_rate": 7.787744359764388e-06,
      "loss": 0.3896,
      "step": 2960
    },
    {
      "epoch": 1.42,
      "learning_rate": 7.77118413398025e-06,
      "loss": 0.3566,
      "step": 2970
    },
    {
      "epoch": 1.42,
      "learning_rate": 7.754579914423806e-06,
      "loss": 0.3743,
      "step": 2980
    },
    {
      "epoch": 1.43,
      "learning_rate": 7.737931964694378e-06,
      "loss": 0.3657,
      "step": 2990
    },
    {
      "epoch": 1.43,
      "learning_rate": 7.721240549085523e-06,
      "loss": 0.3956,
      "step": 3000
    },
    {
      "epoch": 1.43,
      "eval_webgpt_accuracy": 0.5799284984678243,
      "eval_webgpt_loss": 0.6848716139793396,
      "eval_webgpt_runtime": 67.9306,
      "eval_webgpt_samples_per_second": 57.647,
      "eval_webgpt_steps_per_second": 5.771,
      "step": 3000
    },
    {
      "epoch": 1.43,
      "eval_hfsummary_accuracy": 0.6674122661185503,
      "eval_hfsummary_loss": 0.6731451153755188,
      "eval_hfsummary_runtime": 1273.6757,
      "eval_hfsummary_samples_per_second": 25.974,
      "eval_hfsummary_steps_per_second": 2.598,
      "step": 3000
    },
    {
      "epoch": 1.43,
      "eval_gptsynthetic_accuracy": 0.9984917043740573,
      "eval_gptsynthetic_loss": 0.005726581439375877,
      "eval_gptsynthetic_runtime": 48.6337,
      "eval_gptsynthetic_samples_per_second": 68.163,
      "eval_gptsynthetic_steps_per_second": 6.827,
      "step": 3000
    },
    {
      "epoch": 1.44,
      "learning_rate": 7.704505932580835e-06,
      "loss": 0.3617,
      "step": 3010
    },
    {
      "epoch": 1.44,
      "learning_rate": 7.687728380849746e-06,
      "loss": 0.3944,
      "step": 3020
    },
    {
      "epoch": 1.45,
      "learning_rate": 7.6709081602433e-06,
      "loss": 0.347,
      "step": 3030
    },
    {
      "epoch": 1.45,
      "learning_rate": 7.654045537789929e-06,
      "loss": 0.3657,
      "step": 3040
    },
    {
      "epoch": 1.46,
      "learning_rate": 7.637140781191214e-06,
      "loss": 0.3874,
      "step": 3050
    },
    {
      "epoch": 1.46,
      "learning_rate": 7.6201941588176355e-06,
      "loss": 0.3875,
      "step": 3060
    },
    {
      "epoch": 1.46,
      "learning_rate": 7.603205939704308e-06,
      "loss": 0.371,
      "step": 3070
    },
    {
      "epoch": 1.47,
      "learning_rate": 7.586176393546714e-06,
      "loss": 0.4006,
      "step": 3080
    },
    {
      "epoch": 1.47,
      "learning_rate": 7.569105790696423e-06,
      "loss": 0.3721,
      "step": 3090
    },
    {
      "epoch": 1.48,
      "learning_rate": 7.551994402156794e-06,
      "loss": 0.3825,
      "step": 3100
    },
    {
      "epoch": 1.48,
      "learning_rate": 7.5348424995786814e-06,
      "loss": 0.3688,
      "step": 3110
    },
    {
      "epoch": 1.49,
      "learning_rate": 7.517650355256116e-06,
      "loss": 0.3698,
      "step": 3120
    },
    {
      "epoch": 1.49,
      "learning_rate": 7.500418242121982e-06,
      "loss": 0.3623,
      "step": 3130
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.483146433743686e-06,
      "loss": 0.3739,
      "step": 3140
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.465835204318822e-06,
      "loss": 0.3708,
      "step": 3150
    },
    {
      "epoch": 1.51,
      "learning_rate": 7.448484828670799e-06,
      "loss": 0.3344,
      "step": 3160
    },
    {
      "epoch": 1.51,
      "learning_rate": 7.431095582244495e-06,
      "loss": 0.3548,
      "step": 3170
    },
    {
      "epoch": 1.52,
      "learning_rate": 7.413667741101881e-06,
      "loss": 0.3702,
      "step": 3180
    },
    {
      "epoch": 1.52,
      "learning_rate": 7.3962015819176324e-06,
      "loss": 0.3361,
      "step": 3190
    },
    {
      "epoch": 1.53,
      "learning_rate": 7.378697381974744e-06,
      "loss": 0.4019,
      "step": 3200
    },
    {
      "epoch": 1.53,
      "eval_webgpt_accuracy": 0.5891215526046987,
      "eval_webgpt_loss": 0.6808570623397827,
      "eval_webgpt_runtime": 63.3758,
      "eval_webgpt_samples_per_second": 61.79,
      "eval_webgpt_steps_per_second": 6.185,
      "step": 3200
    },
    {
      "epoch": 1.53,
      "eval_hfsummary_accuracy": 0.6569839494604479,
      "eval_hfsummary_loss": 0.692561686038971,
      "eval_hfsummary_runtime": 1242.943,
      "eval_hfsummary_samples_per_second": 26.617,
      "eval_hfsummary_steps_per_second": 2.662,
      "step": 3200
    },
    {
      "epoch": 1.53,
      "eval_gptsynthetic_accuracy": 0.9987933634992459,
      "eval_gptsynthetic_loss": 0.005168143659830093,
      "eval_gptsynthetic_runtime": 45.8689,
      "eval_gptsynthetic_samples_per_second": 72.271,
      "eval_gptsynthetic_steps_per_second": 7.238,
      "step": 3200
    },
    {
      "epoch": 1.53,
      "learning_rate": 7.361155419160121e-06,
      "loss": 0.3573,
      "step": 3210
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.343575971960171e-06,
      "loss": 0.3677,
      "step": 3220
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.325959319456386e-06,
      "loss": 0.365,
      "step": 3230
    },
    {
      "epoch": 1.55,
      "learning_rate": 7.308305741320907e-06,
      "loss": 0.3241,
      "step": 3240
    },
    {
      "epoch": 1.55,
      "learning_rate": 7.290615517812084e-06,
      "loss": 0.3639,
      "step": 3250
    },
    {
      "epoch": 1.56,
      "learning_rate": 7.27288892977003e-06,
      "loss": 0.3535,
      "step": 3260
    },
    {
      "epoch": 1.56,
      "learning_rate": 7.255126258612163e-06,
      "loss": 0.364,
      "step": 3270
    },
    {
      "epoch": 1.56,
      "learning_rate": 7.237327786328732e-06,
      "loss": 0.3728,
      "step": 3280
    },
    {
      "epoch": 1.57,
      "learning_rate": 7.219493795478347e-06,
      "loss": 0.3598,
      "step": 3290
    },
    {
      "epoch": 1.57,
      "learning_rate": 7.201624569183492e-06,
      "loss": 0.3749,
      "step": 3300
    },
    {
      "epoch": 1.58,
      "learning_rate": 7.183720391126028e-06,
      "loss": 0.3361,
      "step": 3310
    },
    {
      "epoch": 1.58,
      "learning_rate": 7.16578154554269e-06,
      "loss": 0.3383,
      "step": 3320
    },
    {
      "epoch": 1.59,
      "learning_rate": 7.147808317220578e-06,
      "loss": 0.3424,
      "step": 3330
    },
    {
      "epoch": 1.59,
      "learning_rate": 7.12980099149263e-06,
      "loss": 0.3627,
      "step": 3340
    },
    {
      "epoch": 1.6,
      "learning_rate": 7.111759854233096e-06,
      "loss": 0.3498,
      "step": 3350
    },
    {
      "epoch": 1.6,
      "learning_rate": 7.093685191853003e-06,
      "loss": 0.3838,
      "step": 3360
    },
    {
      "epoch": 1.61,
      "learning_rate": 7.075577291295599e-06,
      "loss": 0.3638,
      "step": 3370
    },
    {
      "epoch": 1.61,
      "learning_rate": 7.0574364400318014e-06,
      "loss": 0.3737,
      "step": 3380
    },
    {
      "epoch": 1.62,
      "learning_rate": 7.0392629260556425e-06,
      "loss": 0.367,
      "step": 3390
    },
    {
      "epoch": 1.62,
      "learning_rate": 7.021057037879681e-06,
      "loss": 0.3778,
      "step": 3400
    },
    {
      "epoch": 1.62,
      "eval_webgpt_accuracy": 0.5845250255362615,
      "eval_webgpt_loss": 0.6846094727516174,
      "eval_webgpt_runtime": 63.3427,
      "eval_webgpt_samples_per_second": 61.822,
      "eval_webgpt_steps_per_second": 6.189,
      "step": 3400
    },
    {
      "epoch": 1.62,
      "eval_hfsummary_accuracy": 0.6690445243780794,
      "eval_hfsummary_loss": 0.6792688369750977,
      "eval_hfsummary_runtime": 1245.6911,
      "eval_hfsummary_samples_per_second": 26.558,
      "eval_hfsummary_steps_per_second": 2.656,
      "step": 3400
    },
    {
      "epoch": 1.62,
      "eval_gptsynthetic_accuracy": 0.9984917043740573,
      "eval_gptsynthetic_loss": 0.005154031794518232,
      "eval_gptsynthetic_runtime": 45.8488,
      "eval_gptsynthetic_samples_per_second": 72.303,
      "eval_gptsynthetic_steps_per_second": 7.241,
      "step": 3400
    },
    {
      "epoch": 1.63,
      "learning_rate": 7.002819064530437e-06,
      "loss": 0.3706,
      "step": 3410
    },
    {
      "epoch": 1.63,
      "learning_rate": 6.984549295543795e-06,
      "loss": 0.3743,
      "step": 3420
    },
    {
      "epoch": 1.64,
      "learning_rate": 6.966248020960404e-06,
      "loss": 0.3613,
      "step": 3430
    },
    {
      "epoch": 1.64,
      "learning_rate": 6.947915531321085e-06,
      "loss": 0.3375,
      "step": 3440
    },
    {
      "epoch": 1.65,
      "learning_rate": 6.929552117662211e-06,
      "loss": 0.3515,
      "step": 3450
    },
    {
      "epoch": 1.65,
      "learning_rate": 6.911158071511084e-06,
      "loss": 0.357,
      "step": 3460
    },
    {
      "epoch": 1.66,
      "learning_rate": 6.892733684881313e-06,
      "loss": 0.3267,
      "step": 3470
    },
    {
      "epoch": 1.66,
      "learning_rate": 6.874279250268174e-06,
      "loss": 0.3562,
      "step": 3480
    },
    {
      "epoch": 1.67,
      "learning_rate": 6.85579506064397e-06,
      "loss": 0.361,
      "step": 3490
    },
    {
      "epoch": 1.67,
      "learning_rate": 6.837281409453376e-06,
      "loss": 0.3452,
      "step": 3500
    },
    {
      "epoch": 1.67,
      "learning_rate": 6.818738590608782e-06,
      "loss": 0.348,
      "step": 3510
    },
    {
      "epoch": 1.68,
      "learning_rate": 6.800166898485632e-06,
      "loss": 0.3496,
      "step": 3520
    },
    {
      "epoch": 1.68,
      "learning_rate": 6.781566627917738e-06,
      "loss": 0.3548,
      "step": 3530
    },
    {
      "epoch": 1.69,
      "learning_rate": 6.762938074192615e-06,
      "loss": 0.3302,
      "step": 3540
    },
    {
      "epoch": 1.69,
      "learning_rate": 6.744281533046782e-06,
      "loss": 0.3447,
      "step": 3550
    },
    {
      "epoch": 1.7,
      "learning_rate": 6.725597300661071e-06,
      "loss": 0.3615,
      "step": 3560
    },
    {
      "epoch": 1.7,
      "learning_rate": 6.706885673655924e-06,
      "loss": 0.3263,
      "step": 3570
    },
    {
      "epoch": 1.71,
      "learning_rate": 6.688146949086688e-06,
      "loss": 0.3258,
      "step": 3580
    },
    {
      "epoch": 1.71,
      "learning_rate": 6.669381424438892e-06,
      "loss": 0.3483,
      "step": 3590
    },
    {
      "epoch": 1.72,
      "learning_rate": 6.650589397623528e-06,
      "loss": 0.3643,
      "step": 3600
    },
    {
      "epoch": 1.72,
      "eval_webgpt_accuracy": 0.5893769152196119,
      "eval_webgpt_loss": 0.6877950429916382,
      "eval_webgpt_runtime": 63.3493,
      "eval_webgpt_samples_per_second": 61.816,
      "eval_webgpt_steps_per_second": 6.188,
      "step": 3600
    },
    {
      "epoch": 1.72,
      "eval_hfsummary_accuracy": 0.6664147749599492,
      "eval_hfsummary_loss": 0.7294792532920837,
      "eval_hfsummary_runtime": 1242.1345,
| "eval_hfsummary_samples_per_second": 26.634, | |
| "eval_hfsummary_steps_per_second": 2.664, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 1.72, | |
| "eval_gptsynthetic_accuracy": 0.9993966817496229, | |
| "eval_gptsynthetic_loss": 0.005264561623334885, | |
| "eval_gptsynthetic_runtime": 45.9664, | |
| "eval_gptsynthetic_samples_per_second": 72.118, | |
| "eval_gptsynthetic_steps_per_second": 7.223, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 1.72, | |
| "learning_rate": 6.6317711669723265e-06, | |
| "loss": 0.3443, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "learning_rate": 6.61292703123301e-06, | |
| "loss": 0.3539, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "learning_rate": 6.594057289564557e-06, | |
| "loss": 0.3123, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 1.74, | |
| "learning_rate": 6.575162241532453e-06, | |
| "loss": 0.3047, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 1.74, | |
| "learning_rate": 6.556242187103934e-06, | |
| "loss": 0.3687, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "learning_rate": 6.53729742664322e-06, | |
| "loss": 0.3652, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "learning_rate": 6.518328260906753e-06, | |
| "loss": 0.3493, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 1.76, | |
| "learning_rate": 6.49933499103842e-06, | |
| "loss": 0.3637, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 1.76, | |
| "learning_rate": 6.480317918564767e-06, | |
| "loss": 0.3464, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 6.4612773453902244e-06, | |
| "loss": 0.3376, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 6.4422135737923e-06, | |
| "loss": 0.3473, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 6.423126906416789e-06, | |
| "loss": 0.3356, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 1.78, | |
| "learning_rate": 6.40401764627297e-06, | |
| "loss": 0.3374, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 1.78, | |
| "learning_rate": 6.384886096728789e-06, | |
| "loss": 0.3544, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "learning_rate": 6.365732561506044e-06, | |
| "loss": 0.3361, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "learning_rate": 6.346557344675568e-06, | |
| "loss": 0.3689, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 1.8, | |
| "learning_rate": 6.327360750652402e-06, | |
| "loss": 0.359, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 1.8, | |
| "learning_rate": 6.308143084190954e-06, | |
| "loss": 0.3423, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "learning_rate": 6.2889046503801685e-06, | |
| "loss": 0.3392, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "learning_rate": 6.269645754638682e-06, | |
| "loss": 0.342, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "eval_webgpt_accuracy": 0.5842696629213483, | |
| "eval_webgpt_loss": 0.6836753487586975, | |
| "eval_webgpt_runtime": 63.4162, | |
| "eval_webgpt_samples_per_second": 61.751, | |
| "eval_webgpt_steps_per_second": 6.181, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "eval_hfsummary_accuracy": 0.6713720037481486, | |
| "eval_hfsummary_loss": 0.7460022568702698, | |
| "eval_hfsummary_runtime": 1243.8848, | |
| "eval_hfsummary_samples_per_second": 26.597, | |
| "eval_hfsummary_steps_per_second": 2.66, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "eval_gptsynthetic_accuracy": 0.9987933634992459, | |
| "eval_gptsynthetic_loss": 0.005401162896305323, | |
| "eval_gptsynthetic_runtime": 46.0774, | |
| "eval_gptsynthetic_samples_per_second": 71.944, | |
| "eval_gptsynthetic_steps_per_second": 7.205, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 1.82, | |
| "learning_rate": 6.250366702709971e-06, | |
| "loss": 0.3678, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 1.82, | |
| "learning_rate": 6.231067800657502e-06, | |
| "loss": 0.3446, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 1.83, | |
| "learning_rate": 6.211749354859871e-06, | |
| "loss": 0.3247, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 1.83, | |
| "learning_rate": 6.192411672005937e-06, | |
| "loss": 0.3639, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 6.173055059089958e-06, | |
| "loss": 0.3581, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 6.1536798234067175e-06, | |
| "loss": 0.3423, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 1.85, | |
| "learning_rate": 6.134286272546639e-06, | |
| "loss": 0.3623, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 1.85, | |
| "learning_rate": 6.11487471439091e-06, | |
| "loss": 0.321, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 1.86, | |
| "learning_rate": 6.0954454571065925e-06, | |
| "loss": 0.3363, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 1.86, | |
| "learning_rate": 6.0759988091417266e-06, | |
| "loss": 0.3392, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 1.87, | |
| "learning_rate": 6.0565350792204406e-06, | |
| "loss": 0.3458, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 1.87, | |
| "learning_rate": 6.037054576338044e-06, | |
| "loss": 0.3742, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 1.87, | |
| "learning_rate": 6.0175576097561265e-06, | |
| "loss": 0.3518, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 1.88, | |
| "learning_rate": 5.998044488997645e-06, | |
| "loss": 0.3489, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 1.88, | |
| "learning_rate": 5.9785155238420135e-06, | |
| "loss": 0.3411, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 1.89, | |
| "learning_rate": 5.958971024320179e-06, | |
| "loss": 0.3326, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 1.89, | |
| "learning_rate": 5.9394113007097045e-06, | |
| "loss": 0.322, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 1.9, | |
| "learning_rate": 5.919836663529845e-06, | |
| "loss": 0.3343, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 1.9, | |
| "learning_rate": 5.900247423536611e-06, | |
| "loss": 0.3551, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 1.91, | |
| "learning_rate": 5.880643891717842e-06, | |
| "loss": 0.3344, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 1.91, | |
| "eval_webgpt_accuracy": 0.5906537282941777, | |
| "eval_webgpt_loss": 0.6790869832038879, | |
| "eval_webgpt_runtime": 63.367, | |
| "eval_webgpt_samples_per_second": 61.799, | |
| "eval_webgpt_steps_per_second": 6.186, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 1.91, | |
| "eval_hfsummary_accuracy": 0.6683795302723453, | |
| "eval_hfsummary_loss": 0.7165785431861877, | |
| "eval_hfsummary_runtime": 1243.3678, | |
| "eval_hfsummary_samples_per_second": 26.608, | |
| "eval_hfsummary_steps_per_second": 2.661, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 1.91, | |
| "eval_gptsynthetic_accuracy": 0.9984917043740573, | |
| "eval_gptsynthetic_loss": 0.005581920500844717, | |
| "eval_gptsynthetic_runtime": 46.0581, | |
| "eval_gptsynthetic_samples_per_second": 71.974, | |
| "eval_gptsynthetic_steps_per_second": 7.208, | |
| "step": 4000 | |
| } | |
| ], | |
| "max_steps": 4192, | |
| "num_train_epochs": 2, | |
| "total_flos": 2.4953197931002634e+17, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
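
The file above is the complete `log_history` dump, so it can be consumed programmatically. As a minimal sketch (assuming the JSON is saved as `trainer_state.json`; the path and the use of matplotlib are my own choices, not part of the log), one might separate the training entries, which carry a `"loss"` key, from the eval entries, which instead carry `eval_<dataset>_*` keys, and plot them against `"step"`:

```python
import json

import matplotlib.pyplot as plt

# Hypothetical path; point this at the checkpoint's actual state file.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-step entries log "loss" and "learning_rate"; eval entries
# log per-dataset metrics such as "eval_webgpt_accuracy" instead.
train_entries = [e for e in state["log_history"] if "loss" in e]
eval_entries = [e for e in state["log_history"] if "eval_webgpt_accuracy" in e]

plt.plot([e["step"] for e in train_entries],
         [e["loss"] for e in train_entries],
         label="train loss")
plt.plot([e["step"] for e in eval_entries],
         [e["eval_webgpt_accuracy"] for e in eval_entries],
         marker="o", label="webgpt eval accuracy")
plt.xlabel("step")
plt.legend()
plt.show()
```

The same pattern extends to the other logged datasets (`eval_hfsummary_*`, `eval_gptsynthetic_*`) by swapping the key used to select and plot eval entries.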