{
"best_metric": 0.32749298214912415,
"best_model_checkpoint": "sft-svgeez-blocks-20251101T005904Z/checkpoint-1000",
"epoch": 0.3290981535242886,
"eval_steps": 100,
"global_step": 3500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009402804386408247,
"grad_norm": 1.0298110246658325,
"learning_rate": 0.0004999990597085097,
"loss": 0.9462,
"step": 10
},
{
"epoch": 0.0018805608772816493,
"grad_norm": 0.5167974233627319,
"learning_rate": 0.0004999981194170193,
"loss": 0.551,
"step": 20
},
{
"epoch": 0.002820841315922474,
"grad_norm": 0.40330085158348083,
"learning_rate": 0.0004999971791255289,
"loss": 0.509,
"step": 30
},
{
"epoch": 0.0037611217545632986,
"grad_norm": 0.3540050983428955,
"learning_rate": 0.0004999962388340385,
"loss": 0.4456,
"step": 40
},
{
"epoch": 0.004701402193204123,
"grad_norm": 0.4030086100101471,
"learning_rate": 0.0004999952985425483,
"loss": 0.4583,
"step": 50
},
{
"epoch": 0.005641682631844948,
"grad_norm": 0.5594690442085266,
"learning_rate": 0.0004999943582510579,
"loss": 0.4327,
"step": 60
},
{
"epoch": 0.006581963070485772,
"grad_norm": 0.3243617117404938,
"learning_rate": 0.0004999934179595675,
"loss": 0.4256,
"step": 70
},
{
"epoch": 0.007522243509126597,
"grad_norm": 0.3709908723831177,
"learning_rate": 0.0004999924776680771,
"loss": 0.4215,
"step": 80
},
{
"epoch": 0.008462523947767421,
"grad_norm": 0.3056599795818329,
"learning_rate": 0.0004999915373765868,
"loss": 0.4193,
"step": 90
},
{
"epoch": 0.009402804386408246,
"grad_norm": 0.34599125385284424,
"learning_rate": 0.0004999905970850964,
"loss": 0.4168,
"step": 100
},
{
"epoch": 0.009402804386408246,
"eval_loss": 0.39591750502586365,
"eval_runtime": 23.584,
"eval_samples_per_second": 2.12,
"eval_steps_per_second": 0.297,
"step": 100
},
{
"epoch": 0.010343084825049071,
"grad_norm": 0.4576414227485657,
"learning_rate": 0.000499989656793606,
"loss": 0.4149,
"step": 110
},
{
"epoch": 0.011283365263689896,
"grad_norm": 0.3045823574066162,
"learning_rate": 0.0004999887165021157,
"loss": 0.4045,
"step": 120
},
{
"epoch": 0.01222364570233072,
"grad_norm": 0.33852407336235046,
"learning_rate": 0.0004999877762106253,
"loss": 0.389,
"step": 130
},
{
"epoch": 0.013163926140971545,
"grad_norm": 0.3256877064704895,
"learning_rate": 0.0004999868359191349,
"loss": 0.4015,
"step": 140
},
{
"epoch": 0.01410420657961237,
"grad_norm": 0.33292344212532043,
"learning_rate": 0.0004999858956276445,
"loss": 0.4029,
"step": 150
},
{
"epoch": 0.015044487018253195,
"grad_norm": 0.4369369149208069,
"learning_rate": 0.0004999849553361542,
"loss": 0.3838,
"step": 160
},
{
"epoch": 0.01598476745689402,
"grad_norm": 0.26670923829078674,
"learning_rate": 0.0004999840150446638,
"loss": 0.3628,
"step": 170
},
{
"epoch": 0.016925047895534843,
"grad_norm": 0.2846743166446686,
"learning_rate": 0.0004999830747531736,
"loss": 0.4065,
"step": 180
},
{
"epoch": 0.01786532833417567,
"grad_norm": 0.3469683527946472,
"learning_rate": 0.0004999821344616832,
"loss": 0.3773,
"step": 190
},
{
"epoch": 0.018805608772816493,
"grad_norm": 0.2996629476547241,
"learning_rate": 0.0004999811941701928,
"loss": 0.3879,
"step": 200
},
{
"epoch": 0.018805608772816493,
"eval_loss": 0.37008655071258545,
"eval_runtime": 23.5018,
"eval_samples_per_second": 2.127,
"eval_steps_per_second": 0.298,
"step": 200
},
{
"epoch": 0.019745889211457316,
"grad_norm": 0.3994966149330139,
"learning_rate": 0.0004999802538787024,
"loss": 0.4097,
"step": 210
},
{
"epoch": 0.020686169650098143,
"grad_norm": 0.4101763069629669,
"learning_rate": 0.0004999793135872121,
"loss": 0.4091,
"step": 220
},
{
"epoch": 0.021626450088738966,
"grad_norm": 0.3082752823829651,
"learning_rate": 0.0004999783732957217,
"loss": 0.3997,
"step": 230
},
{
"epoch": 0.022566730527379793,
"grad_norm": 0.32445040345191956,
"learning_rate": 0.0004999774330042313,
"loss": 0.4026,
"step": 240
},
{
"epoch": 0.023507010966020616,
"grad_norm": 0.37064942717552185,
"learning_rate": 0.000499976492712741,
"loss": 0.3608,
"step": 250
},
{
"epoch": 0.02444729140466144,
"grad_norm": 0.3851112723350525,
"learning_rate": 0.0004999755524212506,
"loss": 0.3889,
"step": 260
},
{
"epoch": 0.025387571843302266,
"grad_norm": 0.25871652364730835,
"learning_rate": 0.0004999746121297602,
"loss": 0.3681,
"step": 270
},
{
"epoch": 0.02632785228194309,
"grad_norm": 0.34905803203582764,
"learning_rate": 0.0004999736718382698,
"loss": 0.3822,
"step": 280
},
{
"epoch": 0.027268132720583912,
"grad_norm": 0.3242252767086029,
"learning_rate": 0.0004999727315467795,
"loss": 0.3992,
"step": 290
},
{
"epoch": 0.02820841315922474,
"grad_norm": 0.359190434217453,
"learning_rate": 0.0004999717912552891,
"loss": 0.4125,
"step": 300
},
{
"epoch": 0.02820841315922474,
"eval_loss": 0.3554922044277191,
"eval_runtime": 23.4721,
"eval_samples_per_second": 2.13,
"eval_steps_per_second": 0.298,
"step": 300
},
{
"epoch": 0.029148693597865562,
"grad_norm": 0.2699418067932129,
"learning_rate": 0.0004999708509637987,
"loss": 0.3724,
"step": 310
},
{
"epoch": 0.03008897403650639,
"grad_norm": 0.3270084261894226,
"learning_rate": 0.0004999699106723084,
"loss": 0.3878,
"step": 320
},
{
"epoch": 0.031029254475147212,
"grad_norm": 0.32981258630752563,
"learning_rate": 0.0004999689703808181,
"loss": 0.3944,
"step": 330
},
{
"epoch": 0.03196953491378804,
"grad_norm": 0.3394843637943268,
"learning_rate": 0.0004999680300893277,
"loss": 0.3673,
"step": 340
},
{
"epoch": 0.03290981535242886,
"grad_norm": 0.32261306047439575,
"learning_rate": 0.0004999670897978374,
"loss": 0.4093,
"step": 350
},
{
"epoch": 0.033850095791069686,
"grad_norm": 0.2744098901748657,
"learning_rate": 0.000499966149506347,
"loss": 0.3488,
"step": 360
},
{
"epoch": 0.03479037622971051,
"grad_norm": 0.26060158014297485,
"learning_rate": 0.0004999652092148566,
"loss": 0.3753,
"step": 370
},
{
"epoch": 0.03573065666835134,
"grad_norm": 0.37372156977653503,
"learning_rate": 0.0004999642689233663,
"loss": 0.3452,
"step": 380
},
{
"epoch": 0.03667093710699216,
"grad_norm": 0.18904727697372437,
"learning_rate": 0.0004999633286318759,
"loss": 0.3356,
"step": 390
},
{
"epoch": 0.037611217545632986,
"grad_norm": 0.3282572627067566,
"learning_rate": 0.0004999623883403855,
"loss": 0.3852,
"step": 400
},
{
"epoch": 0.037611217545632986,
"eval_loss": 0.35085391998291016,
"eval_runtime": 23.4698,
"eval_samples_per_second": 2.13,
"eval_steps_per_second": 0.298,
"step": 400
},
{
"epoch": 0.03855149798427381,
"grad_norm": 0.2760799527168274,
"learning_rate": 0.0004999614480488951,
"loss": 0.3551,
"step": 410
},
{
"epoch": 0.03949177842291463,
"grad_norm": 0.276055246591568,
"learning_rate": 0.0004999605077574048,
"loss": 0.3423,
"step": 420
},
{
"epoch": 0.04043205886155546,
"grad_norm": 0.24734950065612793,
"learning_rate": 0.0004999595674659144,
"loss": 0.353,
"step": 430
},
{
"epoch": 0.041372339300196286,
"grad_norm": 0.26533621549606323,
"learning_rate": 0.000499958627174424,
"loss": 0.3876,
"step": 440
},
{
"epoch": 0.042312619738837105,
"grad_norm": 0.3085733950138092,
"learning_rate": 0.0004999576868829337,
"loss": 0.3701,
"step": 450
},
{
"epoch": 0.04325290017747793,
"grad_norm": 0.2946242690086365,
"learning_rate": 0.0004999567465914434,
"loss": 0.3897,
"step": 460
},
{
"epoch": 0.04419318061611876,
"grad_norm": 0.2757997214794159,
"learning_rate": 0.000499955806299953,
"loss": 0.343,
"step": 470
},
{
"epoch": 0.045133461054759585,
"grad_norm": 0.3827458620071411,
"learning_rate": 0.0004999548660084626,
"loss": 0.4293,
"step": 480
},
{
"epoch": 0.046073741493400405,
"grad_norm": 0.2692118287086487,
"learning_rate": 0.0004999539257169723,
"loss": 0.3621,
"step": 490
},
{
"epoch": 0.04701402193204123,
"grad_norm": 0.3991619050502777,
"learning_rate": 0.0004999529854254819,
"loss": 0.3646,
"step": 500
},
{
"epoch": 0.04701402193204123,
"eval_loss": 0.3504406809806824,
"eval_runtime": 23.45,
"eval_samples_per_second": 2.132,
"eval_steps_per_second": 0.299,
"step": 500
},
{
"epoch": 0.04795430237068206,
"grad_norm": 0.27486175298690796,
"learning_rate": 0.0004999520451339916,
"loss": 0.3791,
"step": 510
},
{
"epoch": 0.04889458280932288,
"grad_norm": 0.37556731700897217,
"learning_rate": 0.0004999511048425012,
"loss": 0.3781,
"step": 520
},
{
"epoch": 0.049834863247963705,
"grad_norm": 0.34461846947669983,
"learning_rate": 0.0004999501645510108,
"loss": 0.3809,
"step": 530
},
{
"epoch": 0.05077514368660453,
"grad_norm": 0.27397575974464417,
"learning_rate": 0.0004999492242595204,
"loss": 0.3601,
"step": 540
},
{
"epoch": 0.05171542412524535,
"grad_norm": 0.3164900541305542,
"learning_rate": 0.0004999482839680301,
"loss": 0.3666,
"step": 550
},
{
"epoch": 0.05265570456388618,
"grad_norm": 0.2617564797401428,
"learning_rate": 0.0004999473436765397,
"loss": 0.3555,
"step": 560
},
{
"epoch": 0.053595985002527005,
"grad_norm": 0.2753169536590576,
"learning_rate": 0.0004999464033850493,
"loss": 0.351,
"step": 570
},
{
"epoch": 0.054536265441167825,
"grad_norm": 0.41116294264793396,
"learning_rate": 0.0004999454630935591,
"loss": 0.3667,
"step": 580
},
{
"epoch": 0.05547654587980865,
"grad_norm": 0.3187413215637207,
"learning_rate": 0.0004999445228020687,
"loss": 0.3279,
"step": 590
},
{
"epoch": 0.05641682631844948,
"grad_norm": 0.3258746862411499,
"learning_rate": 0.0004999435825105783,
"loss": 0.3827,
"step": 600
},
{
"epoch": 0.05641682631844948,
"eval_loss": 0.3369905948638916,
"eval_runtime": 23.4639,
"eval_samples_per_second": 2.131,
"eval_steps_per_second": 0.298,
"step": 600
},
{
"epoch": 0.057357106757090305,
"grad_norm": 0.2566978633403778,
"learning_rate": 0.0004999426422190879,
"loss": 0.3674,
"step": 610
},
{
"epoch": 0.058297387195731125,
"grad_norm": 0.22622275352478027,
"learning_rate": 0.0004999417019275976,
"loss": 0.3553,
"step": 620
},
{
"epoch": 0.05923766763437195,
"grad_norm": 0.2654806971549988,
"learning_rate": 0.0004999407616361072,
"loss": 0.3475,
"step": 630
},
{
"epoch": 0.06017794807301278,
"grad_norm": 0.3390786051750183,
"learning_rate": 0.0004999398213446168,
"loss": 0.3796,
"step": 640
},
{
"epoch": 0.0611182285116536,
"grad_norm": 0.2524166405200958,
"learning_rate": 0.0004999388810531264,
"loss": 0.3635,
"step": 650
},
{
"epoch": 0.062058508950294425,
"grad_norm": 0.2794824242591858,
"learning_rate": 0.0004999379407616361,
"loss": 0.3527,
"step": 660
},
{
"epoch": 0.06299878938893524,
"grad_norm": 0.1920047104358673,
"learning_rate": 0.0004999370004701457,
"loss": 0.336,
"step": 670
},
{
"epoch": 0.06393906982757608,
"grad_norm": 0.29339122772216797,
"learning_rate": 0.0004999360601786554,
"loss": 0.3654,
"step": 680
},
{
"epoch": 0.0648793502662169,
"grad_norm": 0.3135516345500946,
"learning_rate": 0.000499935119887165,
"loss": 0.3653,
"step": 690
},
{
"epoch": 0.06581963070485772,
"grad_norm": 0.2592516541481018,
"learning_rate": 0.0004999341795956747,
"loss": 0.3746,
"step": 700
},
{
"epoch": 0.06581963070485772,
"eval_loss": 0.3413720726966858,
"eval_runtime": 23.4709,
"eval_samples_per_second": 2.13,
"eval_steps_per_second": 0.298,
"step": 700
},
{
"epoch": 0.06675991114349855,
"grad_norm": 0.35426065325737,
"learning_rate": 0.0004999332393041844,
"loss": 0.3485,
"step": 710
},
{
"epoch": 0.06770019158213937,
"grad_norm": 0.22092205286026,
"learning_rate": 0.000499932299012694,
"loss": 0.3813,
"step": 720
},
{
"epoch": 0.06864047202078019,
"grad_norm": 0.31250303983688354,
"learning_rate": 0.0004999313587212036,
"loss": 0.3431,
"step": 730
},
{
"epoch": 0.06958075245942102,
"grad_norm": 0.3179270029067993,
"learning_rate": 0.0004999304184297132,
"loss": 0.3671,
"step": 740
},
{
"epoch": 0.07052103289806184,
"grad_norm": 0.2921147644519806,
"learning_rate": 0.0004999294781382229,
"loss": 0.3479,
"step": 750
},
{
"epoch": 0.07146131333670268,
"grad_norm": 0.2732735872268677,
"learning_rate": 0.0004999285378467325,
"loss": 0.382,
"step": 760
},
{
"epoch": 0.0724015937753435,
"grad_norm": 0.2021898329257965,
"learning_rate": 0.0004999275975552421,
"loss": 0.3342,
"step": 770
},
{
"epoch": 0.07334187421398432,
"grad_norm": 0.2908518314361572,
"learning_rate": 0.0004999266572637517,
"loss": 0.3512,
"step": 780
},
{
"epoch": 0.07428215465262515,
"grad_norm": 0.4414116144180298,
"learning_rate": 0.0004999257169722614,
"loss": 0.3505,
"step": 790
},
{
"epoch": 0.07522243509126597,
"grad_norm": 0.3312692940235138,
"learning_rate": 0.000499924776680771,
"loss": 0.3734,
"step": 800
},
{
"epoch": 0.07522243509126597,
"eval_loss": 0.3315250277519226,
"eval_runtime": 23.4745,
"eval_samples_per_second": 2.13,
"eval_steps_per_second": 0.298,
"step": 800
},
{
"epoch": 0.07616271552990679,
"grad_norm": 0.2597099244594574,
"learning_rate": 0.0004999238363892806,
"loss": 0.3493,
"step": 810
},
{
"epoch": 0.07710299596854762,
"grad_norm": 0.3256906270980835,
"learning_rate": 0.0004999228960977903,
"loss": 0.3392,
"step": 820
},
{
"epoch": 0.07804327640718844,
"grad_norm": 0.3068447709083557,
"learning_rate": 0.0004999219558063,
"loss": 0.354,
"step": 830
},
{
"epoch": 0.07898355684582926,
"grad_norm": 0.3219122588634491,
"learning_rate": 0.0004999210155148097,
"loss": 0.3484,
"step": 840
},
{
"epoch": 0.0799238372844701,
"grad_norm": 0.2608373761177063,
"learning_rate": 0.0004999200752233193,
"loss": 0.3492,
"step": 850
},
{
"epoch": 0.08086411772311092,
"grad_norm": 0.3617205023765564,
"learning_rate": 0.0004999191349318289,
"loss": 0.3705,
"step": 860
},
{
"epoch": 0.08180439816175174,
"grad_norm": 0.2846531569957733,
"learning_rate": 0.0004999181946403385,
"loss": 0.3302,
"step": 870
},
{
"epoch": 0.08274467860039257,
"grad_norm": 0.18934500217437744,
"learning_rate": 0.0004999172543488482,
"loss": 0.3426,
"step": 880
},
{
"epoch": 0.08368495903903339,
"grad_norm": 0.3601396977901459,
"learning_rate": 0.0004999163140573578,
"loss": 0.3447,
"step": 890
},
{
"epoch": 0.08462523947767421,
"grad_norm": 0.3532989025115967,
"learning_rate": 0.0004999153737658674,
"loss": 0.3852,
"step": 900
},
{
"epoch": 0.08462523947767421,
"eval_loss": 0.33136090636253357,
"eval_runtime": 23.4849,
"eval_samples_per_second": 2.129,
"eval_steps_per_second": 0.298,
"step": 900
},
{
"epoch": 0.08556551991631504,
"grad_norm": 0.3184758424758911,
"learning_rate": 0.0004999144334743771,
"loss": 0.3579,
"step": 910
},
{
"epoch": 0.08650580035495586,
"grad_norm": 0.25410690903663635,
"learning_rate": 0.0004999134931828867,
"loss": 0.3566,
"step": 920
},
{
"epoch": 0.08744608079359668,
"grad_norm": 0.2750070095062256,
"learning_rate": 0.0004999125528913963,
"loss": 0.3635,
"step": 930
},
{
"epoch": 0.08838636123223752,
"grad_norm": 0.2559872567653656,
"learning_rate": 0.0004999116125999059,
"loss": 0.3448,
"step": 940
},
{
"epoch": 0.08932664167087834,
"grad_norm": 0.29350370168685913,
"learning_rate": 0.0004999106723084156,
"loss": 0.3601,
"step": 950
},
{
"epoch": 0.09026692210951917,
"grad_norm": 0.24211618304252625,
"learning_rate": 0.0004999097320169253,
"loss": 0.3473,
"step": 960
},
{
"epoch": 0.09120720254815999,
"grad_norm": 0.23498541116714478,
"learning_rate": 0.0004999087917254349,
"loss": 0.3326,
"step": 970
},
{
"epoch": 0.09214748298680081,
"grad_norm": 0.283318430185318,
"learning_rate": 0.0004999078514339445,
"loss": 0.3631,
"step": 980
},
{
"epoch": 0.09308776342544164,
"grad_norm": 0.31163477897644043,
"learning_rate": 0.0004999069111424542,
"loss": 0.3651,
"step": 990
},
{
"epoch": 0.09402804386408246,
"grad_norm": 0.337285578250885,
"learning_rate": 0.0004999059708509638,
"loss": 0.3422,
"step": 1000
},
{
"epoch": 0.09402804386408246,
"eval_loss": 0.32749298214912415,
"eval_runtime": 23.4901,
"eval_samples_per_second": 2.129,
"eval_steps_per_second": 0.298,
"step": 1000
},
{
"epoch": 0.09496832430272328,
"grad_norm": 0.31577935814857483,
"learning_rate": 0.0004999050305594735,
"loss": 0.3468,
"step": 1010
},
{
"epoch": 0.09590860474136412,
"grad_norm": 0.2720831036567688,
"learning_rate": 0.0004999040902679831,
"loss": 0.368,
"step": 1020
},
{
"epoch": 0.09684888518000494,
"grad_norm": 76.24451446533203,
"learning_rate": 0.0004999031499764927,
"loss": 0.9196,
"step": 1030
},
{
"epoch": 0.09778916561864576,
"grad_norm": 0.8638234734535217,
"learning_rate": 0.0004999022096850024,
"loss": 0.5762,
"step": 1040
},
{
"epoch": 0.09872944605728659,
"grad_norm": 1.1701819896697998,
"learning_rate": 0.000499901269393512,
"loss": 0.4064,
"step": 1050
},
{
"epoch": 0.09966972649592741,
"grad_norm": 0.5215079188346863,
"learning_rate": 0.0004999003291020216,
"loss": 0.3978,
"step": 1060
},
{
"epoch": 0.10061000693456823,
"grad_norm": 13.439558982849121,
"learning_rate": 0.0004998993888105312,
"loss": 0.4368,
"step": 1070
},
{
"epoch": 0.10155028737320906,
"grad_norm": 0.7944605350494385,
"learning_rate": 0.000499898448519041,
"loss": 0.401,
"step": 1080
},
{
"epoch": 0.10249056781184988,
"grad_norm": 1.53361177444458,
"learning_rate": 0.0004998975082275506,
"loss": 0.463,
"step": 1090
},
{
"epoch": 0.1034308482504907,
"grad_norm": 186.09454345703125,
"learning_rate": 0.0004998965679360602,
"loss": 3.0032,
"step": 1100
},
{
"epoch": 0.1034308482504907,
"eval_loss": 1.301244854927063,
"eval_runtime": 23.4353,
"eval_samples_per_second": 2.134,
"eval_steps_per_second": 0.299,
"step": 1100
},
{
"epoch": 0.10437112868913154,
"grad_norm": 43.43181228637695,
"learning_rate": 0.0004998956276445698,
"loss": 15.2307,
"step": 1110
},
{
"epoch": 0.10531140912777236,
"grad_norm": 34.82008743286133,
"learning_rate": 0.0004998946873530795,
"loss": 23.3276,
"step": 1120
},
{
"epoch": 0.10625168956641318,
"grad_norm": 14.430869102478027,
"learning_rate": 0.0004998937470615891,
"loss": 9.6299,
"step": 1130
},
{
"epoch": 0.10719197000505401,
"grad_norm": 5.102875232696533,
"learning_rate": 0.0004998928067700987,
"loss": 5.5005,
"step": 1140
},
{
"epoch": 0.10813225044369483,
"grad_norm": 6.0567827224731445,
"learning_rate": 0.0004998918664786084,
"loss": 3.8907,
"step": 1150
},
{
"epoch": 0.10907253088233565,
"grad_norm": 3.007990837097168,
"learning_rate": 0.000499890926187118,
"loss": 3.3603,
"step": 1160
},
{
"epoch": 0.11001281132097648,
"grad_norm": 2.6830623149871826,
"learning_rate": 0.0004998899858956277,
"loss": 3.225,
"step": 1170
},
{
"epoch": 0.1109530917596173,
"grad_norm": 5.232982635498047,
"learning_rate": 0.0004998890456041373,
"loss": 3.0515,
"step": 1180
},
{
"epoch": 0.11189337219825814,
"grad_norm": 2.788641929626465,
"learning_rate": 0.0004998881053126469,
"loss": 3.2129,
"step": 1190
},
{
"epoch": 0.11283365263689896,
"grad_norm": 3.4526102542877197,
"learning_rate": 0.0004998871650211565,
"loss": 3.0174,
"step": 1200
},
{
"epoch": 0.11283365263689896,
"eval_loss": 3.0720202922821045,
"eval_runtime": 23.0809,
"eval_samples_per_second": 2.166,
"eval_steps_per_second": 0.303,
"step": 1200
},
{
"epoch": 0.11377393307553978,
"grad_norm": 1.8571786880493164,
"learning_rate": 0.0004998862247296662,
"loss": 3.0736,
"step": 1210
},
{
"epoch": 0.11471421351418061,
"grad_norm": 1.886637568473816,
"learning_rate": 0.0004998852844381759,
"loss": 2.9814,
"step": 1220
},
{
"epoch": 0.11565449395282143,
"grad_norm": 2.7894158363342285,
"learning_rate": 0.0004998843441466855,
"loss": 3.6537,
"step": 1230
},
{
"epoch": 0.11659477439146225,
"grad_norm": 1.016867995262146,
"learning_rate": 0.0004998834038551951,
"loss": 2.9557,
"step": 1240
},
{
"epoch": 0.11753505483010308,
"grad_norm": 2.153256893157959,
"learning_rate": 0.0004998824635637048,
"loss": 3.013,
"step": 1250
},
{
"epoch": 0.1184753352687439,
"grad_norm": 3.2111806869506836,
"learning_rate": 0.0004998815232722144,
"loss": 2.9692,
"step": 1260
},
{
"epoch": 0.11941561570738472,
"grad_norm": 1.3798747062683105,
"learning_rate": 0.000499880582980724,
"loss": 2.962,
"step": 1270
},
{
"epoch": 0.12035589614602556,
"grad_norm": 1.0820046663284302,
"learning_rate": 0.0004998796426892337,
"loss": 2.9139,
"step": 1280
},
{
"epoch": 0.12129617658466638,
"grad_norm": 2.0282669067382812,
"learning_rate": 0.0004998787023977433,
"loss": 2.9114,
"step": 1290
},
{
"epoch": 0.1222364570233072,
"grad_norm": 1.1617417335510254,
"learning_rate": 0.0004998777621062529,
"loss": 2.8328,
"step": 1300
},
{
"epoch": 0.1222364570233072,
"eval_loss": 2.820981740951538,
"eval_runtime": 23.1533,
"eval_samples_per_second": 2.16,
"eval_steps_per_second": 0.302,
"step": 1300
},
{
"epoch": 0.12317673746194803,
"grad_norm": 1.6711792945861816,
"learning_rate": 0.0004998768218147625,
"loss": 2.8469,
"step": 1310
},
{
"epoch": 0.12411701790058885,
"grad_norm": 2.5305206775665283,
"learning_rate": 0.0004998758815232722,
"loss": 2.9003,
"step": 1320
},
{
"epoch": 0.12505729833922968,
"grad_norm": 1.6004911661148071,
"learning_rate": 0.0004998749412317818,
"loss": 2.8594,
"step": 1330
},
{
"epoch": 0.1259975787778705,
"grad_norm": 1.7168315649032593,
"learning_rate": 0.0004998740009402916,
"loss": 2.8353,
"step": 1340
},
{
"epoch": 0.12693785921651132,
"grad_norm": 565.294921875,
"learning_rate": 0.0004998730606488012,
"loss": 2.8984,
"step": 1350
},
{
"epoch": 0.12787813965515216,
"grad_norm": 4.877725124359131,
"learning_rate": 0.0004998721203573108,
"loss": 11.9556,
"step": 1360
},
{
"epoch": 0.12881842009379296,
"grad_norm": 2.709376811981201,
"learning_rate": 0.0004998711800658205,
"loss": 3.237,
"step": 1370
},
{
"epoch": 0.1297587005324338,
"grad_norm": 1.6279547214508057,
"learning_rate": 0.0004998702397743301,
"loss": 2.9603,
"step": 1380
},
{
"epoch": 0.13069898097107463,
"grad_norm": 2.7646572589874268,
"learning_rate": 0.0004998692994828397,
"loss": 3.0022,
"step": 1390
},
{
"epoch": 0.13163926140971544,
"grad_norm": 3.1686997413635254,
"learning_rate": 0.0004998683591913493,
"loss": 2.9407,
"step": 1400
},
{
"epoch": 0.13163926140971544,
"eval_loss": 2.8426969051361084,
"eval_runtime": 23.0531,
"eval_samples_per_second": 2.169,
"eval_steps_per_second": 0.304,
"step": 1400
},
{
"epoch": 0.13257954184835627,
"grad_norm": 1.504326581954956,
"learning_rate": 0.000499867418899859,
"loss": 2.8927,
"step": 1410
},
{
"epoch": 0.1335198222869971,
"grad_norm": 1.797013282775879,
"learning_rate": 0.0004998664786083686,
"loss": 2.8821,
"step": 1420
},
{
"epoch": 0.1344601027256379,
"grad_norm": 1.7129989862442017,
"learning_rate": 0.0004998655383168782,
"loss": 2.8462,
"step": 1430
},
{
"epoch": 0.13540038316427874,
"grad_norm": 556.15869140625,
"learning_rate": 0.0004998645980253878,
"loss": 3.6939,
"step": 1440
},
{
"epoch": 0.13634066360291958,
"grad_norm": 131.738525390625,
"learning_rate": 0.0004998636577338975,
"loss": 4.5958,
"step": 1450
},
{
"epoch": 0.13728094404156038,
"grad_norm": 14.271523475646973,
"learning_rate": 0.0004998627174424071,
"loss": 11.9558,
"step": 1460
},
{
"epoch": 0.13822122448020122,
"grad_norm": 36.148284912109375,
"learning_rate": 0.0004998617771509167,
"loss": 8.5098,
"step": 1470
},
{
"epoch": 0.13916150491884205,
"grad_norm": 2.539207696914673,
"learning_rate": 0.0004998608368594265,
"loss": 8.0085,
"step": 1480
},
{
"epoch": 0.14010178535748288,
"grad_norm": 2.990248918533325,
"learning_rate": 0.0004998598965679361,
"loss": 7.787,
"step": 1490
},
{
"epoch": 0.1410420657961237,
"grad_norm": 20.940954208374023,
"learning_rate": 0.0004998589562764458,
"loss": 6.7366,
"step": 1500
},
{
"epoch": 0.1410420657961237,
"eval_loss": 6.252957820892334,
"eval_runtime": 23.2195,
"eval_samples_per_second": 2.153,
"eval_steps_per_second": 0.301,
"step": 1500
},
{
"epoch": 0.14198234623476452,
"grad_norm": 2.390953779220581,
"learning_rate": 0.0004998580159849554,
"loss": 5.7212,
"step": 1510
},
{
"epoch": 0.14292262667340536,
"grad_norm": 3.413762092590332,
"learning_rate": 0.000499857075693465,
"loss": 4.7978,
"step": 1520
},
{
"epoch": 0.14386290711204616,
"grad_norm": 4.431119441986084,
"learning_rate": 0.0004998561354019746,
"loss": 4.8096,
"step": 1530
},
{
"epoch": 0.144803187550687,
"grad_norm": 427.5173645019531,
"learning_rate": 0.0004998551951104843,
"loss": 4.6306,
"step": 1540
},
{
"epoch": 0.14574346798932783,
"grad_norm": 7.0377678871154785,
"learning_rate": 0.0004998542548189939,
"loss": 7.1266,
"step": 1550
},
{
"epoch": 0.14668374842796864,
"grad_norm": 5.000734329223633,
"learning_rate": 0.0004998533145275035,
"loss": 5.2694,
"step": 1560
},
{
"epoch": 0.14762402886660947,
"grad_norm": 239.30496215820312,
"learning_rate": 0.0004998523742360131,
"loss": 4.5831,
"step": 1570
},
{
"epoch": 0.1485643093052503,
"grad_norm": 2.7288706302642822,
"learning_rate": 0.0004998514339445228,
"loss": 4.1964,
"step": 1580
},
{
"epoch": 0.1495045897438911,
"grad_norm": 6616.22509765625,
"learning_rate": 0.0004998504936530324,
"loss": 4.9638,
"step": 1590
},
{
"epoch": 0.15044487018253194,
"grad_norm": 5570.67822265625,
"learning_rate": 0.000499849553361542,
"loss": 10.6239,
"step": 1600
},
{
"epoch": 0.15044487018253194,
"eval_loss": 17.817279815673828,
"eval_runtime": 23.3339,
"eval_samples_per_second": 2.143,
"eval_steps_per_second": 0.3,
"step": 1600
},
{
"epoch": 0.15138515062117278,
"grad_norm": 313.7498474121094,
"learning_rate": 0.0004998486130700518,
"loss": 18.1327,
"step": 1610
},
{
"epoch": 0.15232543105981358,
"grad_norm": 333.5940856933594,
"learning_rate": 0.0004998476727785614,
"loss": 10.4126,
"step": 1620
},
{
"epoch": 0.15326571149845442,
"grad_norm": 6.390124797821045,
"learning_rate": 0.000499846732487071,
"loss": 25.1629,
"step": 1630
},
{
"epoch": 0.15420599193709525,
"grad_norm": 328.04443359375,
"learning_rate": 0.0004998457921955807,
"loss": 14.1132,
"step": 1640
},
{
"epoch": 0.15514627237573606,
"grad_norm": 412.7569885253906,
"learning_rate": 0.0004998448519040903,
"loss": 22.5887,
"step": 1650
},
{
"epoch": 0.1560865528143769,
"grad_norm": 56.239742279052734,
"learning_rate": 0.0004998439116125999,
"loss": 16.1849,
"step": 1660
},
{
"epoch": 0.15702683325301772,
"grad_norm": 135.1570281982422,
"learning_rate": 0.0004998429713211096,
"loss": 11.6209,
"step": 1670
},
{
"epoch": 0.15796711369165853,
"grad_norm": 5.806948661804199,
"learning_rate": 0.0004998420310296192,
"loss": 7.57,
"step": 1680
},
{
"epoch": 0.15890739413029936,
"grad_norm": 2.2571475505828857,
"learning_rate": 0.0004998410907381288,
"loss": 6.2436,
"step": 1690
},
{
"epoch": 0.1598476745689402,
"grad_norm": 3.34604549407959,
"learning_rate": 0.0004998401504466384,
"loss": 5.9399,
"step": 1700
},
{
"epoch": 0.1598476745689402,
"eval_loss": 5.785297870635986,
"eval_runtime": 23.2051,
"eval_samples_per_second": 2.155,
"eval_steps_per_second": 0.302,
"step": 1700
},
{
"epoch": 0.160787955007581,
"grad_norm": 132.94261169433594,
"learning_rate": 0.0004998392101551481,
"loss": 8.0906,
"step": 1710
},
{
"epoch": 0.16172823544622184,
"grad_norm": 18.748916625976562,
"learning_rate": 0.0004998382698636577,
"loss": 12.5014,
"step": 1720
},
{
"epoch": 0.16266851588486267,
"grad_norm": 30.582904815673828,
"learning_rate": 0.0004998373295721673,
"loss": 12.5519,
"step": 1730
},
{
"epoch": 0.16360879632350347,
"grad_norm": 4.576255798339844,
"learning_rate": 0.0004998363892806771,
"loss": 5.5364,
"step": 1740
},
{
"epoch": 0.1645490767621443,
"grad_norm": 2.24480938911438,
"learning_rate": 0.0004998354489891867,
"loss": 4.0297,
"step": 1750
},
{
"epoch": 0.16548935720078514,
"grad_norm": 1.5888965129852295,
"learning_rate": 0.0004998345086976963,
"loss": 3.1305,
"step": 1760
},
{
"epoch": 0.16642963763942595,
"grad_norm": 1.2058303356170654,
"learning_rate": 0.0004998335684062059,
"loss": 2.8331,
"step": 1770
},
{
"epoch": 0.16736991807806678,
"grad_norm": 12.554035186767578,
"learning_rate": 0.0004998326281147156,
"loss": 2.664,
"step": 1780
},
{
"epoch": 0.16831019851670762,
"grad_norm": 3.1639647483825684,
"learning_rate": 0.0004998316878232252,
"loss": 4.2002,
"step": 1790
},
{
"epoch": 0.16925047895534842,
"grad_norm": 27.61470603942871,
"learning_rate": 0.0004998307475317348,
"loss": 2.916,
"step": 1800
},
{
"epoch": 0.16925047895534842,
"eval_loss": 2.9362294673919678,
"eval_runtime": 23.3374,
"eval_samples_per_second": 2.142,
"eval_steps_per_second": 0.3,
"step": 1800
},
{
"epoch": 0.17019075939398925,
"grad_norm": 2.170280694961548,
"learning_rate": 0.0004998298072402445,
"loss": 2.6845,
"step": 1810
},
{
"epoch": 0.1711310398326301,
"grad_norm": 0.7772692441940308,
"learning_rate": 0.0004998288669487541,
"loss": 2.5667,
"step": 1820
},
{
"epoch": 0.1720713202712709,
"grad_norm": 3.4145731925964355,
"learning_rate": 0.0004998279266572638,
"loss": 2.5062,
"step": 1830
},
{
"epoch": 0.17301160070991173,
"grad_norm": 1.2681496143341064,
"learning_rate": 0.0004998269863657734,
"loss": 2.465,
"step": 1840
},
{
"epoch": 0.17395188114855256,
"grad_norm": 1.6735230684280396,
"learning_rate": 0.000499826046074283,
"loss": 2.4389,
"step": 1850
},
{
"epoch": 0.17489216158719337,
"grad_norm": 8.941509246826172,
"learning_rate": 0.0004998251057827926,
"loss": 2.4641,
"step": 1860
},
{
"epoch": 0.1758324420258342,
"grad_norm": 1.6397817134857178,
"learning_rate": 0.0004998241654913024,
"loss": 2.6467,
"step": 1870
},
{
"epoch": 0.17677272246447504,
"grad_norm": 1.612599492073059,
"learning_rate": 0.000499823225199812,
"loss": 2.4431,
"step": 1880
},
{
"epoch": 0.17771300290311584,
"grad_norm": 3.2251834869384766,
"learning_rate": 0.0004998222849083216,
"loss": 2.1863,
"step": 1890
},
{
"epoch": 0.17865328334175667,
"grad_norm": 0.8718411326408386,
"learning_rate": 0.0004998213446168312,
"loss": 2.1126,
"step": 1900
},
{
"epoch": 0.17865328334175667,
"eval_loss": 2.0840113162994385,
"eval_runtime": 23.3136,
"eval_samples_per_second": 2.145,
"eval_steps_per_second": 0.3,
"step": 1900
},
{
"epoch": 0.1795935637803975,
"grad_norm": 1.0821764469146729,
"learning_rate": 0.0004998204043253409,
"loss": 2.0873,
"step": 1910
},
{
"epoch": 0.18053384421903834,
"grad_norm": 0.78948575258255,
"learning_rate": 0.0004998194640338505,
"loss": 2.0275,
"step": 1920
},
{
"epoch": 0.18147412465767915,
"grad_norm": 1.659589171409607,
"learning_rate": 0.0004998185237423601,
"loss": 1.9527,
"step": 1930
},
{
"epoch": 0.18241440509631998,
"grad_norm": 1.2942698001861572,
"learning_rate": 0.0004998175834508698,
"loss": 2.0333,
"step": 1940
},
{
"epoch": 0.18335468553496082,
"grad_norm": 1.4142636060714722,
"learning_rate": 0.0004998166431593794,
"loss": 2.1385,
"step": 1950
},
{
"epoch": 0.18429496597360162,
"grad_norm": 1.0006928443908691,
"learning_rate": 0.000499815702867889,
"loss": 2.1326,
"step": 1960
},
{
"epoch": 0.18523524641224245,
"grad_norm": 2.224665641784668,
"learning_rate": 0.0004998147625763987,
"loss": 1.9148,
"step": 1970
},
{
"epoch": 0.1861755268508833,
"grad_norm": 1.2691782712936401,
"learning_rate": 0.0004998138222849083,
"loss": 1.9243,
"step": 1980
},
{
"epoch": 0.1871158072895241,
"grad_norm": 0.6900967955589294,
"learning_rate": 0.000499812881993418,
"loss": 2.0292,
"step": 1990
},
{
"epoch": 0.18805608772816493,
"grad_norm": 0.5726048350334167,
"learning_rate": 0.0004998119417019277,
"loss": 1.9845,
"step": 2000
},
{
"epoch": 0.18805608772816493,
"eval_loss": 1.867889404296875,
"eval_runtime": 23.2923,
"eval_samples_per_second": 2.147,
"eval_steps_per_second": 0.301,
"step": 2000
},
{
"epoch": 0.18899636816680576,
"grad_norm": 1.1626911163330078,
"learning_rate": 0.0004998110014104373,
"loss": 1.8727,
"step": 2010
},
{
"epoch": 0.18993664860544657,
"grad_norm": 1.16129732131958,
"learning_rate": 0.0004998100611189469,
"loss": 1.8666,
"step": 2020
},
{
"epoch": 0.1908769290440874,
"grad_norm": 0.680419921875,
"learning_rate": 0.0004998091208274565,
"loss": 1.7736,
"step": 2030
},
{
"epoch": 0.19181720948272823,
"grad_norm": 0.830495297908783,
"learning_rate": 0.0004998081805359662,
"loss": 1.8423,
"step": 2040
},
{
"epoch": 0.19275748992136904,
"grad_norm": 1.4571064710617065,
"learning_rate": 0.0004998072402444758,
"loss": 1.7769,
"step": 2050
},
{
"epoch": 0.19369777036000987,
"grad_norm": 1.1363842487335205,
"learning_rate": 0.0004998062999529854,
"loss": 1.8004,
"step": 2060
},
{
"epoch": 0.1946380507986507,
"grad_norm": 0.9440115094184875,
"learning_rate": 0.0004998053596614951,
"loss": 1.809,
"step": 2070
},
{
"epoch": 0.19557833123729151,
"grad_norm": 0.7449987530708313,
"learning_rate": 0.0004998044193700047,
"loss": 1.81,
"step": 2080
},
{
"epoch": 0.19651861167593235,
"grad_norm": 2.2598133087158203,
"learning_rate": 0.0004998034790785143,
"loss": 1.7547,
"step": 2090
},
{
"epoch": 0.19745889211457318,
"grad_norm": 0.42398279905319214,
"learning_rate": 0.0004998025387870239,
"loss": 1.7931,
"step": 2100
},
{
"epoch": 0.19745889211457318,
"eval_loss": 1.7216485738754272,
"eval_runtime": 23.3215,
"eval_samples_per_second": 2.144,
"eval_steps_per_second": 0.3,
"step": 2100
},
{
"epoch": 0.198399172553214,
"grad_norm": 0.644854724407196,
"learning_rate": 0.0004998015984955336,
"loss": 1.7352,
"step": 2110
},
{
"epoch": 0.19933945299185482,
"grad_norm": 0.8509730696678162,
"learning_rate": 0.0004998006582040432,
"loss": 1.7574,
"step": 2120
},
{
"epoch": 0.20027973343049565,
"grad_norm": 3.4616425037384033,
"learning_rate": 0.000499799717912553,
"loss": 1.7036,
"step": 2130
},
{
"epoch": 0.20122001386913646,
"grad_norm": 3.8178634643554688,
"learning_rate": 0.0004997987776210626,
"loss": 1.8085,
"step": 2140
},
{
"epoch": 0.2021602943077773,
"grad_norm": 2.551295042037964,
"learning_rate": 0.0004997978373295722,
"loss": 1.909,
"step": 2150
},
{
"epoch": 0.20310057474641813,
"grad_norm": 1.2975966930389404,
"learning_rate": 0.0004997968970380818,
"loss": 1.8048,
"step": 2160
},
{
"epoch": 0.20404085518505893,
"grad_norm": 5.3859782218933105,
"learning_rate": 0.0004997959567465915,
"loss": 1.7722,
"step": 2170
},
{
"epoch": 0.20498113562369977,
"grad_norm": 1.2567672729492188,
"learning_rate": 0.0004997950164551011,
"loss": 1.8425,
"step": 2180
},
{
"epoch": 0.2059214160623406,
"grad_norm": 0.9618350267410278,
"learning_rate": 0.0004997940761636107,
"loss": 1.732,
"step": 2190
},
{
"epoch": 0.2068616965009814,
"grad_norm": 0.9928431510925293,
"learning_rate": 0.0004997931358721204,
"loss": 1.738,
"step": 2200
},
{
"epoch": 0.2068616965009814,
"eval_loss": 1.6831611394882202,
"eval_runtime": 23.4087,
"eval_samples_per_second": 2.136,
"eval_steps_per_second": 0.299,
"step": 2200
},
{
"epoch": 0.20780197693962224,
"grad_norm": 1.1968138217926025,
"learning_rate": 0.00049979219558063,
"loss": 1.8048,
"step": 2210
},
{
"epoch": 0.20874225737826307,
"grad_norm": 7.316364765167236,
"learning_rate": 0.0004997912552891396,
"loss": 1.7093,
"step": 2220
},
{
"epoch": 0.20968253781690388,
"grad_norm": 3.233787775039673,
"learning_rate": 0.0004997903149976492,
"loss": 1.784,
"step": 2230
},
{
"epoch": 0.2106228182555447,
"grad_norm": 5.9083943367004395,
"learning_rate": 0.0004997893747061589,
"loss": 1.7086,
"step": 2240
},
{
"epoch": 0.21156309869418555,
"grad_norm": 1.5619888305664062,
"learning_rate": 0.0004997884344146686,
"loss": 1.731,
"step": 2250
},
{
"epoch": 0.21250337913282635,
"grad_norm": 7.746670722961426,
"learning_rate": 0.0004997874941231782,
"loss": 1.743,
"step": 2260
},
{
"epoch": 0.2134436595714672,
"grad_norm": 1.831390380859375,
"learning_rate": 0.0004997865538316878,
"loss": 1.737,
"step": 2270
},
{
"epoch": 0.21438394001010802,
"grad_norm": 1.6762924194335938,
"learning_rate": 0.0004997856135401975,
"loss": 1.7683,
"step": 2280
},
{
"epoch": 0.21532422044874883,
"grad_norm": 1.1603010892868042,
"learning_rate": 0.0004997846732487071,
"loss": 1.6562,
"step": 2290
},
{
"epoch": 0.21626450088738966,
"grad_norm": 2.6877505779266357,
"learning_rate": 0.0004997837329572168,
"loss": 1.6558,
"step": 2300
},
{
"epoch": 0.21626450088738966,
"eval_loss": 1.589247465133667,
"eval_runtime": 23.3396,
"eval_samples_per_second": 2.142,
"eval_steps_per_second": 0.3,
"step": 2300
},
{
"epoch": 0.2172047813260305,
"grad_norm": 1.8261255025863647,
"learning_rate": 0.0004997827926657264,
"loss": 1.5507,
"step": 2310
},
{
"epoch": 0.2181450617646713,
"grad_norm": 2.71986722946167,
"learning_rate": 0.000499781852374236,
"loss": 1.6683,
"step": 2320
},
{
"epoch": 0.21908534220331213,
"grad_norm": 5.95999002456665,
"learning_rate": 0.0004997809120827457,
"loss": 1.6454,
"step": 2330
},
{
"epoch": 0.22002562264195297,
"grad_norm": 1.8024688959121704,
"learning_rate": 0.0004997799717912553,
"loss": 1.555,
"step": 2340
},
{
"epoch": 0.2209659030805938,
"grad_norm": 3.2990245819091797,
"learning_rate": 0.0004997790314997649,
"loss": 1.5852,
"step": 2350
},
{
"epoch": 0.2219061835192346,
"grad_norm": 1.2326282262802124,
"learning_rate": 0.0004997780912082745,
"loss": 1.5787,
"step": 2360
},
{
"epoch": 0.22284646395787544,
"grad_norm": 1.383510708808899,
"learning_rate": 0.0004997771509167842,
"loss": 1.6182,
"step": 2370
},
{
"epoch": 0.22378674439651627,
"grad_norm": 0.9165681004524231,
"learning_rate": 0.0004997762106252939,
"loss": 1.6288,
"step": 2380
},
{
"epoch": 0.22472702483515708,
"grad_norm": 0.948382556438446,
"learning_rate": 0.0004997752703338035,
"loss": 1.531,
"step": 2390
},
{
"epoch": 0.2256673052737979,
"grad_norm": 2.0418496131896973,
"learning_rate": 0.0004997743300423132,
"loss": 1.5406,
"step": 2400
},
{
"epoch": 0.2256673052737979,
"eval_loss": 1.5306854248046875,
"eval_runtime": 23.3808,
"eval_samples_per_second": 2.139,
"eval_steps_per_second": 0.299,
"step": 2400
},
{
"epoch": 0.22660758571243875,
"grad_norm": 1.046777606010437,
"learning_rate": 0.0004997733897508228,
"loss": 1.5605,
"step": 2410
},
{
"epoch": 0.22754786615107955,
"grad_norm": 0.8935179710388184,
"learning_rate": 0.0004997724494593324,
"loss": 1.6534,
"step": 2420
},
{
"epoch": 0.2284881465897204,
"grad_norm": 0.9660417437553406,
"learning_rate": 0.000499771509167842,
"loss": 1.596,
"step": 2430
},
{
"epoch": 0.22942842702836122,
"grad_norm": 2.3814828395843506,
"learning_rate": 0.0004997705688763517,
"loss": 1.597,
"step": 2440
},
{
"epoch": 0.23036870746700203,
"grad_norm": 0.8121759295463562,
"learning_rate": 0.0004997696285848613,
"loss": 1.5244,
"step": 2450
},
{
"epoch": 0.23130898790564286,
"grad_norm": 0.5510159134864807,
"learning_rate": 0.000499768688293371,
"loss": 1.428,
"step": 2460
},
{
"epoch": 0.2322492683442837,
"grad_norm": 1.1485031843185425,
"learning_rate": 0.0004997677480018806,
"loss": 1.4615,
"step": 2470
},
{
"epoch": 0.2331895487829245,
"grad_norm": 0.5952764749526978,
"learning_rate": 0.0004997668077103902,
"loss": 1.4398,
"step": 2480
},
{
"epoch": 0.23412982922156533,
"grad_norm": 0.9520612955093384,
"learning_rate": 0.0004997658674188998,
"loss": 1.4441,
"step": 2490
},
{
"epoch": 0.23507010966020617,
"grad_norm": 0.6743489503860474,
"learning_rate": 0.0004997649271274095,
"loss": 1.4732,
"step": 2500
},
{
"epoch": 0.23507010966020617,
"eval_loss": 1.4287002086639404,
"eval_runtime": 23.4405,
"eval_samples_per_second": 2.133,
"eval_steps_per_second": 0.299,
"step": 2500
},
{
"epoch": 0.23601039009884697,
"grad_norm": 4.528964042663574,
"learning_rate": 0.0004997639868359192,
"loss": 1.62,
"step": 2510
},
{
"epoch": 0.2369506705374878,
"grad_norm": 4.225714206695557,
"learning_rate": 0.0004997630465444288,
"loss": 1.6018,
"step": 2520
},
{
"epoch": 0.23789095097612864,
"grad_norm": 2.7450878620147705,
"learning_rate": 0.0004997621062529385,
"loss": 1.6816,
"step": 2530
},
{
"epoch": 0.23883123141476945,
"grad_norm": 10.117358207702637,
"learning_rate": 0.0004997611659614481,
"loss": 1.5368,
"step": 2540
},
{
"epoch": 0.23977151185341028,
"grad_norm": 2.393857479095459,
"learning_rate": 0.0004997602256699577,
"loss": 1.4701,
"step": 2550
},
{
"epoch": 0.2407117922920511,
"grad_norm": 1.3840343952178955,
"learning_rate": 0.0004997592853784673,
"loss": 1.4058,
"step": 2560
},
{
"epoch": 0.24165207273069192,
"grad_norm": 0.7663922309875488,
"learning_rate": 0.000499758345086977,
"loss": 1.438,
"step": 2570
},
{
"epoch": 0.24259235316933275,
"grad_norm": 0.7590422034263611,
"learning_rate": 0.0004997574047954866,
"loss": 1.4687,
"step": 2580
},
{
"epoch": 0.2435326336079736,
"grad_norm": 0.7124899625778198,
"learning_rate": 0.0004997564645039962,
"loss": 1.3658,
"step": 2590
},
{
"epoch": 0.2444729140466144,
"grad_norm": 11.045734405517578,
"learning_rate": 0.0004997555242125058,
"loss": 1.4189,
"step": 2600
},
{
"epoch": 0.2444729140466144,
"eval_loss": 1.5365136861801147,
"eval_runtime": 23.3831,
"eval_samples_per_second": 2.138,
"eval_steps_per_second": 0.299,
"step": 2600
},
{
"epoch": 0.24541319448525523,
"grad_norm": 2.682727098464966,
"learning_rate": 0.0004997545839210155,
"loss": 1.4508,
"step": 2610
},
{
"epoch": 0.24635347492389606,
"grad_norm": 0.9717720150947571,
"learning_rate": 0.0004997536436295252,
"loss": 1.4464,
"step": 2620
},
{
"epoch": 0.24729375536253687,
"grad_norm": 1.8743679523468018,
"learning_rate": 0.0004997527033380348,
"loss": 1.4021,
"step": 2630
},
{
"epoch": 0.2482340358011777,
"grad_norm": 1.289175033569336,
"learning_rate": 0.0004997517630465445,
"loss": 1.357,
"step": 2640
},
{
"epoch": 0.24917431623981853,
"grad_norm": 0.9077774286270142,
"learning_rate": 0.0004997508227550541,
"loss": 1.3944,
"step": 2650
},
{
"epoch": 0.25011459667845937,
"grad_norm": 2.2975730895996094,
"learning_rate": 0.0004997498824635638,
"loss": 1.3348,
"step": 2660
},
{
"epoch": 0.2510548771171002,
"grad_norm": 2.115493059158325,
"learning_rate": 0.0004997489421720734,
"loss": 1.3377,
"step": 2670
},
{
"epoch": 0.251995157555741,
"grad_norm": 1.3777943849563599,
"learning_rate": 0.000499748001880583,
"loss": 1.3416,
"step": 2680
},
{
"epoch": 0.25293543799438184,
"grad_norm": 1.3281394243240356,
"learning_rate": 0.0004997470615890926,
"loss": 1.3459,
"step": 2690
},
{
"epoch": 0.25387571843302265,
"grad_norm": 2.637329339981079,
"learning_rate": 0.0004997461212976023,
"loss": 1.34,
"step": 2700
},
{
"epoch": 0.25387571843302265,
"eval_loss": 1.296167254447937,
"eval_runtime": 23.3786,
"eval_samples_per_second": 2.139,
"eval_steps_per_second": 0.299,
"step": 2700
},
{
"epoch": 0.25481599887166345,
"grad_norm": 672.3903198242188,
"learning_rate": 0.0004997451810061119,
"loss": 1.3394,
"step": 2710
},
{
"epoch": 0.2557562793103043,
"grad_norm": 5.488917350769043,
"learning_rate": 0.0004997442407146215,
"loss": 1.6872,
"step": 2720
},
{
"epoch": 0.2566965597489451,
"grad_norm": 4.547423362731934,
"learning_rate": 0.0004997433004231312,
"loss": 1.801,
"step": 2730
},
{
"epoch": 0.2576368401875859,
"grad_norm": 4.342709541320801,
"learning_rate": 0.0004997423601316408,
"loss": 1.6263,
"step": 2740
},
{
"epoch": 0.2585771206262268,
"grad_norm": 2.8083598613739014,
"learning_rate": 0.0004997414198401504,
"loss": 1.4672,
"step": 2750
},
{
"epoch": 0.2595174010648676,
"grad_norm": 12.495756149291992,
"learning_rate": 0.00049974047954866,
"loss": 1.5147,
"step": 2760
},
{
"epoch": 0.2604576815035084,
"grad_norm": 1.7585903406143188,
"learning_rate": 0.0004997395392571698,
"loss": 1.3448,
"step": 2770
},
{
"epoch": 0.26139796194214926,
"grad_norm": 3.717991828918457,
"learning_rate": 0.0004997385989656794,
"loss": 1.3659,
"step": 2780
},
{
"epoch": 0.26233824238079007,
"grad_norm": 2.144385814666748,
"learning_rate": 0.0004997376586741891,
"loss": 1.3789,
"step": 2790
},
{
"epoch": 0.26327852281943087,
"grad_norm": 2.580501079559326,
"learning_rate": 0.0004997367183826987,
"loss": 1.3448,
"step": 2800
},
{
"epoch": 0.26327852281943087,
"eval_loss": 1.302243947982788,
"eval_runtime": 23.3817,
"eval_samples_per_second": 2.138,
"eval_steps_per_second": 0.299,
"step": 2800
},
{
"epoch": 0.26421880325807173,
"grad_norm": 1.1073391437530518,
"learning_rate": 0.0004997357780912083,
"loss": 1.3094,
"step": 2810
},
{
"epoch": 0.26515908369671254,
"grad_norm": 8.599945068359375,
"learning_rate": 0.0004997348377997179,
"loss": 1.3772,
"step": 2820
},
{
"epoch": 0.26609936413535334,
"grad_norm": 2.3976120948791504,
"learning_rate": 0.0004997338975082276,
"loss": 1.3514,
"step": 2830
},
{
"epoch": 0.2670396445739942,
"grad_norm": 1.5972365140914917,
"learning_rate": 0.0004997329572167372,
"loss": 1.3456,
"step": 2840
},
{
"epoch": 0.267979925012635,
"grad_norm": 0.8447502255439758,
"learning_rate": 0.0004997320169252468,
"loss": 1.3476,
"step": 2850
},
{
"epoch": 0.2689202054512758,
"grad_norm": 27.381818771362305,
"learning_rate": 0.0004997310766337565,
"loss": 1.4778,
"step": 2860
},
{
"epoch": 0.2698604858899167,
"grad_norm": 765.4880981445312,
"learning_rate": 0.0004997301363422661,
"loss": 1.4474,
"step": 2870
},
{
"epoch": 0.2708007663285575,
"grad_norm": 216.736083984375,
"learning_rate": 0.0004997291960507757,
"loss": 1.5884,
"step": 2880
},
{
"epoch": 0.2717410467671983,
"grad_norm": 93.36593627929688,
"learning_rate": 0.0004997282557592853,
"loss": 1.6368,
"step": 2890
},
{
"epoch": 0.27268132720583915,
"grad_norm": 20.37236785888672,
"learning_rate": 0.0004997273154677951,
"loss": 1.509,
"step": 2900
},
{
"epoch": 0.27268132720583915,
"eval_loss": 1.399726152420044,
"eval_runtime": 23.4532,
"eval_samples_per_second": 2.132,
"eval_steps_per_second": 0.298,
"step": 2900
},
{
"epoch": 0.27362160764447996,
"grad_norm": 1902.80908203125,
"learning_rate": 0.0004997263751763047,
"loss": 1.9083,
"step": 2910
},
{
"epoch": 0.27456188808312076,
"grad_norm": 1722.0035400390625,
"learning_rate": 0.0004997254348848143,
"loss": 3.0141,
"step": 2920
},
{
"epoch": 0.2755021685217616,
"grad_norm": 99672.4609375,
"learning_rate": 0.0004997244945933239,
"loss": 3.9002,
"step": 2930
},
{
"epoch": 0.27644244896040243,
"grad_norm": 354.4461975097656,
"learning_rate": 0.0004997235543018336,
"loss": 3.8713,
"step": 2940
},
{
"epoch": 0.27738272939904324,
"grad_norm": 10427.4267578125,
"learning_rate": 0.0004997226140103432,
"loss": 6.3345,
"step": 2950
},
{
"epoch": 0.2783230098376841,
"grad_norm": 27135.962890625,
"learning_rate": 0.0004997216737188529,
"loss": 7.5059,
"step": 2960
},
{
"epoch": 0.2792632902763249,
"grad_norm": 8999.4658203125,
"learning_rate": 0.0004997207334273625,
"loss": 10.6852,
"step": 2970
},
{
"epoch": 0.28020357071496577,
"grad_norm": 4206.79296875,
"learning_rate": 0.0004997197931358721,
"loss": 10.5719,
"step": 2980
},
{
"epoch": 0.28114385115360657,
"grad_norm": 2594.4755859375,
"learning_rate": 0.0004997188528443818,
"loss": 7.0528,
"step": 2990
},
{
"epoch": 0.2820841315922474,
"grad_norm": 4728.44873046875,
"learning_rate": 0.0004997179125528914,
"loss": 11.2643,
"step": 3000
},
{
"epoch": 0.2820841315922474,
"eval_loss": 16.3485164642334,
"eval_runtime": 23.5167,
"eval_samples_per_second": 2.126,
"eval_steps_per_second": 0.298,
"step": 3000
},
{
"epoch": 0.28302441203088824,
"grad_norm": 53164.84765625,
"learning_rate": 0.000499716972261401,
"loss": 16.4994,
"step": 3010
},
{
"epoch": 0.28396469246952905,
"grad_norm": 37518.87890625,
"learning_rate": 0.0004997160319699106,
"loss": 15.7442,
"step": 3020
},
{
"epoch": 0.28490497290816985,
"grad_norm": 508.87457275390625,
"learning_rate": 0.0004997150916784204,
"loss": 20.826,
"step": 3030
},
{
"epoch": 0.2858452533468107,
"grad_norm": 1002924.4375,
"learning_rate": 0.00049971415138693,
"loss": 18.8212,
"step": 3040
},
{
"epoch": 0.2867855337854515,
"grad_norm": 1554.9080810546875,
"learning_rate": 0.0004997132110954396,
"loss": 16.4891,
"step": 3050
},
{
"epoch": 0.2877258142240923,
"grad_norm": 8659.119140625,
"learning_rate": 0.0004997122708039492,
"loss": 15.4686,
"step": 3060
},
{
"epoch": 0.2886660946627332,
"grad_norm": 1640.42431640625,
"learning_rate": 0.0004997113305124589,
"loss": 15.3862,
"step": 3070
},
{
"epoch": 0.289606375101374,
"grad_norm": 137.72244262695312,
"learning_rate": 0.0004997103902209685,
"loss": 11.182,
"step": 3080
},
{
"epoch": 0.2905466555400148,
"grad_norm": 91.92054748535156,
"learning_rate": 0.0004997094499294781,
"loss": 7.7615,
"step": 3090
},
{
"epoch": 0.29148693597865566,
"grad_norm": 7.070523262023926,
"learning_rate": 0.0004997085096379878,
"loss": 4.8142,
"step": 3100
},
{
"epoch": 0.29148693597865566,
"eval_loss": 3.298832416534424,
"eval_runtime": 23.4277,
"eval_samples_per_second": 2.134,
"eval_steps_per_second": 0.299,
"step": 3100
},
{
"epoch": 0.29242721641729646,
"grad_norm": 24.24034309387207,
"learning_rate": 0.0004997075693464974,
"loss": 2.6325,
"step": 3110
},
{
"epoch": 0.29336749685593727,
"grad_norm": 6.838109970092773,
"learning_rate": 0.0004997066290550071,
"loss": 2.2983,
"step": 3120
},
{
"epoch": 0.29430777729457813,
"grad_norm": 3.7729222774505615,
"learning_rate": 0.0004997056887635167,
"loss": 2.1722,
"step": 3130
},
{
"epoch": 0.29524805773321894,
"grad_norm": 5.889744281768799,
"learning_rate": 0.0004997047484720263,
"loss": 1.9276,
"step": 3140
},
{
"epoch": 0.29618833817185974,
"grad_norm": 5.473685264587402,
"learning_rate": 0.0004997038081805359,
"loss": 1.863,
"step": 3150
},
{
"epoch": 0.2971286186105006,
"grad_norm": 32.09117889404297,
"learning_rate": 0.0004997028678890457,
"loss": 1.9157,
"step": 3160
},
{
"epoch": 0.2980688990491414,
"grad_norm": 11.561924934387207,
"learning_rate": 0.0004997019275975553,
"loss": 1.8909,
"step": 3170
},
{
"epoch": 0.2990091794877822,
"grad_norm": 63.77189254760742,
"learning_rate": 0.0004997009873060649,
"loss": 2.0655,
"step": 3180
},
{
"epoch": 0.2999494599264231,
"grad_norm": 24.669649124145508,
"learning_rate": 0.0004997000470145746,
"loss": 2.1521,
"step": 3190
},
{
"epoch": 0.3008897403650639,
"grad_norm": 121.44429779052734,
"learning_rate": 0.0004996991067230842,
"loss": 1.9115,
"step": 3200
},
{
"epoch": 0.3008897403650639,
"eval_loss": 1.8945567607879639,
"eval_runtime": 23.2984,
"eval_samples_per_second": 2.146,
"eval_steps_per_second": 0.3,
"step": 3200
},
{
"epoch": 0.3018300208037047,
"grad_norm": 3.656569242477417,
"learning_rate": 0.0004996981664315938,
"loss": 1.8723,
"step": 3210
},
{
"epoch": 0.30277030124234555,
"grad_norm": 277.7506103515625,
"learning_rate": 0.0004996972261401034,
"loss": 1.8027,
"step": 3220
},
{
"epoch": 0.30371058168098636,
"grad_norm": 1.6577140092849731,
"learning_rate": 0.0004996962858486131,
"loss": 1.7684,
"step": 3230
},
{
"epoch": 0.30465086211962716,
"grad_norm": 1.2478042840957642,
"learning_rate": 0.0004996953455571227,
"loss": 1.6064,
"step": 3240
},
{
"epoch": 0.305591142558268,
"grad_norm": 2.421694278717041,
"learning_rate": 0.0004996944052656323,
"loss": 1.5867,
"step": 3250
},
{
"epoch": 0.30653142299690883,
"grad_norm": 2.4946632385253906,
"learning_rate": 0.0004996934649741419,
"loss": 1.4262,
"step": 3260
},
{
"epoch": 0.30747170343554964,
"grad_norm": 6.713217258453369,
"learning_rate": 0.0004996925246826516,
"loss": 1.457,
"step": 3270
},
{
"epoch": 0.3084119838741905,
"grad_norm": 34.560733795166016,
"learning_rate": 0.0004996915843911612,
"loss": 1.3478,
"step": 3280
},
{
"epoch": 0.3093522643128313,
"grad_norm": 1.8289706707000732,
"learning_rate": 0.000499690644099671,
"loss": 1.3625,
"step": 3290
},
{
"epoch": 0.3102925447514721,
"grad_norm": 2.612109661102295,
"learning_rate": 0.0004996897038081806,
"loss": 1.3903,
"step": 3300
},
{
"epoch": 0.3102925447514721,
"eval_loss": 1.3252699375152588,
"eval_runtime": 23.2988,
"eval_samples_per_second": 2.146,
"eval_steps_per_second": 0.3,
"step": 3300
},
{
"epoch": 0.31123282519011297,
"grad_norm": 1.1180143356323242,
"learning_rate": 0.0004996887635166902,
"loss": 1.3746,
"step": 3310
},
{
"epoch": 0.3121731056287538,
"grad_norm": 0.6881751418113708,
"learning_rate": 0.0004996878232251999,
"loss": 1.2946,
"step": 3320
},
{
"epoch": 0.3131133860673946,
"grad_norm": 2.2330400943756104,
"learning_rate": 0.0004996868829337095,
"loss": 1.2771,
"step": 3330
},
{
"epoch": 0.31405366650603544,
"grad_norm": 7.836499214172363,
"learning_rate": 0.0004996859426422191,
"loss": 1.2854,
"step": 3340
},
{
"epoch": 0.31499394694467625,
"grad_norm": 2.003983497619629,
"learning_rate": 0.0004996850023507287,
"loss": 1.307,
"step": 3350
},
{
"epoch": 0.31593422738331706,
"grad_norm": 1.6156337261199951,
"learning_rate": 0.0004996840620592384,
"loss": 1.303,
"step": 3360
},
{
"epoch": 0.3168745078219579,
"grad_norm": 1.6269570589065552,
"learning_rate": 0.000499683121767748,
"loss": 1.2844,
"step": 3370
},
{
"epoch": 0.3178147882605987,
"grad_norm": 1.5026659965515137,
"learning_rate": 0.0004996821814762576,
"loss": 1.2973,
"step": 3380
},
{
"epoch": 0.31875506869923953,
"grad_norm": 0.9272472858428955,
"learning_rate": 0.0004996812411847672,
"loss": 1.2933,
"step": 3390
},
{
"epoch": 0.3196953491378804,
"grad_norm": 2.079253911972046,
"learning_rate": 0.0004996803008932769,
"loss": 1.2522,
"step": 3400
},
{
"epoch": 0.3196953491378804,
"eval_loss": 1.2658365964889526,
"eval_runtime": 23.3524,
"eval_samples_per_second": 2.141,
"eval_steps_per_second": 0.3,
"step": 3400
},
{
"epoch": 0.3206356295765212,
"grad_norm": 2.5354573726654053,
"learning_rate": 0.0004996793606017865,
"loss": 1.3229,
"step": 3410
},
{
"epoch": 0.321575910015162,
"grad_norm": 2.1072871685028076,
"learning_rate": 0.0004996784203102962,
"loss": 1.2549,
"step": 3420
},
{
"epoch": 0.32251619045380286,
"grad_norm": 2.9021806716918945,
"learning_rate": 0.0004996774800188059,
"loss": 1.2416,
"step": 3430
},
{
"epoch": 0.32345647089244367,
"grad_norm": 2.3692095279693604,
"learning_rate": 0.0004996765397273155,
"loss": 1.2761,
"step": 3440
},
{
"epoch": 0.3243967513310845,
"grad_norm": 0.8599464297294617,
"learning_rate": 0.0004996755994358252,
"loss": 1.238,
"step": 3450
},
{
"epoch": 0.32533703176972534,
"grad_norm": 0.9026036858558655,
"learning_rate": 0.0004996746591443348,
"loss": 1.2586,
"step": 3460
},
{
"epoch": 0.32627731220836614,
"grad_norm": 1.5152987241744995,
"learning_rate": 0.0004996737188528444,
"loss": 1.2273,
"step": 3470
},
{
"epoch": 0.32721759264700695,
"grad_norm": 1.3079991340637207,
"learning_rate": 0.000499672778561354,
"loss": 1.2022,
"step": 3480
},
{
"epoch": 0.3281578730856478,
"grad_norm": 1.8832062482833862,
"learning_rate": 0.0004996718382698637,
"loss": 1.158,
"step": 3490
},
{
"epoch": 0.3290981535242886,
"grad_norm": 0.6279626488685608,
"learning_rate": 0.0004996708979783733,
"loss": 1.2091,
"step": 3500
},
{
"epoch": 0.3290981535242886,
"eval_loss": 1.178521752357483,
"eval_runtime": 23.3708,
"eval_samples_per_second": 2.139,
"eval_steps_per_second": 0.3,
"step": 3500
}
],
"logging_steps": 10,
"max_steps": 5317500,
"num_input_tokens_seen": 0,
"num_train_epochs": 500,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0403739277983744e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}