{
"best_metric": 0.32749298214912415,
"best_model_checkpoint": "sft-svgeez-blocks-20251101T005904Z/checkpoint-1000",
"epoch": 0.23507010966020617,
"eval_steps": 100,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009402804386408247,
"grad_norm": 1.0298110246658325,
"learning_rate": 0.0004999990597085097,
"loss": 0.9462,
"step": 10
},
{
"epoch": 0.0018805608772816493,
"grad_norm": 0.5167974233627319,
"learning_rate": 0.0004999981194170193,
"loss": 0.551,
"step": 20
},
{
"epoch": 0.002820841315922474,
"grad_norm": 0.40330085158348083,
"learning_rate": 0.0004999971791255289,
"loss": 0.509,
"step": 30
},
{
"epoch": 0.0037611217545632986,
"grad_norm": 0.3540050983428955,
"learning_rate": 0.0004999962388340385,
"loss": 0.4456,
"step": 40
},
{
"epoch": 0.004701402193204123,
"grad_norm": 0.4030086100101471,
"learning_rate": 0.0004999952985425483,
"loss": 0.4583,
"step": 50
},
{
"epoch": 0.005641682631844948,
"grad_norm": 0.5594690442085266,
"learning_rate": 0.0004999943582510579,
"loss": 0.4327,
"step": 60
},
{
"epoch": 0.006581963070485772,
"grad_norm": 0.3243617117404938,
"learning_rate": 0.0004999934179595675,
"loss": 0.4256,
"step": 70
},
{
"epoch": 0.007522243509126597,
"grad_norm": 0.3709908723831177,
"learning_rate": 0.0004999924776680771,
"loss": 0.4215,
"step": 80
},
{
"epoch": 0.008462523947767421,
"grad_norm": 0.3056599795818329,
"learning_rate": 0.0004999915373765868,
"loss": 0.4193,
"step": 90
},
{
"epoch": 0.009402804386408246,
"grad_norm": 0.34599125385284424,
"learning_rate": 0.0004999905970850964,
"loss": 0.4168,
"step": 100
},
{
"epoch": 0.009402804386408246,
"eval_loss": 0.39591750502586365,
"eval_runtime": 23.584,
"eval_samples_per_second": 2.12,
"eval_steps_per_second": 0.297,
"step": 100
},
{
"epoch": 0.010343084825049071,
"grad_norm": 0.4576414227485657,
"learning_rate": 0.000499989656793606,
"loss": 0.4149,
"step": 110
},
{
"epoch": 0.011283365263689896,
"grad_norm": 0.3045823574066162,
"learning_rate": 0.0004999887165021157,
"loss": 0.4045,
"step": 120
},
{
"epoch": 0.01222364570233072,
"grad_norm": 0.33852407336235046,
"learning_rate": 0.0004999877762106253,
"loss": 0.389,
"step": 130
},
{
"epoch": 0.013163926140971545,
"grad_norm": 0.3256877064704895,
"learning_rate": 0.0004999868359191349,
"loss": 0.4015,
"step": 140
},
{
"epoch": 0.01410420657961237,
"grad_norm": 0.33292344212532043,
"learning_rate": 0.0004999858956276445,
"loss": 0.4029,
"step": 150
},
{
"epoch": 0.015044487018253195,
"grad_norm": 0.4369369149208069,
"learning_rate": 0.0004999849553361542,
"loss": 0.3838,
"step": 160
},
{
"epoch": 0.01598476745689402,
"grad_norm": 0.26670923829078674,
"learning_rate": 0.0004999840150446638,
"loss": 0.3628,
"step": 170
},
{
"epoch": 0.016925047895534843,
"grad_norm": 0.2846743166446686,
"learning_rate": 0.0004999830747531736,
"loss": 0.4065,
"step": 180
},
{
"epoch": 0.01786532833417567,
"grad_norm": 0.3469683527946472,
"learning_rate": 0.0004999821344616832,
"loss": 0.3773,
"step": 190
},
{
"epoch": 0.018805608772816493,
"grad_norm": 0.2996629476547241,
"learning_rate": 0.0004999811941701928,
"loss": 0.3879,
"step": 200
},
{
"epoch": 0.018805608772816493,
"eval_loss": 0.37008655071258545,
"eval_runtime": 23.5018,
"eval_samples_per_second": 2.127,
"eval_steps_per_second": 0.298,
"step": 200
},
{
"epoch": 0.019745889211457316,
"grad_norm": 0.3994966149330139,
"learning_rate": 0.0004999802538787024,
"loss": 0.4097,
"step": 210
},
{
"epoch": 0.020686169650098143,
"grad_norm": 0.4101763069629669,
"learning_rate": 0.0004999793135872121,
"loss": 0.4091,
"step": 220
},
{
"epoch": 0.021626450088738966,
"grad_norm": 0.3082752823829651,
"learning_rate": 0.0004999783732957217,
"loss": 0.3997,
"step": 230
},
{
"epoch": 0.022566730527379793,
"grad_norm": 0.32445040345191956,
"learning_rate": 0.0004999774330042313,
"loss": 0.4026,
"step": 240
},
{
"epoch": 0.023507010966020616,
"grad_norm": 0.37064942717552185,
"learning_rate": 0.000499976492712741,
"loss": 0.3608,
"step": 250
},
{
"epoch": 0.02444729140466144,
"grad_norm": 0.3851112723350525,
"learning_rate": 0.0004999755524212506,
"loss": 0.3889,
"step": 260
},
{
"epoch": 0.025387571843302266,
"grad_norm": 0.25871652364730835,
"learning_rate": 0.0004999746121297602,
"loss": 0.3681,
"step": 270
},
{
"epoch": 0.02632785228194309,
"grad_norm": 0.34905803203582764,
"learning_rate": 0.0004999736718382698,
"loss": 0.3822,
"step": 280
},
{
"epoch": 0.027268132720583912,
"grad_norm": 0.3242252767086029,
"learning_rate": 0.0004999727315467795,
"loss": 0.3992,
"step": 290
},
{
"epoch": 0.02820841315922474,
"grad_norm": 0.359190434217453,
"learning_rate": 0.0004999717912552891,
"loss": 0.4125,
"step": 300
},
{
"epoch": 0.02820841315922474,
"eval_loss": 0.3554922044277191,
"eval_runtime": 23.4721,
"eval_samples_per_second": 2.13,
"eval_steps_per_second": 0.298,
"step": 300
},
{
"epoch": 0.029148693597865562,
"grad_norm": 0.2699418067932129,
"learning_rate": 0.0004999708509637987,
"loss": 0.3724,
"step": 310
},
{
"epoch": 0.03008897403650639,
"grad_norm": 0.3270084261894226,
"learning_rate": 0.0004999699106723084,
"loss": 0.3878,
"step": 320
},
{
"epoch": 0.031029254475147212,
"grad_norm": 0.32981258630752563,
"learning_rate": 0.0004999689703808181,
"loss": 0.3944,
"step": 330
},
{
"epoch": 0.03196953491378804,
"grad_norm": 0.3394843637943268,
"learning_rate": 0.0004999680300893277,
"loss": 0.3673,
"step": 340
},
{
"epoch": 0.03290981535242886,
"grad_norm": 0.32261306047439575,
"learning_rate": 0.0004999670897978374,
"loss": 0.4093,
"step": 350
},
{
"epoch": 0.033850095791069686,
"grad_norm": 0.2744098901748657,
"learning_rate": 0.000499966149506347,
"loss": 0.3488,
"step": 360
},
{
"epoch": 0.03479037622971051,
"grad_norm": 0.26060158014297485,
"learning_rate": 0.0004999652092148566,
"loss": 0.3753,
"step": 370
},
{
"epoch": 0.03573065666835134,
"grad_norm": 0.37372156977653503,
"learning_rate": 0.0004999642689233663,
"loss": 0.3452,
"step": 380
},
{
"epoch": 0.03667093710699216,
"grad_norm": 0.18904727697372437,
"learning_rate": 0.0004999633286318759,
"loss": 0.3356,
"step": 390
},
{
"epoch": 0.037611217545632986,
"grad_norm": 0.3282572627067566,
"learning_rate": 0.0004999623883403855,
"loss": 0.3852,
"step": 400
},
{
"epoch": 0.037611217545632986,
"eval_loss": 0.35085391998291016,
"eval_runtime": 23.4698,
"eval_samples_per_second": 2.13,
"eval_steps_per_second": 0.298,
"step": 400
},
{
"epoch": 0.03855149798427381,
"grad_norm": 0.2760799527168274,
"learning_rate": 0.0004999614480488951,
"loss": 0.3551,
"step": 410
},
{
"epoch": 0.03949177842291463,
"grad_norm": 0.276055246591568,
"learning_rate": 0.0004999605077574048,
"loss": 0.3423,
"step": 420
},
{
"epoch": 0.04043205886155546,
"grad_norm": 0.24734950065612793,
"learning_rate": 0.0004999595674659144,
"loss": 0.353,
"step": 430
},
{
"epoch": 0.041372339300196286,
"grad_norm": 0.26533621549606323,
"learning_rate": 0.000499958627174424,
"loss": 0.3876,
"step": 440
},
{
"epoch": 0.042312619738837105,
"grad_norm": 0.3085733950138092,
"learning_rate": 0.0004999576868829337,
"loss": 0.3701,
"step": 450
},
{
"epoch": 0.04325290017747793,
"grad_norm": 0.2946242690086365,
"learning_rate": 0.0004999567465914434,
"loss": 0.3897,
"step": 460
},
{
"epoch": 0.04419318061611876,
"grad_norm": 0.2757997214794159,
"learning_rate": 0.000499955806299953,
"loss": 0.343,
"step": 470
},
{
"epoch": 0.045133461054759585,
"grad_norm": 0.3827458620071411,
"learning_rate": 0.0004999548660084626,
"loss": 0.4293,
"step": 480
},
{
"epoch": 0.046073741493400405,
"grad_norm": 0.2692118287086487,
"learning_rate": 0.0004999539257169723,
"loss": 0.3621,
"step": 490
},
{
"epoch": 0.04701402193204123,
"grad_norm": 0.3991619050502777,
"learning_rate": 0.0004999529854254819,
"loss": 0.3646,
"step": 500
},
{
"epoch": 0.04701402193204123,
"eval_loss": 0.3504406809806824,
"eval_runtime": 23.45,
"eval_samples_per_second": 2.132,
"eval_steps_per_second": 0.299,
"step": 500
},
{
"epoch": 0.04795430237068206,
"grad_norm": 0.27486175298690796,
"learning_rate": 0.0004999520451339916,
"loss": 0.3791,
"step": 510
},
{
"epoch": 0.04889458280932288,
"grad_norm": 0.37556731700897217,
"learning_rate": 0.0004999511048425012,
"loss": 0.3781,
"step": 520
},
{
"epoch": 0.049834863247963705,
"grad_norm": 0.34461846947669983,
"learning_rate": 0.0004999501645510108,
"loss": 0.3809,
"step": 530
},
{
"epoch": 0.05077514368660453,
"grad_norm": 0.27397575974464417,
"learning_rate": 0.0004999492242595204,
"loss": 0.3601,
"step": 540
},
{
"epoch": 0.05171542412524535,
"grad_norm": 0.3164900541305542,
"learning_rate": 0.0004999482839680301,
"loss": 0.3666,
"step": 550
},
{
"epoch": 0.05265570456388618,
"grad_norm": 0.2617564797401428,
"learning_rate": 0.0004999473436765397,
"loss": 0.3555,
"step": 560
},
{
"epoch": 0.053595985002527005,
"grad_norm": 0.2753169536590576,
"learning_rate": 0.0004999464033850493,
"loss": 0.351,
"step": 570
},
{
"epoch": 0.054536265441167825,
"grad_norm": 0.41116294264793396,
"learning_rate": 0.0004999454630935591,
"loss": 0.3667,
"step": 580
},
{
"epoch": 0.05547654587980865,
"grad_norm": 0.3187413215637207,
"learning_rate": 0.0004999445228020687,
"loss": 0.3279,
"step": 590
},
{
"epoch": 0.05641682631844948,
"grad_norm": 0.3258746862411499,
"learning_rate": 0.0004999435825105783,
"loss": 0.3827,
"step": 600
},
{
"epoch": 0.05641682631844948,
"eval_loss": 0.3369905948638916,
"eval_runtime": 23.4639,
"eval_samples_per_second": 2.131,
"eval_steps_per_second": 0.298,
"step": 600
},
{
"epoch": 0.057357106757090305,
"grad_norm": 0.2566978633403778,
"learning_rate": 0.0004999426422190879,
"loss": 0.3674,
"step": 610
},
{
"epoch": 0.058297387195731125,
"grad_norm": 0.22622275352478027,
"learning_rate": 0.0004999417019275976,
"loss": 0.3553,
"step": 620
},
{
"epoch": 0.05923766763437195,
"grad_norm": 0.2654806971549988,
"learning_rate": 0.0004999407616361072,
"loss": 0.3475,
"step": 630
},
{
"epoch": 0.06017794807301278,
"grad_norm": 0.3390786051750183,
"learning_rate": 0.0004999398213446168,
"loss": 0.3796,
"step": 640
},
{
"epoch": 0.0611182285116536,
"grad_norm": 0.2524166405200958,
"learning_rate": 0.0004999388810531264,
"loss": 0.3635,
"step": 650
},
{
"epoch": 0.062058508950294425,
"grad_norm": 0.2794824242591858,
"learning_rate": 0.0004999379407616361,
"loss": 0.3527,
"step": 660
},
{
"epoch": 0.06299878938893524,
"grad_norm": 0.1920047104358673,
"learning_rate": 0.0004999370004701457,
"loss": 0.336,
"step": 670
},
{
"epoch": 0.06393906982757608,
"grad_norm": 0.29339122772216797,
"learning_rate": 0.0004999360601786554,
"loss": 0.3654,
"step": 680
},
{
"epoch": 0.0648793502662169,
"grad_norm": 0.3135516345500946,
"learning_rate": 0.000499935119887165,
"loss": 0.3653,
"step": 690
},
{
"epoch": 0.06581963070485772,
"grad_norm": 0.2592516541481018,
"learning_rate": 0.0004999341795956747,
"loss": 0.3746,
"step": 700
},
{
"epoch": 0.06581963070485772,
"eval_loss": 0.3413720726966858,
"eval_runtime": 23.4709,
"eval_samples_per_second": 2.13,
"eval_steps_per_second": 0.298,
"step": 700
},
{
"epoch": 0.06675991114349855,
"grad_norm": 0.35426065325737,
"learning_rate": 0.0004999332393041844,
"loss": 0.3485,
"step": 710
},
{
"epoch": 0.06770019158213937,
"grad_norm": 0.22092205286026,
"learning_rate": 0.000499932299012694,
"loss": 0.3813,
"step": 720
},
{
"epoch": 0.06864047202078019,
"grad_norm": 0.31250303983688354,
"learning_rate": 0.0004999313587212036,
"loss": 0.3431,
"step": 730
},
{
"epoch": 0.06958075245942102,
"grad_norm": 0.3179270029067993,
"learning_rate": 0.0004999304184297132,
"loss": 0.3671,
"step": 740
},
{
"epoch": 0.07052103289806184,
"grad_norm": 0.2921147644519806,
"learning_rate": 0.0004999294781382229,
"loss": 0.3479,
"step": 750
},
{
"epoch": 0.07146131333670268,
"grad_norm": 0.2732735872268677,
"learning_rate": 0.0004999285378467325,
"loss": 0.382,
"step": 760
},
{
"epoch": 0.0724015937753435,
"grad_norm": 0.2021898329257965,
"learning_rate": 0.0004999275975552421,
"loss": 0.3342,
"step": 770
},
{
"epoch": 0.07334187421398432,
"grad_norm": 0.2908518314361572,
"learning_rate": 0.0004999266572637517,
"loss": 0.3512,
"step": 780
},
{
"epoch": 0.07428215465262515,
"grad_norm": 0.4414116144180298,
"learning_rate": 0.0004999257169722614,
"loss": 0.3505,
"step": 790
},
{
"epoch": 0.07522243509126597,
"grad_norm": 0.3312692940235138,
"learning_rate": 0.000499924776680771,
"loss": 0.3734,
"step": 800
},
{
"epoch": 0.07522243509126597,
"eval_loss": 0.3315250277519226,
"eval_runtime": 23.4745,
"eval_samples_per_second": 2.13,
"eval_steps_per_second": 0.298,
"step": 800
},
{
"epoch": 0.07616271552990679,
"grad_norm": 0.2597099244594574,
"learning_rate": 0.0004999238363892806,
"loss": 0.3493,
"step": 810
},
{
"epoch": 0.07710299596854762,
"grad_norm": 0.3256906270980835,
"learning_rate": 0.0004999228960977903,
"loss": 0.3392,
"step": 820
},
{
"epoch": 0.07804327640718844,
"grad_norm": 0.3068447709083557,
"learning_rate": 0.0004999219558063,
"loss": 0.354,
"step": 830
},
{
"epoch": 0.07898355684582926,
"grad_norm": 0.3219122588634491,
"learning_rate": 0.0004999210155148097,
"loss": 0.3484,
"step": 840
},
{
"epoch": 0.0799238372844701,
"grad_norm": 0.2608373761177063,
"learning_rate": 0.0004999200752233193,
"loss": 0.3492,
"step": 850
},
{
"epoch": 0.08086411772311092,
"grad_norm": 0.3617205023765564,
"learning_rate": 0.0004999191349318289,
"loss": 0.3705,
"step": 860
},
{
"epoch": 0.08180439816175174,
"grad_norm": 0.2846531569957733,
"learning_rate": 0.0004999181946403385,
"loss": 0.3302,
"step": 870
},
{
"epoch": 0.08274467860039257,
"grad_norm": 0.18934500217437744,
"learning_rate": 0.0004999172543488482,
"loss": 0.3426,
"step": 880
},
{
"epoch": 0.08368495903903339,
"grad_norm": 0.3601396977901459,
"learning_rate": 0.0004999163140573578,
"loss": 0.3447,
"step": 890
},
{
"epoch": 0.08462523947767421,
"grad_norm": 0.3532989025115967,
"learning_rate": 0.0004999153737658674,
"loss": 0.3852,
"step": 900
},
{
"epoch": 0.08462523947767421,
"eval_loss": 0.33136090636253357,
"eval_runtime": 23.4849,
"eval_samples_per_second": 2.129,
"eval_steps_per_second": 0.298,
"step": 900
},
{
"epoch": 0.08556551991631504,
"grad_norm": 0.3184758424758911,
"learning_rate": 0.0004999144334743771,
"loss": 0.3579,
"step": 910
},
{
"epoch": 0.08650580035495586,
"grad_norm": 0.25410690903663635,
"learning_rate": 0.0004999134931828867,
"loss": 0.3566,
"step": 920
},
{
"epoch": 0.08744608079359668,
"grad_norm": 0.2750070095062256,
"learning_rate": 0.0004999125528913963,
"loss": 0.3635,
"step": 930
},
{
"epoch": 0.08838636123223752,
"grad_norm": 0.2559872567653656,
"learning_rate": 0.0004999116125999059,
"loss": 0.3448,
"step": 940
},
{
"epoch": 0.08932664167087834,
"grad_norm": 0.29350370168685913,
"learning_rate": 0.0004999106723084156,
"loss": 0.3601,
"step": 950
},
{
"epoch": 0.09026692210951917,
"grad_norm": 0.24211618304252625,
"learning_rate": 0.0004999097320169253,
"loss": 0.3473,
"step": 960
},
{
"epoch": 0.09120720254815999,
"grad_norm": 0.23498541116714478,
"learning_rate": 0.0004999087917254349,
"loss": 0.3326,
"step": 970
},
{
"epoch": 0.09214748298680081,
"grad_norm": 0.283318430185318,
"learning_rate": 0.0004999078514339445,
"loss": 0.3631,
"step": 980
},
{
"epoch": 0.09308776342544164,
"grad_norm": 0.31163477897644043,
"learning_rate": 0.0004999069111424542,
"loss": 0.3651,
"step": 990
},
{
"epoch": 0.09402804386408246,
"grad_norm": 0.337285578250885,
"learning_rate": 0.0004999059708509638,
"loss": 0.3422,
"step": 1000
},
{
"epoch": 0.09402804386408246,
"eval_loss": 0.32749298214912415,
"eval_runtime": 23.4901,
"eval_samples_per_second": 2.129,
"eval_steps_per_second": 0.298,
"step": 1000
},
{
"epoch": 0.09496832430272328,
"grad_norm": 0.31577935814857483,
"learning_rate": 0.0004999050305594735,
"loss": 0.3468,
"step": 1010
},
{
"epoch": 0.09590860474136412,
"grad_norm": 0.2720831036567688,
"learning_rate": 0.0004999040902679831,
"loss": 0.368,
"step": 1020
},
{
"epoch": 0.09684888518000494,
"grad_norm": 76.24451446533203,
"learning_rate": 0.0004999031499764927,
"loss": 0.9196,
"step": 1030
},
{
"epoch": 0.09778916561864576,
"grad_norm": 0.8638234734535217,
"learning_rate": 0.0004999022096850024,
"loss": 0.5762,
"step": 1040
},
{
"epoch": 0.09872944605728659,
"grad_norm": 1.1701819896697998,
"learning_rate": 0.000499901269393512,
"loss": 0.4064,
"step": 1050
},
{
"epoch": 0.09966972649592741,
"grad_norm": 0.5215079188346863,
"learning_rate": 0.0004999003291020216,
"loss": 0.3978,
"step": 1060
},
{
"epoch": 0.10061000693456823,
"grad_norm": 13.439558982849121,
"learning_rate": 0.0004998993888105312,
"loss": 0.4368,
"step": 1070
},
{
"epoch": 0.10155028737320906,
"grad_norm": 0.7944605350494385,
"learning_rate": 0.000499898448519041,
"loss": 0.401,
"step": 1080
},
{
"epoch": 0.10249056781184988,
"grad_norm": 1.53361177444458,
"learning_rate": 0.0004998975082275506,
"loss": 0.463,
"step": 1090
},
{
"epoch": 0.1034308482504907,
"grad_norm": 186.09454345703125,
"learning_rate": 0.0004998965679360602,
"loss": 3.0032,
"step": 1100
},
{
"epoch": 0.1034308482504907,
"eval_loss": 1.301244854927063,
"eval_runtime": 23.4353,
"eval_samples_per_second": 2.134,
"eval_steps_per_second": 0.299,
"step": 1100
},
{
"epoch": 0.10437112868913154,
"grad_norm": 43.43181228637695,
"learning_rate": 0.0004998956276445698,
"loss": 15.2307,
"step": 1110
},
{
"epoch": 0.10531140912777236,
"grad_norm": 34.82008743286133,
"learning_rate": 0.0004998946873530795,
"loss": 23.3276,
"step": 1120
},
{
"epoch": 0.10625168956641318,
"grad_norm": 14.430869102478027,
"learning_rate": 0.0004998937470615891,
"loss": 9.6299,
"step": 1130
},
{
"epoch": 0.10719197000505401,
"grad_norm": 5.102875232696533,
"learning_rate": 0.0004998928067700987,
"loss": 5.5005,
"step": 1140
},
{
"epoch": 0.10813225044369483,
"grad_norm": 6.0567827224731445,
"learning_rate": 0.0004998918664786084,
"loss": 3.8907,
"step": 1150
},
{
"epoch": 0.10907253088233565,
"grad_norm": 3.007990837097168,
"learning_rate": 0.000499890926187118,
"loss": 3.3603,
"step": 1160
},
{
"epoch": 0.11001281132097648,
"grad_norm": 2.6830623149871826,
"learning_rate": 0.0004998899858956277,
"loss": 3.225,
"step": 1170
},
{
"epoch": 0.1109530917596173,
"grad_norm": 5.232982635498047,
"learning_rate": 0.0004998890456041373,
"loss": 3.0515,
"step": 1180
},
{
"epoch": 0.11189337219825814,
"grad_norm": 2.788641929626465,
"learning_rate": 0.0004998881053126469,
"loss": 3.2129,
"step": 1190
},
{
"epoch": 0.11283365263689896,
"grad_norm": 3.4526102542877197,
"learning_rate": 0.0004998871650211565,
"loss": 3.0174,
"step": 1200
},
{
"epoch": 0.11283365263689896,
"eval_loss": 3.0720202922821045,
"eval_runtime": 23.0809,
"eval_samples_per_second": 2.166,
"eval_steps_per_second": 0.303,
"step": 1200
},
{
"epoch": 0.11377393307553978,
"grad_norm": 1.8571786880493164,
"learning_rate": 0.0004998862247296662,
"loss": 3.0736,
"step": 1210
},
{
"epoch": 0.11471421351418061,
"grad_norm": 1.886637568473816,
"learning_rate": 0.0004998852844381759,
"loss": 2.9814,
"step": 1220
},
{
"epoch": 0.11565449395282143,
"grad_norm": 2.7894158363342285,
"learning_rate": 0.0004998843441466855,
"loss": 3.6537,
"step": 1230
},
{
"epoch": 0.11659477439146225,
"grad_norm": 1.016867995262146,
"learning_rate": 0.0004998834038551951,
"loss": 2.9557,
"step": 1240
},
{
"epoch": 0.11753505483010308,
"grad_norm": 2.153256893157959,
"learning_rate": 0.0004998824635637048,
"loss": 3.013,
"step": 1250
},
{
"epoch": 0.1184753352687439,
"grad_norm": 3.2111806869506836,
"learning_rate": 0.0004998815232722144,
"loss": 2.9692,
"step": 1260
},
{
"epoch": 0.11941561570738472,
"grad_norm": 1.3798747062683105,
"learning_rate": 0.000499880582980724,
"loss": 2.962,
"step": 1270
},
{
"epoch": 0.12035589614602556,
"grad_norm": 1.0820046663284302,
"learning_rate": 0.0004998796426892337,
"loss": 2.9139,
"step": 1280
},
{
"epoch": 0.12129617658466638,
"grad_norm": 2.0282669067382812,
"learning_rate": 0.0004998787023977433,
"loss": 2.9114,
"step": 1290
},
{
"epoch": 0.1222364570233072,
"grad_norm": 1.1617417335510254,
"learning_rate": 0.0004998777621062529,
"loss": 2.8328,
"step": 1300
},
{
"epoch": 0.1222364570233072,
"eval_loss": 2.820981740951538,
"eval_runtime": 23.1533,
"eval_samples_per_second": 2.16,
"eval_steps_per_second": 0.302,
"step": 1300
},
{
"epoch": 0.12317673746194803,
"grad_norm": 1.6711792945861816,
"learning_rate": 0.0004998768218147625,
"loss": 2.8469,
"step": 1310
},
{
"epoch": 0.12411701790058885,
"grad_norm": 2.5305206775665283,
"learning_rate": 0.0004998758815232722,
"loss": 2.9003,
"step": 1320
},
{
"epoch": 0.12505729833922968,
"grad_norm": 1.6004911661148071,
"learning_rate": 0.0004998749412317818,
"loss": 2.8594,
"step": 1330
},
{
"epoch": 0.1259975787778705,
"grad_norm": 1.7168315649032593,
"learning_rate": 0.0004998740009402916,
"loss": 2.8353,
"step": 1340
},
{
"epoch": 0.12693785921651132,
"grad_norm": 565.294921875,
"learning_rate": 0.0004998730606488012,
"loss": 2.8984,
"step": 1350
},
{
"epoch": 0.12787813965515216,
"grad_norm": 4.877725124359131,
"learning_rate": 0.0004998721203573108,
"loss": 11.9556,
"step": 1360
},
{
"epoch": 0.12881842009379296,
"grad_norm": 2.709376811981201,
"learning_rate": 0.0004998711800658205,
"loss": 3.237,
"step": 1370
},
{
"epoch": 0.1297587005324338,
"grad_norm": 1.6279547214508057,
"learning_rate": 0.0004998702397743301,
"loss": 2.9603,
"step": 1380
},
{
"epoch": 0.13069898097107463,
"grad_norm": 2.7646572589874268,
"learning_rate": 0.0004998692994828397,
"loss": 3.0022,
"step": 1390
},
{
"epoch": 0.13163926140971544,
"grad_norm": 3.1686997413635254,
"learning_rate": 0.0004998683591913493,
"loss": 2.9407,
"step": 1400
},
{
"epoch": 0.13163926140971544,
"eval_loss": 2.8426969051361084,
"eval_runtime": 23.0531,
"eval_samples_per_second": 2.169,
"eval_steps_per_second": 0.304,
"step": 1400
},
{
"epoch": 0.13257954184835627,
"grad_norm": 1.504326581954956,
"learning_rate": 0.000499867418899859,
"loss": 2.8927,
"step": 1410
},
{
"epoch": 0.1335198222869971,
"grad_norm": 1.797013282775879,
"learning_rate": 0.0004998664786083686,
"loss": 2.8821,
"step": 1420
},
{
"epoch": 0.1344601027256379,
"grad_norm": 1.7129989862442017,
"learning_rate": 0.0004998655383168782,
"loss": 2.8462,
"step": 1430
},
{
"epoch": 0.13540038316427874,
"grad_norm": 556.15869140625,
"learning_rate": 0.0004998645980253878,
"loss": 3.6939,
"step": 1440
},
{
"epoch": 0.13634066360291958,
"grad_norm": 131.738525390625,
"learning_rate": 0.0004998636577338975,
"loss": 4.5958,
"step": 1450
},
{
"epoch": 0.13728094404156038,
"grad_norm": 14.271523475646973,
"learning_rate": 0.0004998627174424071,
"loss": 11.9558,
"step": 1460
},
{
"epoch": 0.13822122448020122,
"grad_norm": 36.148284912109375,
"learning_rate": 0.0004998617771509167,
"loss": 8.5098,
"step": 1470
},
{
"epoch": 0.13916150491884205,
"grad_norm": 2.539207696914673,
"learning_rate": 0.0004998608368594265,
"loss": 8.0085,
"step": 1480
},
{
"epoch": 0.14010178535748288,
"grad_norm": 2.990248918533325,
"learning_rate": 0.0004998598965679361,
"loss": 7.787,
"step": 1490
},
{
"epoch": 0.1410420657961237,
"grad_norm": 20.940954208374023,
"learning_rate": 0.0004998589562764458,
"loss": 6.7366,
"step": 1500
},
{
"epoch": 0.1410420657961237,
"eval_loss": 6.252957820892334,
"eval_runtime": 23.2195,
"eval_samples_per_second": 2.153,
"eval_steps_per_second": 0.301,
"step": 1500
},
{
"epoch": 0.14198234623476452,
"grad_norm": 2.390953779220581,
"learning_rate": 0.0004998580159849554,
"loss": 5.7212,
"step": 1510
},
{
"epoch": 0.14292262667340536,
"grad_norm": 3.413762092590332,
"learning_rate": 0.000499857075693465,
"loss": 4.7978,
"step": 1520
},
{
"epoch": 0.14386290711204616,
"grad_norm": 4.431119441986084,
"learning_rate": 0.0004998561354019746,
"loss": 4.8096,
"step": 1530
},
{
"epoch": 0.144803187550687,
"grad_norm": 427.5173645019531,
"learning_rate": 0.0004998551951104843,
"loss": 4.6306,
"step": 1540
},
{
"epoch": 0.14574346798932783,
"grad_norm": 7.0377678871154785,
"learning_rate": 0.0004998542548189939,
"loss": 7.1266,
"step": 1550
},
{
"epoch": 0.14668374842796864,
"grad_norm": 5.000734329223633,
"learning_rate": 0.0004998533145275035,
"loss": 5.2694,
"step": 1560
},
{
"epoch": 0.14762402886660947,
"grad_norm": 239.30496215820312,
"learning_rate": 0.0004998523742360131,
"loss": 4.5831,
"step": 1570
},
{
"epoch": 0.1485643093052503,
"grad_norm": 2.7288706302642822,
"learning_rate": 0.0004998514339445228,
"loss": 4.1964,
"step": 1580
},
{
"epoch": 0.1495045897438911,
"grad_norm": 6616.22509765625,
"learning_rate": 0.0004998504936530324,
"loss": 4.9638,
"step": 1590
},
{
"epoch": 0.15044487018253194,
"grad_norm": 5570.67822265625,
"learning_rate": 0.000499849553361542,
"loss": 10.6239,
"step": 1600
},
{
"epoch": 0.15044487018253194,
"eval_loss": 17.817279815673828,
"eval_runtime": 23.3339,
"eval_samples_per_second": 2.143,
"eval_steps_per_second": 0.3,
"step": 1600
},
{
"epoch": 0.15138515062117278,
"grad_norm": 313.7498474121094,
"learning_rate": 0.0004998486130700518,
"loss": 18.1327,
"step": 1610
},
{
"epoch": 0.15232543105981358,
"grad_norm": 333.5940856933594,
"learning_rate": 0.0004998476727785614,
"loss": 10.4126,
"step": 1620
},
{
"epoch": 0.15326571149845442,
"grad_norm": 6.390124797821045,
"learning_rate": 0.000499846732487071,
"loss": 25.1629,
"step": 1630
},
{
"epoch": 0.15420599193709525,
"grad_norm": 328.04443359375,
"learning_rate": 0.0004998457921955807,
"loss": 14.1132,
"step": 1640
},
{
"epoch": 0.15514627237573606,
"grad_norm": 412.7569885253906,
"learning_rate": 0.0004998448519040903,
"loss": 22.5887,
"step": 1650
},
{
"epoch": 0.1560865528143769,
"grad_norm": 56.239742279052734,
"learning_rate": 0.0004998439116125999,
"loss": 16.1849,
"step": 1660
},
{
"epoch": 0.15702683325301772,
"grad_norm": 135.1570281982422,
"learning_rate": 0.0004998429713211096,
"loss": 11.6209,
"step": 1670
},
{
"epoch": 0.15796711369165853,
"grad_norm": 5.806948661804199,
"learning_rate": 0.0004998420310296192,
"loss": 7.57,
"step": 1680
},
{
"epoch": 0.15890739413029936,
"grad_norm": 2.2571475505828857,
"learning_rate": 0.0004998410907381288,
"loss": 6.2436,
"step": 1690
},
{
"epoch": 0.1598476745689402,
"grad_norm": 3.34604549407959,
"learning_rate": 0.0004998401504466384,
"loss": 5.9399,
"step": 1700
},
{
"epoch": 0.1598476745689402,
"eval_loss": 5.785297870635986,
"eval_runtime": 23.2051,
"eval_samples_per_second": 2.155,
"eval_steps_per_second": 0.302,
"step": 1700
},
{
"epoch": 0.160787955007581,
"grad_norm": 132.94261169433594,
"learning_rate": 0.0004998392101551481,
"loss": 8.0906,
"step": 1710
},
{
"epoch": 0.16172823544622184,
"grad_norm": 18.748916625976562,
"learning_rate": 0.0004998382698636577,
"loss": 12.5014,
"step": 1720
},
{
"epoch": 0.16266851588486267,
"grad_norm": 30.582904815673828,
"learning_rate": 0.0004998373295721673,
"loss": 12.5519,
"step": 1730
},
{
"epoch": 0.16360879632350347,
"grad_norm": 4.576255798339844,
"learning_rate": 0.0004998363892806771,
"loss": 5.5364,
"step": 1740
},
{
"epoch": 0.1645490767621443,
"grad_norm": 2.24480938911438,
"learning_rate": 0.0004998354489891867,
"loss": 4.0297,
"step": 1750
},
{
"epoch": 0.16548935720078514,
"grad_norm": 1.5888965129852295,
"learning_rate": 0.0004998345086976963,
"loss": 3.1305,
"step": 1760
},
{
"epoch": 0.16642963763942595,
"grad_norm": 1.2058303356170654,
"learning_rate": 0.0004998335684062059,
"loss": 2.8331,
"step": 1770
},
{
"epoch": 0.16736991807806678,
"grad_norm": 12.554035186767578,
"learning_rate": 0.0004998326281147156,
"loss": 2.664,
"step": 1780
},
{
"epoch": 0.16831019851670762,
"grad_norm": 3.1639647483825684,
"learning_rate": 0.0004998316878232252,
"loss": 4.2002,
"step": 1790
},
{
"epoch": 0.16925047895534842,
"grad_norm": 27.61470603942871,
"learning_rate": 0.0004998307475317348,
"loss": 2.916,
"step": 1800
},
{
"epoch": 0.16925047895534842,
"eval_loss": 2.9362294673919678,
"eval_runtime": 23.3374,
"eval_samples_per_second": 2.142,
"eval_steps_per_second": 0.3,
"step": 1800
},
{
"epoch": 0.17019075939398925,
"grad_norm": 2.170280694961548,
"learning_rate": 0.0004998298072402445,
"loss": 2.6845,
"step": 1810
},
{
"epoch": 0.1711310398326301,
"grad_norm": 0.7772692441940308,
"learning_rate": 0.0004998288669487541,
"loss": 2.5667,
"step": 1820
},
{
"epoch": 0.1720713202712709,
"grad_norm": 3.4145731925964355,
"learning_rate": 0.0004998279266572638,
"loss": 2.5062,
"step": 1830
},
{
"epoch": 0.17301160070991173,
"grad_norm": 1.2681496143341064,
"learning_rate": 0.0004998269863657734,
"loss": 2.465,
"step": 1840
},
{
"epoch": 0.17395188114855256,
"grad_norm": 1.6735230684280396,
"learning_rate": 0.000499826046074283,
"loss": 2.4389,
"step": 1850
},
{
"epoch": 0.17489216158719337,
"grad_norm": 8.941509246826172,
"learning_rate": 0.0004998251057827926,
"loss": 2.4641,
"step": 1860
},
{
"epoch": 0.1758324420258342,
"grad_norm": 1.6397817134857178,
"learning_rate": 0.0004998241654913024,
"loss": 2.6467,
"step": 1870
},
{
"epoch": 0.17677272246447504,
"grad_norm": 1.612599492073059,
"learning_rate": 0.000499823225199812,
"loss": 2.4431,
"step": 1880
},
{
"epoch": 0.17771300290311584,
"grad_norm": 3.2251834869384766,
"learning_rate": 0.0004998222849083216,
"loss": 2.1863,
"step": 1890
},
{
"epoch": 0.17865328334175667,
"grad_norm": 0.8718411326408386,
"learning_rate": 0.0004998213446168312,
"loss": 2.1126,
"step": 1900
},
{
"epoch": 0.17865328334175667,
"eval_loss": 2.0840113162994385,
"eval_runtime": 23.3136,
"eval_samples_per_second": 2.145,
"eval_steps_per_second": 0.3,
"step": 1900
},
{
"epoch": 0.1795935637803975,
"grad_norm": 1.0821764469146729,
"learning_rate": 0.0004998204043253409,
"loss": 2.0873,
"step": 1910
},
{
"epoch": 0.18053384421903834,
"grad_norm": 0.78948575258255,
"learning_rate": 0.0004998194640338505,
"loss": 2.0275,
"step": 1920
},
{
"epoch": 0.18147412465767915,
"grad_norm": 1.659589171409607,
"learning_rate": 0.0004998185237423601,
"loss": 1.9527,
"step": 1930
},
{
"epoch": 0.18241440509631998,
"grad_norm": 1.2942698001861572,
"learning_rate": 0.0004998175834508698,
"loss": 2.0333,
"step": 1940
},
{
"epoch": 0.18335468553496082,
"grad_norm": 1.4142636060714722,
"learning_rate": 0.0004998166431593794,
"loss": 2.1385,
"step": 1950
},
{
"epoch": 0.18429496597360162,
"grad_norm": 1.0006928443908691,
"learning_rate": 0.000499815702867889,
"loss": 2.1326,
"step": 1960
},
{
"epoch": 0.18523524641224245,
"grad_norm": 2.224665641784668,
"learning_rate": 0.0004998147625763987,
"loss": 1.9148,
"step": 1970
},
{
"epoch": 0.1861755268508833,
"grad_norm": 1.2691782712936401,
"learning_rate": 0.0004998138222849083,
"loss": 1.9243,
"step": 1980
},
{
"epoch": 0.1871158072895241,
"grad_norm": 0.6900967955589294,
"learning_rate": 0.000499812881993418,
"loss": 2.0292,
"step": 1990
},
{
"epoch": 0.18805608772816493,
"grad_norm": 0.5726048350334167,
"learning_rate": 0.0004998119417019277,
"loss": 1.9845,
"step": 2000
},
{
"epoch": 0.18805608772816493,
"eval_loss": 1.867889404296875,
"eval_runtime": 23.2923,
"eval_samples_per_second": 2.147,
"eval_steps_per_second": 0.301,
"step": 2000
},
{
"epoch": 0.18899636816680576,
"grad_norm": 1.1626911163330078,
"learning_rate": 0.0004998110014104373,
"loss": 1.8727,
"step": 2010
},
{
"epoch": 0.18993664860544657,
"grad_norm": 1.16129732131958,
"learning_rate": 0.0004998100611189469,
"loss": 1.8666,
"step": 2020
},
{
"epoch": 0.1908769290440874,
"grad_norm": 0.680419921875,
"learning_rate": 0.0004998091208274565,
"loss": 1.7736,
"step": 2030
},
{
"epoch": 0.19181720948272823,
"grad_norm": 0.830495297908783,
"learning_rate": 0.0004998081805359662,
"loss": 1.8423,
"step": 2040
},
{
"epoch": 0.19275748992136904,
"grad_norm": 1.4571064710617065,
"learning_rate": 0.0004998072402444758,
"loss": 1.7769,
"step": 2050
},
{
"epoch": 0.19369777036000987,
"grad_norm": 1.1363842487335205,
"learning_rate": 0.0004998062999529854,
"loss": 1.8004,
"step": 2060
},
{
"epoch": 0.1946380507986507,
"grad_norm": 0.9440115094184875,
"learning_rate": 0.0004998053596614951,
"loss": 1.809,
"step": 2070
},
{
"epoch": 0.19557833123729151,
"grad_norm": 0.7449987530708313,
"learning_rate": 0.0004998044193700047,
"loss": 1.81,
"step": 2080
},
{
"epoch": 0.19651861167593235,
"grad_norm": 2.2598133087158203,
"learning_rate": 0.0004998034790785143,
"loss": 1.7547,
"step": 2090
},
{
"epoch": 0.19745889211457318,
"grad_norm": 0.42398279905319214,
"learning_rate": 0.0004998025387870239,
"loss": 1.7931,
"step": 2100
},
{
"epoch": 0.19745889211457318,
"eval_loss": 1.7216485738754272,
"eval_runtime": 23.3215,
"eval_samples_per_second": 2.144,
"eval_steps_per_second": 0.3,
"step": 2100
},
{
"epoch": 0.198399172553214,
"grad_norm": 0.644854724407196,
"learning_rate": 0.0004998015984955336,
"loss": 1.7352,
"step": 2110
},
{
"epoch": 0.19933945299185482,
"grad_norm": 0.8509730696678162,
"learning_rate": 0.0004998006582040432,
"loss": 1.7574,
"step": 2120
},
{
"epoch": 0.20027973343049565,
"grad_norm": 3.4616425037384033,
"learning_rate": 0.000499799717912553,
"loss": 1.7036,
"step": 2130
},
{
"epoch": 0.20122001386913646,
"grad_norm": 3.8178634643554688,
"learning_rate": 0.0004997987776210626,
"loss": 1.8085,
"step": 2140
},
{
"epoch": 0.2021602943077773,
"grad_norm": 2.551295042037964,
"learning_rate": 0.0004997978373295722,
"loss": 1.909,
"step": 2150
},
{
"epoch": 0.20310057474641813,
"grad_norm": 1.2975966930389404,
"learning_rate": 0.0004997968970380818,
"loss": 1.8048,
"step": 2160
},
{
"epoch": 0.20404085518505893,
"grad_norm": 5.3859782218933105,
"learning_rate": 0.0004997959567465915,
"loss": 1.7722,
"step": 2170
},
{
"epoch": 0.20498113562369977,
"grad_norm": 1.2567672729492188,
"learning_rate": 0.0004997950164551011,
"loss": 1.8425,
"step": 2180
},
{
"epoch": 0.2059214160623406,
"grad_norm": 0.9618350267410278,
"learning_rate": 0.0004997940761636107,
"loss": 1.732,
"step": 2190
},
{
"epoch": 0.2068616965009814,
"grad_norm": 0.9928431510925293,
"learning_rate": 0.0004997931358721204,
"loss": 1.738,
"step": 2200
},
{
"epoch": 0.2068616965009814,
"eval_loss": 1.6831611394882202,
"eval_runtime": 23.4087,
"eval_samples_per_second": 2.136,
"eval_steps_per_second": 0.299,
"step": 2200
},
{
"epoch": 0.20780197693962224,
"grad_norm": 1.1968138217926025,
"learning_rate": 0.00049979219558063,
"loss": 1.8048,
"step": 2210
},
{
"epoch": 0.20874225737826307,
"grad_norm": 7.316364765167236,
"learning_rate": 0.0004997912552891396,
"loss": 1.7093,
"step": 2220
},
{
"epoch": 0.20968253781690388,
"grad_norm": 3.233787775039673,
"learning_rate": 0.0004997903149976492,
"loss": 1.784,
"step": 2230
},
{
"epoch": 0.2106228182555447,
"grad_norm": 5.9083943367004395,
"learning_rate": 0.0004997893747061589,
"loss": 1.7086,
"step": 2240
},
{
"epoch": 0.21156309869418555,
"grad_norm": 1.5619888305664062,
"learning_rate": 0.0004997884344146686,
"loss": 1.731,
"step": 2250
},
{
"epoch": 0.21250337913282635,
"grad_norm": 7.746670722961426,
"learning_rate": 0.0004997874941231782,
"loss": 1.743,
"step": 2260
},
{
"epoch": 0.2134436595714672,
"grad_norm": 1.831390380859375,
"learning_rate": 0.0004997865538316878,
"loss": 1.737,
"step": 2270
},
{
"epoch": 0.21438394001010802,
"grad_norm": 1.6762924194335938,
"learning_rate": 0.0004997856135401975,
"loss": 1.7683,
"step": 2280
},
{
"epoch": 0.21532422044874883,
"grad_norm": 1.1603010892868042,
"learning_rate": 0.0004997846732487071,
"loss": 1.6562,
"step": 2290
},
{
"epoch": 0.21626450088738966,
"grad_norm": 2.6877505779266357,
"learning_rate": 0.0004997837329572168,
"loss": 1.6558,
"step": 2300
},
{
"epoch": 0.21626450088738966,
"eval_loss": 1.589247465133667,
"eval_runtime": 23.3396,
"eval_samples_per_second": 2.142,
"eval_steps_per_second": 0.3,
"step": 2300
},
{
"epoch": 0.2172047813260305,
"grad_norm": 1.8261255025863647,
"learning_rate": 0.0004997827926657264,
"loss": 1.5507,
"step": 2310
},
{
"epoch": 0.2181450617646713,
"grad_norm": 2.71986722946167,
"learning_rate": 0.000499781852374236,
"loss": 1.6683,
"step": 2320
},
{
"epoch": 0.21908534220331213,
"grad_norm": 5.95999002456665,
"learning_rate": 0.0004997809120827457,
"loss": 1.6454,
"step": 2330
},
{
"epoch": 0.22002562264195297,
"grad_norm": 1.8024688959121704,
"learning_rate": 0.0004997799717912553,
"loss": 1.555,
"step": 2340
},
{
"epoch": 0.2209659030805938,
"grad_norm": 3.2990245819091797,
"learning_rate": 0.0004997790314997649,
"loss": 1.5852,
"step": 2350
},
{
"epoch": 0.2219061835192346,
"grad_norm": 1.2326282262802124,
"learning_rate": 0.0004997780912082745,
"loss": 1.5787,
"step": 2360
},
{
"epoch": 0.22284646395787544,
"grad_norm": 1.383510708808899,
"learning_rate": 0.0004997771509167842,
"loss": 1.6182,
"step": 2370
},
{
"epoch": 0.22378674439651627,
"grad_norm": 0.9165681004524231,
"learning_rate": 0.0004997762106252939,
"loss": 1.6288,
"step": 2380
},
{
"epoch": 0.22472702483515708,
"grad_norm": 0.948382556438446,
"learning_rate": 0.0004997752703338035,
"loss": 1.531,
"step": 2390
},
{
"epoch": 0.2256673052737979,
"grad_norm": 2.0418496131896973,
"learning_rate": 0.0004997743300423132,
"loss": 1.5406,
"step": 2400
},
{
"epoch": 0.2256673052737979,
"eval_loss": 1.5306854248046875,
"eval_runtime": 23.3808,
"eval_samples_per_second": 2.139,
"eval_steps_per_second": 0.299,
"step": 2400
},
{
"epoch": 0.22660758571243875,
"grad_norm": 1.046777606010437,
"learning_rate": 0.0004997733897508228,
"loss": 1.5605,
"step": 2410
},
{
"epoch": 0.22754786615107955,
"grad_norm": 0.8935179710388184,
"learning_rate": 0.0004997724494593324,
"loss": 1.6534,
"step": 2420
},
{
"epoch": 0.2284881465897204,
"grad_norm": 0.9660417437553406,
"learning_rate": 0.000499771509167842,
"loss": 1.596,
"step": 2430
},
{
"epoch": 0.22942842702836122,
"grad_norm": 2.3814828395843506,
"learning_rate": 0.0004997705688763517,
"loss": 1.597,
"step": 2440
},
{
"epoch": 0.23036870746700203,
"grad_norm": 0.8121759295463562,
"learning_rate": 0.0004997696285848613,
"loss": 1.5244,
"step": 2450
},
{
"epoch": 0.23130898790564286,
"grad_norm": 0.5510159134864807,
"learning_rate": 0.000499768688293371,
"loss": 1.428,
"step": 2460
},
{
"epoch": 0.2322492683442837,
"grad_norm": 1.1485031843185425,
"learning_rate": 0.0004997677480018806,
"loss": 1.4615,
"step": 2470
},
{
"epoch": 0.2331895487829245,
"grad_norm": 0.5952764749526978,
"learning_rate": 0.0004997668077103902,
"loss": 1.4398,
"step": 2480
},
{
"epoch": 0.23412982922156533,
"grad_norm": 0.9520612955093384,
"learning_rate": 0.0004997658674188998,
"loss": 1.4441,
"step": 2490
},
{
"epoch": 0.23507010966020617,
"grad_norm": 0.6743489503860474,
"learning_rate": 0.0004997649271274095,
"loss": 1.4732,
"step": 2500
},
{
"epoch": 0.23507010966020617,
"eval_loss": 1.4287002086639404,
"eval_runtime": 23.4405,
"eval_samples_per_second": 2.133,
"eval_steps_per_second": 0.299,
"step": 2500
}
],
"logging_steps": 10,
"max_steps": 5317500,
"num_input_tokens_seen": 0,
"num_train_epochs": 500,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.43124234141696e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
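A minimal sketch of how this trainer state could be inspected offline, assuming the file above is saved locally as trainer_state.json (the filename, the local path, and the use of matplotlib are assumptions for illustration, not part of the original upload). It separates the training entries from the evaluation entries in log_history and plots loss against global step, which makes the divergence after checkpoint-1000 easy to spot.

```python
import json
import matplotlib.pyplot as plt

# Load the trainer state (path is an assumption; point it at the checkpoint directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss" instead.
train_entries = [e for e in state["log_history"] if "loss" in e]
eval_entries = [e for e in state["log_history"] if "eval_loss" in e]

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# Plot train and eval loss over steps to visualize the run, including the spike past step 1000.
plt.plot([e["step"] for e in train_entries], [e["loss"] for e in train_entries], label="train loss")
plt.plot([e["step"] for e in eval_entries], [e["eval_loss"] for e in eval_entries], label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()
```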