{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.983240223463687,
"eval_steps": 500,
"global_step": 402,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.037243947858473,
"grad_norm": 44.700423237162,
"learning_rate": 1.2195121951219514e-06,
"loss": 8.9389,
"num_tokens": 20900161.0,
"step": 5
},
{
"epoch": 0.074487895716946,
"grad_norm": 39.97441938804836,
"learning_rate": 2.4390243902439027e-06,
"loss": 8.6107,
"num_tokens": 41840347.0,
"step": 10
},
{
"epoch": 0.11173184357541899,
"grad_norm": 28.68948027128856,
"learning_rate": 3.6585365853658537e-06,
"loss": 6.8935,
"num_tokens": 62811867.0,
"step": 15
},
{
"epoch": 0.148975791433892,
"grad_norm": 11.393457880936973,
"learning_rate": 4.8780487804878055e-06,
"loss": 5.0942,
"num_tokens": 83733147.0,
"step": 20
},
{
"epoch": 0.186219739292365,
"grad_norm": 6.569246393996523,
"learning_rate": 6.0975609756097564e-06,
"loss": 4.2535,
"num_tokens": 104633209.0,
"step": 25
},
{
"epoch": 0.22346368715083798,
"grad_norm": 3.904749769413194,
"learning_rate": 7.317073170731707e-06,
"loss": 3.7968,
"num_tokens": 125472598.0,
"step": 30
},
{
"epoch": 0.260707635009311,
"grad_norm": 2.378812176791007,
"learning_rate": 8.536585365853658e-06,
"loss": 3.3338,
"num_tokens": 146395759.0,
"step": 35
},
{
"epoch": 0.297951582867784,
"grad_norm": 17.248964649482218,
"learning_rate": 9.756097560975611e-06,
"loss": 2.7105,
"num_tokens": 167334938.0,
"step": 40
},
{
"epoch": 0.33519553072625696,
"grad_norm": 1.6088393318851981,
"learning_rate": 9.889196675900278e-06,
"loss": 2.0834,
"num_tokens": 188300024.0,
"step": 45
},
{
"epoch": 0.37243947858473,
"grad_norm": 1.0479372877130533,
"learning_rate": 9.750692520775623e-06,
"loss": 1.512,
"num_tokens": 209250087.0,
"step": 50
},
{
"epoch": 0.409683426443203,
"grad_norm": 0.5797721215381728,
"learning_rate": 9.61218836565097e-06,
"loss": 1.1791,
"num_tokens": 230103075.0,
"step": 55
},
{
"epoch": 0.44692737430167595,
"grad_norm": 0.40789177100348467,
"learning_rate": 9.473684210526315e-06,
"loss": 1.0089,
"num_tokens": 250974570.0,
"step": 60
},
{
"epoch": 0.48417132216014896,
"grad_norm": 0.35568228307746297,
"learning_rate": 9.335180055401662e-06,
"loss": 0.9109,
"num_tokens": 271920522.0,
"step": 65
},
{
"epoch": 0.521415270018622,
"grad_norm": 0.2773679085093437,
"learning_rate": 9.19667590027701e-06,
"loss": 0.8572,
"num_tokens": 292817720.0,
"step": 70
},
{
"epoch": 0.5586592178770949,
"grad_norm": 0.23151040916631538,
"learning_rate": 9.058171745152356e-06,
"loss": 0.8121,
"num_tokens": 313711263.0,
"step": 75
},
{
"epoch": 0.595903165735568,
"grad_norm": 0.21004592737808472,
"learning_rate": 8.919667590027701e-06,
"loss": 0.7738,
"num_tokens": 334628125.0,
"step": 80
},
{
"epoch": 0.633147113594041,
"grad_norm": 0.18493389037034969,
"learning_rate": 8.781163434903048e-06,
"loss": 0.7537,
"num_tokens": 355507604.0,
"step": 85
},
{
"epoch": 0.6703910614525139,
"grad_norm": 0.19478442065788568,
"learning_rate": 8.642659279778393e-06,
"loss": 0.7295,
"num_tokens": 376376000.0,
"step": 90
},
{
"epoch": 0.707635009310987,
"grad_norm": 0.17818497873491812,
"learning_rate": 8.50415512465374e-06,
"loss": 0.7044,
"num_tokens": 397347520.0,
"step": 95
},
{
"epoch": 0.74487895716946,
"grad_norm": 0.15661959846871895,
"learning_rate": 8.365650969529087e-06,
"loss": 0.6905,
"num_tokens": 418243978.0,
"step": 100
},
{
"epoch": 0.7821229050279329,
"grad_norm": 0.1583248766438081,
"learning_rate": 8.227146814404434e-06,
"loss": 0.678,
"num_tokens": 439124783.0,
"step": 105
},
{
"epoch": 0.819366852886406,
"grad_norm": 0.1385769107129147,
"learning_rate": 8.088642659279779e-06,
"loss": 0.6577,
"num_tokens": 460067060.0,
"step": 110
},
{
"epoch": 0.8566108007448789,
"grad_norm": 0.1495271309048296,
"learning_rate": 7.950138504155124e-06,
"loss": 0.657,
"num_tokens": 480973564.0,
"step": 115
},
{
"epoch": 0.8938547486033519,
"grad_norm": 0.14473840607744518,
"learning_rate": 7.811634349030471e-06,
"loss": 0.6384,
"num_tokens": 501920213.0,
"step": 120
},
{
"epoch": 0.931098696461825,
"grad_norm": 0.1422179408253768,
"learning_rate": 7.673130193905818e-06,
"loss": 0.6312,
"num_tokens": 522882452.0,
"step": 125
},
{
"epoch": 0.9683426443202979,
"grad_norm": 0.1385480000357401,
"learning_rate": 7.534626038781164e-06,
"loss": 0.6267,
"num_tokens": 543853972.0,
"step": 130
},
{
"epoch": 1.0,
"grad_norm": 0.11986499724240843,
"learning_rate": 7.396121883656511e-06,
"loss": 0.6186,
"num_tokens": 561623820.0,
"step": 135
},
{
"epoch": 1.037243947858473,
"grad_norm": 0.12711165190299312,
"learning_rate": 7.257617728531856e-06,
"loss": 0.6108,
"num_tokens": 582493384.0,
"step": 140
},
{
"epoch": 1.074487895716946,
"grad_norm": 0.13843436006681253,
"learning_rate": 7.119113573407203e-06,
"loss": 0.6071,
"num_tokens": 603371436.0,
"step": 145
},
{
"epoch": 1.111731843575419,
"grad_norm": 0.12877141358698244,
"learning_rate": 6.980609418282549e-06,
"loss": 0.5969,
"num_tokens": 624211040.0,
"step": 150
},
{
"epoch": 1.148975791433892,
"grad_norm": 0.11172803837159029,
"learning_rate": 6.842105263157896e-06,
"loss": 0.5961,
"num_tokens": 645080464.0,
"step": 155
},
{
"epoch": 1.186219739292365,
"grad_norm": 0.12257878721308095,
"learning_rate": 6.703601108033242e-06,
"loss": 0.5961,
"num_tokens": 665984359.0,
"step": 160
},
{
"epoch": 1.223463687150838,
"grad_norm": 0.12182656514914046,
"learning_rate": 6.565096952908588e-06,
"loss": 0.5903,
"num_tokens": 686922696.0,
"step": 165
},
{
"epoch": 1.260707635009311,
"grad_norm": 0.11249855500170149,
"learning_rate": 6.426592797783934e-06,
"loss": 0.5844,
"num_tokens": 707833603.0,
"step": 170
},
{
"epoch": 1.2979515828677841,
"grad_norm": 0.12449747176048374,
"learning_rate": 6.2880886426592805e-06,
"loss": 0.5803,
"num_tokens": 728796956.0,
"step": 175
},
{
"epoch": 1.3351955307262569,
"grad_norm": 0.12431311388507522,
"learning_rate": 6.1495844875346266e-06,
"loss": 0.5769,
"num_tokens": 749639247.0,
"step": 180
},
{
"epoch": 1.37243947858473,
"grad_norm": 0.11884858911346256,
"learning_rate": 6.011080332409973e-06,
"loss": 0.5769,
"num_tokens": 770548626.0,
"step": 185
},
{
"epoch": 1.409683426443203,
"grad_norm": 0.12315392451802569,
"learning_rate": 5.8725761772853194e-06,
"loss": 0.5738,
"num_tokens": 791439887.0,
"step": 190
},
{
"epoch": 1.446927374301676,
"grad_norm": 0.14923588588117534,
"learning_rate": 5.734072022160665e-06,
"loss": 0.5666,
"num_tokens": 812411407.0,
"step": 195
},
{
"epoch": 1.484171322160149,
"grad_norm": 0.12273387220504116,
"learning_rate": 5.5955678670360115e-06,
"loss": 0.5685,
"num_tokens": 833361095.0,
"step": 200
},
{
"epoch": 1.5214152700186219,
"grad_norm": 0.11370568104002803,
"learning_rate": 5.4570637119113575e-06,
"loss": 0.5675,
"num_tokens": 854291659.0,
"step": 205
},
{
"epoch": 1.558659217877095,
"grad_norm": 0.11804662768074886,
"learning_rate": 5.318559556786704e-06,
"loss": 0.5638,
"num_tokens": 875218064.0,
"step": 210
},
{
"epoch": 1.595903165735568,
"grad_norm": 0.11044864275419868,
"learning_rate": 5.180055401662051e-06,
"loss": 0.5625,
"num_tokens": 896144111.0,
"step": 215
},
{
"epoch": 1.633147113594041,
"grad_norm": 0.1185174239784444,
"learning_rate": 5.041551246537396e-06,
"loss": 0.564,
"num_tokens": 917103979.0,
"step": 220
},
{
"epoch": 1.670391061452514,
"grad_norm": 0.12026499691941232,
"learning_rate": 4.903047091412742e-06,
"loss": 0.5554,
"num_tokens": 938069583.0,
"step": 225
},
{
"epoch": 1.7076350093109869,
"grad_norm": 0.11487320551484248,
"learning_rate": 4.764542936288089e-06,
"loss": 0.5498,
"num_tokens": 958987689.0,
"step": 230
},
{
"epoch": 1.74487895716946,
"grad_norm": 0.121932717620048,
"learning_rate": 4.626038781163435e-06,
"loss": 0.558,
"num_tokens": 979951299.0,
"step": 235
},
{
"epoch": 1.7821229050279328,
"grad_norm": 0.11603410597547152,
"learning_rate": 4.487534626038781e-06,
"loss": 0.5568,
"num_tokens": 1000838678.0,
"step": 240
},
{
"epoch": 1.819366852886406,
"grad_norm": 0.10726484051752529,
"learning_rate": 4.349030470914128e-06,
"loss": 0.5485,
"num_tokens": 1021747806.0,
"step": 245
},
{
"epoch": 1.856610800744879,
"grad_norm": 0.13127354938696664,
"learning_rate": 4.210526315789474e-06,
"loss": 0.551,
"num_tokens": 1042682828.0,
"step": 250
},
{
"epoch": 1.893854748603352,
"grad_norm": 0.13042928050538638,
"learning_rate": 4.07202216066482e-06,
"loss": 0.5482,
"num_tokens": 1063637713.0,
"step": 255
},
{
"epoch": 1.931098696461825,
"grad_norm": 0.10378281749835497,
"learning_rate": 3.933518005540167e-06,
"loss": 0.5492,
"num_tokens": 1084551621.0,
"step": 260
},
{
"epoch": 1.9683426443202978,
"grad_norm": 0.11405027703876851,
"learning_rate": 3.7950138504155126e-06,
"loss": 0.5432,
"num_tokens": 1105456616.0,
"step": 265
},
{
"epoch": 2.0,
"grad_norm": 0.1188033142047116,
"learning_rate": 3.656509695290859e-06,
"loss": 0.5457,
"num_tokens": 1123247640.0,
"step": 270
},
{
"epoch": 2.037243947858473,
"grad_norm": 0.11511290129075778,
"learning_rate": 3.5180055401662054e-06,
"loss": 0.5416,
"num_tokens": 1144178840.0,
"step": 275
},
{
"epoch": 2.074487895716946,
"grad_norm": 0.11812786472280946,
"learning_rate": 3.3795013850415515e-06,
"loss": 0.5418,
"num_tokens": 1165088860.0,
"step": 280
},
{
"epoch": 2.111731843575419,
"grad_norm": 0.10270714280099584,
"learning_rate": 3.240997229916898e-06,
"loss": 0.5409,
"num_tokens": 1185985413.0,
"step": 285
},
{
"epoch": 2.148975791433892,
"grad_norm": 0.1096828174375488,
"learning_rate": 3.102493074792244e-06,
"loss": 0.5389,
"num_tokens": 1206900439.0,
"step": 290
},
{
"epoch": 2.186219739292365,
"grad_norm": 0.12117958971385849,
"learning_rate": 2.9639889196675903e-06,
"loss": 0.5373,
"num_tokens": 1227788909.0,
"step": 295
},
{
"epoch": 2.223463687150838,
"grad_norm": 0.10303636082477868,
"learning_rate": 2.8254847645429368e-06,
"loss": 0.5422,
"num_tokens": 1248631579.0,
"step": 300
},
{
"epoch": 2.260707635009311,
"grad_norm": 0.1055723015177993,
"learning_rate": 2.686980609418283e-06,
"loss": 0.5338,
"num_tokens": 1269568695.0,
"step": 305
},
{
"epoch": 2.297951582867784,
"grad_norm": 0.09853558468957078,
"learning_rate": 2.5484764542936292e-06,
"loss": 0.5331,
"num_tokens": 1290448726.0,
"step": 310
},
{
"epoch": 2.335195530726257,
"grad_norm": 0.09780439525165775,
"learning_rate": 2.4099722991689752e-06,
"loss": 0.5373,
"num_tokens": 1311374206.0,
"step": 315
},
{
"epoch": 2.37243947858473,
"grad_norm": 0.11343316486691782,
"learning_rate": 2.2714681440443217e-06,
"loss": 0.5324,
"num_tokens": 1332316908.0,
"step": 320
},
{
"epoch": 2.4096834264432028,
"grad_norm": 0.09861823968974112,
"learning_rate": 2.1329639889196677e-06,
"loss": 0.5291,
"num_tokens": 1353285538.0,
"step": 325
},
{
"epoch": 2.446927374301676,
"grad_norm": 0.09761284938648966,
"learning_rate": 1.994459833795014e-06,
"loss": 0.5284,
"num_tokens": 1374171123.0,
"step": 330
},
{
"epoch": 2.484171322160149,
"grad_norm": 0.10068212333923239,
"learning_rate": 1.8559556786703603e-06,
"loss": 0.5315,
"num_tokens": 1395101851.0,
"step": 335
},
{
"epoch": 2.521415270018622,
"grad_norm": 0.09595198002866547,
"learning_rate": 1.7174515235457066e-06,
"loss": 0.5274,
"num_tokens": 1416073371.0,
"step": 340
},
{
"epoch": 2.558659217877095,
"grad_norm": 0.09486136119692574,
"learning_rate": 1.5789473684210526e-06,
"loss": 0.5308,
"num_tokens": 1437018467.0,
"step": 345
},
{
"epoch": 2.5959031657355682,
"grad_norm": 0.09700270169515557,
"learning_rate": 1.4404432132963992e-06,
"loss": 0.5268,
"num_tokens": 1457919336.0,
"step": 350
},
{
"epoch": 2.633147113594041,
"grad_norm": 0.09888912058958187,
"learning_rate": 1.3019390581717452e-06,
"loss": 0.5296,
"num_tokens": 1478880064.0,
"step": 355
},
{
"epoch": 2.6703910614525137,
"grad_norm": 0.09588099838616412,
"learning_rate": 1.1634349030470915e-06,
"loss": 0.5336,
"num_tokens": 1499763351.0,
"step": 360
},
{
"epoch": 2.707635009310987,
"grad_norm": 0.08937394796037,
"learning_rate": 1.024930747922438e-06,
"loss": 0.528,
"num_tokens": 1520641603.0,
"step": 365
},
{
"epoch": 2.74487895716946,
"grad_norm": 0.09490522714269171,
"learning_rate": 8.86426592797784e-07,
"loss": 0.5307,
"num_tokens": 1541584460.0,
"step": 370
},
{
"epoch": 2.782122905027933,
"grad_norm": 0.09709816809585911,
"learning_rate": 7.479224376731302e-07,
"loss": 0.5262,
"num_tokens": 1562543224.0,
"step": 375
},
{
"epoch": 2.819366852886406,
"grad_norm": 0.0908607538920962,
"learning_rate": 6.094182825484765e-07,
"loss": 0.5284,
"num_tokens": 1583428775.0,
"step": 380
},
{
"epoch": 2.856610800744879,
"grad_norm": 0.08729376484195411,
"learning_rate": 4.7091412742382274e-07,
"loss": 0.5311,
"num_tokens": 1604332118.0,
"step": 385
},
{
"epoch": 2.893854748603352,
"grad_norm": 0.08813530444241563,
"learning_rate": 3.32409972299169e-07,
"loss": 0.5274,
"num_tokens": 1625289686.0,
"step": 390
},
{
"epoch": 2.931098696461825,
"grad_norm": 0.08717195128913408,
"learning_rate": 1.9390581717451524e-07,
"loss": 0.5278,
"num_tokens": 1646192159.0,
"step": 395
},
{
"epoch": 2.968342644320298,
"grad_norm": 0.08486932600724212,
"learning_rate": 5.54016620498615e-08,
"loss": 0.5278,
"num_tokens": 1667121360.0,
"step": 400
},
{
"epoch": 2.983240223463687,
"num_tokens": 1675463706.0,
"step": 402,
"total_flos": 6.2367671592178156e+19,
"train_loss": 1.1144484827174477,
"train_runtime": 39508.3376,
"train_samples_per_second": 1.304,
"train_steps_per_second": 0.01
}
],
"logging_steps": 5,
"max_steps": 402,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.2367671592178156e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}