{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.0,
"eval_steps": 500,
"global_step": 6880,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11627906976744186,
"grad_norm": 1.6077862977981567,
"learning_rate": 1.1511627906976744e-05,
"loss": 7.0526,
"step": 100
},
{
"epoch": 0.23255813953488372,
"grad_norm": 0.98894202709198,
"learning_rate": 2.313953488372093e-05,
"loss": 5.427,
"step": 200
},
{
"epoch": 0.3488372093023256,
"grad_norm": 0.7544777989387512,
"learning_rate": 3.4767441860465115e-05,
"loss": 4.7742,
"step": 300
},
{
"epoch": 0.46511627906976744,
"grad_norm": 1.1207612752914429,
"learning_rate": 4.639534883720931e-05,
"loss": 4.5344,
"step": 400
},
{
"epoch": 0.5813953488372093,
"grad_norm": 0.8379786610603333,
"learning_rate": 5.802325581395349e-05,
"loss": 4.3805,
"step": 500
},
{
"epoch": 0.6976744186046512,
"grad_norm": 0.7775167226791382,
"learning_rate": 6.965116279069767e-05,
"loss": 4.1699,
"step": 600
},
{
"epoch": 0.813953488372093,
"grad_norm": 0.7881819009780884,
"learning_rate": 8.127906976744186e-05,
"loss": 4.0952,
"step": 700
},
{
"epoch": 0.9302325581395349,
"grad_norm": 0.7651063799858093,
"learning_rate": 9.290697674418604e-05,
"loss": 4.0067,
"step": 800
},
{
"epoch": 1.0,
"eval_loss": 3.915109872817993,
"eval_runtime": 7.8813,
"eval_samples_per_second": 388.009,
"eval_steps_per_second": 48.596,
"step": 860
},
{
"epoch": 1.0465116279069768,
"grad_norm": 0.6977295279502869,
"learning_rate": 9.949612403100775e-05,
"loss": 3.855,
"step": 900
},
{
"epoch": 1.1627906976744187,
"grad_norm": 0.8410159349441528,
"learning_rate": 9.820413436692507e-05,
"loss": 3.8458,
"step": 1000
},
{
"epoch": 1.2790697674418605,
"grad_norm": 0.7741426825523376,
"learning_rate": 9.691214470284238e-05,
"loss": 3.7573,
"step": 1100
},
{
"epoch": 1.3953488372093024,
"grad_norm": 0.7446125745773315,
"learning_rate": 9.56201550387597e-05,
"loss": 3.6336,
"step": 1200
},
{
"epoch": 1.5116279069767442,
"grad_norm": 0.900859534740448,
"learning_rate": 9.4328165374677e-05,
"loss": 3.6147,
"step": 1300
},
{
"epoch": 1.627906976744186,
"grad_norm": 0.8706986308097839,
"learning_rate": 9.303617571059432e-05,
"loss": 3.5227,
"step": 1400
},
{
"epoch": 1.744186046511628,
"grad_norm": 0.9106377959251404,
"learning_rate": 9.174418604651163e-05,
"loss": 3.4545,
"step": 1500
},
{
"epoch": 1.8604651162790697,
"grad_norm": 0.7554797530174255,
"learning_rate": 9.045219638242895e-05,
"loss": 3.4257,
"step": 1600
},
{
"epoch": 1.9767441860465116,
"grad_norm": 0.8181759119033813,
"learning_rate": 8.916020671834626e-05,
"loss": 3.3851,
"step": 1700
},
{
"epoch": 2.0,
"eval_loss": 3.351266860961914,
"eval_runtime": 7.8508,
"eval_samples_per_second": 389.516,
"eval_steps_per_second": 48.785,
"step": 1720
},
{
"epoch": 2.0930232558139537,
"grad_norm": 0.783263623714447,
"learning_rate": 8.786821705426357e-05,
"loss": 3.2934,
"step": 1800
},
{
"epoch": 2.2093023255813953,
"grad_norm": 0.8410254716873169,
"learning_rate": 8.657622739018088e-05,
"loss": 3.2805,
"step": 1900
},
{
"epoch": 2.3255813953488373,
"grad_norm": 0.829415500164032,
"learning_rate": 8.52842377260982e-05,
"loss": 3.2845,
"step": 2000
},
{
"epoch": 2.441860465116279,
"grad_norm": 0.8594213128089905,
"learning_rate": 8.399224806201551e-05,
"loss": 3.1855,
"step": 2100
},
{
"epoch": 2.558139534883721,
"grad_norm": 1.0678484439849854,
"learning_rate": 8.270025839793282e-05,
"loss": 3.1735,
"step": 2200
},
{
"epoch": 2.6744186046511627,
"grad_norm": 0.7645172476768494,
"learning_rate": 8.140826873385013e-05,
"loss": 3.1673,
"step": 2300
},
{
"epoch": 2.7906976744186047,
"grad_norm": 0.838912844657898,
"learning_rate": 8.011627906976745e-05,
"loss": 3.1312,
"step": 2400
},
{
"epoch": 2.9069767441860463,
"grad_norm": 0.8251073360443115,
"learning_rate": 7.882428940568476e-05,
"loss": 3.0803,
"step": 2500
},
{
"epoch": 3.0,
"eval_loss": 3.090207815170288,
"eval_runtime": 7.8554,
"eval_samples_per_second": 389.286,
"eval_steps_per_second": 48.756,
"step": 2580
},
{
"epoch": 3.0232558139534884,
"grad_norm": 0.8788666725158691,
"learning_rate": 7.753229974160207e-05,
"loss": 3.05,
"step": 2600
},
{
"epoch": 3.13953488372093,
"grad_norm": 0.8054720163345337,
"learning_rate": 7.624031007751938e-05,
"loss": 2.9595,
"step": 2700
},
{
"epoch": 3.255813953488372,
"grad_norm": 0.8272735476493835,
"learning_rate": 7.49483204134367e-05,
"loss": 2.9702,
"step": 2800
},
{
"epoch": 3.3720930232558137,
"grad_norm": 0.8935261368751526,
"learning_rate": 7.365633074935401e-05,
"loss": 2.971,
"step": 2900
},
{
"epoch": 3.488372093023256,
"grad_norm": 0.8290767073631287,
"learning_rate": 7.236434108527133e-05,
"loss": 2.9443,
"step": 3000
},
{
"epoch": 3.604651162790698,
"grad_norm": 0.891353964805603,
"learning_rate": 7.107235142118863e-05,
"loss": 2.9608,
"step": 3100
},
{
"epoch": 3.7209302325581395,
"grad_norm": 1.282278299331665,
"learning_rate": 6.978036175710595e-05,
"loss": 2.9595,
"step": 3200
},
{
"epoch": 3.8372093023255816,
"grad_norm": 0.8332661986351013,
"learning_rate": 6.848837209302326e-05,
"loss": 2.9128,
"step": 3300
},
{
"epoch": 3.953488372093023,
"grad_norm": 0.8648365139961243,
"learning_rate": 6.719638242894058e-05,
"loss": 2.8727,
"step": 3400
},
{
"epoch": 4.0,
"eval_loss": 2.9329044818878174,
"eval_runtime": 7.8343,
"eval_samples_per_second": 390.333,
"eval_steps_per_second": 48.887,
"step": 3440
},
{
"epoch": 4.069767441860465,
"grad_norm": 0.8708928823471069,
"learning_rate": 6.590439276485788e-05,
"loss": 2.8547,
"step": 3500
},
{
"epoch": 4.186046511627907,
"grad_norm": 0.9987735152244568,
"learning_rate": 6.46124031007752e-05,
"loss": 2.7684,
"step": 3600
},
{
"epoch": 4.3023255813953485,
"grad_norm": 0.8593104481697083,
"learning_rate": 6.332041343669251e-05,
"loss": 2.7917,
"step": 3700
},
{
"epoch": 4.4186046511627906,
"grad_norm": 0.9889882206916809,
"learning_rate": 6.202842377260983e-05,
"loss": 2.8232,
"step": 3800
},
{
"epoch": 4.534883720930233,
"grad_norm": 0.9209152460098267,
"learning_rate": 6.073643410852713e-05,
"loss": 2.7823,
"step": 3900
},
{
"epoch": 4.651162790697675,
"grad_norm": 0.9376141428947449,
"learning_rate": 5.9444444444444445e-05,
"loss": 2.7623,
"step": 4000
},
{
"epoch": 4.767441860465116,
"grad_norm": 0.9320041537284851,
"learning_rate": 5.815245478036177e-05,
"loss": 2.7476,
"step": 4100
},
{
"epoch": 4.883720930232558,
"grad_norm": 0.8268842101097107,
"learning_rate": 5.686046511627907e-05,
"loss": 2.7621,
"step": 4200
},
{
"epoch": 5.0,
"grad_norm": 1.042984962463379,
"learning_rate": 5.5568475452196386e-05,
"loss": 2.7301,
"step": 4300
},
{
"epoch": 5.0,
"eval_loss": 2.827605724334717,
"eval_runtime": 7.8468,
"eval_samples_per_second": 389.714,
"eval_steps_per_second": 48.81,
"step": 4300
},
{
"epoch": 5.116279069767442,
"grad_norm": 0.931705117225647,
"learning_rate": 5.4276485788113695e-05,
"loss": 2.6633,
"step": 4400
},
{
"epoch": 5.232558139534884,
"grad_norm": 1.0284597873687744,
"learning_rate": 5.298449612403101e-05,
"loss": 2.6595,
"step": 4500
},
{
"epoch": 5.348837209302325,
"grad_norm": 1.0262751579284668,
"learning_rate": 5.169250645994832e-05,
"loss": 2.6781,
"step": 4600
},
{
"epoch": 5.465116279069767,
"grad_norm": 1.0172241926193237,
"learning_rate": 5.0400516795865635e-05,
"loss": 2.6229,
"step": 4700
},
{
"epoch": 5.5813953488372094,
"grad_norm": 0.8998900055885315,
"learning_rate": 4.910852713178295e-05,
"loss": 2.6155,
"step": 4800
},
{
"epoch": 5.6976744186046515,
"grad_norm": 0.9043959975242615,
"learning_rate": 4.781653746770026e-05,
"loss": 2.6454,
"step": 4900
},
{
"epoch": 5.813953488372093,
"grad_norm": 1.0376083850860596,
"learning_rate": 4.6524547803617576e-05,
"loss": 2.6783,
"step": 5000
},
{
"epoch": 5.930232558139535,
"grad_norm": 0.8958259224891663,
"learning_rate": 4.5232558139534885e-05,
"loss": 2.6266,
"step": 5100
},
{
"epoch": 6.0,
"eval_loss": 2.757228136062622,
"eval_runtime": 7.8611,
"eval_samples_per_second": 389.006,
"eval_steps_per_second": 48.721,
"step": 5160
},
{
"epoch": 6.046511627906977,
"grad_norm": 1.0552242994308472,
"learning_rate": 4.39405684754522e-05,
"loss": 2.5892,
"step": 5200
},
{
"epoch": 6.162790697674419,
"grad_norm": 1.0633459091186523,
"learning_rate": 4.264857881136951e-05,
"loss": 2.5359,
"step": 5300
},
{
"epoch": 6.27906976744186,
"grad_norm": 1.0308552980422974,
"learning_rate": 4.1356589147286825e-05,
"loss": 2.5536,
"step": 5400
},
{
"epoch": 6.395348837209302,
"grad_norm": 0.9943313598632812,
"learning_rate": 4.0064599483204134e-05,
"loss": 2.5282,
"step": 5500
},
{
"epoch": 6.511627906976744,
"grad_norm": 1.0665168762207031,
"learning_rate": 3.877260981912145e-05,
"loss": 2.5605,
"step": 5600
},
{
"epoch": 6.627906976744186,
"grad_norm": 0.9594394564628601,
"learning_rate": 3.748062015503876e-05,
"loss": 2.5349,
"step": 5700
},
{
"epoch": 6.7441860465116275,
"grad_norm": 1.0906480550765991,
"learning_rate": 3.6188630490956075e-05,
"loss": 2.5282,
"step": 5800
},
{
"epoch": 6.8604651162790695,
"grad_norm": 0.9688259363174438,
"learning_rate": 3.489664082687339e-05,
"loss": 2.5354,
"step": 5900
},
{
"epoch": 6.976744186046512,
"grad_norm": 1.0499610900878906,
"learning_rate": 3.36046511627907e-05,
"loss": 2.5551,
"step": 6000
},
{
"epoch": 7.0,
"eval_loss": 2.7097859382629395,
"eval_runtime": 7.8861,
"eval_samples_per_second": 387.773,
"eval_steps_per_second": 48.567,
"step": 6020
},
{
"epoch": 7.093023255813954,
"grad_norm": 1.0121361017227173,
"learning_rate": 3.2312661498708015e-05,
"loss": 2.473,
"step": 6100
},
{
"epoch": 7.209302325581396,
"grad_norm": 1.0595309734344482,
"learning_rate": 3.1020671834625324e-05,
"loss": 2.4383,
"step": 6200
},
{
"epoch": 7.325581395348837,
"grad_norm": 1.0951762199401855,
"learning_rate": 2.9728682170542636e-05,
"loss": 2.4518,
"step": 6300
},
{
"epoch": 7.441860465116279,
"grad_norm": 1.131641149520874,
"learning_rate": 2.843669250645995e-05,
"loss": 2.4143,
"step": 6400
},
{
"epoch": 7.558139534883721,
"grad_norm": 1.1722636222839355,
"learning_rate": 2.714470284237726e-05,
"loss": 2.5198,
"step": 6500
},
{
"epoch": 7.674418604651163,
"grad_norm": 1.0916329622268677,
"learning_rate": 2.5852713178294573e-05,
"loss": 2.4656,
"step": 6600
},
{
"epoch": 7.790697674418604,
"grad_norm": 1.102444052696228,
"learning_rate": 2.4560723514211886e-05,
"loss": 2.4779,
"step": 6700
},
{
"epoch": 7.906976744186046,
"grad_norm": 1.101860761642456,
"learning_rate": 2.32687338501292e-05,
"loss": 2.4783,
"step": 6800
},
{
"epoch": 8.0,
"eval_loss": 2.6801645755767822,
"eval_runtime": 7.8846,
"eval_samples_per_second": 387.844,
"eval_steps_per_second": 48.576,
"step": 6880
}
],
"logging_steps": 100,
"max_steps": 8600,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.7517692420096e+16,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}