{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 27741,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05407159078620093,
"grad_norm": 19.944881439208984,
"learning_rate": 4.909880682022999e-05,
"loss": 1.3976,
"step": 500
},
{
"epoch": 0.10814318157240187,
"grad_norm": 35.43727493286133,
"learning_rate": 4.819761364045997e-05,
"loss": 0.9474,
"step": 1000
},
{
"epoch": 0.1622147723586028,
"grad_norm": 21.795238494873047,
"learning_rate": 4.7296420460689956e-05,
"loss": 0.8589,
"step": 1500
},
{
"epoch": 0.21628636314480373,
"grad_norm": 17.912302017211914,
"learning_rate": 4.639522728091994e-05,
"loss": 0.8071,
"step": 2000
},
{
"epoch": 0.27035795393100465,
"grad_norm": 16.4758243560791,
"learning_rate": 4.5494034101149925e-05,
"loss": 0.7429,
"step": 2500
},
{
"epoch": 0.3244295447172056,
"grad_norm": 14.498700141906738,
"learning_rate": 4.459284092137991e-05,
"loss": 0.7113,
"step": 3000
},
{
"epoch": 0.3785011355034065,
"grad_norm": 14.779314041137695,
"learning_rate": 4.3691647741609894e-05,
"loss": 0.6666,
"step": 3500
},
{
"epoch": 0.43257272628960747,
"grad_norm": 12.713911056518555,
"learning_rate": 4.279045456183988e-05,
"loss": 0.6366,
"step": 4000
},
{
"epoch": 0.48664431707580835,
"grad_norm": 16.784982681274414,
"learning_rate": 4.188926138206986e-05,
"loss": 0.6234,
"step": 4500
},
{
"epoch": 0.5407159078620093,
"grad_norm": 15.87082576751709,
"learning_rate": 4.098806820229985e-05,
"loss": 0.5871,
"step": 5000
},
{
"epoch": 0.5947874986482102,
"grad_norm": 8.966830253601074,
"learning_rate": 4.008687502252983e-05,
"loss": 0.5652,
"step": 5500
},
{
"epoch": 0.6488590894344112,
"grad_norm": 15.881940841674805,
"learning_rate": 3.9185681842759816e-05,
"loss": 0.5564,
"step": 6000
},
{
"epoch": 0.7029306802206121,
"grad_norm": 17.01358985900879,
"learning_rate": 3.82844886629898e-05,
"loss": 0.527,
"step": 6500
},
{
"epoch": 0.757002271006813,
"grad_norm": 16.689422607421875,
"learning_rate": 3.7383295483219785e-05,
"loss": 0.509,
"step": 7000
},
{
"epoch": 0.811073861793014,
"grad_norm": 10.947444915771484,
"learning_rate": 3.648210230344977e-05,
"loss": 0.4878,
"step": 7500
},
{
"epoch": 0.8651454525792149,
"grad_norm": 14.44914722442627,
"learning_rate": 3.558090912367975e-05,
"loss": 0.4656,
"step": 8000
},
{
"epoch": 0.9192170433654158,
"grad_norm": 13.648640632629395,
"learning_rate": 3.467971594390974e-05,
"loss": 0.4636,
"step": 8500
},
{
"epoch": 0.9732886341516167,
"grad_norm": 14.70873737335205,
"learning_rate": 3.3778522764139723e-05,
"loss": 0.4454,
"step": 9000
},
{
"epoch": 1.0273602249378178,
"grad_norm": 13.827669143676758,
"learning_rate": 3.287732958436971e-05,
"loss": 0.3965,
"step": 9500
},
{
"epoch": 1.0814318157240186,
"grad_norm": 13.648258209228516,
"learning_rate": 3.197613640459969e-05,
"loss": 0.3644,
"step": 10000
},
{
"epoch": 1.1355034065102196,
"grad_norm": 7.754871368408203,
"learning_rate": 3.107494322482968e-05,
"loss": 0.3546,
"step": 10500
},
{
"epoch": 1.1895749972964205,
"grad_norm": 10.9650239944458,
"learning_rate": 3.017375004505966e-05,
"loss": 0.3453,
"step": 11000
},
{
"epoch": 1.2436465880826213,
"grad_norm": 9.524118423461914,
"learning_rate": 2.9272556865289646e-05,
"loss": 0.3451,
"step": 11500
},
{
"epoch": 1.2977181788688223,
"grad_norm": 12.612396240234375,
"learning_rate": 2.837136368551963e-05,
"loss": 0.3447,
"step": 12000
},
{
"epoch": 1.3517897696550232,
"grad_norm": 17.665691375732422,
"learning_rate": 2.7470170505749615e-05,
"loss": 0.3358,
"step": 12500
},
{
"epoch": 1.4058613604412242,
"grad_norm": 15.416611671447754,
"learning_rate": 2.65689773259796e-05,
"loss": 0.3273,
"step": 13000
},
{
"epoch": 1.459932951227425,
"grad_norm": 7.313694953918457,
"learning_rate": 2.5667784146209584e-05,
"loss": 0.3177,
"step": 13500
},
{
"epoch": 1.5140045420136259,
"grad_norm": 7.84274435043335,
"learning_rate": 2.476659096643957e-05,
"loss": 0.3054,
"step": 14000
},
{
"epoch": 1.568076132799827,
"grad_norm": 8.8153657913208,
"learning_rate": 2.3865397786669553e-05,
"loss": 0.3005,
"step": 14500
},
{
"epoch": 1.622147723586028,
"grad_norm": 7.336580753326416,
"learning_rate": 2.2964204606899537e-05,
"loss": 0.2941,
"step": 15000
},
{
"epoch": 1.6762193143722288,
"grad_norm": 8.88096809387207,
"learning_rate": 2.2063011427129522e-05,
"loss": 0.2957,
"step": 15500
},
{
"epoch": 1.7302909051584296,
"grad_norm": 5.086656093597412,
"learning_rate": 2.1161818247359506e-05,
"loss": 0.2882,
"step": 16000
},
{
"epoch": 1.7843624959446307,
"grad_norm": 10.830957412719727,
"learning_rate": 2.026062506758949e-05,
"loss": 0.2842,
"step": 16500
},
{
"epoch": 1.8384340867308318,
"grad_norm": 8.022000312805176,
"learning_rate": 1.9359431887819472e-05,
"loss": 0.2724,
"step": 17000
},
{
"epoch": 1.8925056775170326,
"grad_norm": 11.823745727539062,
"learning_rate": 1.8458238708049457e-05,
"loss": 0.2682,
"step": 17500
},
{
"epoch": 1.9465772683032334,
"grad_norm": 10.353747367858887,
"learning_rate": 1.755704552827944e-05,
"loss": 0.2687,
"step": 18000
},
{
"epoch": 2.0006488590894342,
"grad_norm": 5.618934154510498,
"learning_rate": 1.6655852348509426e-05,
"loss": 0.2584,
"step": 18500
},
{
"epoch": 2.0547204498756355,
"grad_norm": 5.912876129150391,
"learning_rate": 1.575465916873941e-05,
"loss": 0.1984,
"step": 19000
},
{
"epoch": 2.1087920406618363,
"grad_norm": 5.198564529418945,
"learning_rate": 1.4853465988969395e-05,
"loss": 0.1996,
"step": 19500
},
{
"epoch": 2.162863631448037,
"grad_norm": 6.234917163848877,
"learning_rate": 1.3952272809199379e-05,
"loss": 0.195,
"step": 20000
},
{
"epoch": 2.216935222234238,
"grad_norm": 5.457370758056641,
"learning_rate": 1.3051079629429367e-05,
"loss": 0.1928,
"step": 20500
},
{
"epoch": 2.2710068130204393,
"grad_norm": 6.5635666847229,
"learning_rate": 1.214988644965935e-05,
"loss": 0.1949,
"step": 21000
},
{
"epoch": 2.32507840380664,
"grad_norm": 4.495577335357666,
"learning_rate": 1.1248693269889334e-05,
"loss": 0.19,
"step": 21500
},
{
"epoch": 2.379149994592841,
"grad_norm": 6.373176574707031,
"learning_rate": 1.0347500090119317e-05,
"loss": 0.1847,
"step": 22000
},
{
"epoch": 2.4332215853790418,
"grad_norm": 4.968171119689941,
"learning_rate": 9.446306910349303e-06,
"loss": 0.1783,
"step": 22500
},
{
"epoch": 2.4872931761652426,
"grad_norm": 5.412923812866211,
"learning_rate": 8.545113730579288e-06,
"loss": 0.177,
"step": 23000
},
{
"epoch": 2.541364766951444,
"grad_norm": 5.907869815826416,
"learning_rate": 7.643920550809272e-06,
"loss": 0.1757,
"step": 23500
},
{
"epoch": 2.5954363577376447,
"grad_norm": 5.753273010253906,
"learning_rate": 6.742727371039257e-06,
"loss": 0.1716,
"step": 24000
},
{
"epoch": 2.6495079485238455,
"grad_norm": 4.856042861938477,
"learning_rate": 5.84153419126924e-06,
"loss": 0.1648,
"step": 24500
},
{
"epoch": 2.7035795393100464,
"grad_norm": 4.939342498779297,
"learning_rate": 4.940341011499225e-06,
"loss": 0.1617,
"step": 25000
},
{
"epoch": 2.7576511300962476,
"grad_norm": 5.477869987487793,
"learning_rate": 4.03914783172921e-06,
"loss": 0.1671,
"step": 25500
},
{
"epoch": 2.8117227208824485,
"grad_norm": 6.977444648742676,
"learning_rate": 3.1379546519591943e-06,
"loss": 0.1592,
"step": 26000
},
{
"epoch": 2.8657943116686493,
"grad_norm": 4.571088790893555,
"learning_rate": 2.2367614721891784e-06,
"loss": 0.1588,
"step": 26500
},
{
"epoch": 2.91986590245485,
"grad_norm": 4.860353946685791,
"learning_rate": 1.335568292419163e-06,
"loss": 0.1518,
"step": 27000
},
{
"epoch": 2.973937493241051,
"grad_norm": 3.7616257667541504,
"learning_rate": 4.3437511264914753e-07,
"loss": 0.1568,
"step": 27500
},
{
"epoch": 3.0,
"step": 27741,
"total_flos": 2.3702775297552e+17,
"train_loss": 0.38246467155521724,
"train_runtime": 9126.074,
"train_samples_per_second": 194.526,
"train_steps_per_second": 3.04
}
],
"logging_steps": 500,
"max_steps": 27741,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.3702775297552e+17,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}