Training in progress, step 100, checkpoint
{
  "best_metric": 0.38556817173957825,
  "best_model_checkpoint": "./mixstral/05-04-24-Weni-WeniGPT-Agents-Mixstral-Instruct-2.0.0-KTO_KTO with Agents 1.2.0 dataset and Mixstral model-3_max_steps-145_batch_16_2024-04-05_ppid_9/checkpoint-100",
  "epoch": 0.684931506849315,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "grad_norm": 2.368553638458252,
      "kl": 0.5817955732345581,
      "learning_rate": 0.00018,
      "logps/chosen": -227.94122314453125,
      "logps/rejected": -240.64300537109375,
      "loss": 0.475,
      "rewards/chosen": -0.784079372882843,
      "rewards/margins": 0.5924594402313232,
      "rewards/rejected": -1.3401262760162354,
      "step": 20
    },
    {
      "epoch": 0.27,
      "grad_norm": 1.441540241241455,
      "kl": 10.41219425201416,
      "learning_rate": 0.00015142857142857143,
      "logps/chosen": -233.67567443847656,
      "logps/rejected": -243.45187377929688,
      "loss": 0.4403,
      "rewards/chosen": 0.15312156081199646,
      "rewards/margins": 0.6628362536430359,
      "rewards/rejected": -0.5719818472862244,
      "step": 40
    },
    {
      "epoch": 0.34,
      "eval_kl": 0.5120495557785034,
      "eval_logps/chosen": -248.310302734375,
      "eval_logps/rejected": -253.4099578857422,
      "eval_loss": 0.4195210039615631,
      "eval_rewards/chosen": -1.1683257818222046,
      "eval_rewards/margins": 2.6137375831604004,
      "eval_rewards/rejected": -3.615262269973755,
      "eval_runtime": 357.6046,
      "eval_samples_per_second": 0.839,
      "eval_steps_per_second": 0.21,
      "step": 50
    },
    {
      "epoch": 0.41,
      "grad_norm": 2.9933063983917236,
      "kl": 1.537040114402771,
      "learning_rate": 0.00012285714285714287,
      "logps/chosen": -244.6122283935547,
      "logps/rejected": -288.9703674316406,
      "loss": 0.3877,
      "rewards/chosen": -0.880365252494812,
      "rewards/margins": 3.219341516494751,
      "rewards/rejected": -4.040163516998291,
      "step": 60
    },
    {
      "epoch": 0.55,
      "grad_norm": 1.7866162061691284,
      "kl": 8.153600692749023,
      "learning_rate": 9.428571428571429e-05,
      "logps/chosen": -193.49400329589844,
      "logps/rejected": -239.42953491210938,
      "loss": 0.3612,
      "rewards/chosen": 1.8910856246948242,
      "rewards/margins": 2.9180142879486084,
      "rewards/rejected": -1.0155872106552124,
      "step": 80
    },
    {
      "epoch": 0.68,
      "grad_norm": 1.8967958688735962,
      "kl": 5.099704742431641,
      "learning_rate": 6.714285714285714e-05,
      "logps/chosen": -210.8452911376953,
      "logps/rejected": -259.0009460449219,
      "loss": 0.3518,
      "rewards/chosen": 1.323478102684021,
      "rewards/margins": 3.5613787174224854,
      "rewards/rejected": -2.316351890563965,
      "step": 100
    },
    {
      "epoch": 0.68,
      "eval_kl": 0.0,
      "eval_logps/chosen": -263.7559509277344,
      "eval_logps/rejected": -282.9854431152344,
      "eval_loss": 0.38556817173957825,
      "eval_rewards/chosen": -2.7128894329071045,
      "eval_rewards/margins": 3.9057483673095703,
      "eval_rewards/rejected": -6.572808265686035,
      "eval_runtime": 356.8194,
      "eval_samples_per_second": 0.841,
      "eval_steps_per_second": 0.21,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 145,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
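
The state above is a standard Hugging Face Trainer checkpoint state from a KTO run: training entries in "log_history" carry "loss", "kl", and reward statistics, while evaluation entries carry the "eval_*" counterparts. A minimal sketch follows, assuming the JSON is saved locally as "trainer_state.json" (the conventional filename inside a checkpoint directory; the local path here is an assumption), showing how to read the training and evaluation curves back out with only the Python standard library.

```python
# Minimal sketch: parse a Trainer checkpoint state and print its loss curves.
# Assumes the JSON above was saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"best eval_loss:  {state['best_metric']:.4f}")

# Training log entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>3}  loss={e['loss']:.4f}  "
          f"kl={e['kl']:.3f}  reward_margin={e['rewards/margins']:.3f}")

for e in eval_logs:
    print(f"step {e['step']:>3}  eval_loss={e['eval_loss']:.4f}  "
          f"eval_reward_margin={e['eval_rewards/margins']:.3f}")
```

Reading the state this way makes the checkpoint selection explicit: the step-100 "eval_loss" (0.38556817173957825) equals "best_metric", which is why "best_model_checkpoint" points at checkpoint-100.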