{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 11.912498683051679,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.8434743881225586,
      "logits/rejected": -2.769583225250244,
      "logps/chosen": -178.4530487060547,
      "logps/rejected": -233.52891540527344,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 11.520133874238494,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.754981756210327,
      "logits/rejected": -2.7299110889434814,
      "logps/chosen": -225.8627471923828,
      "logps/rejected": -265.90814208984375,
      "loss": 0.6849,
      "rewards/accuracies": 0.5902777910232544,
      "rewards/chosen": -0.03168363496661186,
      "rewards/margins": 0.02158752828836441,
      "rewards/rejected": -0.053271159529685974,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 25.56430149750331,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.799773693084717,
      "logits/rejected": -2.7496700286865234,
      "logps/chosen": -259.5813293457031,
      "logps/rejected": -312.0262451171875,
      "loss": 0.6336,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.2481800615787506,
      "rewards/margins": 0.2926359176635742,
      "rewards/rejected": -0.5408159494400024,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 15.566903607908545,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.7819409370422363,
      "logits/rejected": -2.750037431716919,
      "logps/chosen": -289.746826171875,
      "logps/rejected": -358.7594299316406,
      "loss": 0.5846,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.41477519273757935,
      "rewards/margins": 0.5125462412834167,
      "rewards/rejected": -0.9273213148117065,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 18.95635048628815,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.6801064014434814,
      "logits/rejected": -2.6403772830963135,
      "logps/chosen": -270.6723937988281,
      "logps/rejected": -362.1679992675781,
      "loss": 0.5577,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.5635426640510559,
      "rewards/margins": 0.5637884736061096,
      "rewards/rejected": -1.127331018447876,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 20.509748331156263,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7062981128692627,
      "logits/rejected": -2.653395175933838,
      "logps/chosen": -261.1449279785156,
      "logps/rejected": -334.8601379394531,
      "loss": 0.563,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.5345139503479004,
      "rewards/margins": 0.5169811844825745,
      "rewards/rejected": -1.0514951944351196,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5989973585484392,
      "train_runtime": 1077.191,
      "train_samples_per_second": 14.188,
      "train_steps_per_second": 0.055
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}