{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05194805194805195,
  "eval_steps": 1,
  "global_step": 1,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05194805194805195,
      "grad_norm": 15.329390525817871,
      "learning_rate": 4e-05,
      "loss": 7.4907,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_NLI-v2_cosine_accuracy": 1.0,
      "eval_NLI-v2_dot_accuracy": 0.125,
      "eval_NLI-v2_euclidean_accuracy": 1.0,
      "eval_NLI-v2_manhattan_accuracy": 1.0,
      "eval_NLI-v2_max_accuracy": 1.0,
      "eval_VitaminC_cosine_accuracy": 0.55078125,
      "eval_VitaminC_cosine_accuracy_threshold": 0.9503422379493713,
      "eval_VitaminC_cosine_ap": 0.5203148129920425,
      "eval_VitaminC_cosine_f1": 0.6542553191489362,
      "eval_VitaminC_cosine_f1_threshold": 0.656802773475647,
      "eval_VitaminC_cosine_precision": 0.48616600790513836,
      "eval_VitaminC_cosine_recall": 1.0,
      "eval_VitaminC_dot_accuracy": 0.55078125,
      "eval_VitaminC_dot_accuracy_threshold": 425.30816650390625,
      "eval_VitaminC_dot_ap": 0.5120444819966403,
      "eval_VitaminC_dot_f1": 0.6542553191489362,
      "eval_VitaminC_dot_f1_threshold": 262.8174743652344,
      "eval_VitaminC_dot_precision": 0.48616600790513836,
      "eval_VitaminC_dot_recall": 1.0,
      "eval_VitaminC_euclidean_accuracy": 0.55078125,
      "eval_VitaminC_euclidean_accuracy_threshold": 7.050784111022949,
      "eval_VitaminC_euclidean_ap": 0.5175301700973289,
      "eval_VitaminC_euclidean_f1": 0.6507936507936508,
      "eval_VitaminC_euclidean_f1_threshold": 17.465972900390625,
      "eval_VitaminC_euclidean_precision": 0.4823529411764706,
      "eval_VitaminC_euclidean_recall": 1.0,
      "eval_VitaminC_manhattan_accuracy": 0.5390625,
      "eval_VitaminC_manhattan_accuracy_threshold": 107.76934814453125,
      "eval_VitaminC_manhattan_ap": 0.5208015383309144,
      "eval_VitaminC_manhattan_f1": 0.6542553191489362,
      "eval_VitaminC_manhattan_f1_threshold": 271.5865478515625,
      "eval_VitaminC_manhattan_precision": 0.48616600790513836,
      "eval_VitaminC_manhattan_recall": 1.0,
      "eval_VitaminC_max_accuracy": 0.55078125,
      "eval_VitaminC_max_accuracy_threshold": 425.30816650390625,
      "eval_VitaminC_max_ap": 0.5208015383309144,
      "eval_VitaminC_max_f1": 0.6542553191489362,
      "eval_VitaminC_max_f1_threshold": 271.5865478515625,
      "eval_VitaminC_max_precision": 0.48616600790513836,
      "eval_VitaminC_max_recall": 1.0,
      "eval_sequential_score": 0.5208015383309144,
      "eval_sts-test_pearson_cosine": 0.033928485348000664,
      "eval_sts-test_pearson_dot": 0.2554086617921545,
      "eval_sts-test_pearson_euclidean": 0.03489200141716902,
      "eval_sts-test_pearson_manhattan": 0.06296467882181725,
      "eval_sts-test_pearson_max": 0.2554086617921545,
      "eval_sts-test_spearman_cosine": 0.08944249572062771,
      "eval_sts-test_spearman_dot": 0.27863958137561534,
      "eval_sts-test_spearman_euclidean": 0.06202473500014035,
      "eval_sts-test_spearman_manhattan": 0.08266825793291849,
      "eval_sts-test_spearman_max": 0.27863958137561534,
      "eval_vitaminc-pairs_loss": 2.6973235607147217,
      "eval_vitaminc-pairs_runtime": 1.4352,
      "eval_vitaminc-pairs_samples_per_second": 75.251,
      "eval_vitaminc-pairs_steps_per_second": 1.394,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_negation-triplets_loss": 5.244063854217529,
      "eval_negation-triplets_runtime": 0.293,
      "eval_negation-triplets_samples_per_second": 218.403,
      "eval_negation-triplets_steps_per_second": 3.413,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_scitail-pairs-pos_loss": 1.975368618965149,
      "eval_scitail-pairs-pos_runtime": 0.3648,
      "eval_scitail-pairs-pos_samples_per_second": 148.035,
      "eval_scitail-pairs-pos_steps_per_second": 2.741,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_xsum-pairs_loss": 6.168678283691406,
      "eval_xsum-pairs_runtime": 3.3419,
      "eval_xsum-pairs_samples_per_second": 38.302,
      "eval_xsum-pairs_steps_per_second": 0.598,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_sciq_pairs_loss": 0.9838346838951111,
      "eval_sciq_pairs_runtime": 3.3684,
      "eval_sciq_pairs_samples_per_second": 38.0,
      "eval_sciq_pairs_steps_per_second": 0.594,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_qasc_pairs_loss": 6.569870471954346,
      "eval_qasc_pairs_runtime": 0.6748,
      "eval_qasc_pairs_samples_per_second": 189.699,
      "eval_qasc_pairs_steps_per_second": 2.964,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_openbookqa_pairs_loss": 4.515223979949951,
      "eval_openbookqa_pairs_runtime": 0.5729,
      "eval_openbookqa_pairs_samples_per_second": 223.438,
      "eval_openbookqa_pairs_steps_per_second": 3.491,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_msmarco_pairs_loss": 7.998310565948486,
      "eval_msmarco_pairs_runtime": 1.2044,
      "eval_msmarco_pairs_samples_per_second": 106.278,
      "eval_msmarco_pairs_steps_per_second": 1.661,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_nq_pairs_loss": 8.03341007232666,
      "eval_nq_pairs_runtime": 2.7885,
      "eval_nq_pairs_samples_per_second": 45.903,
      "eval_nq_pairs_steps_per_second": 0.717,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_trivia_pairs_loss": 6.768779754638672,
      "eval_trivia_pairs_runtime": 4.0886,
      "eval_trivia_pairs_samples_per_second": 29.105,
      "eval_trivia_pairs_steps_per_second": 0.489,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_gooaq_pairs_loss": 6.694434642791748,
      "eval_gooaq_pairs_runtime": 0.9221,
      "eval_gooaq_pairs_samples_per_second": 138.819,
      "eval_gooaq_pairs_steps_per_second": 2.169,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "eval_paws-pos_loss": 2.3459720611572266,
      "eval_paws-pos_runtime": 0.7324,
      "eval_paws-pos_samples_per_second": 174.765,
      "eval_paws-pos_steps_per_second": 2.731,
      "step": 1
    }
  ],
  "logging_steps": 1,
  "max_steps": 2,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 320,
  "trial_name": null,
  "trial_params": null
}