{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.7254590795737927, "global_step": 400, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02, "learning_rate": 0.0001, "loss": 1.1655, "step": 10 }, { "epoch": 0.04, "learning_rate": 0.0001, "loss": 1.001, "step": 20 }, { "epoch": 0.05, "learning_rate": 0.0001, "loss": 1.0287, "step": 30 }, { "epoch": 0.07, "learning_rate": 0.0001, "loss": 1.1578, "step": 40 }, { "epoch": 0.09, "learning_rate": 0.0001, "loss": 1.2146, "step": 50 }, { "epoch": 0.11, "learning_rate": 0.0001, "loss": 0.997, "step": 60 }, { "epoch": 0.13, "learning_rate": 0.0001, "loss": 0.9024, "step": 70 }, { "epoch": 0.15, "learning_rate": 0.0001, "loss": 0.9901, "step": 80 }, { "epoch": 0.16, "learning_rate": 0.0001, "loss": 1.1264, "step": 90 }, { "epoch": 0.18, "learning_rate": 0.0001, "loss": 1.2038, "step": 100 }, { "epoch": 0.2, "learning_rate": 0.0001, "loss": 0.8935, "step": 110 }, { "epoch": 0.22, "learning_rate": 0.0001, "loss": 0.9178, "step": 120 }, { "epoch": 0.24, "learning_rate": 0.0001, "loss": 0.9746, "step": 130 }, { "epoch": 0.25, "learning_rate": 0.0001, "loss": 1.1566, "step": 140 }, { "epoch": 0.27, "learning_rate": 0.0001, "loss": 1.2877, "step": 150 }, { "epoch": 0.29, "learning_rate": 0.0001, "loss": 0.9146, "step": 160 }, { "epoch": 0.31, "learning_rate": 0.0001, "loss": 0.8895, "step": 170 }, { "epoch": 0.33, "learning_rate": 0.0001, "loss": 1.0121, "step": 180 }, { "epoch": 0.34, "eval_loss": 1.0215636491775513, "eval_runtime": 950.138, "eval_samples_per_second": 1.052, "eval_steps_per_second": 1.052, "step": 187 }, { "epoch": 0.34, "mmlu_eval_accuracy": 0.731892294851104, "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182, "mmlu_eval_accuracy_anatomy": 0.7857142857142857, "mmlu_eval_accuracy_astronomy": 0.6875, "mmlu_eval_accuracy_business_ethics": 0.7272727272727273, "mmlu_eval_accuracy_clinical_knowledge": 0.896551724137931, "mmlu_eval_accuracy_college_biology": 0.875, "mmlu_eval_accuracy_college_chemistry": 0.5, "mmlu_eval_accuracy_college_computer_science": 0.7272727272727273, "mmlu_eval_accuracy_college_mathematics": 0.45454545454545453, "mmlu_eval_accuracy_college_medicine": 0.9090909090909091, "mmlu_eval_accuracy_college_physics": 0.6363636363636364, "mmlu_eval_accuracy_computer_security": 0.6363636363636364, "mmlu_eval_accuracy_conceptual_physics": 0.6538461538461539, "mmlu_eval_accuracy_econometrics": 0.8333333333333334, "mmlu_eval_accuracy_electrical_engineering": 0.8125, "mmlu_eval_accuracy_elementary_mathematics": 0.7073170731707317, "mmlu_eval_accuracy_formal_logic": 0.5714285714285714, "mmlu_eval_accuracy_global_facts": 0.5, "mmlu_eval_accuracy_high_school_biology": 0.8125, "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091, "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666, "mmlu_eval_accuracy_high_school_european_history": 0.7777777777777778, "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091, "mmlu_eval_accuracy_high_school_government_and_politics": 0.9523809523809523, "mmlu_eval_accuracy_high_school_macroeconomics": 0.7441860465116279, "mmlu_eval_accuracy_high_school_mathematics": 0.3793103448275862, "mmlu_eval_accuracy_high_school_microeconomics": 0.9615384615384616, "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882, "mmlu_eval_accuracy_high_school_psychology": 0.9333333333333333, "mmlu_eval_accuracy_high_school_statistics": 0.6956521739130435, 
"mmlu_eval_accuracy_high_school_us_history": 0.9090909090909091, "mmlu_eval_accuracy_high_school_world_history": 0.8076923076923077, "mmlu_eval_accuracy_human_aging": 0.8260869565217391, "mmlu_eval_accuracy_human_sexuality": 0.6666666666666666, "mmlu_eval_accuracy_international_law": 1.0, "mmlu_eval_accuracy_jurisprudence": 0.6363636363636364, "mmlu_eval_accuracy_logical_fallacies": 0.7777777777777778, "mmlu_eval_accuracy_machine_learning": 0.5454545454545454, "mmlu_eval_accuracy_management": 0.9090909090909091, "mmlu_eval_accuracy_marketing": 0.88, "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091, "mmlu_eval_accuracy_miscellaneous": 0.7906976744186046, "mmlu_eval_accuracy_moral_disputes": 0.8157894736842105, "mmlu_eval_accuracy_moral_scenarios": 0.59, "mmlu_eval_accuracy_nutrition": 0.7878787878787878, "mmlu_eval_accuracy_philosophy": 0.7941176470588235, "mmlu_eval_accuracy_prehistory": 0.8285714285714286, "mmlu_eval_accuracy_professional_accounting": 0.6451612903225806, "mmlu_eval_accuracy_professional_law": 0.6294117647058823, "mmlu_eval_accuracy_professional_medicine": 0.8064516129032258, "mmlu_eval_accuracy_professional_psychology": 0.8115942028985508, "mmlu_eval_accuracy_public_relations": 0.6666666666666666, "mmlu_eval_accuracy_security_studies": 0.8148148148148148, "mmlu_eval_accuracy_sociology": 0.9545454545454546, "mmlu_eval_accuracy_us_foreign_policy": 1.0, "mmlu_eval_accuracy_virology": 0.5, "mmlu_eval_accuracy_world_religions": 0.8421052631578947, "mmlu_loss": 1.326305795171384, "step": 187 }, { "epoch": 0.34, "learning_rate": 0.0001, "loss": 1.1133, "step": 190 }, { "epoch": 0.36, "learning_rate": 0.0001, "loss": 1.2485, "step": 200 }, { "epoch": 0.38, "learning_rate": 0.0001, "loss": 0.9653, "step": 210 }, { "epoch": 0.4, "learning_rate": 0.0001, "loss": 0.9455, "step": 220 }, { "epoch": 0.42, "learning_rate": 0.0001, "loss": 1.0373, "step": 230 }, { "epoch": 0.44, "learning_rate": 0.0001, "loss": 1.1425, "step": 240 }, { "epoch": 0.45, "learning_rate": 0.0001, "loss": 1.3136, "step": 250 }, { "epoch": 0.47, "learning_rate": 0.0001, "loss": 0.8695, "step": 260 }, { "epoch": 0.49, "learning_rate": 0.0001, "loss": 0.872, "step": 270 }, { "epoch": 0.51, "learning_rate": 0.0001, "loss": 1.0152, "step": 280 }, { "epoch": 0.53, "learning_rate": 0.0001, "loss": 1.1309, "step": 290 }, { "epoch": 0.54, "learning_rate": 0.0001, "loss": 1.267, "step": 300 }, { "epoch": 0.56, "learning_rate": 0.0001, "loss": 0.9249, "step": 310 }, { "epoch": 0.58, "learning_rate": 0.0001, "loss": 0.9148, "step": 320 }, { "epoch": 0.6, "learning_rate": 0.0001, "loss": 0.9864, "step": 330 }, { "epoch": 0.62, "learning_rate": 0.0001, "loss": 1.2312, "step": 340 }, { "epoch": 0.63, "learning_rate": 0.0001, "loss": 1.2354, "step": 350 }, { "epoch": 0.65, "learning_rate": 0.0001, "loss": 0.9126, "step": 360 }, { "epoch": 0.67, "learning_rate": 0.0001, "loss": 0.9213, "step": 370 }, { "epoch": 0.68, "eval_loss": 1.0163359642028809, "eval_runtime": 948.1151, "eval_samples_per_second": 1.055, "eval_steps_per_second": 1.055, "step": 374 }, { "epoch": 0.68, "mmlu_eval_accuracy": 0.7395476061435284, "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727, "mmlu_eval_accuracy_anatomy": 0.7857142857142857, "mmlu_eval_accuracy_astronomy": 0.75, "mmlu_eval_accuracy_business_ethics": 0.7272727272727273, "mmlu_eval_accuracy_clinical_knowledge": 0.896551724137931, "mmlu_eval_accuracy_college_biology": 0.875, "mmlu_eval_accuracy_college_chemistry": 0.5, "mmlu_eval_accuracy_college_computer_science": 
0.7272727272727273, "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365, "mmlu_eval_accuracy_college_medicine": 0.9090909090909091, "mmlu_eval_accuracy_college_physics": 0.6363636363636364, "mmlu_eval_accuracy_computer_security": 0.6363636363636364, "mmlu_eval_accuracy_conceptual_physics": 0.5769230769230769, "mmlu_eval_accuracy_econometrics": 0.8333333333333334, "mmlu_eval_accuracy_electrical_engineering": 0.875, "mmlu_eval_accuracy_elementary_mathematics": 0.6829268292682927, "mmlu_eval_accuracy_formal_logic": 0.6428571428571429, "mmlu_eval_accuracy_global_facts": 0.5, "mmlu_eval_accuracy_high_school_biology": 0.8125, "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453, "mmlu_eval_accuracy_high_school_computer_science": 0.7777777777777778, "mmlu_eval_accuracy_high_school_european_history": 0.7777777777777778, "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091, "mmlu_eval_accuracy_high_school_government_and_politics": 0.9523809523809523, "mmlu_eval_accuracy_high_school_macroeconomics": 0.7674418604651163, "mmlu_eval_accuracy_high_school_mathematics": 0.41379310344827586, "mmlu_eval_accuracy_high_school_microeconomics": 0.9615384615384616, "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882, "mmlu_eval_accuracy_high_school_psychology": 0.95, "mmlu_eval_accuracy_high_school_statistics": 0.7391304347826086, "mmlu_eval_accuracy_high_school_us_history": 0.9090909090909091, "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693, "mmlu_eval_accuracy_human_aging": 0.8260869565217391, "mmlu_eval_accuracy_human_sexuality": 0.6666666666666666, "mmlu_eval_accuracy_international_law": 1.0, "mmlu_eval_accuracy_jurisprudence": 0.6363636363636364, "mmlu_eval_accuracy_logical_fallacies": 0.7777777777777778, "mmlu_eval_accuracy_machine_learning": 0.5454545454545454, "mmlu_eval_accuracy_management": 0.9090909090909091, "mmlu_eval_accuracy_marketing": 0.92, "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091, "mmlu_eval_accuracy_miscellaneous": 0.7790697674418605, "mmlu_eval_accuracy_moral_disputes": 0.8157894736842105, "mmlu_eval_accuracy_moral_scenarios": 0.57, "mmlu_eval_accuracy_nutrition": 0.7272727272727273, "mmlu_eval_accuracy_philosophy": 0.7941176470588235, "mmlu_eval_accuracy_prehistory": 0.8571428571428571, "mmlu_eval_accuracy_professional_accounting": 0.6774193548387096, "mmlu_eval_accuracy_professional_law": 0.6411764705882353, "mmlu_eval_accuracy_professional_medicine": 0.8387096774193549, "mmlu_eval_accuracy_professional_psychology": 0.8115942028985508, "mmlu_eval_accuracy_public_relations": 0.6666666666666666, "mmlu_eval_accuracy_security_studies": 0.8148148148148148, "mmlu_eval_accuracy_sociology": 0.9545454545454546, "mmlu_eval_accuracy_us_foreign_policy": 1.0, "mmlu_eval_accuracy_virology": 0.5, "mmlu_eval_accuracy_world_religions": 0.8947368421052632, "mmlu_loss": 1.2796503596061355, "step": 374 }, { "epoch": 0.69, "learning_rate": 0.0001, "loss": 0.9737, "step": 380 }, { "epoch": 0.71, "learning_rate": 0.0001, "loss": 1.157, "step": 390 }, { "epoch": 0.73, "learning_rate": 0.0001, "loss": 1.2106, "step": 400 } ], "max_steps": 1875, "num_train_epochs": 4, "total_flos": 4.626609292910592e+17, "trial_name": null, "trial_params": null }