{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3981397151947021,
"min": 1.3981397151947021,
"max": 1.4256253242492676,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69395.265625,
"min": 69125.1796875,
"max": 77034.265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 70.72453371592539,
"min": 70.2867332382311,
"max": 404.46774193548384,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49295.0,
"min": 48616.0,
"max": 50154.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999971.0,
"min": 49867.0,
"max": 1999971.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999971.0,
"min": 49867.0,
"max": 1999971.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.484127998352051,
"min": 0.0479750819504261,
"max": 2.504026174545288,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1731.4371337890625,
"min": 5.900935173034668,
"max": 1755.3223876953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8065569126281025,
"min": 1.6769575735902398,
"max": 4.018324973929738,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2653.1701681017876,
"min": 206.2657815515995,
"max": 2698.9440857172012,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8065569126281025,
"min": 1.6769575735902398,
"max": 4.018324973929738,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2653.1701681017876,
"min": 206.2657815515995,
"max": 2698.9440857172012,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017998712366291632,
"min": 0.012518384542878873,
"max": 0.01989235194268986,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0539961370988749,
"min": 0.025036769085757745,
"max": 0.055540875697382336,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05829324478076564,
"min": 0.02271740051607291,
"max": 0.063868173584342,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17487973434229692,
"min": 0.04543480103214582,
"max": 0.19026331529021262,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.8098987300666738e-06,
"min": 3.8098987300666738e-06,
"max": 0.00029533185155605,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1429696190200021e-05,
"min": 1.1429696190200021e-05,
"max": 0.00084422176859275,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126993333333334,
"min": 0.10126993333333334,
"max": 0.19844394999999992,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038098,
"min": 0.20767080000000004,
"max": 0.58140725,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.336967333333346e-05,
"min": 7.336967333333346e-05,
"max": 0.004922353105,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022010902000000037,
"min": 0.00022010902000000037,
"max": 0.014072221775000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672767826",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672770191"
},
"total": 2364.394758072,
"count": 1,
"self": 0.4358390480001617,
"children": {
"run_training.setup": {
"total": 0.10787638599998672,
"count": 1,
"self": 0.10787638599998672
},
"TrainerController.start_learning": {
"total": 2363.851042638,
"count": 1,
"self": 4.002066300060051,
"children": {
"TrainerController._reset_env": {
"total": 7.255175558000019,
"count": 1,
"self": 7.255175558000019
},
"TrainerController.advance": {
"total": 2352.47527567394,
"count": 233609,
"self": 4.416222426948025,
"children": {
"env_step": {
"total": 1856.8616702750535,
"count": 233609,
"self": 1558.876810670189,
"children": {
"SubprocessEnvManager._take_step": {
"total": 295.16118441692265,
"count": 233609,
"self": 15.0858459719326,
"children": {
"TorchPolicy.evaluate": {
"total": 280.07533844499005,
"count": 222912,
"self": 70.06833167909286,
"children": {
"TorchPolicy.sample_actions": {
"total": 210.0070067658972,
"count": 222912,
"self": 210.0070067658972
}
}
}
}
},
"workers": {
"total": 2.8236751879418875,
"count": 233609,
"self": 0.0,
"children": {
"worker_root": {
"total": 2355.578155410907,
"count": 233609,
"is_parallel": true,
"self": 1070.785233419976,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019814409999980853,
"count": 1,
"is_parallel": true,
"self": 0.00044316799994703615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015382730000510492,
"count": 2,
"is_parallel": true,
"self": 0.0015382730000510492
}
}
},
"UnityEnvironment.step": {
"total": 0.029197091000014552,
"count": 1,
"is_parallel": true,
"self": 0.0002910090000796117,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018393699997432122,
"count": 1,
"is_parallel": true,
"self": 0.00018393699997432122
},
"communicator.exchange": {
"total": 0.027906185999995614,
"count": 1,
"is_parallel": true,
"self": 0.027906185999995614
},
"steps_from_proto": {
"total": 0.0008159589999650052,
"count": 1,
"is_parallel": true,
"self": 0.0002544019999390912,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000561557000025914,
"count": 2,
"is_parallel": true,
"self": 0.000561557000025914
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1284.792921990931,
"count": 233608,
"is_parallel": true,
"self": 36.46806741289902,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.09412825499874,
"count": 233608,
"is_parallel": true,
"self": 84.09412825499874
},
"communicator.exchange": {
"total": 1059.9334130729812,
"count": 233608,
"is_parallel": true,
"self": 1059.9334130729812
},
"steps_from_proto": {
"total": 104.2973132500519,
"count": 233608,
"is_parallel": true,
"self": 44.02618730102847,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.27112594902343,
"count": 467216,
"is_parallel": true,
"self": 60.27112594902343
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 491.19738297193834,
"count": 233609,
"self": 6.287767834860745,
"children": {
"process_trajectory": {
"total": 164.10460282207634,
"count": 233609,
"self": 162.8917595250764,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2128432969999494,
"count": 10,
"self": 1.2128432969999494
}
}
},
"_update_policy": {
"total": 320.80501231500125,
"count": 97,
"self": 267.42735138800333,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.37766092699792,
"count": 2910,
"self": 53.37766092699792
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.769999683659989e-07,
"count": 1,
"self": 7.769999683659989e-07
},
"TrainerController._save_models": {
"total": 0.1185243290001381,
"count": 1,
"self": 0.0027926160005335987,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1157317129996045,
"count": 1,
"self": 0.1157317129996045
}
}
}
}
}
}
}