{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9744600653648376,
"min": 0.9744600653648376,
"max": 2.8488054275512695,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9325.5830078125,
"min": 9325.5830078125,
"max": 29237.2890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.084089279174805,
"min": 0.23476101458072662,
"max": 12.084089279174805,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2356.3974609375,
"min": 45.543636322021484,
"max": 2434.11279296875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07091363758010238,
"min": 0.06125282533535295,
"max": 0.07358497881004773,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28365455032040954,
"min": 0.2450113013414118,
"max": 0.36792489405023865,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1915175770865936,
"min": 0.121728643305901,
"max": 0.2906159602251707,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7660703083463744,
"min": 0.486914573223604,
"max": 1.4150864303696389,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.5,
"min": 3.477272727272727,
"max": 24.054545454545455,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1034.0,
"min": 153.0,
"max": 1323.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.5,
"min": 3.477272727272727,
"max": 24.054545454545455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1034.0,
"min": 153.0,
"max": 1323.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702310769",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702311142"
},
"total": 373.677748417,
"count": 1,
"self": 0.27313537200006976,
"children": {
"run_training.setup": {
"total": 0.0610273209999832,
"count": 1,
"self": 0.0610273209999832
},
"TrainerController.start_learning": {
"total": 373.3435857239999,
"count": 1,
"self": 0.5157246070109522,
"children": {
"TrainerController._reset_env": {
"total": 2.9644421190000685,
"count": 1,
"self": 2.9644421190000685
},
"TrainerController.advance": {
"total": 369.771936833989,
"count": 18201,
"self": 0.23707159998480165,
"children": {
"env_step": {
"total": 369.5348652340042,
"count": 18201,
"self": 249.45772117800288,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.8439313510055,
"count": 18201,
"self": 1.2846592030026613,
"children": {
"TorchPolicy.evaluate": {
"total": 118.55927214800283,
"count": 18201,
"self": 118.55927214800283
}
}
},
"workers": {
"total": 0.2332127049958217,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 372.7004824549998,
"count": 18201,
"is_parallel": true,
"self": 188.08667337799216,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004986751000046752,
"count": 1,
"is_parallel": true,
"self": 0.003495035000014468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014917160000322838,
"count": 10,
"is_parallel": true,
"self": 0.0014917160000322838
}
}
},
"UnityEnvironment.step": {
"total": 0.025909436999995705,
"count": 1,
"is_parallel": true,
"self": 0.0003599499999609179,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002657900000713198,
"count": 1,
"is_parallel": true,
"self": 0.0002657900000713198
},
"communicator.exchange": {
"total": 0.024081991000002745,
"count": 1,
"is_parallel": true,
"self": 0.024081991000002745
},
"steps_from_proto": {
"total": 0.0012017059999607227,
"count": 1,
"is_parallel": true,
"self": 0.00026500999990730634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009366960000534164,
"count": 10,
"is_parallel": true,
"self": 0.0009366960000534164
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 184.61380907700766,
"count": 18200,
"is_parallel": true,
"self": 7.356413577984995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.9731682990125137,
"count": 18200,
"is_parallel": true,
"self": 3.9731682990125137
},
"communicator.exchange": {
"total": 149.15801727800283,
"count": 18200,
"is_parallel": true,
"self": 149.15801727800283
},
"steps_from_proto": {
"total": 24.12620992200732,
"count": 18200,
"is_parallel": true,
"self": 4.607703199952994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 19.518506722054326,
"count": 182000,
"is_parallel": true,
"self": 19.518506722054326
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00030662800008940394,
"count": 1,
"self": 0.00030662800008940394,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 368.39105461600195,
"count": 271761,
"is_parallel": true,
"self": 4.5591762460007885,
"children": {
"process_trajectory": {
"total": 208.60139613200113,
"count": 271761,
"is_parallel": true,
"self": 208.13568943100108,
"children": {
"RLTrainer._checkpoint": {
"total": 0.46570670100004463,
"count": 4,
"is_parallel": true,
"self": 0.46570670100004463
}
}
},
"_update_policy": {
"total": 155.23048223800004,
"count": 90,
"is_parallel": true,
"self": 34.62522150999507,
"children": {
"TorchPPOOptimizer.update": {
"total": 120.60526072800496,
"count": 4587,
"is_parallel": true,
"self": 120.60526072800496
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09117553599980965,
"count": 1,
"self": 0.0009293829998568981,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09024615299995276,
"count": 1,
"self": 0.09024615299995276
}
}
}
}
}
}
}