{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4289458990097046,
"min": 1.3331218957901,
"max": 3.295753002166748,
"count": 2909
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28441.740234375,
"min": 14116.41015625,
"max": 134564.359375,
"count": 2909
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 72.63636363636364,
"min": 38.728,
"max": 999.0,
"count": 2909
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19176.0,
"min": 1368.0,
"max": 33472.0,
"count": 2909
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1633.2024141553666,
"min": 1197.4528454987494,
"max": 1700.748415074803,
"count": 2894
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 215582.71866850837,
"min": 2395.855516605603,
"max": 399412.78237338376,
"count": 2894
},
"SoccerTwos.Step.mean": {
"value": 29089880.0,
"min": 9034.0,
"max": 29089880.0,
"count": 2909
},
"SoccerTwos.Step.sum": {
"value": 29089880.0,
"min": 9034.0,
"max": 29089880.0,
"count": 2909
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.016269130632281303,
"min": -0.14047899842262268,
"max": 0.18399012088775635,
"count": 2909
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.1475253105163574,
"min": -23.84164047241211,
"max": 26.68563461303711,
"count": 2909
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01794874481856823,
"min": -0.1390056312084198,
"max": 0.18372780084609985,
"count": 2909
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.369234323501587,
"min": -24.460636138916016,
"max": 26.59808349609375,
"count": 2909
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2909
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2909
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.04186969814878522,
"min": -0.5260500013828278,
"max": 0.5829966128882715,
"count": 2909
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.526800155639648,
"min": -81.90319991111755,
"max": 53.70960021018982,
"count": 2909
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.04186969814878522,
"min": -0.5260500013828278,
"max": 0.5829966128882715,
"count": 2909
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.526800155639648,
"min": -81.90319991111755,
"max": 53.70960021018982,
"count": 2909
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2909
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2909
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017092395084910096,
"min": 0.010334619101680194,
"max": 0.02452780718158465,
"count": 1409
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017092395084910096,
"min": 0.010334619101680194,
"max": 0.02452780718158465,
"count": 1409
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08425002073248228,
"min": 4.625412599731741e-05,
"max": 0.132067218174537,
"count": 1409
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08425002073248228,
"min": 4.625412599731741e-05,
"max": 0.132067218174537,
"count": 1409
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0861614927649498,
"min": 4.70900532794379e-05,
"max": 0.13552140071988106,
"count": 1409
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0861614927649498,
"min": 4.70900532794379e-05,
"max": 0.13552140071988106,
"count": 1409
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1409
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1409
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 1409
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 1409
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 1409
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 1409
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691727176",
"python_version": "3.9.17 (main, Jul 5 2023, 21:05:34) \n[GCC 11.2.0]",
"command_line_arguments": "/home/renatostrianese/anaconda3/envs/RenatoTF/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwo/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691805708"
},
"total": 78532.367960672,
"count": 1,
"self": 0.06609775401011575,
"children": {
"run_training.setup": {
"total": 0.03313046699622646,
"count": 1,
"self": 0.03313046699622646
},
"TrainerController.start_learning": {
"total": 78532.26873245099,
"count": 1,
"self": 47.53482574840018,
"children": {
"TrainerController._reset_env": {
"total": 4.610130590015615,
"count": 146,
"self": 4.610130590015615
},
"TrainerController.advance": {
"total": 78479.86887295157,
"count": 2012113,
"self": 46.898810708808014,
"children": {
"env_step": {
"total": 35817.85841291145,
"count": 2012113,
"self": 30057.72681538717,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5729.997554799687,
"count": 2012113,
"self": 223.7001909118626,
"children": {
"TorchPolicy.evaluate": {
"total": 5506.297363887825,
"count": 3654072,
"self": 5506.297363887825
}
}
},
"workers": {
"total": 30.13404272459593,
"count": 2012112,
"self": 0.0,
"children": {
"worker_root": {
"total": 78466.91669943526,
"count": 2012112,
"is_parallel": true,
"self": 53690.49747905135,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0031258160015568137,
"count": 2,
"is_parallel": true,
"self": 0.0009946089994627982,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021312070020940155,
"count": 8,
"is_parallel": true,
"self": 0.0021312070020940155
}
}
},
"UnityEnvironment.step": {
"total": 0.028206431001308374,
"count": 1,
"is_parallel": true,
"self": 0.0005391929953475483,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004178079980192706,
"count": 1,
"is_parallel": true,
"self": 0.0004178079980192706
},
"communicator.exchange": {
"total": 0.025404737003555056,
"count": 1,
"is_parallel": true,
"self": 0.025404737003555056
},
"steps_from_proto": {
"total": 0.0018446930043864995,
"count": 2,
"is_parallel": true,
"self": 0.0004180240066489205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001426668997737579,
"count": 8,
"is_parallel": true,
"self": 0.001426668997737579
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 24776.102041746977,
"count": 2012111,
"is_parallel": true,
"self": 1435.6359346336321,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1037.9718928028597,
"count": 2012111,
"is_parallel": true,
"self": 1037.9718928028597
},
"communicator.exchange": {
"total": 17808.430640130806,
"count": 2012111,
"is_parallel": true,
"self": 17808.430640130806
},
"steps_from_proto": {
"total": 4494.06357417968,
"count": 4024222,
"is_parallel": true,
"self": 863.1041507397385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3630.959423439941,
"count": 16096888,
"is_parallel": true,
"self": 3630.959423439941
}
}
}
}
},
"steps_from_proto": {
"total": 0.3171786369275651,
"count": 290,
"is_parallel": true,
"self": 0.06221032103348989,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.2549683158940752,
"count": 1160,
"is_parallel": true,
"self": 0.2549683158940752
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 42615.11164933131,
"count": 2012112,
"self": 298.4929725266047,
"children": {
"process_trajectory": {
"total": 6410.776937356939,
"count": 2012112,
"self": 6396.632048044943,
"children": {
"RLTrainer._checkpoint": {
"total": 14.14488931199594,
"count": 58,
"self": 14.14488931199594
}
}
},
"_update_policy": {
"total": 35905.841739447766,
"count": 1409,
"self": 4567.986399615293,
"children": {
"TorchPOCAOptimizer.update": {
"total": 31337.855339832473,
"count": 42285,
"self": 31337.855339832473
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.69100530911237e-06,
"count": 1,
"self": 1.69100530911237e-06
},
"TrainerController._save_models": {
"total": 0.2549014700052794,
"count": 1,
"self": 0.002151414009858854,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25275005599542055,
"count": 1,
"self": 0.25275005599542055
}
}
}
}
}
}
}