{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5255776643753052,
"min": 1.5183714628219604,
"max": 3.2957606315612793,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 29046.998046875,
"min": 28250.064453125,
"max": 112223.703125,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 48.94059405940594,
"min": 38.2578125,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19772.0,
"min": 16088.0,
"max": 23724.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1622.9607940978499,
"min": 1199.4106211620376,
"max": 1643.0575580540906,
"count": 990
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 327838.0804077657,
"min": 2401.7801687024275,
"max": 397508.9409572698,
"count": 990
},
"SoccerTwos.Step.mean": {
"value": 9999925.0,
"min": 9866.0,
"max": 9999925.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999925.0,
"min": 9866.0,
"max": 9999925.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.04324677586555481,
"min": -0.1383940726518631,
"max": 0.2275785356760025,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -8.735848426818848,
"min": -31.83063507080078,
"max": 26.886112213134766,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.042128488421440125,
"min": -0.12864451110363007,
"max": 0.2209123969078064,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -8.509954452514648,
"min": -29.58823585510254,
"max": 27.674062728881836,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.16663960271542616,
"min": -0.6135515155214252,
"max": 0.7308911781977204,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -33.66119974851608,
"min": -73.58920013904572,
"max": 80.68219953775406,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.16663960271542616,
"min": -0.6135515155214252,
"max": 0.7308911781977204,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -33.66119974851608,
"min": -73.58920013904572,
"max": 80.68219953775406,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019484805331255,
"min": 0.010612231803437074,
"max": 0.0248067281107069,
"count": 483
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019484805331255,
"min": 0.010612231803437074,
"max": 0.0248067281107069,
"count": 483
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11746823241313299,
"min": 2.2673500128197097e-05,
"max": 0.12794503470261892,
"count": 483
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11746823241313299,
"min": 2.2673500128197097e-05,
"max": 0.12794503470261892,
"count": 483
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11954111034671465,
"min": 2.245076090427271e-05,
"max": 0.13084006160497666,
"count": 483
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11954111034671465,
"min": 2.245076090427271e-05,
"max": 0.13084006160497666,
"count": 483
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 483
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 483
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 483
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 483
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 483
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 483
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679423542",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/home/michal/Documents/unit7/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679442739"
},
"total": 19197.637692959997,
"count": 1,
"self": 0.32047661999604315,
"children": {
"run_training.setup": {
"total": 0.01722705100019084,
"count": 1,
"self": 0.01722705100019084
},
"TrainerController.start_learning": {
"total": 19197.299989289,
"count": 1,
"self": 15.622600181752205,
"children": {
"TrainerController._reset_env": {
"total": 4.727446486004737,
"count": 50,
"self": 4.727446486004737
},
"TrainerController.advance": {
"total": 19176.76755325224,
"count": 690878,
"self": 13.91997850136977,
"children": {
"env_step": {
"total": 15367.255228750888,
"count": 690878,
"self": 12678.044168482655,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2679.0532795858107,
"count": 690878,
"self": 71.85565052560742,
"children": {
"TorchPolicy.evaluate": {
"total": 2607.1976290602033,
"count": 1260278,
"self": 2607.1976290602033
}
}
},
"workers": {
"total": 10.157780682421617,
"count": 690878,
"self": 0.0,
"children": {
"worker_root": {
"total": 19168.520380427828,
"count": 690878,
"is_parallel": true,
"self": 8429.969639540843,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003757190000214905,
"count": 2,
"is_parallel": true,
"self": 0.0009251029987353832,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002832087001479522,
"count": 8,
"is_parallel": true,
"self": 0.002832087001479522
}
}
},
"UnityEnvironment.step": {
"total": 0.03183516100034467,
"count": 1,
"is_parallel": true,
"self": 0.001155119999566523,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008424509996984852,
"count": 1,
"is_parallel": true,
"self": 0.0008424509996984852
},
"communicator.exchange": {
"total": 0.026815002000148525,
"count": 1,
"is_parallel": true,
"self": 0.026815002000148525
},
"steps_from_proto": {
"total": 0.0030225880009311368,
"count": 2,
"is_parallel": true,
"self": 0.0005527390021597967,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00246984899877134,
"count": 8,
"is_parallel": true,
"self": 0.00246984899877134
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 10738.416116440978,
"count": 690877,
"is_parallel": true,
"self": 698.1550593481606,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 431.094126553573,
"count": 690877,
"is_parallel": true,
"self": 431.094126553573
},
"communicator.exchange": {
"total": 7698.778584594313,
"count": 690877,
"is_parallel": true,
"self": 7698.778584594313
},
"steps_from_proto": {
"total": 1910.3883459449316,
"count": 1381754,
"is_parallel": true,
"self": 343.6807282194068,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1566.7076177255249,
"count": 5527016,
"is_parallel": true,
"self": 1566.7076177255249
}
}
}
}
},
"steps_from_proto": {
"total": 0.1346244460082744,
"count": 98,
"is_parallel": true,
"self": 0.024240914005531522,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.11038353200274287,
"count": 392,
"is_parallel": true,
"self": 0.11038353200274287
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3795.592345999982,
"count": 690878,
"self": 121.0882979637745,
"children": {
"process_trajectory": {
"total": 1541.5585749082156,
"count": 690878,
"self": 1537.6997836962182,
"children": {
"RLTrainer._checkpoint": {
"total": 3.8587912119974135,
"count": 20,
"self": 3.8587912119974135
}
}
},
"_update_policy": {
"total": 2132.945473127992,
"count": 483,
"self": 1416.046193615126,
"children": {
"TorchPOCAOptimizer.update": {
"total": 716.8992795128661,
"count": 14490,
"self": 716.8992795128661
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.610004442743957e-07,
"count": 1,
"self": 8.610004442743957e-07
},
"TrainerController._save_models": {
"total": 0.1823885080011678,
"count": 1,
"self": 0.0014588049998565111,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18092970300131128,
"count": 1,
"self": 0.18092970300131128
}
}
}
}
}
}
}