ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406204342842102,
"min": 1.406204342842102,
"max": 1.4276167154312134,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70097.8828125,
"min": 69189.9453125,
"max": 77839.0703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 97.85854616895874,
"min": 91.63754646840148,
"max": 401.464,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49810.0,
"min": 48855.0,
"max": 50183.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999597.0,
"min": 49909.0,
"max": 1999597.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999597.0,
"min": 49909.0,
"max": 1999597.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.390012502670288,
"min": -0.0055025615729391575,
"max": 2.404435634613037,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1216.516357421875,
"min": -0.6823176145553589,
"max": 1251.265380859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.689209478543175,
"min": 1.9693418801311524,
"max": 3.9286821022247658,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1877.807624578476,
"min": 244.1983931362629,
"max": 1972.9066988229752,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.689209478543175,
"min": 1.9693418801311524,
"max": 3.9286821022247658,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1877.807624578476,
"min": 244.1983931362629,
"max": 1972.9066988229752,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018707517991424536,
"min": 0.014190022127392392,
"max": 0.019930612045573073,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05612255397427361,
"min": 0.028380044254784784,
"max": 0.05612255397427361,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04992914117044873,
"min": 0.022213439860691627,
"max": 0.06148137768937482,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1497874235113462,
"min": 0.044426879721383254,
"max": 0.18444413306812446,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2789989070333293e-06,
"min": 3.2789989070333293e-06,
"max": 0.00029534377655207497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.836996721099988e-06,
"min": 9.836996721099988e-06,
"max": 0.0008441673186108998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10109296666666669,
"min": 0.10109296666666669,
"max": 0.19844792500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032789000000001,
"min": 0.20739265,
"max": 0.5813891,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.453903666666661e-05,
"min": 6.453903666666661e-05,
"max": 0.004922551457500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019361710999999982,
"min": 0.00019361710999999982,
"max": 0.014071316090000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675852669",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675855060"
},
"total": 2391.220288294,
"count": 1,
"self": 0.4379427169997143,
"children": {
"run_training.setup": {
"total": 0.11690351400000054,
"count": 1,
"self": 0.11690351400000054
},
"TrainerController.start_learning": {
"total": 2390.6654420630002,
"count": 1,
"self": 4.210269567931391,
"children": {
"TrainerController._reset_env": {
"total": 12.510547397000039,
"count": 1,
"self": 12.510547397000039
},
"TrainerController.advance": {
"total": 2373.8117115170685,
"count": 231826,
"self": 4.710385647922067,
"children": {
"env_step": {
"total": 1846.370662411045,
"count": 231826,
"self": 1544.1890845290611,
"children": {
"SubprocessEnvManager._take_step": {
"total": 299.4296085330621,
"count": 231826,
"self": 15.825889788038296,
"children": {
"TorchPolicy.evaluate": {
"total": 283.6037187450238,
"count": 222874,
"self": 70.82066009892424,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.78305864609956,
"count": 222874,
"self": 212.78305864609956
}
}
}
}
},
"workers": {
"total": 2.7519693489218184,
"count": 231826,
"self": 0.0,
"children": {
"worker_root": {
"total": 2382.154821192921,
"count": 231826,
"is_parallel": true,
"self": 1123.0488062360541,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006154170999991493,
"count": 1,
"is_parallel": true,
"self": 0.0003571490000240374,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0057970219999674555,
"count": 2,
"is_parallel": true,
"self": 0.0057970219999674555
}
}
},
"UnityEnvironment.step": {
"total": 0.02795126899997058,
"count": 1,
"is_parallel": true,
"self": 0.000349717999938548,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019868300000780437,
"count": 1,
"is_parallel": true,
"self": 0.00019868300000780437
},
"communicator.exchange": {
"total": 0.02666110400002708,
"count": 1,
"is_parallel": true,
"self": 0.02666110400002708
},
"steps_from_proto": {
"total": 0.000741763999997147,
"count": 1,
"is_parallel": true,
"self": 0.00025909099997534213,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004826730000218049,
"count": 2,
"is_parallel": true,
"self": 0.0004826730000218049
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1259.106014956867,
"count": 231825,
"is_parallel": true,
"self": 39.038242989091714,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.78630707290995,
"count": 231825,
"is_parallel": true,
"self": 78.78630707290995
},
"communicator.exchange": {
"total": 1044.318062866896,
"count": 231825,
"is_parallel": true,
"self": 1044.318062866896
},
"steps_from_proto": {
"total": 96.96340202796938,
"count": 231825,
"is_parallel": true,
"self": 37.75331466292835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.21008736504103,
"count": 463650,
"is_parallel": true,
"self": 59.21008736504103
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 522.7306634581016,
"count": 231826,
"self": 6.564328927055612,
"children": {
"process_trajectory": {
"total": 162.11563847104787,
"count": 231826,
"self": 160.8711598560485,
"children": {
"RLTrainer._checkpoint": {
"total": 1.244478614999366,
"count": 10,
"self": 1.244478614999366
}
}
},
"_update_policy": {
"total": 354.0506960599982,
"count": 97,
"self": 295.88804882599186,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.16264723400633,
"count": 2910,
"self": 58.16264723400633
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2059999789926223e-06,
"count": 1,
"self": 1.2059999789926223e-06
},
"TrainerController._save_models": {
"total": 0.13291237500015995,
"count": 1,
"self": 0.002360478000355215,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13055189699980474,
"count": 1,
"self": 0.13055189699980474
}
}
}
}
}
}
}
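
The run log ends above. As a minimal sketch (not part of the original logs), the snippet below shows one way to load and summarize a timers.json of this shape using only the Python standard library. The relative path run_logs/timers.json is an assumption based on this repository's layout; the key names (gauges, total, count, children) are taken directly from the JSON above.

# Minimal sketch: summarize an ML-Agents timers.json (path is an assumption).
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Print each gauge's final value and its min/max over the run
# (e.g. cumulative reward, policy/value losses, learning rate).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, count={gauge['count']})")

# Walk the hierarchical timer tree and report where wall-clock time was spent.
def walk(node, label="root", depth=0):
    total = node.get("total", 0.0)
    print(f"{'  ' * depth}{label}: {total:.1f}s over {node.get('count', 0)} call(s)")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(timers)

Running this against the log above would, for example, report roughly 2391 s total, with most of it spent under TrainerController.advance (env_step and trainer_advance).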