{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8653649687767029,
"min": 0.696636438369751,
"max": 1.4100573062896729,
"count": 21
},
"Pyramids.Policy.Entropy.sum": {
"value": 26237.865234375,
"min": 21066.28515625,
"max": 42775.5,
"count": 21
},
"Pyramids.Step.mean": {
"value": 629950.0,
"min": 29952.0,
"max": 629950.0,
"count": 21
},
"Pyramids.Step.sum": {
"value": 629950.0,
"min": 29952.0,
"max": 629950.0,
"count": 21
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07417815923690796,
"min": -0.16257144510746002,
"max": -0.03592294454574585,
"count": 21
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -17.951114654541016,
"min": -38.52943420410156,
"max": -8.729275703430176,
"count": 21
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.014958946965634823,
"min": 0.013271074742078781,
"max": 0.19380821287631989,
"count": 21
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.620065212249756,
"min": 3.1850578784942627,
"max": 46.901588439941406,
"count": 21
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06910294711418451,
"min": 0.06478099469286235,
"max": 0.07426592190670049,
"count": 21
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9674412595985832,
"min": 0.5035416520170228,
"max": 1.0538863375453125,
"count": 21
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0007485693214026528,
"min": 6.67143164093629e-05,
"max": 0.00487186843172953,
"count": 21
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.010479970499637139,
"min": 0.0008672861133217178,
"max": 0.05996229362685256,
"count": 21
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00023851489906646903,
"min": 0.00023851489906646903,
"max": 0.00029838354339596195,
"count": 21
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0033392085869305664,
"min": 0.0020886848037717336,
"max": 0.0038024436325188,
"count": 21
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.17950495952380954,
"min": 0.17950495952380954,
"max": 0.19946118095238097,
"count": 21
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.5130694333333334,
"min": 1.3962282666666668,
"max": 2.7674636333333336,
"count": 21
},
"Pyramids.Policy.Beta.mean": {
"value": 0.007952545456428572,
"min": 0.007952545456428572,
"max": 0.009946171977142856,
"count": 21
},
"Pyramids.Policy.Beta.sum": {
"value": 0.11133563639,
"min": 0.06962320384,
"max": 0.12676961697,
"count": 21
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01625954546034336,
"min": 0.01625954546034336,
"max": 0.36111146211624146,
"count": 21
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.22763362526893616,
"min": 0.22763362526893616,
"max": 2.527780294418335,
"count": 21
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 966.5,
"min": 882.9705882352941,
"max": 999.0,
"count": 21
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30928.0,
"min": 15984.0,
"max": 33019.0,
"count": 21
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.779668798437342,
"min": -1.0000000521540642,
"max": -0.41301769346875306,
"count": 21
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -24.949401549994946,
"min": -31.998001664876938,
"max": -14.042601577937603,
"count": 21
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.779668798437342,
"min": -1.0000000521540642,
"max": -0.41301769346875306,
"count": 21
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -24.949401549994946,
"min": -31.998001664876938,
"max": -14.042601577937603,
"count": 21
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.16407702981086913,
"min": 0.16407702981086913,
"max": 6.558580571785569,
"count": 21
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.250464953947812,
"min": 5.250464953947812,
"max": 104.9372891485691,
"count": 21
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 21
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 21
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703520289",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703521566"
},
"total": 1276.3284580929999,
"count": 1,
"self": 0.493175557000086,
"children": {
"run_training.setup": {
"total": 0.05417933700005051,
"count": 1,
"self": 0.05417933700005051
},
"TrainerController.start_learning": {
"total": 1275.7811031989997,
"count": 1,
"self": 0.8139817919957295,
"children": {
"TrainerController._reset_env": {
"total": 2.0362777450000067,
"count": 1,
"self": 2.0362777450000067
},
"TrainerController.advance": {
"total": 1272.9272560610038,
"count": 39808,
"self": 0.853851328017754,
"children": {
"env_step": {
"total": 881.918124496023,
"count": 39808,
"self": 803.3392179180603,
"children": {
"SubprocessEnvManager._take_step": {
"total": 78.088698361036,
"count": 39808,
"self": 2.8598763800455345,
"children": {
"TorchPolicy.evaluate": {
"total": 75.22882198099046,
"count": 39612,
"self": 75.22882198099046
}
}
},
"workers": {
"total": 0.49020821692670324,
"count": 39808,
"self": 0.0,
"children": {
"worker_root": {
"total": 1272.8924486930418,
"count": 39808,
"is_parallel": true,
"self": 540.2442838990144,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018245910000587173,
"count": 1,
"is_parallel": true,
"self": 0.0005732650004119932,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001251325999646724,
"count": 8,
"is_parallel": true,
"self": 0.001251325999646724
}
}
},
"UnityEnvironment.step": {
"total": 0.05229469000005338,
"count": 1,
"is_parallel": true,
"self": 0.0005633579999084759,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005113400000027468,
"count": 1,
"is_parallel": true,
"self": 0.0005113400000027468
},
"communicator.exchange": {
"total": 0.049173658000199794,
"count": 1,
"is_parallel": true,
"self": 0.049173658000199794
},
"steps_from_proto": {
"total": 0.0020463339999423624,
"count": 1,
"is_parallel": true,
"self": 0.00043597400008366094,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016103599998587015,
"count": 8,
"is_parallel": true,
"self": 0.0016103599998587015
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 732.6481647940275,
"count": 39807,
"is_parallel": true,
"self": 21.375438582042307,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.145237243002157,
"count": 39807,
"is_parallel": true,
"self": 15.145237243002157
},
"communicator.exchange": {
"total": 635.597081579934,
"count": 39807,
"is_parallel": true,
"self": 635.597081579934
},
"steps_from_proto": {
"total": 60.53040738904906,
"count": 39807,
"is_parallel": true,
"self": 11.691484828160128,
"children": {
"_process_rank_one_or_two_observation": {
"total": 48.83892256088893,
"count": 318456,
"is_parallel": true,
"self": 48.83892256088893
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 390.1552802369631,
"count": 39808,
"self": 1.434370235954475,
"children": {
"process_trajectory": {
"total": 75.93375816700541,
"count": 39808,
"self": 75.8342619370053,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09949623000011343,
"count": 1,
"self": 0.09949623000011343
}
}
},
"_update_policy": {
"total": 312.7871518340032,
"count": 273,
"self": 187.06933069801062,
"children": {
"TorchPPOOptimizer.update": {
"total": 125.71782113599261,
"count": 14397,
"self": 125.71782113599261
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2840000636060722e-06,
"count": 1,
"self": 1.2840000636060722e-06
},
"TrainerController._save_models": {
"total": 0.0035863170000993705,
"count": 1,
"self": 3.375799997229478e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0035525590001270757,
"count": 1,
"self": 0.0035525590001270757
}
}
}
}
}
}
}