{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2807066738605499,
"min": 0.2807066738605499,
"max": 1.3785403966903687,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8416.708984375,
"min": 8416.708984375,
"max": 41819.40234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29902.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29902.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5903724431991577,
"min": -0.08363796770572662,
"max": 0.6943753361701965,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 164.12353515625,
"min": -20.07311248779297,
"max": 200.67446899414062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.026276985183358192,
"min": 0.0030761195812374353,
"max": 0.4331190586090088,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.305001735687256,
"min": 0.8274761438369751,
"max": 102.64921569824219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06582811953871985,
"min": 0.06516470028214999,
"max": 0.07245532434397897,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.921593673542078,
"min": 0.5026240653136609,
"max": 1.0777334175586208,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014429506556486713,
"min": 0.0004922993454828314,
"max": 0.017590257472225597,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20201309179081398,
"min": 0.004430694109345483,
"max": 0.24626360461115834,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.540533200807144e-06,
"min": 7.540533200807144e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010556746481130002,
"min": 0.00010556746481130002,
"max": 0.0032573925142026,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251347857142858,
"min": 0.10251347857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351887,
"min": 1.3886848,
"max": 2.4857974000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026109650928571434,
"min": 0.00026109650928571434,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036553511300000006,
"min": 0.0036553511300000006,
"max": 0.10861116025999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014858576469123363,
"min": 0.014858576469123363,
"max": 0.7446269989013672,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20802007615566254,
"min": 0.20802007615566254,
"max": 5.21238899230957,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 288.6185567010309,
"min": 278.0,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27996.0,
"min": 16829.0,
"max": 32201.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6265728971144806,
"min": -0.9999871489501768,
"max": 1.702578624330678,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 156.15099812299013,
"min": -31.99920167028904,
"max": 186.70099716633558,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6265728971144806,
"min": -0.9999871489501768,
"max": 1.702578624330678,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 156.15099812299013,
"min": -31.99920167028904,
"max": 186.70099716633558,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04452478174365145,
"min": 0.04452478174365145,
"max": 14.489005896098474,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.274379047390539,
"min": 4.274379047390539,
"max": 246.31310023367405,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678970031",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678972312"
},
"total": 2281.0831764779996,
"count": 1,
"self": 0.4917932069993185,
"children": {
"run_training.setup": {
"total": 0.17520433299978322,
"count": 1,
"self": 0.17520433299978322
},
"TrainerController.start_learning": {
"total": 2280.4161789380005,
"count": 1,
"self": 1.4639609029768508,
"children": {
"TrainerController._reset_env": {
"total": 7.994707173999814,
"count": 1,
"self": 7.994707173999814
},
"TrainerController.advance": {
"total": 2270.8678907210237,
"count": 63996,
"self": 1.5147190421225787,
"children": {
"env_step": {
"total": 1617.6987452108656,
"count": 63996,
"self": 1502.2290419068827,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.58311953106886,
"count": 63996,
"self": 5.050820741978896,
"children": {
"TorchPolicy.evaluate": {
"total": 109.53229878908996,
"count": 62554,
"self": 109.53229878908996
}
}
},
"workers": {
"total": 0.8865837729140367,
"count": 63996,
"self": 0.0,
"children": {
"worker_root": {
"total": 2275.3220508680847,
"count": 63996,
"is_parallel": true,
"self": 894.4157313070355,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00265867200005232,
"count": 1,
"is_parallel": true,
"self": 0.0009147169998868776,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017439550001654425,
"count": 8,
"is_parallel": true,
"self": 0.0017439550001654425
}
}
},
"UnityEnvironment.step": {
"total": 0.04867095600002358,
"count": 1,
"is_parallel": true,
"self": 0.0005179599997973128,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004727870000351686,
"count": 1,
"is_parallel": true,
"self": 0.0004727870000351686
},
"communicator.exchange": {
"total": 0.04597873600005187,
"count": 1,
"is_parallel": true,
"self": 0.04597873600005187
},
"steps_from_proto": {
"total": 0.0017014730001392309,
"count": 1,
"is_parallel": true,
"self": 0.00038074600024629035,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013207269998929405,
"count": 8,
"is_parallel": true,
"self": 0.0013207269998929405
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1380.9063195610493,
"count": 63995,
"is_parallel": true,
"self": 31.27054110609606,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.63493371599634,
"count": 63995,
"is_parallel": true,
"self": 23.63493371599634
},
"communicator.exchange": {
"total": 1230.9208444089268,
"count": 63995,
"is_parallel": true,
"self": 1230.9208444089268
},
"steps_from_proto": {
"total": 95.08000033003009,
"count": 63995,
"is_parallel": true,
"self": 20.697079896788637,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.38292043324145,
"count": 511960,
"is_parallel": true,
"self": 74.38292043324145
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 651.6544264680356,
"count": 63996,
"self": 2.690925970047374,
"children": {
"process_trajectory": {
"total": 127.45521108198409,
"count": 63996,
"self": 127.18534352098413,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26986756099995546,
"count": 2,
"self": 0.26986756099995546
}
}
},
"_update_policy": {
"total": 521.5082894160041,
"count": 447,
"self": 333.6191637679849,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.88912564801922,
"count": 22836,
"self": 187.88912564801922
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2669997886405326e-06,
"count": 1,
"self": 1.2669997886405326e-06
},
"TrainerController._save_models": {
"total": 0.08961887300029048,
"count": 1,
"self": 0.0014015870001458097,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08821728600014467,
"count": 1,
"self": 0.08821728600014467
}
}
}
}
}
}
}