{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1113511323928833,
"min": 1.1113511323928833,
"max": 2.853257656097412,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10684.5302734375,
"min": 10684.5302734375,
"max": 29314.369140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.931989669799805,
"min": 0.482957661151886,
"max": 13.931989669799805,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2716.738037109375,
"min": 93.69378662109375,
"max": 2827.4814453125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07096270011571308,
"min": 0.0620249980916817,
"max": 0.0791516791446067,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2838508004628523,
"min": 0.2480999923667268,
"max": 0.3740844559540753,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1474702567154286,
"min": 0.12260380780379124,
"max": 0.2874875267931059,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5898810268617144,
"min": 0.49041523121516495,
"max": 1.4374376339655295,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.522727272727273,
"min": 4.0227272727272725,
"max": 27.772727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1211.0,
"min": 177.0,
"max": 1507.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.522727272727273,
"min": 4.0227272727272725,
"max": 27.772727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1211.0,
"min": 177.0,
"max": 1507.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673618360",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673618814"
},
"total": 453.4110915460001,
"count": 1,
"self": 0.3786681260003206,
"children": {
"run_training.setup": {
"total": 0.10939629699987563,
"count": 1,
"self": 0.10939629699987563
},
"TrainerController.start_learning": {
"total": 452.9230271229999,
"count": 1,
"self": 0.5097587159752948,
"children": {
"TrainerController._reset_env": {
"total": 6.123212664000221,
"count": 1,
"self": 6.123212664000221
},
"TrainerController.advance": {
"total": 446.04182323902387,
"count": 18213,
"self": 0.2848577199870306,
"children": {
"env_step": {
"total": 445.75696551903684,
"count": 18213,
"self": 282.1225357660619,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.3690842009819,
"count": 18213,
"self": 1.3731497859666888,
"children": {
"TorchPolicy.evaluate": {
"total": 161.9959344150152,
"count": 18213,
"self": 32.92054237998036,
"children": {
"TorchPolicy.sample_actions": {
"total": 129.07539203503484,
"count": 18213,
"self": 129.07539203503484
}
}
}
}
},
"workers": {
"total": 0.2653455519930503,
"count": 18213,
"self": 0.0,
"children": {
"worker_root": {
"total": 451.59780886402996,
"count": 18213,
"is_parallel": true,
"self": 235.063955551032,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00201175600022907,
"count": 1,
"is_parallel": true,
"self": 0.0006903140001668362,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001321442000062234,
"count": 10,
"is_parallel": true,
"self": 0.001321442000062234
}
}
},
"UnityEnvironment.step": {
"total": 0.04346494999981587,
"count": 1,
"is_parallel": true,
"self": 0.0004371089999040123,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00029429000005620765,
"count": 1,
"is_parallel": true,
"self": 0.00029429000005620765
},
"communicator.exchange": {
"total": 0.04092408199994679,
"count": 1,
"is_parallel": true,
"self": 0.04092408199994679
},
"steps_from_proto": {
"total": 0.0018094689999088587,
"count": 1,
"is_parallel": true,
"self": 0.00040874600017559715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014007229997332615,
"count": 10,
"is_parallel": true,
"self": 0.0014007229997332615
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 216.53385331299796,
"count": 18212,
"is_parallel": true,
"self": 8.43970731698846,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.032522559016343,
"count": 18212,
"is_parallel": true,
"self": 5.032522559016343
},
"communicator.exchange": {
"total": 170.31760912699565,
"count": 18212,
"is_parallel": true,
"self": 170.31760912699565
},
"steps_from_proto": {
"total": 32.74401430999751,
"count": 18212,
"is_parallel": true,
"self": 6.616447349115788,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.127566960881722,
"count": 182120,
"is_parallel": true,
"self": 26.127566960881722
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.816800037588109e-05,
"count": 1,
"self": 4.816800037588109e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 443.380782900972,
"count": 316260,
"is_parallel": true,
"self": 8.34657657807793,
"children": {
"process_trajectory": {
"total": 266.51127307489105,
"count": 316260,
"is_parallel": true,
"self": 264.69162657589095,
"children": {
"RLTrainer._checkpoint": {
"total": 1.8196464990001004,
"count": 4,
"is_parallel": true,
"self": 1.8196464990001004
}
}
},
"_update_policy": {
"total": 168.52293324800303,
"count": 90,
"is_parallel": true,
"self": 43.988932111009944,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.53400113699308,
"count": 4587,
"is_parallel": true,
"self": 124.53400113699308
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24818433600012213,
"count": 1,
"self": 0.0036526670000966988,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24453166900002543,
"count": 1,
"self": 0.24453166900002543
}
}
}
}
}
}
}