{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9305831789970398,
"min": 0.9305831789970398,
"max": 2.847496509552002,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8885.2080078125,
"min": 8885.2080078125,
"max": 29129.888671875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.308141708374023,
"min": 0.4374871253967285,
"max": 13.308141708374023,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2595.087646484375,
"min": 84.87250518798828,
"max": 2701.822265625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06894930970356297,
"min": 0.06321284798985147,
"max": 0.07603551310025354,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2757972388142519,
"min": 0.2564540574710597,
"max": 0.3801775655012677,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1936516881573434,
"min": 0.12349805030538061,
"max": 0.30410263205275817,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7746067526293736,
"min": 0.49399220122152243,
"max": 1.520513160263791,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.347009730600001e-05,
"min": 1.347009730600001e-05,
"max": 0.00048647000270600005,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.388038922400004e-05,
"min": 5.388038922400004e-05,
"max": 0.00230860003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10404100000000001,
"min": 0.10404100000000001,
"max": 0.24594099999999997,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41616400000000003,
"min": 0.41616400000000003,
"max": 1.19258,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002252506000000002,
"min": 0.0002252506000000002,
"max": 0.0077837906,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0009010024000000008,
"min": 0.0009010024000000008,
"max": 0.036941428000000005,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.09090909090909,
"min": 3.5454545454545454,
"max": 26.5,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1148.0,
"min": 156.0,
"max": 1430.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.09090909090909,
"min": 3.5454545454545454,
"max": 26.5,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1148.0,
"min": 156.0,
"max": 1430.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673432814",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673433257"
},
"total": 442.97319316000005,
"count": 1,
"self": 0.38753719199996794,
"children": {
"run_training.setup": {
"total": 0.1057847640000773,
"count": 1,
"self": 0.1057847640000773
},
"TrainerController.start_learning": {
"total": 442.479871204,
"count": 1,
"self": 0.5849504949945867,
"children": {
"TrainerController._reset_env": {
"total": 6.9985603129999845,
"count": 1,
"self": 6.9985603129999845
},
"TrainerController.advance": {
"total": 434.7689509010055,
"count": 18201,
"self": 0.30195454200850236,
"children": {
"env_step": {
"total": 434.466996358997,
"count": 18201,
"self": 283.84284018099834,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.33078861700812,
"count": 18201,
"self": 1.4280259020138146,
"children": {
"TorchPolicy.evaluate": {
"total": 148.9027627149943,
"count": 18201,
"self": 33.47368812198306,
"children": {
"TorchPolicy.sample_actions": {
"total": 115.42907459301125,
"count": 18201,
"self": 115.42907459301125
}
}
}
}
},
"workers": {
"total": 0.2933675609905322,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 441.1979710750022,
"count": 18201,
"is_parallel": true,
"self": 212.089190817008,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007206913999993958,
"count": 1,
"is_parallel": true,
"self": 0.004282828999976118,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029240850000178398,
"count": 10,
"is_parallel": true,
"self": 0.0029240850000178398
}
}
},
"UnityEnvironment.step": {
"total": 0.033741344000077333,
"count": 1,
"is_parallel": true,
"self": 0.00038271599999006867,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004589039999700617,
"count": 1,
"is_parallel": true,
"self": 0.0004589039999700617
},
"communicator.exchange": {
"total": 0.030927683000072648,
"count": 1,
"is_parallel": true,
"self": 0.030927683000072648
},
"steps_from_proto": {
"total": 0.0019720410000445554,
"count": 1,
"is_parallel": true,
"self": 0.0004391550000946154,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00153288599994994,
"count": 10,
"is_parallel": true,
"self": 0.00153288599994994
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 229.10878025799423,
"count": 18200,
"is_parallel": true,
"self": 8.712622355014219,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.382485823986826,
"count": 18200,
"is_parallel": true,
"self": 5.382485823986826
},
"communicator.exchange": {
"total": 182.69545027197717,
"count": 18200,
"is_parallel": true,
"self": 182.69545027197717
},
"steps_from_proto": {
"total": 32.318221807016016,
"count": 18200,
"is_parallel": true,
"self": 7.000705867989382,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.317515939026634,
"count": 182000,
"is_parallel": true,
"self": 25.317515939026634
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.668699989451852e-05,
"count": 1,
"self": 4.668699989451852e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 431.6571568429612,
"count": 356482,
"is_parallel": true,
"self": 9.66980314497971,
"children": {
"process_trajectory": {
"total": 246.83052933098213,
"count": 356482,
"is_parallel": true,
"self": 246.03864846798206,
"children": {
"RLTrainer._checkpoint": {
"total": 0.791880863000074,
"count": 4,
"is_parallel": true,
"self": 0.791880863000074
}
}
},
"_update_policy": {
"total": 175.15682436699933,
"count": 90,
"is_parallel": true,
"self": 44.898335928997994,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.25848843800134,
"count": 4587,
"is_parallel": true,
"self": 130.25848843800134
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12736280800004351,
"count": 1,
"self": 0.0008446280000953266,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1265181799999482,
"count": 1,
"self": 0.1265181799999482
}
}
}
}
}
}
}