ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4014661312103271,
"min": 1.4014661312103271,
"max": 1.426560401916504,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70346.59375,
"min": 68747.9609375,
"max": 75939.6015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.7495361781076,
"min": 88.35714285714286,
"max": 420.47899159663865,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49453.0,
"min": 48936.0,
"max": 50167.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999294.0,
"min": 49506.0,
"max": 1999294.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999294.0,
"min": 49506.0,
"max": 1999294.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.369168758392334,
"min": 0.03171422705054283,
"max": 2.40924072265625,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1276.98193359375,
"min": 3.742278575897217,
"max": 1303.1617431640625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.753542330468519,
"min": 1.6918142768791167,
"max": 3.88624769015028,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2023.159316122532,
"min": 199.63408467173576,
"max": 2067.6876316070557,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.753542330468519,
"min": 1.6918142768791167,
"max": 3.88624769015028,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2023.159316122532,
"min": 199.63408467173576,
"max": 2067.6876316070557,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.013813339480111608,
"min": 0.013375715384609067,
"max": 0.02052473187407789,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.027626678960223215,
"min": 0.026751430769218133,
"max": 0.0582309822513101,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05706169034043948,
"min": 0.023028228183587393,
"max": 0.06249140202999115,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11412338068087896,
"min": 0.046056456367174786,
"max": 0.1793522235006094,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.385573538175002e-06,
"min": 4.385573538175002e-06,
"max": 0.000295268776577075,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.771147076350003e-06,
"min": 8.771147076350003e-06,
"max": 0.00084380236873255,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10146182500000003,
"min": 0.10146182500000003,
"max": 0.19842292500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20292365000000007,
"min": 0.20292365000000007,
"max": 0.5812674500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.294506749999999e-05,
"min": 8.294506749999999e-05,
"max": 0.0049213039574999996,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016589013499999997,
"min": 0.00016589013499999997,
"max": 0.014065245754999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671890432",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671892620"
},
"total": 2188.4268355060003,
"count": 1,
"self": 0.3895615390006242,
"children": {
"run_training.setup": {
"total": 0.11562870399995973,
"count": 1,
"self": 0.11562870399995973
},
"TrainerController.start_learning": {
"total": 2187.921645263,
"count": 1,
"self": 3.757933008994769,
"children": {
"TrainerController._reset_env": {
"total": 8.293625327999962,
"count": 1,
"self": 8.293625327999962
},
"TrainerController.advance": {
"total": 2175.749818007005,
"count": 231522,
"self": 4.043047716838373,
"children": {
"env_step": {
"total": 1715.705174458078,
"count": 231522,
"self": 1442.278034430934,
"children": {
"SubprocessEnvManager._take_step": {
"total": 270.92428431106987,
"count": 231522,
"self": 13.867775245100688,
"children": {
"TorchPolicy.evaluate": {
"total": 257.0565090659692,
"count": 222876,
"self": 64.20149116888365,
"children": {
"TorchPolicy.sample_actions": {
"total": 192.85501789708553,
"count": 222876,
"self": 192.85501789708553
}
}
}
}
},
"workers": {
"total": 2.5028557160742366,
"count": 231522,
"self": 0.0,
"children": {
"worker_root": {
"total": 2180.62261862307,
"count": 231522,
"is_parallel": true,
"self": 989.7696110140716,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021279640000102518,
"count": 1,
"is_parallel": true,
"self": 0.0004187039999123954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017092600000978564,
"count": 2,
"is_parallel": true,
"self": 0.0017092600000978564
}
}
},
"UnityEnvironment.step": {
"total": 0.0284157950000008,
"count": 1,
"is_parallel": true,
"self": 0.0002632289999837667,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017422600001282262,
"count": 1,
"is_parallel": true,
"self": 0.00017422600001282262
},
"communicator.exchange": {
"total": 0.027306364999958532,
"count": 1,
"is_parallel": true,
"self": 0.027306364999958532
},
"steps_from_proto": {
"total": 0.000671975000045677,
"count": 1,
"is_parallel": true,
"self": 0.0002356130000862322,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004363619999594448,
"count": 2,
"is_parallel": true,
"self": 0.0004363619999594448
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1190.8530076089985,
"count": 231521,
"is_parallel": true,
"self": 34.13946592993034,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.09993416109296,
"count": 231521,
"is_parallel": true,
"self": 76.09993416109296
},
"communicator.exchange": {
"total": 989.147176780995,
"count": 231521,
"is_parallel": true,
"self": 989.147176780995
},
"steps_from_proto": {
"total": 91.4664307369801,
"count": 231521,
"is_parallel": true,
"self": 37.2827676359658,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.183663101014304,
"count": 463042,
"is_parallel": true,
"self": 54.183663101014304
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 456.0015958320887,
"count": 231522,
"self": 5.848336176108546,
"children": {
"process_trajectory": {
"total": 141.4775350939807,
"count": 231522,
"self": 140.29009006298088,
"children": {
"RLTrainer._checkpoint": {
"total": 1.187445030999811,
"count": 10,
"self": 1.187445030999811
}
}
},
"_update_policy": {
"total": 308.67572456199946,
"count": 96,
"self": 256.1084050210011,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.56731954099837,
"count": 2880,
"self": 52.56731954099837
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.779998097452335e-07,
"count": 1,
"self": 8.779998097452335e-07
},
"TrainerController._save_models": {
"total": 0.12026804100014488,
"count": 1,
"self": 0.0024291929998980777,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1178388480002468,
"count": 1,
"self": 0.1178388480002468
}
}
}
}
}
}
}