{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.404979944229126,
"min": 1.404979944229126,
"max": 1.4287396669387817,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69493.1171875,
"min": 69493.1171875,
"max": 76242.46875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 75.82208588957056,
"min": 71.20953757225433,
"max": 370.3037037037037,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49436.0,
"min": 49247.0,
"max": 49991.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999979.0,
"min": 49532.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999979.0,
"min": 49532.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4752533435821533,
"min": 0.16101893782615662,
"max": 2.5439491271972656,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1613.865234375,
"min": 21.5765380859375,
"max": 1737.81591796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.846790493814492,
"min": 1.8260335653130688,
"max": 4.10094083582979,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2508.1074019670486,
"min": 244.68849775195122,
"max": 2710.4925855994225,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.846790493814492,
"min": 1.8260335653130688,
"max": 4.10094083582979,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2508.1074019670486,
"min": 244.68849775195122,
"max": 2710.4925855994225,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015302463580155745,
"min": 0.01483008829333509,
"max": 0.01941269134792189,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.045907390740467235,
"min": 0.02966017658667018,
"max": 0.058238074043765665,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06037283866769738,
"min": 0.02137228480229775,
"max": 0.060926961712539196,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18111851600309214,
"min": 0.0427445696045955,
"max": 0.18111851600309214,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.9049486983833365e-06,
"min": 3.9049486983833365e-06,
"max": 0.000295333876555375,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1714846095150009e-05,
"min": 1.1714846095150009e-05,
"max": 0.0008442123185959,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1013016166666667,
"min": 0.1013016166666667,
"max": 0.198444625,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3039048500000001,
"min": 0.20773444999999996,
"max": 0.5814040999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.495067166666674e-05,
"min": 7.495067166666674e-05,
"max": 0.0049223867875,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002248520150000002,
"min": 0.0002248520150000002,
"max": 0.014072064589999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677560731",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677563189"
},
"total": 2458.5205187,
"count": 1,
"self": 0.6363257870002599,
"children": {
"run_training.setup": {
"total": 0.11137853100001394,
"count": 1,
"self": 0.11137853100001394
},
"TrainerController.start_learning": {
"total": 2457.772814382,
"count": 1,
"self": 4.595930622915603,
"children": {
"TrainerController._reset_env": {
"total": 10.554390768999951,
"count": 1,
"self": 10.554390768999951
},
"TrainerController.advance": {
"total": 2442.4486832970842,
"count": 233586,
"self": 4.641318486043929,
"children": {
"env_step": {
"total": 1905.411947846933,
"count": 233586,
"self": 1585.2545595169504,
"children": {
"SubprocessEnvManager._take_step": {
"total": 317.28821042511527,
"count": 233586,
"self": 16.360977292074608,
"children": {
"TorchPolicy.evaluate": {
"total": 300.92723313304066,
"count": 222919,
"self": 74.40848567509818,
"children": {
"TorchPolicy.sample_actions": {
"total": 226.51874745794248,
"count": 222919,
"self": 226.51874745794248
}
}
}
}
},
"workers": {
"total": 2.8691779048673425,
"count": 233586,
"self": 0.0,
"children": {
"worker_root": {
"total": 2448.6126408110727,
"count": 233586,
"is_parallel": true,
"self": 1165.3015958881049,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008981450000646873,
"count": 1,
"is_parallel": true,
"self": 0.0003659790000938301,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005321659999708572,
"count": 2,
"is_parallel": true,
"self": 0.0005321659999708572
}
}
},
"UnityEnvironment.step": {
"total": 0.05258688699996128,
"count": 1,
"is_parallel": true,
"self": 0.0003072900000233858,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008906389999765452,
"count": 1,
"is_parallel": true,
"self": 0.0008906389999765452
},
"communicator.exchange": {
"total": 0.04835272499997245,
"count": 1,
"is_parallel": true,
"self": 0.04835272499997245
},
"steps_from_proto": {
"total": 0.003036232999988897,
"count": 1,
"is_parallel": true,
"self": 0.00034246900008838566,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026937639999005114,
"count": 2,
"is_parallel": true,
"self": 0.0026937639999005114
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1283.3110449229678,
"count": 233585,
"is_parallel": true,
"self": 38.45308878297101,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.20524881093309,
"count": 233585,
"is_parallel": true,
"self": 82.20524881093309
},
"communicator.exchange": {
"total": 1068.5163669240915,
"count": 233585,
"is_parallel": true,
"self": 1068.5163669240915
},
"steps_from_proto": {
"total": 94.13634040497232,
"count": 233585,
"is_parallel": true,
"self": 40.32420034501479,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.81214005995753,
"count": 467170,
"is_parallel": true,
"self": 53.81214005995753
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 532.3954169641075,
"count": 233586,
"self": 6.9149789610630705,
"children": {
"process_trajectory": {
"total": 175.53024831204516,
"count": 233586,
"self": 174.24176740404505,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2884809080001105,
"count": 10,
"self": 1.2884809080001105
}
}
},
"_update_policy": {
"total": 349.9501896909993,
"count": 97,
"self": 292.01122274399756,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.938966947001745,
"count": 2910,
"self": 57.938966947001745
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.9050003174925223e-06,
"count": 1,
"self": 1.9050003174925223e-06
},
"TrainerController._save_models": {
"total": 0.17380778799997643,
"count": 1,
"self": 0.0028479600000537175,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1709598279999227,
"count": 1,
"self": 0.1709598279999227
}
}
}
}
}
}
}