Upload folder using huggingface_hub
- .gitattributes +1 -0
- .summary/0/events.out.tfevents.1733610845.anne-hackaway +3 -0
- README.md +56 -0
- checkpoint_p0/best_000000772_3162112_reward_27.345.pth +3 -0
- checkpoint_p0/checkpoint_000000597_2445312.pth +3 -0
- checkpoint_p0/checkpoint_000000978_4005888.pth +3 -0
- config.json +142 -0
- replay.mp4 +3 -0
- sf_log.txt +684 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+replay.mp4 filter=lfs diff=lfs merge=lfs -text
.summary/0/events.out.tfevents.1733610845.anne-hackaway ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c68d0ef779fba2176f6062c4d377d995b16fad6a762482378daedd0dcef83dd4
size 178181
README.md ADDED
@@ -0,0 +1,56 @@
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 9.26 +/- 3.64
      name: mean_reward
      verified: false
---

An **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/


## Downloading the model

After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r rahatchd/rl_course_vizdoom_health_gathering_supreme
```


## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:
```
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```


You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag; an example invocation is sketched below.
See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
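For example, pushing an evaluated model to this repository could look like the following. This is only a sketch: it reuses the `<path.to.enjoy.module>` placeholder from above, and the `--push_to_hub`/`--hf_repository` arguments are the ones recorded in sf_log.txt during evaluation.
```
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --max_num_episodes=10 --push_to_hub --hf_repository=rahatchd/rl_course_vizdoom_health_gathering_supreme
```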

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:
```
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
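For instance, this particular run was launched with `--train_for_env_steps=4000000` and stopped at 4,005,888 environment steps (see config.json and the end of sf_log.txt), so continuing for roughly another four million steps would be a sketch like:
```
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=8000000
```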
checkpoint_p0/best_000000772_3162112_reward_27.345.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d8e625029d33f15f0e7532db24d3216e76835b42f57e98b81bb644445e0170f
size 34929051
checkpoint_p0/checkpoint_000000597_2445312.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:99bd23c65514c90b23ba61f9f656d13b4d5a68e7874f60e591e7d1c2f3daee5e
size 34929541
checkpoint_p0/checkpoint_000000978_4005888.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3acc9971f7b95b9c1771bb994c387d39bf679edec2fe807449ccf824f4fd7b2d
size 34929477
config.json ADDED
@@ -0,0 +1,142 @@
{
    "help": false,
    "algo": "APPO",
    "env": "doom_health_gathering_supreme",
    "experiment": "default_experiment",
    "train_dir": "/home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir",
    "restart_behavior": "resume",
    "device": "gpu",
    "seed": null,
    "num_policies": 1,
    "async_rl": true,
    "serial_mode": false,
    "batched_sampling": false,
    "num_batches_to_accumulate": 2,
    "worker_num_splits": 2,
    "policy_workers_per_policy": 1,
    "max_policy_lag": 1000,
    "num_workers": 8,
    "num_envs_per_worker": 4,
    "batch_size": 1024,
    "num_batches_per_epoch": 1,
    "num_epochs": 1,
    "rollout": 32,
    "recurrence": 32,
    "shuffle_minibatches": false,
    "gamma": 0.99,
    "reward_scale": 1.0,
    "reward_clip": 1000.0,
    "value_bootstrap": false,
    "normalize_returns": true,
    "exploration_loss_coeff": 0.001,
    "value_loss_coeff": 0.5,
    "kl_loss_coeff": 0.0,
    "exploration_loss": "symmetric_kl",
    "gae_lambda": 0.95,
    "ppo_clip_ratio": 0.1,
    "ppo_clip_value": 0.2,
    "with_vtrace": false,
    "vtrace_rho": 1.0,
    "vtrace_c": 1.0,
    "optimizer": "adam",
    "adam_eps": 1e-06,
    "adam_beta1": 0.9,
    "adam_beta2": 0.999,
    "max_grad_norm": 4.0,
    "learning_rate": 0.0001,
    "lr_schedule": "constant",
    "lr_schedule_kl_threshold": 0.008,
    "lr_adaptive_min": 1e-06,
    "lr_adaptive_max": 0.01,
    "obs_subtract_mean": 0.0,
    "obs_scale": 255.0,
    "normalize_input": true,
    "normalize_input_keys": null,
    "decorrelate_experience_max_seconds": 0,
    "decorrelate_envs_on_one_worker": true,
    "actor_worker_gpus": [],
    "set_workers_cpu_affinity": true,
    "force_envs_single_thread": false,
    "default_niceness": 0,
    "log_to_file": true,
    "experiment_summaries_interval": 10,
    "flush_summaries_interval": 30,
    "stats_avg": 100,
    "summaries_use_frameskip": true,
    "heartbeat_interval": 20,
    "heartbeat_reporting_interval": 600,
    "train_for_env_steps": 4000000,
    "train_for_seconds": 10000000000,
    "save_every_sec": 120,
    "keep_checkpoints": 2,
    "load_checkpoint_kind": "latest",
    "save_milestones_sec": -1,
    "save_best_every_sec": 5,
    "save_best_metric": "reward",
    "save_best_after": 100000,
    "benchmark": false,
    "encoder_mlp_layers": [
        512,
        512
    ],
    "encoder_conv_architecture": "convnet_simple",
    "encoder_conv_mlp_layers": [
        512
    ],
    "use_rnn": true,
    "rnn_size": 512,
    "rnn_type": "gru",
    "rnn_num_layers": 1,
    "decoder_mlp_layers": [],
    "nonlinearity": "elu",
    "policy_initialization": "orthogonal",
    "policy_init_gain": 1.0,
    "actor_critic_share_weights": true,
    "adaptive_stddev": true,
    "continuous_tanh_scale": 0.0,
    "initial_stddev": 1.0,
    "use_env_info_cache": false,
    "env_gpu_actions": false,
    "env_gpu_observations": true,
    "env_frameskip": 4,
    "env_framestack": 1,
    "pixel_format": "CHW",
    "use_record_episode_statistics": false,
    "with_wandb": false,
    "wandb_user": null,
    "wandb_project": "sample_factory",
    "wandb_group": null,
    "wandb_job_type": "SF",
    "wandb_tags": [],
    "with_pbt": false,
    "pbt_mix_policies_in_one_env": true,
    "pbt_period_env_steps": 5000000,
    "pbt_start_mutation": 20000000,
    "pbt_replace_fraction": 0.3,
    "pbt_mutation_rate": 0.15,
    "pbt_replace_reward_gap": 0.1,
    "pbt_replace_reward_gap_absolute": 1e-06,
    "pbt_optimize_gamma": false,
    "pbt_target_objective": "true_objective",
    "pbt_perturb_min": 1.1,
    "pbt_perturb_max": 1.5,
    "num_agents": -1,
    "num_humans": 0,
    "num_bots": -1,
    "start_bot_difficulty": null,
    "timelimit": null,
    "res_w": 128,
    "res_h": 72,
    "wide_aspect_ratio": false,
    "eval_env_frameskip": 1,
    "fps": 35,
    "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
    "cli_args": {
        "env": "doom_health_gathering_supreme",
        "num_workers": 8,
        "num_envs_per_worker": 4,
        "train_for_env_steps": 4000000
    },
    "git_hash": "unknown",
    "git_repo_name": "not a git repository"
}
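The `command_line` field above records the exact arguments this run was launched with. A minimal sketch of reproducing it, assuming the VizDoom training entrypoint shipped with Sample-Factory (`sf_examples.vizdoom.train_vizdoom`; substitute your own training module if it differs):
```
python -m sf_examples.vizdoom.train_vizdoom --env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
```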
replay.mp4 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:771a84f36fff5a9f6c0f347eb2570410f7c1f0c413dbb8fe45b841c6ac9e0482
size 17446371
sf_log.txt
ADDED
@@ -0,0 +1,684 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[2024-12-07 14:34:08,376][19013] Saving configuration to /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/config.json...
|
2 |
+
[2024-12-07 14:34:08,376][19013] Rollout worker 0 uses device cpu
|
3 |
+
[2024-12-07 14:34:08,377][19013] Rollout worker 1 uses device cpu
|
4 |
+
[2024-12-07 14:34:08,377][19013] Rollout worker 2 uses device cpu
|
5 |
+
[2024-12-07 14:34:08,377][19013] Rollout worker 3 uses device cpu
|
6 |
+
[2024-12-07 14:34:08,378][19013] Rollout worker 4 uses device cpu
|
7 |
+
[2024-12-07 14:34:08,378][19013] Rollout worker 5 uses device cpu
|
8 |
+
[2024-12-07 14:34:08,378][19013] Rollout worker 6 uses device cpu
|
9 |
+
[2024-12-07 14:34:08,379][19013] Rollout worker 7 uses device cpu
|
10 |
+
[2024-12-07 14:34:08,444][19013] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
11 |
+
[2024-12-07 14:34:08,444][19013] InferenceWorker_p0-w0: min num requests: 2
|
12 |
+
[2024-12-07 14:34:08,474][19013] Starting all processes...
|
13 |
+
[2024-12-07 14:34:08,474][19013] Starting process learner_proc0
|
14 |
+
[2024-12-07 14:34:08,524][19013] Starting all processes...
|
15 |
+
[2024-12-07 14:34:08,528][19013] Starting process inference_proc0-0
|
16 |
+
[2024-12-07 14:34:08,528][19013] Starting process rollout_proc0
|
17 |
+
[2024-12-07 14:34:08,528][19013] Starting process rollout_proc1
|
18 |
+
[2024-12-07 14:34:08,529][19013] Starting process rollout_proc2
|
19 |
+
[2024-12-07 14:34:08,529][19013] Starting process rollout_proc3
|
20 |
+
[2024-12-07 14:34:08,529][19013] Starting process rollout_proc4
|
21 |
+
[2024-12-07 14:34:08,530][19013] Starting process rollout_proc5
|
22 |
+
[2024-12-07 14:34:08,530][19013] Starting process rollout_proc6
|
23 |
+
[2024-12-07 14:34:08,530][19013] Starting process rollout_proc7
|
24 |
+
[2024-12-07 14:34:09,922][23822] Worker 3 uses CPU cores [9, 10, 11]
|
25 |
+
[2024-12-07 14:34:09,956][23826] Worker 7 uses CPU cores [21, 22, 23]
|
26 |
+
[2024-12-07 14:34:09,973][23806] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
27 |
+
[2024-12-07 14:34:09,973][23806] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
|
28 |
+
[2024-12-07 14:34:09,975][23824] Worker 5 uses CPU cores [15, 16, 17]
|
29 |
+
[2024-12-07 14:34:09,976][23827] Worker 6 uses CPU cores [18, 19, 20]
|
30 |
+
[2024-12-07 14:34:09,988][23806] Num visible devices: 1
|
31 |
+
[2024-12-07 14:34:09,994][23825] Worker 2 uses CPU cores [6, 7, 8]
|
32 |
+
[2024-12-07 14:34:09,996][23820] Worker 0 uses CPU cores [0, 1, 2]
|
33 |
+
[2024-12-07 14:34:10,005][23806] Starting seed is not provided
|
34 |
+
[2024-12-07 14:34:10,005][23806] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
35 |
+
[2024-12-07 14:34:10,006][23806] Initializing actor-critic model on device cuda:0
|
36 |
+
[2024-12-07 14:34:10,006][23806] RunningMeanStd input shape: (3, 72, 128)
|
37 |
+
[2024-12-07 14:34:10,007][23806] RunningMeanStd input shape: (1,)
|
38 |
+
[2024-12-07 14:34:10,015][23806] ConvEncoder: input_channels=3
|
39 |
+
[2024-12-07 14:34:10,022][23821] Worker 1 uses CPU cores [3, 4, 5]
|
40 |
+
[2024-12-07 14:34:10,045][23823] Worker 4 uses CPU cores [12, 13, 14]
|
41 |
+
[2024-12-07 14:34:10,090][23819] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
42 |
+
[2024-12-07 14:34:10,091][23819] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
|
43 |
+
[2024-12-07 14:34:10,105][23819] Num visible devices: 1
|
44 |
+
[2024-12-07 14:34:10,112][23806] Conv encoder output size: 512
|
45 |
+
[2024-12-07 14:34:10,113][23806] Policy head output size: 512
|
46 |
+
[2024-12-07 14:34:10,131][23806] Created Actor Critic model with architecture:
|
47 |
+
[2024-12-07 14:34:10,131][23806] ActorCriticSharedWeights(
|
48 |
+
(obs_normalizer): ObservationNormalizer(
|
49 |
+
(running_mean_std): RunningMeanStdDictInPlace(
|
50 |
+
(running_mean_std): ModuleDict(
|
51 |
+
(obs): RunningMeanStdInPlace()
|
52 |
+
)
|
53 |
+
)
|
54 |
+
)
|
55 |
+
(returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
|
56 |
+
(encoder): VizdoomEncoder(
|
57 |
+
(basic_encoder): ConvEncoder(
|
58 |
+
(enc): RecursiveScriptModule(
|
59 |
+
original_name=ConvEncoderImpl
|
60 |
+
(conv_head): RecursiveScriptModule(
|
61 |
+
original_name=Sequential
|
62 |
+
(0): RecursiveScriptModule(original_name=Conv2d)
|
63 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
64 |
+
(2): RecursiveScriptModule(original_name=Conv2d)
|
65 |
+
(3): RecursiveScriptModule(original_name=ELU)
|
66 |
+
(4): RecursiveScriptModule(original_name=Conv2d)
|
67 |
+
(5): RecursiveScriptModule(original_name=ELU)
|
68 |
+
)
|
69 |
+
(mlp_layers): RecursiveScriptModule(
|
70 |
+
original_name=Sequential
|
71 |
+
(0): RecursiveScriptModule(original_name=Linear)
|
72 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
73 |
+
)
|
74 |
+
)
|
75 |
+
)
|
76 |
+
)
|
77 |
+
(core): ModelCoreRNN(
|
78 |
+
(core): GRU(512, 512)
|
79 |
+
)
|
80 |
+
(decoder): MlpDecoder(
|
81 |
+
(mlp): Identity()
|
82 |
+
)
|
83 |
+
(critic_linear): Linear(in_features=512, out_features=1, bias=True)
|
84 |
+
(action_parameterization): ActionParameterizationDefault(
|
85 |
+
(distribution_linear): Linear(in_features=512, out_features=5, bias=True)
|
86 |
+
)
|
87 |
+
)
|
88 |
+
[2024-12-07 14:34:10,276][23806] Using optimizer <class 'torch.optim.adam.Adam'>
|
89 |
+
[2024-12-07 14:34:11,044][23806] No checkpoints found
|
90 |
+
[2024-12-07 14:34:11,044][23806] Did not load from checkpoint, starting from scratch!
|
91 |
+
[2024-12-07 14:34:11,044][23806] Initialized policy 0 weights for model version 0
|
92 |
+
[2024-12-07 14:34:11,046][23806] LearnerWorker_p0 finished initialization!
|
93 |
+
[2024-12-07 14:34:11,046][23806] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
94 |
+
[2024-12-07 14:34:11,160][23819] RunningMeanStd input shape: (3, 72, 128)
|
95 |
+
[2024-12-07 14:34:11,160][23819] RunningMeanStd input shape: (1,)
|
96 |
+
[2024-12-07 14:34:11,167][23819] ConvEncoder: input_channels=3
|
97 |
+
[2024-12-07 14:34:11,230][23819] Conv encoder output size: 512
|
98 |
+
[2024-12-07 14:34:11,230][23819] Policy head output size: 512
|
99 |
+
[2024-12-07 14:34:11,255][19013] Inference worker 0-0 is ready!
|
100 |
+
[2024-12-07 14:34:11,256][19013] All inference workers are ready! Signal rollout workers to start!
|
101 |
+
[2024-12-07 14:34:11,298][23820] Doom resolution: 160x120, resize resolution: (128, 72)
|
102 |
+
[2024-12-07 14:34:11,298][23823] Doom resolution: 160x120, resize resolution: (128, 72)
|
103 |
+
[2024-12-07 14:34:11,300][23821] Doom resolution: 160x120, resize resolution: (128, 72)
|
104 |
+
[2024-12-07 14:34:11,300][23824] Doom resolution: 160x120, resize resolution: (128, 72)
|
105 |
+
[2024-12-07 14:34:11,301][23822] Doom resolution: 160x120, resize resolution: (128, 72)
|
106 |
+
[2024-12-07 14:34:11,301][23826] Doom resolution: 160x120, resize resolution: (128, 72)
|
107 |
+
[2024-12-07 14:34:11,304][23825] Doom resolution: 160x120, resize resolution: (128, 72)
|
108 |
+
[2024-12-07 14:34:11,306][23827] Doom resolution: 160x120, resize resolution: (128, 72)
|
109 |
+
[2024-12-07 14:34:11,542][23824] Decorrelating experience for 0 frames...
|
110 |
+
[2024-12-07 14:34:11,542][23826] Decorrelating experience for 0 frames...
|
111 |
+
[2024-12-07 14:34:11,542][23820] Decorrelating experience for 0 frames...
|
112 |
+
[2024-12-07 14:34:11,543][23827] Decorrelating experience for 0 frames...
|
113 |
+
[2024-12-07 14:34:11,749][23827] Decorrelating experience for 32 frames...
|
114 |
+
[2024-12-07 14:34:11,749][23824] Decorrelating experience for 32 frames...
|
115 |
+
[2024-12-07 14:34:11,750][23826] Decorrelating experience for 32 frames...
|
116 |
+
[2024-12-07 14:34:11,788][23825] Decorrelating experience for 0 frames...
|
117 |
+
[2024-12-07 14:34:11,791][23823] Decorrelating experience for 0 frames...
|
118 |
+
[2024-12-07 14:34:11,993][23825] Decorrelating experience for 32 frames...
|
119 |
+
[2024-12-07 14:34:11,999][23823] Decorrelating experience for 32 frames...
|
120 |
+
[2024-12-07 14:34:12,004][23827] Decorrelating experience for 64 frames...
|
121 |
+
[2024-12-07 14:34:12,010][23826] Decorrelating experience for 64 frames...
|
122 |
+
[2024-12-07 14:34:12,234][23827] Decorrelating experience for 96 frames...
|
123 |
+
[2024-12-07 14:34:12,248][23825] Decorrelating experience for 64 frames...
|
124 |
+
[2024-12-07 14:34:12,252][23823] Decorrelating experience for 64 frames...
|
125 |
+
[2024-12-07 14:34:12,473][23825] Decorrelating experience for 96 frames...
|
126 |
+
[2024-12-07 14:34:12,507][23824] Decorrelating experience for 64 frames...
|
127 |
+
[2024-12-07 14:34:12,676][23823] Decorrelating experience for 96 frames...
|
128 |
+
[2024-12-07 14:34:12,733][23824] Decorrelating experience for 96 frames...
|
129 |
+
[2024-12-07 14:34:13,342][23806] Signal inference workers to stop experience collection...
|
130 |
+
[2024-12-07 14:34:13,344][23819] InferenceWorker_p0-w0: stopping experience collection
|
131 |
+
[2024-12-07 14:34:14,621][23806] Signal inference workers to resume experience collection...
|
132 |
+
[2024-12-07 14:34:14,621][23819] InferenceWorker_p0-w0: resuming experience collection
|
133 |
+
[2024-12-07 14:34:15,572][19013] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 20480. Throughput: 0: nan. Samples: 2290. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
|
134 |
+
[2024-12-07 14:34:15,573][19013] Avg episode reward: [(0, '3.977')]
|
135 |
+
[2024-12-07 14:34:16,809][23819] Updated weights for policy 0, policy_version 10 (0.0055)
|
136 |
+
[2024-12-07 14:34:19,427][23819] Updated weights for policy 0, policy_version 20 (0.0005)
|
137 |
+
[2024-12-07 14:34:20,572][19013] Fps is (10 sec: 15564.9, 60 sec: 15564.9, 300 sec: 15564.9). Total num frames: 98304. Throughput: 0: 4348.4. Samples: 24032. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
|
138 |
+
[2024-12-07 14:34:20,573][19013] Avg episode reward: [(0, '4.353')]
|
139 |
+
[2024-12-07 14:34:22,008][23819] Updated weights for policy 0, policy_version 30 (0.0005)
|
140 |
+
[2024-12-07 14:34:24,540][23819] Updated weights for policy 0, policy_version 40 (0.0005)
|
141 |
+
[2024-12-07 14:34:25,572][19013] Fps is (10 sec: 15564.9, 60 sec: 15564.9, 300 sec: 15564.9). Total num frames: 176128. Throughput: 0: 3386.4. Samples: 36154. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
|
142 |
+
[2024-12-07 14:34:25,573][19013] Avg episode reward: [(0, '4.445')]
|
143 |
+
[2024-12-07 14:34:25,590][23806] Saving new best policy, reward=4.445!
|
144 |
+
[2024-12-07 14:34:27,136][23819] Updated weights for policy 0, policy_version 50 (0.0005)
|
145 |
+
[2024-12-07 14:34:28,436][19013] Heartbeat connected on Batcher_0
|
146 |
+
[2024-12-07 14:34:28,440][19013] Heartbeat connected on LearnerWorker_p0
|
147 |
+
[2024-12-07 14:34:28,446][19013] Heartbeat connected on InferenceWorker_p0-w0
|
148 |
+
[2024-12-07 14:34:28,458][19013] Heartbeat connected on RolloutWorker_w2
|
149 |
+
[2024-12-07 14:34:28,464][19013] Heartbeat connected on RolloutWorker_w4
|
150 |
+
[2024-12-07 14:34:28,467][19013] Heartbeat connected on RolloutWorker_w5
|
151 |
+
[2024-12-07 14:34:28,474][19013] Heartbeat connected on RolloutWorker_w6
|
152 |
+
[2024-12-07 14:34:29,728][23819] Updated weights for policy 0, policy_version 60 (0.0005)
|
153 |
+
[2024-12-07 14:34:30,572][19013] Fps is (10 sec: 15974.3, 60 sec: 15837.9, 300 sec: 15837.9). Total num frames: 258048. Throughput: 0: 3843.9. Samples: 59948. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
154 |
+
[2024-12-07 14:34:30,573][19013] Avg episode reward: [(0, '4.400')]
|
155 |
+
[2024-12-07 14:34:31,330][23822] Another process currently holds the lock /tmp/sf2_rahatchd/doom_003.lockfile, attempt: 1
|
156 |
+
[2024-12-07 14:34:31,588][23820] Another process currently holds the lock /tmp/sf2_rahatchd/doom_003.lockfile, attempt: 1
|
157 |
+
[2024-12-07 14:34:32,034][23826] Another process currently holds the lock /tmp/sf2_rahatchd/doom_003.lockfile, attempt: 1
|
158 |
+
[2024-12-07 14:34:32,279][23819] Updated weights for policy 0, policy_version 70 (0.0005)
|
159 |
+
[2024-12-07 14:34:34,871][23819] Updated weights for policy 0, policy_version 80 (0.0005)
|
160 |
+
[2024-12-07 14:34:35,572][19013] Fps is (10 sec: 15974.4, 60 sec: 15769.6, 300 sec: 15769.6). Total num frames: 335872. Throughput: 0: 4069.7. Samples: 83684. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
161 |
+
[2024-12-07 14:34:35,573][19013] Avg episode reward: [(0, '4.238')]
|
162 |
+
[2024-12-07 14:34:37,529][23819] Updated weights for policy 0, policy_version 90 (0.0005)
|
163 |
+
[2024-12-07 14:34:40,153][23819] Updated weights for policy 0, policy_version 100 (0.0005)
|
164 |
+
[2024-12-07 14:34:40,258][23826] Decorrelating experience for 96 frames...
|
165 |
+
[2024-12-07 14:34:40,299][19013] Heartbeat connected on RolloutWorker_w7
|
166 |
+
[2024-12-07 14:34:40,520][23822] Decorrelating experience for 0 frames...
|
167 |
+
[2024-12-07 14:34:40,572][19013] Fps is (10 sec: 15564.9, 60 sec: 15728.7, 300 sec: 15728.7). Total num frames: 413696. Throughput: 0: 3723.6. Samples: 95380. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
168 |
+
[2024-12-07 14:34:40,573][19013] Avg episode reward: [(0, '4.623')]
|
169 |
+
[2024-12-07 14:34:40,574][23806] Saving new best policy, reward=4.623!
|
170 |
+
[2024-12-07 14:34:40,767][23820] Decorrelating experience for 32 frames...
|
171 |
+
[2024-12-07 14:34:40,779][23822] Decorrelating experience for 32 frames...
|
172 |
+
[2024-12-07 14:34:41,047][23820] Decorrelating experience for 64 frames...
|
173 |
+
[2024-12-07 14:34:41,089][23822] Decorrelating experience for 64 frames...
|
174 |
+
[2024-12-07 14:34:41,289][23820] Decorrelating experience for 96 frames...
|
175 |
+
[2024-12-07 14:34:41,334][19013] Heartbeat connected on RolloutWorker_w0
|
176 |
+
[2024-12-07 14:34:41,366][23822] Decorrelating experience for 96 frames...
|
177 |
+
[2024-12-07 14:34:41,413][19013] Heartbeat connected on RolloutWorker_w3
|
178 |
+
[2024-12-07 14:34:42,172][23819] Updated weights for policy 0, policy_version 110 (0.0006)
|
179 |
+
[2024-12-07 14:34:43,880][23819] Updated weights for policy 0, policy_version 120 (0.0006)
|
180 |
+
[2024-12-07 14:34:45,572][19013] Fps is (10 sec: 19251.0, 60 sec: 16930.1, 300 sec: 16930.1). Total num frames: 528384. Throughput: 0: 4111.3. Samples: 125628. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
181 |
+
[2024-12-07 14:34:45,573][19013] Avg episode reward: [(0, '4.533')]
|
182 |
+
[2024-12-07 14:34:45,636][23819] Updated weights for policy 0, policy_version 130 (0.0006)
|
183 |
+
[2024-12-07 14:34:47,368][23819] Updated weights for policy 0, policy_version 140 (0.0006)
|
184 |
+
[2024-12-07 14:34:49,090][23819] Updated weights for policy 0, policy_version 150 (0.0006)
|
185 |
+
[2024-12-07 14:34:50,572][19013] Fps is (10 sec: 23347.2, 60 sec: 17905.4, 300 sec: 17905.4). Total num frames: 647168. Throughput: 0: 4548.9. Samples: 161502. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
186 |
+
[2024-12-07 14:34:50,572][19013] Avg episode reward: [(0, '4.887')]
|
187 |
+
[2024-12-07 14:34:50,599][23806] Saving new best policy, reward=4.887!
|
188 |
+
[2024-12-07 14:34:50,759][23819] Updated weights for policy 0, policy_version 160 (0.0006)
|
189 |
+
[2024-12-07 14:34:52,474][23819] Updated weights for policy 0, policy_version 170 (0.0006)
|
190 |
+
[2024-12-07 14:34:54,205][23819] Updated weights for policy 0, policy_version 180 (0.0006)
|
191 |
+
[2024-12-07 14:34:55,572][19013] Fps is (10 sec: 24166.6, 60 sec: 18739.2, 300 sec: 18739.2). Total num frames: 770048. Throughput: 0: 4428.4. Samples: 179426. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
192 |
+
[2024-12-07 14:34:55,573][19013] Avg episode reward: [(0, '5.630')]
|
193 |
+
[2024-12-07 14:34:55,577][23806] Saving new best policy, reward=5.630!
|
194 |
+
[2024-12-07 14:34:55,906][23819] Updated weights for policy 0, policy_version 190 (0.0006)
|
195 |
+
[2024-12-07 14:34:57,612][23819] Updated weights for policy 0, policy_version 200 (0.0005)
|
196 |
+
[2024-12-07 14:34:59,318][23819] Updated weights for policy 0, policy_version 210 (0.0005)
|
197 |
+
[2024-12-07 14:35:00,572][19013] Fps is (10 sec: 24166.4, 60 sec: 19296.7, 300 sec: 19296.7). Total num frames: 888832. Throughput: 0: 4731.9. Samples: 215224. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
198 |
+
[2024-12-07 14:35:00,573][19013] Avg episode reward: [(0, '6.418')]
|
199 |
+
[2024-12-07 14:35:00,574][23806] Saving new best policy, reward=6.418!
|
200 |
+
[2024-12-07 14:35:01,062][23819] Updated weights for policy 0, policy_version 220 (0.0005)
|
201 |
+
[2024-12-07 14:35:02,771][23819] Updated weights for policy 0, policy_version 230 (0.0005)
|
202 |
+
[2024-12-07 14:35:04,449][23819] Updated weights for policy 0, policy_version 240 (0.0006)
|
203 |
+
[2024-12-07 14:35:05,572][19013] Fps is (10 sec: 23756.6, 60 sec: 19742.7, 300 sec: 19742.7). Total num frames: 1007616. Throughput: 0: 5050.4. Samples: 251302. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
204 |
+
[2024-12-07 14:35:05,573][19013] Avg episode reward: [(0, '6.368')]
|
205 |
+
[2024-12-07 14:35:06,143][23819] Updated weights for policy 0, policy_version 250 (0.0005)
|
206 |
+
[2024-12-07 14:35:07,805][23819] Updated weights for policy 0, policy_version 260 (0.0005)
|
207 |
+
[2024-12-07 14:35:09,486][23819] Updated weights for policy 0, policy_version 270 (0.0006)
|
208 |
+
[2024-12-07 14:35:10,572][19013] Fps is (10 sec: 24166.5, 60 sec: 20182.1, 300 sec: 20182.1). Total num frames: 1130496. Throughput: 0: 5188.9. Samples: 269656. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
209 |
+
[2024-12-07 14:35:10,572][19013] Avg episode reward: [(0, '6.861')]
|
210 |
+
[2024-12-07 14:35:10,574][23806] Saving new best policy, reward=6.861!
|
211 |
+
[2024-12-07 14:35:11,214][23819] Updated weights for policy 0, policy_version 280 (0.0006)
|
212 |
+
[2024-12-07 14:35:12,978][23819] Updated weights for policy 0, policy_version 290 (0.0006)
|
213 |
+
[2024-12-07 14:35:14,732][23819] Updated weights for policy 0, policy_version 300 (0.0005)
|
214 |
+
[2024-12-07 14:35:15,572][19013] Fps is (10 sec: 23756.7, 60 sec: 20411.7, 300 sec: 20411.7). Total num frames: 1245184. Throughput: 0: 5446.4. Samples: 305038. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
215 |
+
[2024-12-07 14:35:15,573][19013] Avg episode reward: [(0, '10.907')]
|
216 |
+
[2024-12-07 14:35:15,601][23806] Saving new best policy, reward=10.907!
|
217 |
+
[2024-12-07 14:35:16,460][23819] Updated weights for policy 0, policy_version 310 (0.0006)
|
218 |
+
[2024-12-07 14:35:18,153][23819] Updated weights for policy 0, policy_version 320 (0.0005)
|
219 |
+
[2024-12-07 14:35:19,869][23819] Updated weights for policy 0, policy_version 330 (0.0006)
|
220 |
+
[2024-12-07 14:35:20,572][19013] Fps is (10 sec: 23756.7, 60 sec: 21162.7, 300 sec: 20732.1). Total num frames: 1368064. Throughput: 0: 5715.3. Samples: 340872. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
221 |
+
[2024-12-07 14:35:20,573][19013] Avg episode reward: [(0, '10.922')]
|
222 |
+
[2024-12-07 14:35:20,574][23806] Saving new best policy, reward=10.922!
|
223 |
+
[2024-12-07 14:35:21,588][23819] Updated weights for policy 0, policy_version 340 (0.0005)
|
224 |
+
[2024-12-07 14:35:23,313][23819] Updated weights for policy 0, policy_version 350 (0.0006)
|
225 |
+
[2024-12-07 14:35:24,968][23819] Updated weights for policy 0, policy_version 360 (0.0005)
|
226 |
+
[2024-12-07 14:35:25,572][19013] Fps is (10 sec: 24166.6, 60 sec: 21845.3, 300 sec: 20948.1). Total num frames: 1486848. Throughput: 0: 5854.0. Samples: 358812. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
227 |
+
[2024-12-07 14:35:25,573][19013] Avg episode reward: [(0, '14.171')]
|
228 |
+
[2024-12-07 14:35:25,576][23806] Saving new best policy, reward=14.171!
|
229 |
+
[2024-12-07 14:35:26,667][23819] Updated weights for policy 0, policy_version 370 (0.0006)
|
230 |
+
[2024-12-07 14:35:28,436][23819] Updated weights for policy 0, policy_version 380 (0.0006)
|
231 |
+
[2024-12-07 14:35:30,136][23819] Updated weights for policy 0, policy_version 390 (0.0006)
|
232 |
+
[2024-12-07 14:35:30,572][19013] Fps is (10 sec: 23756.7, 60 sec: 22459.7, 300 sec: 21135.3). Total num frames: 1605632. Throughput: 0: 5982.7. Samples: 394850. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
233 |
+
[2024-12-07 14:35:30,573][19013] Avg episode reward: [(0, '15.187')]
|
234 |
+
[2024-12-07 14:35:30,574][23806] Saving new best policy, reward=15.187!
|
235 |
+
[2024-12-07 14:35:31,846][23819] Updated weights for policy 0, policy_version 400 (0.0006)
|
236 |
+
[2024-12-07 14:35:33,531][23819] Updated weights for policy 0, policy_version 410 (0.0006)
|
237 |
+
[2024-12-07 14:35:35,243][23819] Updated weights for policy 0, policy_version 420 (0.0006)
|
238 |
+
[2024-12-07 14:35:35,572][19013] Fps is (10 sec: 23756.8, 60 sec: 23142.4, 300 sec: 21299.2). Total num frames: 1724416. Throughput: 0: 5984.0. Samples: 430784. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
|
239 |
+
[2024-12-07 14:35:35,573][19013] Avg episode reward: [(0, '17.358')]
|
240 |
+
[2024-12-07 14:35:35,580][23806] Saving new best policy, reward=17.358!
|
241 |
+
[2024-12-07 14:35:36,966][23819] Updated weights for policy 0, policy_version 430 (0.0006)
|
242 |
+
[2024-12-07 14:35:38,650][23819] Updated weights for policy 0, policy_version 440 (0.0006)
|
243 |
+
[2024-12-07 14:35:40,348][23819] Updated weights for policy 0, policy_version 450 (0.0006)
|
244 |
+
[2024-12-07 14:35:40,572][19013] Fps is (10 sec: 24166.6, 60 sec: 23893.3, 300 sec: 21492.0). Total num frames: 1847296. Throughput: 0: 5988.3. Samples: 448900. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
245 |
+
[2024-12-07 14:35:40,573][19013] Avg episode reward: [(0, '14.961')]
|
246 |
+
[2024-12-07 14:35:42,052][23819] Updated weights for policy 0, policy_version 460 (0.0006)
|
247 |
+
[2024-12-07 14:35:43,761][23819] Updated weights for policy 0, policy_version 470 (0.0006)
|
248 |
+
[2024-12-07 14:35:45,477][23819] Updated weights for policy 0, policy_version 480 (0.0005)
|
249 |
+
[2024-12-07 14:35:45,572][19013] Fps is (10 sec: 24166.5, 60 sec: 23961.6, 300 sec: 21617.8). Total num frames: 1966080. Throughput: 0: 5992.3. Samples: 484878. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
250 |
+
[2024-12-07 14:35:45,572][19013] Avg episode reward: [(0, '17.778')]
|
251 |
+
[2024-12-07 14:35:45,574][23806] Saving new best policy, reward=17.778!
|
252 |
+
[2024-12-07 14:35:47,212][23819] Updated weights for policy 0, policy_version 490 (0.0006)
|
253 |
+
[2024-12-07 14:35:48,912][23819] Updated weights for policy 0, policy_version 500 (0.0006)
|
254 |
+
[2024-12-07 14:35:50,572][19013] Fps is (10 sec: 23756.7, 60 sec: 23961.6, 300 sec: 21730.4). Total num frames: 2084864. Throughput: 0: 5987.8. Samples: 520754. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
255 |
+
[2024-12-07 14:35:50,573][19013] Avg episode reward: [(0, '17.858')]
|
256 |
+
[2024-12-07 14:35:50,593][23806] Saving new best policy, reward=17.858!
|
257 |
+
[2024-12-07 14:35:50,593][23819] Updated weights for policy 0, policy_version 510 (0.0005)
|
258 |
+
[2024-12-07 14:35:52,338][23819] Updated weights for policy 0, policy_version 520 (0.0006)
|
259 |
+
[2024-12-07 14:35:54,023][23819] Updated weights for policy 0, policy_version 530 (0.0005)
|
260 |
+
[2024-12-07 14:35:55,572][19013] Fps is (10 sec: 24166.2, 60 sec: 23961.6, 300 sec: 21872.6). Total num frames: 2207744. Throughput: 0: 5980.4. Samples: 538776. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
261 |
+
[2024-12-07 14:35:55,573][19013] Avg episode reward: [(0, '18.651')]
|
262 |
+
[2024-12-07 14:35:55,577][23806] Saving new best policy, reward=18.651!
|
263 |
+
[2024-12-07 14:35:55,691][23819] Updated weights for policy 0, policy_version 540 (0.0005)
|
264 |
+
[2024-12-07 14:35:57,355][23819] Updated weights for policy 0, policy_version 550 (0.0005)
|
265 |
+
[2024-12-07 14:35:59,071][23819] Updated weights for policy 0, policy_version 560 (0.0006)
|
266 |
+
[2024-12-07 14:36:00,572][19013] Fps is (10 sec: 24166.4, 60 sec: 23961.6, 300 sec: 21962.4). Total num frames: 2326528. Throughput: 0: 6000.2. Samples: 575048. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
267 |
+
[2024-12-07 14:36:00,573][19013] Avg episode reward: [(0, '20.626')]
|
268 |
+
[2024-12-07 14:36:00,574][23806] Saving new best policy, reward=20.626!
|
269 |
+
[2024-12-07 14:36:00,803][23819] Updated weights for policy 0, policy_version 570 (0.0005)
|
270 |
+
[2024-12-07 14:36:02,587][23819] Updated weights for policy 0, policy_version 580 (0.0006)
|
271 |
+
[2024-12-07 14:36:04,280][23819] Updated weights for policy 0, policy_version 590 (0.0006)
|
272 |
+
[2024-12-07 14:36:05,572][19013] Fps is (10 sec: 23756.9, 60 sec: 23961.6, 300 sec: 22043.9). Total num frames: 2445312. Throughput: 0: 5993.6. Samples: 610586. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
273 |
+
[2024-12-07 14:36:05,573][19013] Avg episode reward: [(0, '19.659')]
|
274 |
+
[2024-12-07 14:36:05,578][23806] Saving /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000597_2445312.pth...
|
275 |
+
[2024-12-07 14:36:06,010][23819] Updated weights for policy 0, policy_version 600 (0.0006)
|
276 |
+
[2024-12-07 14:36:07,722][23819] Updated weights for policy 0, policy_version 610 (0.0006)
|
277 |
+
[2024-12-07 14:36:09,485][23819] Updated weights for policy 0, policy_version 620 (0.0006)
|
278 |
+
[2024-12-07 14:36:10,572][19013] Fps is (10 sec: 23756.8, 60 sec: 23893.3, 300 sec: 22118.4). Total num frames: 2564096. Throughput: 0: 5989.9. Samples: 628358. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
|
279 |
+
[2024-12-07 14:36:10,573][19013] Avg episode reward: [(0, '20.821')]
|
280 |
+
[2024-12-07 14:36:10,573][23806] Saving new best policy, reward=20.821!
|
281 |
+
[2024-12-07 14:36:11,223][23819] Updated weights for policy 0, policy_version 630 (0.0006)
|
282 |
+
[2024-12-07 14:36:12,900][23819] Updated weights for policy 0, policy_version 640 (0.0005)
|
283 |
+
[2024-12-07 14:36:14,589][23819] Updated weights for policy 0, policy_version 650 (0.0005)
|
284 |
+
[2024-12-07 14:36:15,572][19013] Fps is (10 sec: 23756.7, 60 sec: 23961.6, 300 sec: 22186.7). Total num frames: 2682880. Throughput: 0: 5983.3. Samples: 664100. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
285 |
+
[2024-12-07 14:36:15,573][19013] Avg episode reward: [(0, '25.818')]
|
286 |
+
[2024-12-07 14:36:15,599][23806] Saving new best policy, reward=25.818!
|
287 |
+
[2024-12-07 14:36:16,313][23819] Updated weights for policy 0, policy_version 660 (0.0006)
|
288 |
+
[2024-12-07 14:36:18,010][23819] Updated weights for policy 0, policy_version 670 (0.0006)
|
289 |
+
[2024-12-07 14:36:19,721][23819] Updated weights for policy 0, policy_version 680 (0.0006)
|
290 |
+
[2024-12-07 14:36:20,572][19013] Fps is (10 sec: 23756.9, 60 sec: 23893.4, 300 sec: 22249.5). Total num frames: 2801664. Throughput: 0: 5985.2. Samples: 700118. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
291 |
+
[2024-12-07 14:36:20,573][19013] Avg episode reward: [(0, '23.182')]
|
292 |
+
[2024-12-07 14:36:21,452][23819] Updated weights for policy 0, policy_version 690 (0.0006)
|
293 |
+
[2024-12-07 14:36:23,178][23819] Updated weights for policy 0, policy_version 700 (0.0006)
|
294 |
+
[2024-12-07 14:36:24,922][23819] Updated weights for policy 0, policy_version 710 (0.0006)
|
295 |
+
[2024-12-07 14:36:25,572][19013] Fps is (10 sec: 24166.6, 60 sec: 23961.6, 300 sec: 22339.0). Total num frames: 2924544. Throughput: 0: 5977.3. Samples: 717878. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
296 |
+
[2024-12-07 14:36:25,572][19013] Avg episode reward: [(0, '25.883')]
|
297 |
+
[2024-12-07 14:36:25,577][23806] Saving new best policy, reward=25.883!
|
298 |
+
[2024-12-07 14:36:26,623][23819] Updated weights for policy 0, policy_version 720 (0.0006)
|
299 |
+
[2024-12-07 14:36:28,373][23819] Updated weights for policy 0, policy_version 730 (0.0006)
|
300 |
+
[2024-12-07 14:36:30,077][23819] Updated weights for policy 0, policy_version 740 (0.0006)
|
301 |
+
[2024-12-07 14:36:30,572][19013] Fps is (10 sec: 24166.4, 60 sec: 23961.6, 300 sec: 22391.5). Total num frames: 3043328. Throughput: 0: 5966.4. Samples: 753368. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
302 |
+
[2024-12-07 14:36:30,573][19013] Avg episode reward: [(0, '24.200')]
|
303 |
+
[2024-12-07 14:36:31,784][23819] Updated weights for policy 0, policy_version 750 (0.0006)
|
304 |
+
[2024-12-07 14:36:33,524][23819] Updated weights for policy 0, policy_version 760 (0.0006)
|
305 |
+
[2024-12-07 14:36:35,223][23819] Updated weights for policy 0, policy_version 770 (0.0006)
|
306 |
+
[2024-12-07 14:36:35,572][19013] Fps is (10 sec: 23346.9, 60 sec: 23893.3, 300 sec: 22411.0). Total num frames: 3158016. Throughput: 0: 5966.9. Samples: 789264. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
307 |
+
[2024-12-07 14:36:35,573][19013] Avg episode reward: [(0, '27.345')]
|
308 |
+
[2024-12-07 14:36:35,584][23806] Saving new best policy, reward=27.345!
|
309 |
+
[2024-12-07 14:36:36,970][23819] Updated weights for policy 0, policy_version 780 (0.0006)
|
310 |
+
[2024-12-07 14:36:38,637][23819] Updated weights for policy 0, policy_version 790 (0.0005)
|
311 |
+
[2024-12-07 14:36:40,311][23819] Updated weights for policy 0, policy_version 800 (0.0005)
|
312 |
+
[2024-12-07 14:36:40,572][19013] Fps is (10 sec: 23756.6, 60 sec: 23893.3, 300 sec: 22485.6). Total num frames: 3280896. Throughput: 0: 5969.2. Samples: 807390. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
313 |
+
[2024-12-07 14:36:40,573][19013] Avg episode reward: [(0, '24.428')]
|
314 |
+
[2024-12-07 14:36:41,986][23819] Updated weights for policy 0, policy_version 810 (0.0005)
|
315 |
+
[2024-12-07 14:36:43,711][23819] Updated weights for policy 0, policy_version 820 (0.0006)
|
316 |
+
[2024-12-07 14:36:45,438][23819] Updated weights for policy 0, policy_version 830 (0.0006)
|
317 |
+
[2024-12-07 14:36:45,572][19013] Fps is (10 sec: 24166.6, 60 sec: 23893.3, 300 sec: 22528.0). Total num frames: 3399680. Throughput: 0: 5968.0. Samples: 843610. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
318 |
+
[2024-12-07 14:36:45,573][19013] Avg episode reward: [(0, '26.352')]
|
319 |
+
[2024-12-07 14:36:47,139][23819] Updated weights for policy 0, policy_version 840 (0.0006)
|
320 |
+
[2024-12-07 14:36:48,865][23819] Updated weights for policy 0, policy_version 850 (0.0006)
|
321 |
+
[2024-12-07 14:36:50,541][23819] Updated weights for policy 0, policy_version 860 (0.0006)
|
322 |
+
[2024-12-07 14:36:50,572][19013] Fps is (10 sec: 24166.3, 60 sec: 23961.6, 300 sec: 22594.1). Total num frames: 3522560. Throughput: 0: 5975.9. Samples: 879504. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
323 |
+
[2024-12-07 14:36:50,573][19013] Avg episode reward: [(0, '25.960')]
|
324 |
+
[2024-12-07 14:36:52,301][23819] Updated weights for policy 0, policy_version 870 (0.0005)
|
325 |
+
[2024-12-07 14:36:54,003][23819] Updated weights for policy 0, policy_version 880 (0.0006)
|
326 |
+
[2024-12-07 14:36:55,572][19013] Fps is (10 sec: 24166.6, 60 sec: 23893.4, 300 sec: 22630.4). Total num frames: 3641344. Throughput: 0: 5978.9. Samples: 897410. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
|
327 |
+
[2024-12-07 14:36:55,572][19013] Avg episode reward: [(0, '26.897')]
|
328 |
+
[2024-12-07 14:36:55,723][23819] Updated weights for policy 0, policy_version 890 (0.0006)
|
329 |
+
[2024-12-07 14:36:57,435][23819] Updated weights for policy 0, policy_version 900 (0.0006)
|
330 |
+
[2024-12-07 14:36:59,172][23819] Updated weights for policy 0, policy_version 910 (0.0005)
|
331 |
+
[2024-12-07 14:37:00,572][19013] Fps is (10 sec: 23756.8, 60 sec: 23893.3, 300 sec: 22664.5). Total num frames: 3760128. Throughput: 0: 5978.4. Samples: 933130. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
|
332 |
+
[2024-12-07 14:37:00,573][19013] Avg episode reward: [(0, '25.898')]
|
333 |
+
[2024-12-07 14:37:00,844][23819] Updated weights for policy 0, policy_version 920 (0.0005)
|
334 |
+
[2024-12-07 14:37:02,559][23819] Updated weights for policy 0, policy_version 930 (0.0006)
|
335 |
+
[2024-12-07 14:37:04,287][23819] Updated weights for policy 0, policy_version 940 (0.0006)
|
336 |
+
[2024-12-07 14:37:05,572][19013] Fps is (10 sec: 23756.4, 60 sec: 23893.3, 300 sec: 22696.6). Total num frames: 3878912. Throughput: 0: 5973.1. Samples: 968908. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
337 |
+
[2024-12-07 14:37:05,573][19013] Avg episode reward: [(0, '26.120')]
|
338 |
+
[2024-12-07 14:37:06,038][23819] Updated weights for policy 0, policy_version 950 (0.0006)
|
339 |
+
[2024-12-07 14:37:07,756][23819] Updated weights for policy 0, policy_version 960 (0.0006)
|
340 |
+
[2024-12-07 14:37:09,463][23819] Updated weights for policy 0, policy_version 970 (0.0005)
|
341 |
+
[2024-12-07 14:37:10,572][19013] Fps is (10 sec: 23757.1, 60 sec: 23893.4, 300 sec: 22727.0). Total num frames: 3997696. Throughput: 0: 5973.8. Samples: 986698. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
342 |
+
[2024-12-07 14:37:10,573][19013] Avg episode reward: [(0, '25.009')]
|
343 |
+
[2024-12-07 14:37:10,814][23806] Stopping Batcher_0...
|
344 |
+
[2024-12-07 14:37:10,814][23806] Saving /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
345 |
+
[2024-12-07 14:37:10,814][19013] Component Batcher_0 stopped!
|
346 |
+
[2024-12-07 14:37:10,816][19013] Component RolloutWorker_w1 process died already! Don't wait for it.
|
347 |
+
[2024-12-07 14:37:10,815][23806] Loop batcher_evt_loop terminating...
|
348 |
+
[2024-12-07 14:37:10,832][23819] Weights refcount: 2 0
|
349 |
+
[2024-12-07 14:37:10,833][23819] Stopping InferenceWorker_p0-w0...
|
350 |
+
[2024-12-07 14:37:10,833][23819] Loop inference_proc0-0_evt_loop terminating...
|
351 |
+
[2024-12-07 14:37:10,833][19013] Component InferenceWorker_p0-w0 stopped!
|
352 |
+
[2024-12-07 14:37:10,849][23824] Stopping RolloutWorker_w5...
|
353 |
+
[2024-12-07 14:37:10,850][23824] Loop rollout_proc5_evt_loop terminating...
|
354 |
+
[2024-12-07 14:37:10,849][19013] Component RolloutWorker_w5 stopped!
|
355 |
+
[2024-12-07 14:37:10,851][23823] Stopping RolloutWorker_w4...
|
356 |
+
[2024-12-07 14:37:10,851][19013] Component RolloutWorker_w4 stopped!
|
357 |
+
[2024-12-07 14:37:10,851][23820] Stopping RolloutWorker_w0...
|
358 |
+
[2024-12-07 14:37:10,851][23823] Loop rollout_proc4_evt_loop terminating...
|
359 |
+
[2024-12-07 14:37:10,851][23820] Loop rollout_proc0_evt_loop terminating...
|
360 |
+
[2024-12-07 14:37:10,851][19013] Component RolloutWorker_w0 stopped!
|
361 |
+
[2024-12-07 14:37:10,851][23826] Stopping RolloutWorker_w7...
|
362 |
+
[2024-12-07 14:37:10,852][23826] Loop rollout_proc7_evt_loop terminating...
|
363 |
+
[2024-12-07 14:37:10,852][19013] Component RolloutWorker_w7 stopped!
|
364 |
+
[2024-12-07 14:37:10,852][23827] Stopping RolloutWorker_w6...
|
365 |
+
[2024-12-07 14:37:10,852][23822] Stopping RolloutWorker_w3...
|
366 |
+
[2024-12-07 14:37:10,853][23822] Loop rollout_proc3_evt_loop terminating...
|
367 |
+
[2024-12-07 14:37:10,853][23827] Loop rollout_proc6_evt_loop terminating...
|
368 |
+
[2024-12-07 14:37:10,852][19013] Component RolloutWorker_w6 stopped!
|
369 |
+
[2024-12-07 14:37:10,853][19013] Component RolloutWorker_w3 stopped!
|
370 |
+
[2024-12-07 14:37:10,854][23825] Stopping RolloutWorker_w2...
|
371 |
+
[2024-12-07 14:37:10,854][23825] Loop rollout_proc2_evt_loop terminating...
|
372 |
+
[2024-12-07 14:37:10,854][19013] Component RolloutWorker_w2 stopped!
|
373 |
+
[2024-12-07 14:37:10,869][23806] Saving /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
374 |
+
[2024-12-07 14:37:10,939][23806] Stopping LearnerWorker_p0...
|
375 |
+
[2024-12-07 14:37:10,939][23806] Loop learner_proc0_evt_loop terminating...
|
376 |
+
[2024-12-07 14:37:10,939][19013] Component LearnerWorker_p0 stopped!
|
377 |
+
[2024-12-07 14:37:10,940][19013] Waiting for process learner_proc0 to stop...
|
378 |
+
[2024-12-07 14:37:11,717][19013] Waiting for process inference_proc0-0 to join...
|
379 |
+
[2024-12-07 14:37:11,718][19013] Waiting for process rollout_proc0 to join...
|
380 |
+
[2024-12-07 14:37:11,718][19013] Waiting for process rollout_proc1 to join...
|
381 |
+
[2024-12-07 14:37:11,719][19013] Waiting for process rollout_proc2 to join...
|
382 |
+
[2024-12-07 14:37:11,720][19013] Waiting for process rollout_proc3 to join...
|
383 |
+
[2024-12-07 14:37:11,721][19013] Waiting for process rollout_proc4 to join...
|
384 |
+
[2024-12-07 14:37:11,722][19013] Waiting for process rollout_proc5 to join...
|
385 |
+
[2024-12-07 14:37:11,723][19013] Waiting for process rollout_proc6 to join...
|
386 |
+
[2024-12-07 14:37:11,724][19013] Waiting for process rollout_proc7 to join...
|
387 |
+
[2024-12-07 14:37:11,725][19013] Batcher 0 profile tree view:
|
388 |
+
batching: 10.5578, releasing_batches: 0.0212
|
389 |
+
[2024-12-07 14:37:11,726][19013] InferenceWorker_p0-w0 profile tree view:
|
390 |
+
wait_policy: 0.0000
|
391 |
+
wait_policy_total: 4.8273
|
392 |
+
update_model: 2.0921
|
393 |
+
weight_update: 0.0005
|
394 |
+
one_step: 0.0017
|
395 |
+
handle_policy_step: 163.6695
|
396 |
+
deserialize: 5.4997, stack: 0.7929, obs_to_device_normalize: 41.8845, forward: 72.2956, send_messages: 7.4725
|
397 |
+
prepare_outputs: 29.4647
|
398 |
+
to_cpu: 21.6545
|
399 |
+
[2024-12-07 14:37:11,726][19013] Learner 0 profile tree view:
|
400 |
+
misc: 0.0039, prepare_batch: 7.2401
|
401 |
+
train: 26.4145
|
402 |
+
epoch_init: 0.0037, minibatch_init: 0.0042, losses_postprocess: 0.3988, kl_divergence: 0.3982, after_optimizer: 9.2403
|
403 |
+
calculate_losses: 9.3937
|
404 |
+
losses_init: 0.0027, forward_head: 0.5595, bptt_initial: 6.1467, tail: 0.4752, advantages_returns: 0.1337, losses: 0.9484
|
405 |
+
bptt: 0.9985
|
406 |
+
bptt_forward_core: 0.9580
|
407 |
+
update: 6.6839
|
408 |
+
clip: 0.6204
|
409 |
+
[2024-12-07 14:37:11,727][19013] RolloutWorker_w0 profile tree view:
|
410 |
+
wait_for_trajectories: 0.1300, enqueue_policy_requests: 8.3049, env_step: 101.9303, overhead: 4.5937, complete_rollouts: 0.1726
|
411 |
+
save_policy_outputs: 8.8612
|
412 |
+
split_output_tensors: 2.9795
|
413 |
+
[2024-12-07 14:37:11,728][19013] RolloutWorker_w7 profile tree view:
|
414 |
+
wait_for_trajectories: 0.1243, enqueue_policy_requests: 8.2736, env_step: 100.7709, overhead: 4.5028, complete_rollouts: 0.1745
|
415 |
+
save_policy_outputs: 8.7132
|
416 |
+
split_output_tensors: 2.9265
|
417 |
+
[2024-12-07 14:37:11,729][19013] Loop Runner_EvtLoop terminating...
|
418 |
+
[2024-12-07 14:37:11,730][19013] Runner profile tree view:
|
419 |
+
main_loop: 183.2567
|
420 |
+
[2024-12-07 14:37:11,732][19013] Collected {0: 4005888}, FPS: 21859.4
|
421 |
+
[2024-12-07 14:39:41,500][19013] Loading existing experiment configuration from /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/config.json
|
422 |
+
[2024-12-07 14:39:41,500][19013] Overriding arg 'num_workers' with value 1 passed from command line
|
423 |
+
[2024-12-07 14:39:41,501][19013] Adding new argument 'no_render'=True that is not in the saved config file!
|
424 |
+
[2024-12-07 14:39:41,501][19013] Adding new argument 'save_video'=True that is not in the saved config file!
|
425 |
+
[2024-12-07 14:39:41,502][19013] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
|
426 |
+
[2024-12-07 14:39:41,502][19013] Adding new argument 'video_name'=None that is not in the saved config file!
|
427 |
+
[2024-12-07 14:39:41,503][19013] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
|
428 |
+
[2024-12-07 14:39:41,504][19013] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
|
429 |
+
[2024-12-07 14:39:41,504][19013] Adding new argument 'push_to_hub'=False that is not in the saved config file!
|
430 |
+
[2024-12-07 14:39:41,504][19013] Adding new argument 'hf_repository'=None that is not in the saved config file!
|
431 |
+
[2024-12-07 14:39:41,505][19013] Adding new argument 'policy_index'=0 that is not in the saved config file!
|
432 |
+
[2024-12-07 14:39:41,505][19013] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
|
433 |
+
[2024-12-07 14:39:41,505][19013] Adding new argument 'train_script'=None that is not in the saved config file!
|
434 |
+
[2024-12-07 14:39:41,506][19013] Adding new argument 'enjoy_script'=None that is not in the saved config file!
|
435 |
+
[2024-12-07 14:39:41,506][19013] Using frameskip 1 and render_action_repeat=4 for evaluation
|
436 |
+
[2024-12-07 14:39:41,526][19013] Doom resolution: 160x120, resize resolution: (128, 72)
|
437 |
+
[2024-12-07 14:39:41,528][19013] RunningMeanStd input shape: (3, 72, 128)
|
438 |
+
[2024-12-07 14:39:41,529][19013] RunningMeanStd input shape: (1,)
|
439 |
+
[2024-12-07 14:39:41,535][19013] ConvEncoder: input_channels=3
|
440 |
+
[2024-12-07 14:39:41,599][19013] Conv encoder output size: 512
|
441 |
+
[2024-12-07 14:39:41,599][19013] Policy head output size: 512
|
442 |
+
[2024-12-07 14:39:41,735][19013] Loading state from checkpoint /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
443 |
+
[2024-12-07 14:39:42,277][19013] Num frames 100...
|
444 |
+
[2024-12-07 14:39:42,353][19013] Num frames 200...
|
445 |
+
[2024-12-07 14:39:42,433][19013] Num frames 300...
|
446 |
+
[2024-12-07 14:39:42,515][19013] Num frames 400...
|
447 |
+
[2024-12-07 14:39:42,589][19013] Num frames 500...
|
448 |
+
[2024-12-07 14:39:42,665][19013] Num frames 600...
|
449 |
+
[2024-12-07 14:39:42,741][19013] Num frames 700...
|
450 |
+
[2024-12-07 14:39:42,823][19013] Num frames 800...
|
451 |
+
[2024-12-07 14:39:42,906][19013] Num frames 900...
|
452 |
+
[2024-12-07 14:39:43,035][19013] Avg episode rewards: #0: 19.920, true rewards: #0: 9.920
|
453 |
+
[2024-12-07 14:39:43,036][19013] Avg episode reward: 19.920, avg true_objective: 9.920
|
454 |
+
[2024-12-07 14:39:43,045][19013] Num frames 1000...
[2024-12-07 14:39:43,164][19013] Num frames 1100...
[2024-12-07 14:39:43,256][19013] Num frames 1200...
[2024-12-07 14:39:43,336][19013] Num frames 1300...
[2024-12-07 14:39:43,419][19013] Num frames 1400...
[2024-12-07 14:39:43,499][19013] Num frames 1500...
[2024-12-07 14:39:43,586][19013] Num frames 1600...
[2024-12-07 14:39:43,674][19013] Num frames 1700...
[2024-12-07 14:39:43,760][19013] Num frames 1800...
[2024-12-07 14:39:43,842][19013] Num frames 1900...
[2024-12-07 14:39:43,924][19013] Num frames 2000...
[2024-12-07 14:39:44,007][19013] Num frames 2100...
[2024-12-07 14:39:44,104][19013] Avg episode rewards: #0: 22.220, true rewards: #0: 10.720
[2024-12-07 14:39:44,105][19013] Avg episode reward: 22.220, avg true_objective: 10.720
[2024-12-07 14:39:44,179][19013] Num frames 2200...
[2024-12-07 14:39:44,285][19013] Num frames 2300...
[2024-12-07 14:39:44,370][19013] Num frames 2400...
[2024-12-07 14:39:44,450][19013] Num frames 2500...
[2024-12-07 14:39:44,532][19013] Num frames 2600...
[2024-12-07 14:39:44,613][19013] Num frames 2700...
[2024-12-07 14:39:44,694][19013] Num frames 2800...
[2024-12-07 14:39:44,778][19013] Num frames 2900...
[2024-12-07 14:39:44,859][19013] Num frames 3000...
[2024-12-07 14:39:44,941][19013] Num frames 3100...
[2024-12-07 14:39:45,018][19013] Num frames 3200...
[2024-12-07 14:39:45,085][19013] Num frames 3300...
[2024-12-07 14:39:45,154][19013] Num frames 3400...
[2024-12-07 14:39:45,222][19013] Num frames 3500...
[2024-12-07 14:39:45,292][19013] Num frames 3600...
[2024-12-07 14:39:45,360][19013] Num frames 3700...
[2024-12-07 14:39:45,430][19013] Num frames 3800...
[2024-12-07 14:39:45,500][19013] Num frames 3900...
[2024-12-07 14:39:45,569][19013] Num frames 4000...
[2024-12-07 14:39:45,640][19013] Num frames 4100...
[2024-12-07 14:39:45,710][19013] Num frames 4200...
[2024-12-07 14:39:45,794][19013] Avg episode rewards: #0: 35.480, true rewards: #0: 14.147
[2024-12-07 14:39:45,794][19013] Avg episode reward: 35.480, avg true_objective: 14.147
[2024-12-07 14:39:45,861][19013] Num frames 4300...
[2024-12-07 14:39:45,944][19013] Num frames 4400...
[2024-12-07 14:39:46,013][19013] Num frames 4500...
[2024-12-07 14:39:46,083][19013] Num frames 4600...
[2024-12-07 14:39:46,152][19013] Num frames 4700...
[2024-12-07 14:39:46,222][19013] Num frames 4800...
[2024-12-07 14:39:46,291][19013] Num frames 4900...
[2024-12-07 14:39:46,361][19013] Num frames 5000...
[2024-12-07 14:39:46,430][19013] Num frames 5100...
[2024-12-07 14:39:46,519][19013] Avg episode rewards: #0: 32.100, true rewards: #0: 12.850
[2024-12-07 14:39:46,520][19013] Avg episode reward: 32.100, avg true_objective: 12.850
[2024-12-07 14:39:46,584][19013] Num frames 5200...
[2024-12-07 14:39:46,691][19013] Num frames 5300...
[2024-12-07 14:39:46,770][19013] Num frames 5400...
[2024-12-07 14:39:46,852][19013] Num frames 5500...
[2024-12-07 14:39:46,979][19013] Avg episode rewards: #0: 27.376, true rewards: #0: 11.176
[2024-12-07 14:39:46,980][19013] Avg episode reward: 27.376, avg true_objective: 11.176
[2024-12-07 14:39:46,996][19013] Num frames 5600...
[2024-12-07 14:39:47,115][19013] Num frames 5700...
[2024-12-07 14:39:47,210][19013] Num frames 5800...
[2024-12-07 14:39:47,292][19013] Num frames 5900...
[2024-12-07 14:39:47,373][19013] Num frames 6000...
[2024-12-07 14:39:47,455][19013] Num frames 6100...
[2024-12-07 14:39:47,535][19013] Num frames 6200...
[2024-12-07 14:39:47,615][19013] Num frames 6300...
[2024-12-07 14:39:47,697][19013] Num frames 6400...
[2024-12-07 14:39:47,793][19013] Avg episode rewards: #0: 25.587, true rewards: #0: 10.753
[2024-12-07 14:39:47,794][19013] Avg episode reward: 25.587, avg true_objective: 10.753
[2024-12-07 14:39:47,853][19013] Num frames 6500...
[2024-12-07 14:39:47,966][19013] Num frames 6600...
[2024-12-07 14:39:48,057][19013] Num frames 6700...
[2024-12-07 14:39:48,137][19013] Num frames 6800...
[2024-12-07 14:39:48,216][19013] Num frames 6900...
[2024-12-07 14:39:48,292][19013] Num frames 7000...
[2024-12-07 14:39:48,385][19013] Avg episode rewards: #0: 23.228, true rewards: #0: 10.086
[2024-12-07 14:39:48,386][19013] Avg episode reward: 23.228, avg true_objective: 10.086
[2024-12-07 14:39:48,438][19013] Num frames 7100...
[2024-12-07 14:39:48,539][19013] Num frames 7200...
[2024-12-07 14:39:48,609][19013] Num frames 7300...
[2024-12-07 14:39:48,679][19013] Num frames 7400...
[2024-12-07 14:39:48,749][19013] Num frames 7500...
[2024-12-07 14:39:48,820][19013] Num frames 7600...
[2024-12-07 14:39:48,897][19013] Avg episode rewards: #0: 21.545, true rewards: #0: 9.545
[2024-12-07 14:39:48,898][19013] Avg episode reward: 21.545, avg true_objective: 9.545
[2024-12-07 14:39:48,972][19013] Num frames 7700...
[2024-12-07 14:39:49,056][19013] Num frames 7800...
[2024-12-07 14:39:49,125][19013] Num frames 7900...
[2024-12-07 14:39:49,194][19013] Num frames 8000...
[2024-12-07 14:39:49,284][19013] Avg episode rewards: #0: 19.949, true rewards: #0: 8.949
[2024-12-07 14:39:49,284][19013] Avg episode reward: 19.949, avg true_objective: 8.949
[2024-12-07 14:39:49,342][19013] Num frames 8100...
[2024-12-07 14:39:49,431][19013] Num frames 8200...
[2024-12-07 14:39:49,502][19013] Num frames 8300...
[2024-12-07 14:39:49,572][19013] Num frames 8400...
[2024-12-07 14:39:49,641][19013] Num frames 8500...
[2024-12-07 14:39:49,711][19013] Num frames 8600...
[2024-12-07 14:39:49,782][19013] Num frames 8700...
[2024-12-07 14:39:49,864][19013] Avg episode rewards: #0: 19.336, true rewards: #0: 8.736
[2024-12-07 14:39:49,865][19013] Avg episode reward: 19.336, avg true_objective: 8.736
[2024-12-07 14:40:01,237][19013] Replay video saved to /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/replay.mp4!
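The replay is simply the evaluation frames written out as an mp4. A minimal way to do the same thing yourself (using imageio as an assumed dependency, not necessarily what Sample-Factory uses internally):

```
import imageio
import numpy as np

# frames: list of HxWx3 uint8 RGB arrays collected during evaluation (placeholders here)
frames = [np.zeros((120, 160, 3), dtype=np.uint8) for _ in range(60)]

with imageio.get_writer("replay.mp4", fps=35) as writer:
    for frame in frames:
        writer.append_data(frame)
```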
[2024-12-07 14:41:09,726][19013] Loading existing experiment configuration from /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/config.json
[2024-12-07 14:41:09,727][19013] Overriding arg 'num_workers' with value 1 passed from command line
[2024-12-07 14:41:09,727][19013] Adding new argument 'no_render'=True that is not in the saved config file!
[2024-12-07 14:41:09,728][19013] Adding new argument 'save_video'=True that is not in the saved config file!
[2024-12-07 14:41:09,728][19013] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2024-12-07 14:41:09,729][19013] Adding new argument 'video_name'=None that is not in the saved config file!
[2024-12-07 14:41:09,730][19013] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
[2024-12-07 14:41:09,730][19013] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2024-12-07 14:41:09,731][19013] Adding new argument 'push_to_hub'=True that is not in the saved config file!
[2024-12-07 14:41:09,731][19013] Adding new argument 'hf_repository'='rahatchd/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
[2024-12-07 14:41:09,731][19013] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2024-12-07 14:41:09,732][19013] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2024-12-07 14:41:09,732][19013] Adding new argument 'train_script'=None that is not in the saved config file!
[2024-12-07 14:41:09,732][19013] Adding new argument 'enjoy_script'=None that is not in the saved config file!
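This second run reloads the saved config.json and layers the enjoy/push-to-hub arguments on top of it, which is what the "Overriding arg" and "Adding new argument" messages describe. A simplified sketch of that merge logic (illustrative only, not the actual Sample-Factory code):

```
import json

def merge_config(config_path, cli_args):
    """Load the saved training config and apply command-line overrides on top."""
    with open(config_path) as f:
        cfg = json.load(f)
    for key, value in cli_args.items():
        if key in cfg:
            print(f"Overriding arg '{key}' with value {value} passed from command line")
        else:
            print(f"Adding new argument '{key}'={value} that is not in the saved config file!")
        cfg[key] = value
    return cfg

cfg = merge_config(
    "train_dir/default_experiment/config.json",
    {"num_workers": 1, "no_render": True, "push_to_hub": True},
)
```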
[2024-12-07 14:41:09,733][19013] Using frameskip 1 and render_action_repeat=4 for evaluation
[2024-12-07 14:41:09,758][19013] RunningMeanStd input shape: (3, 72, 128)
[2024-12-07 14:41:09,759][19013] RunningMeanStd input shape: (1,)
[2024-12-07 14:41:09,769][19013] ConvEncoder: input_channels=3
[2024-12-07 14:41:09,803][19013] Conv encoder output size: 512
[2024-12-07 14:41:09,804][19013] Policy head output size: 512
[2024-12-07 14:41:09,836][19013] Loading state from checkpoint /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
[2024-12-07 14:41:10,172][19013] Num frames 100...
[2024-12-07 14:41:10,256][19013] Num frames 200...
[2024-12-07 14:41:10,340][19013] Num frames 300...
[2024-12-07 14:41:10,423][19013] Num frames 400...
[2024-12-07 14:41:10,506][19013] Num frames 500...
[2024-12-07 14:41:10,589][19013] Num frames 600...
[2024-12-07 14:41:10,676][19013] Num frames 700...
[2024-12-07 14:41:10,758][19013] Num frames 800...
[2024-12-07 14:41:10,843][19013] Num frames 900...
[2024-12-07 14:41:10,926][19013] Num frames 1000...
[2024-12-07 14:41:11,000][19013] Avg episode rewards: #0: 24.240, true rewards: #0: 10.240
[2024-12-07 14:41:11,000][19013] Avg episode reward: 24.240, avg true_objective: 10.240
[2024-12-07 14:41:11,065][19013] Num frames 1100...
[2024-12-07 14:41:11,148][19013] Num frames 1200...
[2024-12-07 14:41:11,230][19013] Num frames 1300...
[2024-12-07 14:41:11,317][19013] Num frames 1400...
[2024-12-07 14:41:11,404][19013] Num frames 1500...
[2024-12-07 14:41:11,486][19013] Num frames 1600...
[2024-12-07 14:41:11,568][19013] Avg episode rewards: #0: 18.160, true rewards: #0: 8.160
[2024-12-07 14:41:11,568][19013] Avg episode reward: 18.160, avg true_objective: 8.160
[2024-12-07 14:41:11,664][19013] Num frames 1700...
[2024-12-07 14:41:11,758][19013] Num frames 1800...
[2024-12-07 14:41:11,839][19013] Num frames 1900...
[2024-12-07 14:41:11,919][19013] Num frames 2000...
[2024-12-07 14:41:11,999][19013] Num frames 2100...
[2024-12-07 14:41:12,114][19013] Avg episode rewards: #0: 15.587, true rewards: #0: 7.253
[2024-12-07 14:41:12,115][19013] Avg episode reward: 15.587, avg true_objective: 7.253
[2024-12-07 14:41:12,145][19013] Num frames 2200...
[2024-12-07 14:41:12,265][19013] Num frames 2300...
[2024-12-07 14:41:12,352][19013] Num frames 2400...
[2024-12-07 14:41:12,432][19013] Num frames 2500...
[2024-12-07 14:41:12,515][19013] Num frames 2600...
[2024-12-07 14:41:12,596][19013] Num frames 2700...
[2024-12-07 14:41:12,679][19013] Num frames 2800...
[2024-12-07 14:41:12,762][19013] Num frames 2900...
[2024-12-07 14:41:12,842][19013] Num frames 3000...
[2024-12-07 14:41:12,922][19013] Num frames 3100...
[2024-12-07 14:41:13,032][19013] Avg episode rewards: #0: 16.420, true rewards: #0: 7.920
[2024-12-07 14:41:13,033][19013] Avg episode reward: 16.420, avg true_objective: 7.920
[2024-12-07 14:41:13,072][19013] Num frames 3200...
[2024-12-07 14:41:13,186][19013] Num frames 3300...
[2024-12-07 14:41:13,297][19013] Num frames 3400...
[2024-12-07 14:41:13,369][19013] Num frames 3500...
[2024-12-07 14:41:13,439][19013] Num frames 3600...
[2024-12-07 14:41:13,513][19013] Num frames 3700...
[2024-12-07 14:41:13,585][19013] Num frames 3800...
[2024-12-07 14:41:13,662][19013] Num frames 3900...
[2024-12-07 14:41:13,745][19013] Num frames 4000...
[2024-12-07 14:41:13,828][19013] Num frames 4100...
[2024-12-07 14:41:13,912][19013] Num frames 4200...
[2024-12-07 14:41:13,994][19013] Num frames 4300...
[2024-12-07 14:41:14,077][19013] Num frames 4400...
[2024-12-07 14:41:14,162][19013] Num frames 4500...
[2024-12-07 14:41:14,246][19013] Num frames 4600...
[2024-12-07 14:41:14,328][19013] Num frames 4700...
[2024-12-07 14:41:14,409][19013] Num frames 4800...
[2024-12-07 14:41:14,492][19013] Num frames 4900...
[2024-12-07 14:41:14,621][19013] Avg episode rewards: #0: 21.584, true rewards: #0: 9.984
[2024-12-07 14:41:14,621][19013] Avg episode reward: 21.584, avg true_objective: 9.984
[2024-12-07 14:41:14,632][19013] Num frames 5000...
[2024-12-07 14:41:14,751][19013] Num frames 5100...
[2024-12-07 14:41:14,832][19013] Num frames 5200...
[2024-12-07 14:41:14,902][19013] Num frames 5300...
[2024-12-07 14:41:14,972][19013] Num frames 5400...
[2024-12-07 14:41:15,052][19013] Num frames 5500...
[2024-12-07 14:41:15,129][19013] Num frames 5600...
[2024-12-07 14:41:15,199][19013] Num frames 5700...
[2024-12-07 14:41:15,322][19013] Avg episode rewards: #0: 20.487, true rewards: #0: 9.653
[2024-12-07 14:41:15,323][19013] Avg episode reward: 20.487, avg true_objective: 9.653
[2024-12-07 14:41:15,331][19013] Num frames 5800...
[2024-12-07 14:41:15,446][19013] Num frames 5900...
[2024-12-07 14:41:15,525][19013] Num frames 6000...
[2024-12-07 14:41:15,605][19013] Num frames 6100...
[2024-12-07 14:41:15,684][19013] Num frames 6200...
[2024-12-07 14:41:15,764][19013] Num frames 6300...
[2024-12-07 14:41:15,842][19013] Num frames 6400...
[2024-12-07 14:41:15,921][19013] Num frames 6500...
[2024-12-07 14:41:16,002][19013] Num frames 6600...
[2024-12-07 14:41:16,074][19013] Num frames 6700...
[2024-12-07 14:41:16,143][19013] Num frames 6800...
[2024-12-07 14:41:16,213][19013] Num frames 6900...
[2024-12-07 14:41:16,281][19013] Num frames 7000...
[2024-12-07 14:41:16,339][19013] Avg episode rewards: #0: 22.154, true rewards: #0: 10.011
[2024-12-07 14:41:16,340][19013] Avg episode reward: 22.154, avg true_objective: 10.011
[2024-12-07 14:41:16,439][19013] Num frames 7100...
[2024-12-07 14:41:16,539][19013] Num frames 7200...
[2024-12-07 14:41:16,607][19013] Num frames 7300...
[2024-12-07 14:41:16,677][19013] Num frames 7400...
[2024-12-07 14:41:16,749][19013] Num frames 7500...
[2024-12-07 14:41:16,828][19013] Num frames 7600...
[2024-12-07 14:41:16,906][19013] Num frames 7700...
[2024-12-07 14:41:16,989][19013] Num frames 7800...
[2024-12-07 14:41:17,085][19013] Avg episode rewards: #0: 21.191, true rewards: #0: 9.816
[2024-12-07 14:41:17,086][19013] Avg episode reward: 21.191, avg true_objective: 9.816
[2024-12-07 14:41:17,144][19013] Num frames 7900...
[2024-12-07 14:41:17,259][19013] Num frames 8000...
[2024-12-07 14:41:17,352][19013] Num frames 8100...
[2024-12-07 14:41:17,432][19013] Num frames 8200...
[2024-12-07 14:41:17,513][19013] Num frames 8300...
[2024-12-07 14:41:17,595][19013] Num frames 8400...
[2024-12-07 14:41:17,676][19013] Num frames 8500...
[2024-12-07 14:41:17,758][19013] Num frames 8600...
[2024-12-07 14:41:17,838][19013] Num frames 8700...
[2024-12-07 14:41:17,906][19013] Avg episode rewards: #0: 20.908, true rewards: #0: 9.686
[2024-12-07 14:41:17,907][19013] Avg episode reward: 20.908, avg true_objective: 9.686
[2024-12-07 14:41:18,010][19013] Num frames 8800...
[2024-12-07 14:41:18,113][19013] Num frames 8900...
[2024-12-07 14:41:18,183][19013] Num frames 9000...
[2024-12-07 14:41:18,255][19013] Num frames 9100...
[2024-12-07 14:41:18,325][19013] Num frames 9200...
[2024-12-07 14:41:18,420][19013] Avg episode rewards: #0: 19.561, true rewards: #0: 9.261
[2024-12-07 14:41:18,421][19013] Avg episode reward: 19.561, avg true_objective: 9.261
[2024-12-07 14:41:34,293][19013] Replay video saved to /home/rahatchd/code/deep-rl-hugging-face/unit8/train_dir/default_experiment/replay.mp4!