---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 290.32 +/- 15.84
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Colab

https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit1/unit1.ipynb#scrollTo=PAEVwK-aahfx

## Usage (with Stable-baselines3)

```python
from huggingface_sb3 import load_from_hub, package_to_hub
from huggingface_hub import notebook_login  # Log in to our Hugging Face account to be able to upload models to the Hub

from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor

import gymnasium as gym

# We create our environment with gym.make("<name_of_the_environment>")
env = gym.make("LunarLander-v2")
env.reset()

print("_____OBSERVATION SPACE_____ \n")
print("Observation Space Shape", env.observation_space.shape)
print("Sample observation", env.observation_space.sample())  # Get a random observation

print("\n _____ACTION SPACE_____ \n")
print("Action Space Shape", env.action_space.n)
print("Action Space Sample", env.action_space.sample())  # Take a random action
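
# For reference (from the Gymnasium documentation): the observation is an
# 8-dimensional vector (x/y position, x/y velocity, angle, angular velocity,
# and two booleans for left/right leg ground contact), and the action space
# is Discrete(4): 0 = do nothing, 1 = fire left orientation engine,
# 2 = fire main engine, 3 = fire right orientation engine.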

# Create the vectorized training environment (16 environments in parallel)
env = make_vec_env('LunarLander-v2', n_envs=16)

# Define a PPO MlpPolicy architecture
# We use a multi-layer perceptron (MlpPolicy) because the input is a vector;
# if we had frames as input we would use CnnPolicy instead
model = PPO('MlpPolicy', env, verbose=1)
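
# Note: we keep the stable-baselines3 default PPO hyperparameters here; the agent
# can be tuned further by passing keyword arguments such as n_steps, batch_size,
# n_epochs, gamma, gae_lambda or ent_coef to the PPO constructor.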

# Train it for 2,000,000 timesteps
model.learn(total_timesteps=int(2e6))

# Specify a file name for the model and save it
model_name = "ppo-LunarLander-v2"
model.save(model_name)

# Evaluate the agent
# Create a new environment for evaluation
eval_env = Monitor(gym.make("LunarLander-v2"))

# Evaluate the model with 10 evaluation episodes and deterministic=True
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)

# Print the results
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
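
# For reference, LunarLander-v2 is registered with a reward threshold of 200,
# i.e. the environment is considered solved once the mean episodic return
# reaches roughly 200.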

from stable_baselines3.common.vec_env import DummyVecEnv

# Define a repo_id
# repo_id is the id of the model repository on the Hugging Face Hub
# (repo_id = {organization}/{repo_name}, for instance ThomasSimonini/ppo-LunarLander-v2)
repo_id = "HugBot/ppo-LunarLander-v2"

# Define the name of the environment
env_id = "LunarLander-v2"

# Create the evaluation env and set render_mode="rgb_array"
eval_env = DummyVecEnv([lambda: Monitor(gym.make(env_id, render_mode="rgb_array"))])
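
# render_mode="rgb_array" lets package_to_hub capture frames and record the
# replay video that is uploaded alongside the model.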

# Define the model architecture we used
model_architecture = "PPO"

# Define the commit message
commit_message = "Upload PPO LunarLander-v2 trained agent"

# package_to_hub saves and evaluates the model, generates a model card and
# records a replay video of the agent before pushing the repo to the Hub
package_to_hub(model=model,  # Our trained model
               model_name=model_name,  # The name of our trained model
               model_architecture=model_architecture,  # The model architecture we used: in our case PPO
               env_id=env_id,  # Name of the environment
               eval_env=eval_env,  # Evaluation environment
               repo_id=repo_id,  # id of the model repository on the Hugging Face Hub ({organization}/{repo_name}, e.g. ThomasSimonini/ppo-LunarLander-v2)
               commit_message=commit_message)
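
# Note: pushing to the Hub requires being logged in to your Hugging Face account
# first, e.g. by calling notebook_login() (imported above) in a notebook or by
# running `huggingface-cli login` in a terminal.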

repo_id = "HugBot/ppo-LunarLander-v2"  # The repo_id
filename = "ppo-LunarLander-v2.zip"  # The model filename

# When the model was trained on Python 3.8, the pickle protocol is 5,
# but Python 3.6 and 3.7 use protocol 4.
# In order to get compatibility we need to:
# 1. Install pickle5 (we did it at the beginning of the Colab)
# 2. Create a custom empty object that we pass as a parameter to PPO.load()
custom_objects = {
    "learning_rate": 0.0,
    "lr_schedule": lambda _: 0.0,
    "clip_range": lambda _: 0.0,
}
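
# These placeholder values only exist so that checkpoints saved with a different
# Python/pickle version can be deserialized; they do not affect the loaded policy
# weights used for inference.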

checkpoint = load_from_hub(repo_id, filename)
model = PPO.load(checkpoint, custom_objects=custom_objects, print_system_info=True)

# Evaluate the downloaded agent
eval_env = Monitor(gym.make("LunarLander-v2"))
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
...
```
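
## Watch the agent

To see the trained agent land, a minimal rollout loop such as the sketch below can be used. It assumes `gymnasium[box2d]` is installed and that a display is available for the `human` render mode; the repo id and filename are the ones used in the usage example above.

```python
import gymnasium as gym

from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Download the checkpoint from the Hub and load it
# (same custom_objects workaround as in the usage example)
checkpoint = load_from_hub("HugBot/ppo-LunarLander-v2", "ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint, custom_objects={
    "learning_rate": 0.0,
    "lr_schedule": lambda _: 0.0,
    "clip_range": lambda _: 0.0,
})

# Play one episode with on-screen rendering
env = gym.make("LunarLander-v2", render_mode="human")
obs, info = env.reset()
done = False
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
env.close()
```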