{"train": [{"diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex c58cfeb50..679e57ff2 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -666,7 +666,7 @@ def load_checkpoint_in_model(\n elif len(potential_index) == 1:\n index_filename = os.path.join(checkpoint, potential_index[0])\n else:\n- raise ValueError(f\"{checkpoint} containing mote than one `.index.json` file, delete the irrelevant ones.\")\n+ raise ValueError(f\"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones.\")\n else:\n raise ValueError(\n \"`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded \"\n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/955", "pr_id": 1188526483}, {"diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex d3247a464..75a2c06f6 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -404,6 +404,7 @@ def load_checkpoint_and_dispatch(\n offload_folder=offload_folder,\n dtype=dtype,\n offload_state_dict=offload_state_dict,\n+ offload_buffers=offload_buffers,\n )\n if device_map is None:\n return model\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 2774b18ce..c58cfeb50 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -607,6 +607,7 @@ def load_checkpoint_in_model(\n offload_folder: Optional[Union[str, os.PathLike]] = None,\n dtype: Optional[Union[str, torch.dtype]] = None,\n offload_state_dict: bool = False,\n+ offload_buffers: bool = False,\n ):\n \"\"\"\n Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n@@ -636,6 +637,8 @@ def load_checkpoint_in_model(\n offload_state_dict (`bool`, *optional*, defaults to `False`):\n If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if\n the weight of the CPU state dict + the biggest shard does not fit.\n+ offload_buffers (`bool`, *optional*, defaults to `False):\n+ Whether or not to include the buffers in the weights offloaded to disk.\n \"\"\"\n if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n raise ValueError(\n@@ -687,6 +690,8 @@ def load_checkpoint_in_model(\n state_dict_folder = tempfile.mkdtemp()\n state_dict_index = {}\n \n+ buffer_names = [name for name, _ in model.named_buffers()]\n+\n for checkpoint_file in checkpoint_files:\n checkpoint = torch.load(checkpoint_file)\n if device_map is None:\n@@ -703,7 +708,8 @@ def load_checkpoint_in_model(\n param_device = device_map[module_name]\n \n if param_device == \"disk\":\n- set_module_tensor_to_device(model, param_name, \"meta\")\n+ if offload_buffers or param_name not in buffer_names:\n+ set_module_tensor_to_device(model, param_name, \"meta\")\n offload_weight(param, param_name, offload_folder, index=offload_index)\n elif param_device == \"cpu\" and offload_state_dict:\n set_module_tensor_to_device(model, param_name, \"meta\")\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\nindex 16243d5e1..644d297b2 100644\n--- a/tests/test_modeling_utils.py\n+++ b/tests/test_modeling_utils.py\n@@ -275,6 +275,31 @@ def test_load_checkpoint_in_model_one_gpu(self):\n 
self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n \n+ @require_cuda\n+ def test_load_checkpoint_in_model_disk_offload(self):\n+ device_map = {\"linear1\": \"cpu\", \"batchnorm\": \"disk\", \"linear2\": \"cpu\"}\n+\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), fname)\n+ load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ # Buffers are not offloaded by default\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), fname)\n+ load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir, offload_buffers=True)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"meta\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n @require_multi_gpu\n def test_load_checkpoint_in_model_two_gpu(self):\n device_map = {\"linear1\": 0, \"batchnorm\": \"cpu\", \"linear2\": 1}\n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/951", "pr_id": 1183897941}, {"diff": "diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex fceb0bd23..03a4e346c 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -243,10 +243,7 @@ def test_deepspeed_plugin(self, stage):\n \n @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)\n def test_accelerate_state_deepspeed(self, dtype):\n- state = AcceleratorState(_from_accelerator=True)\n- if state.initialized:\n- state.initialized = False\n-\n+ AcceleratorState._reset_state()\n deepspeed_plugin = DeepSpeedPlugin(\n gradient_accumulation_steps=1,\n gradient_clipping=1.0,\n@@ -259,7 +256,6 @@ def test_accelerate_state_deepspeed(self, dtype):\n with mockenv_context(**self.dist_env):\n state = Accelerator(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin).state\n self.assertTrue(state.deepspeed_plugin.deepspeed_config[dtype][\"enabled\"])\n- state.initialized = False\n \n def test_init_zero3(self):\n deepspeed_plugin = DeepSpeedPlugin(\n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/950", "pr_id": 1183653396}, {"diff": "diff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 31a6d64a2..d97327de4 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -35,6 +35,14 @@\n import torch_xla.core.xla_model as xm\n \n \n+def is_initialized() -> bool:\n+ \"\"\"\n+ Checks if the `AcceleratorState` has been initialized from `Accelerator`. 
Same as `AcceleratorState.initialized`,\n+ but works as a module method.\n+ \"\"\"\n+ return AcceleratorState._shared_state != {}\n+\n+\n # Inspired by Alex Martelli's 'Borg'.\n class AcceleratorState:\n \"\"\"\n@@ -45,6 +53,7 @@ class AcceleratorState:\n - **device** (`torch.device`) -- The device to use.\n - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently\n in use.\n+ - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.\n - **local_process_index** (`int`) -- The index of the current process on the current server.\n - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type\n of mixed precision being performed.\n@@ -69,8 +78,7 @@ def __init__(\n if parse_flag_from_env(\"ACCELERATE_USE_CPU\"):\n cpu = True\n self._check_initialized(mixed_precision, cpu)\n- self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\n- if not getattr(self, \"initialized\", False):\n+ if not self.initialized:\n self.backend = None\n self.deepspeed_plugin = None\n mixed_precision = (\n@@ -245,18 +253,17 @@ def __init__(\n and self.device.type == \"cuda\"\n ):\n torch.backends.cuda.matmul.allow_tf32 = True\n- self.initialized = True\n \n- def __repr__(self):\n- mixed_precision = self.mixed_precision\n+ self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\n \n+ def __repr__(self):\n repr = (\n f\"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\\n\"\n f\"Num processes: {self.num_processes}\\n\"\n f\"Process index: {self.process_index}\\n\"\n f\"Local process index: {self.local_process_index}\\n\"\n f\"Device: {self.device}\\n\"\n- f\"Mixed precision type: {mixed_precision}\\n\"\n+ f\"Mixed precision type: {self.mixed_precision}\\n\"\n )\n if self.distributed_type == DistributedType.DEEPSPEED:\n repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\n\"\n@@ -286,9 +293,14 @@ def _reset_state():\n \"Resets `_shared_state`, is used internally and should not be called\"\n AcceleratorState._shared_state = {}\n \n+ @property\n+ def initialized(self) -> bool:\n+ \"Returns whether the `AcceleratorState` has been initialized\"\n+ return self._shared_state != {}\n+\n def _check_initialized(self, mixed_precision=None, cpu=None):\n \"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized\"\n- if getattr(self, \"initialized\", False):\n+ if self.initialized:\n err = \"AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerate()`.\"\n if cpu and self.device.type != \"cpu\":\n raise ValueError(err.format(flag=\"cpu=True\"))\n@@ -311,11 +323,15 @@ class GradientState:\n \n def __init__(self):\n self.__dict__ = self._shared_state\n- if not getattr(self, \"initialized\", False):\n+ if not self.initialized:\n self.sync_gradients = True\n self.end_of_dataloader = False\n self.remainder = -1\n- self.initialized = True\n+\n+ @property\n+ def initialized(self) -> bool:\n+ \"Returns whether the `GradientState` has been initialized\"\n+ return GradientState._shared_state != {}\n \n def __repr__(self):\n return (\n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/949", "pr_id": 1183552649}, {"diff": "diff --git 
a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nindex 22f51cce1..84c115fd3 100644\n--- a/src/accelerate/utils/offload.py\n+++ b/src/accelerate/utils/offload.py\n@@ -34,7 +34,7 @@ def offload_weight(weight, weight_name, offload_folder, index=None):\n # Need to reinterpret the underlined data as int16 since NumPy does not handle bfloat16s.\n weight = weight.view(torch.int16)\n dtype = \"bfloat16\"\n- array = weight.numpy()\n+ array = weight.cpu().numpy()\n tensor_file = os.path.join(offload_folder, f\"{weight_name}.dat\")\n if index is not None:\n if dtype is None:\n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/945", "pr_id": 1178312394}, {"diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex ec5041bfa..7c07e9105 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -363,7 +363,7 @@ def __init__(\n if (\n self.state.mixed_precision == \"fp16\"\n and self.device.type != \"cpu\"\n- and self.distributed_type != DistributedType.MEGATRON_LM\n+ and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)\n ):\n self.native_amp = True\n if not torch.cuda.is_available() and not parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\"):\n@@ -375,10 +375,10 @@ def __init__(\n self.scaler = ShardedGradScaler(**kwargs)\n else:\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n- elif (\n- self.state.mixed_precision == \"bf16\"\n- and self.distributed_type != DistributedType.FSDP\n- and self.distributed_type != DistributedType.MEGATRON_LM\n+ elif self.state.mixed_precision == \"bf16\" and self.distributed_type not in (\n+ DistributedType.DEEPSPEED,\n+ DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,\n ):\n if self.device.type == \"cpu\":\n self.native_amp = is_torch_version(\">=\", \"1.10\")\n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/943", "pr_id": 1176583667}, {"diff": "diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 7afebea48..379d6454e 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -37,7 +37,10 @@\n _available_trackers = []\n \n if is_tensorboard_available():\n- from torch.utils import tensorboard\n+ try:\n+ from torch.utils import tensorboard\n+ except ModuleNotFoundError:\n+ import tensorboardX as tensorboard\n \n _available_trackers.append(LoggerType.TENSORBOARD)\n \n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/942", "pr_id": 1176562235}, {"diff": "diff --git a/docs/source/usage_guides/deepspeed.mdx b/docs/source/usage_guides/deepspeed.mdx\nindex 29561c77b..0377296c1 100644\n--- a/docs/source/usage_guides/deepspeed.mdx\n+++ b/docs/source/usage_guides/deepspeed.mdx\n@@ -395,6 +395,196 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. 
\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a sample script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. Content of the `accelerate` config:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------\n+In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------\n+Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. Content of the `accelerate` config:\n+\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: ds_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 4\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: bf16\n+ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\n+```\n+\n+**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `\"auto\"` in the DeepSpeed` configuration file and check that things work as expected.\n+\n+1. New `ds_config.json` with `\"auto\"` for the `accelerate launch` DeepSpeed command arguments:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": \"auto\"\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": \"auto\",\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\",\n+ \"offload_optimizer\": {\n+ \"device\": \"auto\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"auto\"\n+ }\n+ },\n+ \"gradient_clipping\": \"auto\",\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": \"auto\",\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+2. Output of `accelerate launch --mixed_precision=\"fp16\" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device=\"cpu\" --offload_optimizer_device=\"nvme\" --zero3_save_16bit_model=\"true\" test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: fp16\n+ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}\n+```\n+\n+**Note**: Remaining `\"auto\"` values are handled in `accelerator.prepare()` call as explained in point 2 of \n+`Important code changes when using DeepSpeed Config File`.\n+\n ## Saving and loading\n \n 1. 
Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex a3dcd2dcb..50acf55a1 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -460,7 +460,7 @@ def get_cluster_input():\n \n if distributed_type != DistributedType.TPU:\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\n- mixed_precision = \"no\"\n+ mixed_precision = None\n else:\n mixed_precision = _ask_options(\n \"Do you wish to use FP16 or BF16 (mixed precision)?\",\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex ba492802e..8b4a28292 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -78,6 +78,7 @@ def to_dict(self):\n for key, value in result.items():\n if isinstance(value, Enum):\n result[key] = value.value\n+ result = {k: v for k, v in result.items() if v is not None}\n return result\n \n @classmethod\n@@ -88,7 +89,7 @@ def from_json_file(cls, json_file=None):\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n if \"mixed_precision\" not in config_dict:\n- config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n+ config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else None\n if \"fp16\" in config_dict: # Convert the config to the new format.\n del config_dict[\"fp16\"]\n if \"use_cpu\" not in config_dict:\n@@ -111,7 +112,7 @@ def from_yaml_file(cls, yaml_file=None):\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n \n if \"mixed_precision\" not in config_dict:\n- config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n+ config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else None\n if \"fp16\" in config_dict: # Convert the config to the new format.\n del config_dict[\"fp16\"]\n if \"use_cpu\" not in config_dict:\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 91d4427ac..b5f831b47 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to 'none'.\",\n )\n deepspeed_args.add_argument(\n \"--offload_param_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). 
\"\n+ \"If unspecified, will default to 'none'.\",\n )\n deepspeed_args.add_argument(\n \"--gradient_accumulation_steps\",\n default=None,\n type=int,\n- help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `1`.\",\n )\n deepspeed_args.add_argument(\n \"--gradient_clipping\",\n default=None,\n type=float,\n- help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `1.0`.\",\n )\n deepspeed_args.add_argument(\n \"--zero3_init_flag\",\n- default=\"true\",\n+ default=None,\n type=str,\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\n- \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n+ \"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.\",\n )\n deepspeed_args.add_argument(\n \"--zero3_save_16bit_model\",\n- default=\"false\",\n+ default=None,\n type=str,\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. \"\n- \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n+ \"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.\",\n )\n deepspeed_args.add_argument(\n \"--deepspeed_hostfile\",\n@@ -363,7 +368,7 @@ def launch_command_parser(subparsers=None):\n \"--deepspeed_multinode_launcher\",\n default=None,\n type=str,\n- help=\"DeepSpeed multi-node launcher to use.\",\n+ help=\"DeepSpeed multi-node launcher to use. 
If unspecified, will default to `pdsh`.\",\n )\n \n # fsdp arguments\n@@ -717,14 +722,22 @@ def deepspeed_launcher(args):\n \n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n+ current_env[\"ACCELERATE_CONFIG_DS_FIELDS\"] = str(args.deepspeed_fields_from_accelerate_config).lower()\n current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n- current_env[\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n- current_env[\"ACCELERATE_GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\n- current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n- current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n+ if args.zero_stage is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n+ if args.gradient_accumulation_steps is not None:\n+ current_env[\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n+ if args.gradient_clipping is not None:\n+ current_env[\"ACCELERATE_GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\n+ if args.offload_optimizer_device is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n+ if args.offload_param_device is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n+ if args.zero3_init_flag is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\n+ if args.zero3_save_16bit_model is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n if args.deepspeed_config_file is not None:\n current_env[\"ACCELERATE_DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\n \n@@ -966,6 +979,7 @@ def launch_command(args):\n \n defaults = None\n warned = []\n+ mp_from_config_flag = False\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n@@ -1013,7 +1027,12 @@ def launch_command(args):\n ):\n setattr(args, name, attr)\n if not args.mixed_precision:\n- args.mixed_precision = defaults.mixed_precision\n+ if defaults.mixed_precision is None:\n+ args.mixed_precision = \"no\"\n+ else:\n+ args.mixed_precision = defaults.mixed_precision\n+ mp_from_config_flag = True\n+\n if args.dynamo_backend is None:\n warned.append(\"\\t`--dynamo_backend` was set to a value of `'no'`\")\n args.dynamo_backend = \"no\"\n@@ -1056,6 +1075,10 @@ def launch_command(args):\n \n # Use the proper launcher\n if args.use_deepspeed and not args.cpu:\n+ args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []\n+ if mp_from_config_flag:\n+ args.deepspeed_fields_from_accelerate_config.append(\"mixed_precision\")\n+ args.deepspeed_fields_from_accelerate_config = \",\".join(args.deepspeed_fields_from_accelerate_config)\n deepspeed_launcher(args)\n elif args.use_fsdp and not args.cpu:\n multi_gpu_launcher(args)\ndiff --git 
a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 5a87d7860..01f174a10 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -394,6 +394,28 @@ class DeepSpeedPlugin:\n def __post_init__(self):\n from .deepspeed import HfDeepSpeedConfig\n \n+ if self.gradient_accumulation_steps is None:\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\n+\n+ if self.gradient_clipping is None:\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\n+ if gradient_clipping != \"none\":\n+ self.gradient_clipping = float(gradient_clipping)\n+\n+ if self.zero_stage is None:\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\n+\n+ if self.offload_optimizer_device is None:\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+\n+ if self.offload_param_device is None:\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n+\n+ if self.zero3_save_16bit_model is None:\n+ self.zero3_save_16bit_model = (\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ )\n+\n if self.hf_ds_config is None:\n self.hf_ds_config = os.environ.get(\"ACCELERATE_DEEPSPEED_CONFIG_FILE\", \"none\")\n if (\n@@ -405,33 +427,22 @@ def __post_init__(self):\n self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)\n if \"gradient_accumulation_steps\" not in self.hf_ds_config.config:\n self.hf_ds_config.config[\"gradient_accumulation_steps\"] = 1\n- elif self.hf_ds_config.config[\"gradient_accumulation_steps\"] == \"auto\":\n- raise ValueError(\"gradient_accumulation_steps cannot be set to 'auto' in the DeepSpeed config.\")\n if \"zero_optimization\" not in self.hf_ds_config.config:\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\n- else:\n- if self.gradient_accumulation_steps is None:\n- self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\n-\n- if self.gradient_clipping is None:\n- gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\n- if gradient_clipping != \"none\":\n- self.gradient_clipping = float(gradient_clipping)\n-\n- if self.zero_stage is None:\n- self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\n-\n- if self.offload_optimizer_device is None:\n- self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n-\n- if self.offload_param_device is None:\n- self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n-\n- if self.zero3_save_16bit_model is None:\n- self.zero3_save_16bit_model = (\n- os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n- )\n \n+ self._deepspeed_config_checks()\n+ kwargs = {\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"gradient_clipping\": self.gradient_clipping if self.gradient_clipping else 1.0,\n+ \"zero_optimization.stage\": self.zero_stage,\n+ \"zero_optimization.offload_optimizer.device\": self.offload_optimizer_device,\n+ \"zero_optimization.offload_param.device\": self.offload_param_device,\n+ \"zero_optimization.stage3_gather_16bit_weights_on_model_save\": self.zero3_save_16bit_model,\n+ }\n+ for key in kwargs.keys():\n+ self.fill_match(key, **kwargs, must_match=False)\n+ 
self.hf_ds_config.set_stage_and_offload()\n+ else:\n config = {\n \"train_batch_size\": \"auto\",\n \"train_micro_batch_size_per_gpu\": \"auto\",\n@@ -450,15 +461,19 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = (\n+ strtobool(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", str(self.hf_ds_config.is_zero3()))) == 1\n+ )\n if self.zero3_init_flag and not self.hf_ds_config.is_zero3():\n warnings.warn(\"DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\")\n self.zero3_init_flag = False\n \n- def fill_match(self, ds_key_long, mismatches, must_match=True, **kwargs):\n+ def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):\n+ mismatches = [] if mismatches is None else mismatches\n config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)\n if config is None:\n return\n@@ -503,10 +518,28 @@ def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must\n \n def set_mixed_precision(self, mixed_precision):\n ds_config = self.deepspeed_config\n- if mixed_precision == \"fp16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\n- ds_config.update({\"fp16\": {\"enabled\": True, \"auto_cast\": True}})\n- elif mixed_precision == \"bf16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\n- ds_config.update({\"bf16\": {\"enabled\": True}})\n+ kwargs = {\n+ \"fp16.enabled\": mixed_precision == \"fp16\",\n+ \"bf16.enabled\": mixed_precision == \"bf16\",\n+ }\n+ if mixed_precision == \"fp16\":\n+ if \"fp16\" not in ds_config:\n+ ds_config[\"fp16\"] = {\"enabled\": True, \"auto_cast\": True}\n+ elif mixed_precision == \"bf16\":\n+ if \"bf16\" not in ds_config:\n+ ds_config[\"bf16\"] = {\"enabled\": True}\n+\n+ if mixed_precision != \"no\":\n+ diff_dtype = \"bf16\" if mixed_precision == \"fp16\" else \"fp16\"\n+ if str(ds_config.get(diff_dtype, {}).get(\"enabled\", \"False\")).lower() == \"true\":\n+ raise ValueError(\n+ f\"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file.\"\n+ )\n+ for dtype in [\"fp16\", \"bf16\"]:\n+ if dtype not in ds_config:\n+ ds_config[dtype] = {\"enabled\": False}\n+ self.fill_match(\"fp16.enabled\", must_match=False, **kwargs)\n+ self.fill_match(\"bf16.enabled\", must_match=False, **kwargs)\n \n def set_deepspeed_weakref(self):\n from .imports import is_transformers_available\n@@ -549,6 +582,31 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower() for name in env_variable_names_to_ignore\n+ ]\n+\n+ 
deepspeed_fields_from_accelerate_config = os.environ.get(\"ACCELERATE_CONFIG_DS_FIELDS\", \"\").split(\",\")\n+\n+ if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n\"\n+ \"The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n\"\n+ \"It will only ask for the necessary config variables when using `deepspeed_config_file`.\"\n+ )\n+\n \n @dataclass\n class FullyShardedDataParallelPlugin:\ndiff --git a/src/accelerate/utils/deepspeed.py b/src/accelerate/utils/deepspeed.py\nindex 02d1ab8bc..69dc5c7f8 100644\n--- a/src/accelerate/utils/deepspeed.py\n+++ b/src/accelerate/utils/deepspeed.py\n@@ -50,6 +50,9 @@ def __init__(self, config_file_or_dict):\n raise ValueError(\"expecting either a path to a DeepSpeed config file or a pre-populated dict\")\n self.config = config\n \n+ self.set_stage_and_offload()\n+\n+ def set_stage_and_offload(self):\n # zero stage - this is done as early as possible, before model is created, to allow\n # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object\n # during ``zero.Init()`` which needs to know the dtype, and some other hparams.\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex eca75c1ed..fceb0bd23 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -285,8 +285,6 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n from deepspeed.runtime.engine import DeepSpeedEngine\n \n kwargs = {\n- \"fp16.enabled\": True,\n- \"bf16.enabled\": False,\n \"optimizer.params.lr\": 5e-5,\n \"optimizer.params.weight_decay\": 0.0,\n \"scheduler.params.warmup_min_lr\": 0.0,\n@@ -370,7 +368,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n # Test DeepSpeed optimizer + DeepSpeed scheduler\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\n train_set = RegressionDataset(length=80)\n eval_set = RegressionDataset(length=20)\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n@@ -430,7 +428,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n # Test custom optimizer + DeepSpeed scheduler\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\n train_set = RegressionDataset(length=80)\n eval_set = RegressionDataset(length=20)\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n@@ -463,7 +461,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n # Test deepspeed optimizer + custom scheduler\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = 
Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\n train_set = RegressionDataset(length=80)\n eval_set = RegressionDataset(length=20)\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n@@ -501,8 +499,6 @@ def test_save_checkpoints(self):\n )\n del deepspeed_plugin.deepspeed_config[\"bf16\"]\n kwargs = {\n- \"fp16.enabled\": True,\n- \"bf16.enabled\": False,\n \"optimizer.params.lr\": 5e-5,\n \"optimizer.params.weight_decay\": 0.0,\n \"scheduler.params.warmup_min_lr\": 0.0,\n@@ -518,7 +514,7 @@ def test_save_checkpoints(self):\n }\n \n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\n kwargs[\"train_batch_size\"] = (\n kwargs[\"train_micro_batch_size_per_gpu\"]\n * deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]\n@@ -594,6 +590,81 @@ def test_autofill_dsconfig(self):\n accelerator.deepspeed_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"]\n )\n \n+ @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)\n+ def test_autofill_dsconfig_from_ds_plugin(self, dtype):\n+ ds_config = self.ds_config_dict[\"zero3\"]\n+ if dtype == BF16:\n+ del ds_config[\"fp16\"]\n+ else:\n+ del ds_config[\"bf16\"]\n+ ds_config[dtype][\"enabled\"] = \"auto\"\n+ ds_config[\"zero_optimization\"][\"stage\"] = \"auto\"\n+ ds_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"] = \"auto\"\n+ ds_config[\"zero_optimization\"][\"offload_optimizer\"][\"device\"] = \"auto\"\n+ ds_config[\"zero_optimization\"][\"offload_param\"][\"device\"] = \"auto\"\n+ ds_config[\"gradient_accumulation_steps\"] = \"auto\"\n+ ds_config[\"gradient_clipping\"] = \"auto\"\n+\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ hf_ds_config=ds_config,\n+ zero3_init_flag=True,\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=2,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=True,\n+ )\n+\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)\n+ deepspeed_plugin = accelerator.state.deepspeed_plugin\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"gradient_clipping\"], 1.0)\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"], 1)\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"stage\"], 2)\n+ self.assertEqual(\n+ deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"offload_optimizer\"][\"device\"], \"cpu\"\n+ )\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"offload_param\"][\"device\"], \"cpu\")\n+ self.assertTrue(\n+ deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"]\n+ )\n+ self.assertTrue(deepspeed_plugin.deepspeed_config[dtype][\"enabled\"])\n+\n+ AcceleratorState._reset_state()\n+ diff_dtype = \"bf16\" if dtype == \"fp16\" else \"fp16\"\n+ with mockenv_context(**self.dist_env):\n+ with self.assertRaises(ValueError) as cm:\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=diff_dtype)\n+ self.assertTrue(\n+ f\"`--mixed_precision` arg cannot be set to `{diff_dtype}` when `{dtype}` is set in the DeepSpeed config file.\"\n+ in str(cm.exception)\n+ )\n+\n+ def test_ds_config_assertions(self):\n+ ambiguous_env = self.dist_env.copy()\n+ 
ambiguous_env[\n+ \"ACCELERATE_CONFIG_DS_FIELDS\"\n+ ] = \"gradient_accumulation_steps,gradient_clipping,zero_stage,offload_optimizer_device,offload_param_device,zero3_save_16bit_model,mixed_precision\"\n+\n+ with mockenv_context(**ambiguous_env):\n+ with self.assertRaises(ValueError) as cm:\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ hf_ds_config=self.ds_config_file[ZERO3],\n+ zero3_init_flag=True,\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=ZERO2,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=True,\n+ )\n+ _ = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=FP16)\n+ self.assertTrue(\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\"\n+ in str(cm.exception)\n+ )\n+\n def test_basic_run(self):\n mod_file = inspect.getfile(accelerate.test_utils)\n test_file_path = os.path.sep.join(\n", "code_comments": [{"body": "Nit: all `none`->`None`", "diff_hunk": "@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `none`.\",", "from_author": false}, {"body": "```suggestion\r\n \"If you are using an accelerate config file, set `mixed_precision=no` \"\r\n```", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using accelerate config file, set `mixed_precision=no` \"", "from_author": false}, {"body": "```suggestion\r\n \"and make sure to not specify these config variables in `accelerate launch` command. 
\\n\"\r\n```", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using accelerate config file, set `mixed_precision=no` \"\n+ \"and remove others config variables mentioned in the above specified list; \"\n+ \"else don't specify these config variables in `accelerate launch` command. \\n\"", "from_author": false}, {"body": "Hello, `none` is the string option as possible values are `cpu`|`nvme`l`none`", "diff_hunk": "@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `none`.\",", "from_author": true}, {"body": "Oh then add quotes?", "diff_hunk": "@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). 
\"\n+ \"If unspecified, will default to `none`.\",", "from_author": false}, {"body": "Done.", "diff_hunk": "@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `none`.\",", "from_author": true}, {"body": "Did I understand it right that here you suggest to the user to set `mixed_precision=no` even if they use mixed precision in ds_config.json? If so this now is worse than before, as now you're proposing to force 2 opposite values in the same config entry. As in `mixed_precision=no` and `fp16 { enabled: true}`.\r\n\r\nUnless I'm misunderstanding the wording that is. \r\n\r\nI think it should be either `mixed_precision=xyz` or `ds_config.json` (with fp16 or bf16 blocks or no block at all as fp16 is the default), but not both.", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": false}, {"body": "```suggestion\r\n \"The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n\"\r\n```", "diff_hunk": "@@ -578,10 +578,9 @@ def _deepspeed_config_checks(self):\n raise ValueError(\n f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n- \"If you are using an accelerate config file, set `mixed_precision=no` \"\n- \"and remove others config variables mentioned in the above specified list; \"\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the above 
specified list; \"\n \"and make sure to not specify these config variables in `accelerate launch` command. \\n\"\n- \"The easiest method is to create new config following the questionnaire via `accelerate config`.\\n\"\n+ \"The easiest method is to create new config following the questionnaire via `accelerate config`.\\n\"", "from_author": false}, {"body": "Done. I've simplified the error message wherein user can just remove all the ambiguous entries from accelerate config file and not specify them in launch command", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": true}, {"body": "to focus we are dealing with `mixed_precision` duplicity here:\r\n\r\nI've just tried:\r\n\r\n```\r\ndeepspeed_config:\r\n deepspeed_multinode_launcher: standard\r\n deepspeed_config_file: ./configs/vopt-large-z3/ds_config.json\r\n zero3_init_flag: true\r\ndistributed_type: DEEPSPEED\r\nmixed_precision: 'no'\r\n```\r\n\r\n```\r\n{\r\n \"fp16\": {\r\n \"enabled\": true,\r\n [...]\r\n```\r\nand it doesn't assert.\r\n\r\nwith the latest commit of `dd2d57b6a1`\r\n\r\n", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When 
using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": false}, {"body": "(incidentally `mixed_precision: fp16|bf16|'no'` is odd - why do different values don't follow the same style - i.e 'no' in quotes but not the other values)", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": false}, {"body": "Hello @stas, this case can't be helped as 'no' is the default value and if one doesn't specify the entry in accelerate config file, it will get default value of 'no' and thereby we have no way to check if user has given the default in config file or was it set to default when reading the config file. It would mean a lot of code rewriting as `mixed_precision` is used in various other non DeepSpeed parts.\n\nThe default value of `mixed_precision` would be overriden by that in `deepspeed_config_file`. Now, if you answer the questionnaire via `accelerate config`, the entry of `mixed_precision` won't be there when using `deepspeed_config_file`. \n\n\n@sgugger, what are your thoughts around this. 
", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": true}, {"body": "understood! in which case I think this should work:\r\n\r\n1. if the `mixed_precision` config isn't in the accelerate config file all is good\r\n2. if the `mixed_precision` config is in the accelerate config file and ds_config file is used, the value of the former must match the value of the latter or assert.", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": false}, {"body": "```suggestion\r\nBelow is a sample script using `deepspeed_config_file` in different scenarios.\r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. 
\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.", "from_author": false}, {"body": "Are the lines of `-` intended? Should there be a new line before the text?", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. 
Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? ", "from_author": false}, {"body": "```suggestion\r\n1. Content of the `accelerate` config:\r\n```\r\n(to not mistake this with the command `accelerate config`", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:", "from_author": false}, {"body": "```suggestion\r\n2. Content of the `accelerate` config:\r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. 
`ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. `accelerate config`:", "from_author": false}, {"body": "```suggestion\r\n**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `\"auto\"` in the DeepSpeed` configuration file and check that things work as expected.\r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. 
`accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. 
`accelerate config`:\n+\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: ds_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 4\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: bf16\n+ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\n+```\n+\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.", "from_author": false}, {"body": "```suggestion\r\n1. New `ds_config.json` with `\"auto\"` for the `accelerate launch` DeepSpeed command arguments:\r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. 
Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. `accelerate config`:\n+\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: ds_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 4\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: bf16\n+ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\n+```\n+\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.\n+\n+1. New `ds_config.json` with `auto` for the `accelerate launch` deepspeed cmd args:", "from_author": false}, {"body": "```suggestion\r\n**Note**: Remaining `\"auto\"` values are handled in `accelerator.prepare()` call as explained in point 2 of \r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. 
\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. `accelerate config`:\n+\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: ds_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 4\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: bf16\n+ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\n+```\n+\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.\n+\n+1. New `ds_config.json` with `auto` for the `accelerate launch` deepspeed cmd args:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": \"auto\"\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": \"auto\",\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\",\n+ \"offload_optimizer\": {\n+ \"device\": \"auto\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"auto\"\n+ }\n+ },\n+ \"gradient_clipping\": \"auto\",\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": \"auto\",\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+2. 
Output of `accelerate launch --mixed_precision=\"fp16\" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device=\"cpu\" --offload_optimizer_device=\"nvme\" --zero3_save_16bit_model=\"true\" test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: fp16\n+ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}\n+```\n+\n+**Note**: Remaining `auto` values are handled in `accelerator.prepare()` call as explained in point 2 of ", "from_author": false}, {"body": "Why is this set to True by default now?", "diff_hunk": "@@ -450,15 +461,17 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"", "from_author": false}, {"body": "these are the exact outputs from the new config #830. Added new lines. ", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. 
`ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? ", "from_author": true}, {"body": "Hello, as stas mentioned in Issue #922 \r\n\r\n> this flag should be True by default as zero stage 3 is for large models, it's very unlikely the user will be able to load those models w/o zero.Init", "diff_hunk": "@@ -450,15 +461,17 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"", "from_author": true}, {"body": "Is this only used if zero3 is enabled in the rest of the code? It should at least only default to True when Zero-3 is enabled otherwise.", "diff_hunk": "@@ -450,15 +461,17 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"", "from_author": false}, {"body": "Yes, only used when zero3 is enabled else will give a warning and gets set to False as mentioned in on of the above threads. 
Done, defaults to True only for stage 3.", "diff_hunk": "@@ -450,15 +461,17 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"", "from_author": true}], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}, {"body": "I tried this branch, getting:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"m4/training/main.py\", line 47, in\r\n```\r\n\r\nIn Multi GPU launcher, if one isn't using `--debug` then they won't see any error stack trace at all (which might be hard to read but important for root cause and resolution). This makes `--debug` sort of mandatory instead of optional. Can the user have normal error traceback if `--debug` isn't given? Please let me know if I am missing something or way off. \r\n\r\nWent over it again with a sample code, \r\n```python\r\nfrom rich.traceback import install\r\ninstall(show_locals=True)\r\n``` \r\nmakes rich the default traceback handler. `--debug` meaning is no more what Sylvain was referring to, hence my confusion. ", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/613", "pr_id": 1020716188}, {"diff": "diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nindex c0c9c62f2..cb0a7ee42 100644\n--- a/.github/workflows/nightly.yml\n+++ b/.github/workflows/nightly.yml\n@@ -57,10 +57,16 @@ jobs:\n git fetch && git checkout ${{ github.sha }}\n pip install -e . 
--no-deps\n \n- - name: Run test on GPUs\n+ - name: Run core and big modeling tests on GPUs\n run: |\n source activate accelerate\n- make test\n+ make test_big_modeling\n+ make test_core\n+\n+ - name: Run Integration tests on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_integrations\n \n - name: Run examples on GPUs\n run: |\ndiff --git a/Makefile b/Makefile\nindex 100a1484a..c1c79d6e8 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -44,6 +44,8 @@ test_examples:\n \tpython -m pytest -s -v ./tests/test_examples.py\n \n # Broken down example tests for the CI runners\n+test_integrations:\n+\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp\n test_example_differences:\n \tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests\n \ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex 30eb2a4d8..1be794421 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -35,7 +35,6 @@\n require_cuda,\n require_deepspeed,\n require_multi_gpu,\n- skip,\n slow,\n )\n from accelerate.test_utils.training import RegressionDataset\n@@ -697,7 +696,6 @@ def test_performance(self):\n with patch_environment(omp_num_threads=1):\n execute_subprocess_async(cmd_stage, env=os.environ.copy())\n \n- @skip\n def test_checkpointing(self):\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_checkpointing.py\")\n cmd = [\n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}, {"body": "Thank you \ud83d\ude04. This is neat and finally all the tests would run as expected \ud83e\udd17. ", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/612", "pr_id": 1020571705}, {"diff": "diff --git a/.github/workflows/check_dependencies.yml b/.github/workflows/build_and_run_tests.yml\nsimilarity index 82%\nrename from .github/workflows/check_dependencies.yml\nrename to .github/workflows/build_and_run_tests.yml\nindex f8ac1f492..a5378353d 100644\n--- a/.github/workflows/check_dependencies.yml\n+++ b/.github/workflows/build_and_run_tests.yml\n@@ -10,7 +10,7 @@ env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n \n jobs:\n- check-for-setup:\n+ check-for-source:\n runs-on: ubuntu-latest\n name: Check if setup was changed\n outputs:\n@@ -28,7 +28,7 @@ jobs:\n id: was_changed\n run: |\n for file in ${{ steps.changed-files.outputs.all_changed_files }}; do\n- if [ `basename \"${file}\"` = \"setup.py\" ]; then\n+ if [ `basename \"${file}\"` == \"setup.py\" ]; then\n echo ::set-output name=changed::\"1\"\n fi\n done\n@@ -36,10 +36,10 @@ jobs:\n build-docker-containers:\n needs: check-for-setup\n if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')\n- uses: ./.github/workflows/build-docker-images.yml\n+ uses: ./.github/workflows/build_docker_images.yml\n secrets: inherit\n \n- run-tests:\n+ run-merge-tests:\n needs: build-docker-containers\n if: always()\n- uses: ./.github/workflows/on-merge.yml\n\\ No newline at end of file\n+ uses: ./.github/workflows/run_merge_tests.yml\n\\ No newline at end of file\ndiff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build_docker_images.yml\nsimilarity index 100%\nrename from .github/workflows/build-docker-images.yml\nrename to .github/workflows/build_docker_images.yml\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/run_merge_tests.yml\nsimilarity index 100%\nrename from 
.github/workflows/on-merge.yml\nrename to .github/workflows/run_merge_tests.yml\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex d39d36951..5d8ff0c1b 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -1,6 +1,13 @@\n name: Run Tests\n \n-on: [pull_request]\n+on:\n+ pull_request:\n+ paths:\n+ - \"src/**\"\n+ - \"tests/**\"\n+ - \".github/**\"\n+ - \"setup.py\"\n+ types: [opened, synchronize, reopened]\n \n env:\n HF_HOME: ~/hf_cache\n", "code_comments": [{"body": "I feel this name should be `Checks if setup.py were changed`, as I can only see it checks that file.\r\n\r\nAnd on merge event to `main`, it seems the tests are always run.", "diff_hunk": "@@ -10,9 +10,9 @@ env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n \n jobs:\n- check-for-setup:\n+ check-for-source:\n runs-on: ubuntu-latest\n- name: Check if setup was changed\n+ name: Checks if source code or tests were changed", "from_author": false}], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}, {"body": "So the purpose of `check-for-source` in `build_and_run_tests.yml` and `test.yml` are different?\r\n\r\n- on PR request: check source file, and run the tests only if any changed found.\r\n- on merge (to main): check `setup.py` to build the image if necessary. Then always run the tests?", "from_author": false}, {"body": "I am probably wrong above, as I see you have `needs: build-docker-containers`. \r\nBut I am confused a bit: currently (without this PR), what happens (on merge) if `setup.py` is not changed, but some `.py` files change?\r\n\r\nI will review in more detail tomorrow.\r\n", "from_author": false}, {"body": "@ydshieh if some `.py` was changed but not the setup then the tests are ran but Docker images won't be rebuilt", "from_author": true}, {"body": "Thanks, @muellerzr . So in this PR, what happens (on merge) if no `.py` file changes at all (`setup.py` not change + no other `.py` change)? If I understand, the tests still run. Is this the expected behavior?", "from_author": false}, {"body": "My only remaining question is as above: on merge, should we skip the tests if no `.py` file changes at all. Otherwise, all LGTM.", "from_author": false}, {"body": "@ydshieh (sorry this got buried in my priorities):\r\n\r\nRe: on merge, yes it is because we never know if some other dependency has a regression issue or what. So we're still made aware of when this happens post merge and can immediately know it's unrelated to something directly in that PR without it silently not failing until the nightlies", "from_author": true}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/611", "pr_id": 1020543878}, {"diff": "diff --git a/docs/source/index.mdx b/docs/source/index.mdx\nindex 3f0f58802..1664fd7a2 100644\n--- a/docs/source/index.mdx\n+++ b/docs/source/index.mdx\n@@ -55,7 +55,7 @@ accelerate launch {my_script.py}\n >Copy to CPU with non_blocking=False (default)
\r\n\r\nIn this case `.to()` adds a `cudaStreamSynchronize` op which makes the CPU use the correct value of the tensor when printing\r\n![image](https://user-images.githubusercontent.com/29777165/194038267-91033b35-fc18-4a0d-9af1-cfb1e382721e.png)\r\n\r\nCopy to CPU with non_blocking=True
\r\n\r\n\r\nIn this case the CPU submits the kernels for `.to()` to the GPU then moves on to perform the print operation which uses an incorrect value for the tensor `tensor(0.)` **(The dangerous part)** \r\n![image](https://user-images.githubusercontent.com/29777165/194038545-88833ffc-5141-4fd4-a503-c140202f0bed.png)\r\n\r\nCopy to another GPU with non_blocking=True
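Before the cross-GPU case, a minimal sketch of the two device-to-host behaviours described above (a CUDA device is assumed; the tensor shape and values are a toy example, not taken from the profiled run):

```python
import torch

# Toy tensor; a CUDA device is assumed to be available.
gpu_tensor = torch.zeros(1 << 20, device="cuda")
gpu_tensor += 1.0  # queue some work on the GPU stream

# Default (blocking) copy: .to() synchronizes, so the value is safe to read.
safe_copy = gpu_tensor.to("cpu")
print(safe_copy[0])  # 1.0

# Non-blocking copy: the call returns before the transfer has necessarily
# finished, so reading immediately can observe stale data (the 0. case above).
risky_copy = gpu_tensor.to("cpu", non_blocking=True)
torch.cuda.synchronize()  # explicit sync before reading makes it safe again
print(risky_copy[0])  # 1.0 once the stream has been synchronized
```

The explicit `torch.cuda.synchronize()` plays the role of the `cudaStreamSynchronize` that the blocking `.to()` inserts on its own, which is why the blocking path prints the correct value without any extra care.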
\r\n\r\nIt seems that the `non_blocking` here doesn\u2019t do much (we get basically the same thing using `non_blocking=True` ). In both cases we have GPU 1 waiting for GPU 0 to finish working on the tensor, and THEN copy it to GPU 1. And finally the CPU prints the tensor that\u2019s now located on GPU 1\r\nIn this case `.to()` creates a `cudaStreamWaitEvent` event (figure 2) which makes GPU 1 waits for GPU 0. I made [an issue](https://discuss.pytorch.org/t/to-cuda-1-non-blocking-true-creates-cudastreamwaitevent/162296) on Pytorch\u2019s forums to investigate why is this the case\r\n\r\n![image](https://user-images.githubusercontent.com/29777165/194038566-56a7c178-d659-4c3f-a759-d94da8c0f152.png)\r\n\r\n![image](https://user-images.githubusercontent.com/29777165/194038587-5a089b5c-af42-4bab-86b1-516e2ebe3300.png)\r\n\r\nStill todo is update the script docs, but code wise it's good \r\n\r\nDone!", "from_author": true}, {"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/317", "pr_id": 913389268}, {"diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 514b3a4f4..a79012d5c 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -29,7 +29,7 @@\n from .optimizer import AcceleratedOptimizer\n from .scheduler import AcceleratedScheduler\n from .state import AcceleratorState, DistributedType, is_deepspeed_available\n-from .tracking import CometMLTracker, GeneralTracker, TensorBoardTracker, WandBTracker, get_available_trackers\n+from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers\n from .utils import (\n DeepSpeedPlugin,\n LoggerType,\n@@ -39,9 +39,6 @@\n extract_model_from_parallel,\n gather,\n get_pretty_name,\n- is_comet_ml_available,\n- is_tensorboard_available,\n- is_wandb_available,\n pad_across_processes,\n save,\n wait_for_everyone,\n@@ -132,32 +129,13 @@ def __init__(\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n- logging_dir: Optional[Union[str, os.PathLike]] = \"\",\n+ logging_dir: Optional[Union[str, os.PathLike]] = None,\n dispatch_batches: Optional[bool] = None,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n- loggers = []\n- if log_with is not None:\n- if not isinstance(log_with, (list, tuple)):\n- log_with = [log_with]\n- logger.debug(f\"{log_with}\")\n- if \"all\" in log_with or LoggerType.ALL in log_with:\n- loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n- else:\n- for log_type in log_with:\n- if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):\n- raise ValueError(\n- f\"Unsupported logging capability: {log_type}. 
Choose between {LoggerType.list()}\"\n- )\n- if issubclass(type(log_type), GeneralTracker):\n- loggers.append(log_type)\n- else:\n- log_type = LoggerType(log_type)\n- if log_type not in loggers:\n- loggers.append(log_type)\n- self.log_with = loggers\n self.logging_dir = logging_dir\n+ self.log_with = filter_trackers(log_with, self.logging_dir)\n \n if mixed_precision is not None:\n mixed_precision = str(mixed_precision)\n@@ -644,12 +622,13 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n if issubclass(type(tracker), GeneralTracker):\n # Custom trackers are already initialized\n self.trackers.append(tracker)\n- elif str(tracker).lower() == \"tensorboard\" and is_tensorboard_available():\n- self.trackers.append(TensorBoardTracker(project_name, self.logging_dir))\n- elif str(tracker).lower() == \"wandb\" and is_wandb_available():\n- self.trackers.append(WandBTracker(project_name))\n- elif str(tracker).lower() == \"comet_ml\" and is_comet_ml_available():\n- self.trackers.append(CometMLTracker(project_name))\n+ else:\n+ tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)]\n+ if getattr(tracker_init, \"requires_logging_directory\"):\n+ # We can skip this check since it was done in `__init__`\n+ self.trackers.append(tracker_init(project_name, self.logging_dir))\n+ else:\n+ self.trackers.append(tracker_init(project_name))\n if config is not None:\n for tracker in self.trackers:\n tracker.store_init_configuration(config)\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 8d496a3d1..de3a19310 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -17,8 +17,8 @@\n \n import logging\n import os\n-from abc import ABCMeta, abstractmethod\n-from typing import Optional, Union\n+from abc import ABCMeta, abstractmethod, abstractproperty\n+from typing import List, Optional, Union\n \n from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n \n@@ -54,6 +54,13 @@ class GeneralTracker(object, metaclass=ABCMeta):\n A base Tracker class to be used for all logging integration implementations.\n \"\"\"\n \n+ @abstractproperty\n+ def requires_logging_directory(self):\n+ \"\"\"\n+ Whether the logger requires a directory to store their logs. 
Should either return `True` or `False`.\n+ \"\"\"\n+ pass\n+\n @abstractmethod\n def store_init_configuration(self, values: dict):\n \"\"\"\n@@ -100,7 +107,9 @@ class TensorBoardTracker(GeneralTracker):\n Location for TensorBoard logs to be stored.\n \"\"\"\n \n- def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \"\"):\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]]):\n self.run_name = run_name\n self.logging_dir = os.path.join(logging_dir, run_name)\n self.writer = tensorboard.SummaryWriter(self.logging_dir)\n@@ -157,6 +166,8 @@ class WandBTracker(GeneralTracker):\n The name of the experiment run.\n \"\"\"\n \n+ requires_logging_directory = False\n+\n def __init__(self, run_name: str):\n self.run_name = run_name\n self.run = wandb.init(self.run_name)\n@@ -209,6 +220,8 @@ class CometMLTracker(GeneralTracker):\n The name of the experiment run.\n \"\"\"\n \n+ requires_logging_directory = False\n+\n def __init__(self, run_name: str):\n self.run_name = run_name\n self.writer = Experiment(project_name=run_name)\n@@ -250,3 +263,59 @@ def finish(self):\n \"\"\"\n self.writer.end()\n logger.info(\"CometML run closed\")\n+\n+\n+LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+\n+\n+def filter_trackers(\n+ log_with: List[Union[str, LoggerType, GeneralTracker]], logging_dir: Union[str, os.PathLike] = None\n+):\n+ \"\"\"\n+ Takes in a list of potential tracker types and checks that:\n+ - The tracker wanted is available in that environment\n+ - Filters out repeats of tracker types\n+ - If `all` is in `log_with`, will return all trackers in the environment\n+ - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None`\n+\n+ Args:\n+ log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):\n+ A list of loggers to be setup for experiment tracking. Should be one or several of:\n+\n+ - `\"all\"`\n+ - `\"tensorboard\"`\n+ - `\"wandb\"`\n+ - `\"comet_ml\"`\n+ If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them. Can also\n+ accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n+ logging_dir (`str`, `os.PathLike`, *optional*):\n+ A path to a directory for storing logs of locally-compatible loggers.\n+ \"\"\"\n+ loggers = []\n+ if log_with is not None:\n+ if not isinstance(log_with, (list, tuple)):\n+ log_with = [log_with]\n+ logger.debug(f\"{log_with}\")\n+ if \"all\" in log_with or LoggerType.ALL in log_with:\n+ loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n+ else:\n+ for log_type in log_with:\n+ if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):\n+ raise ValueError(f\"Unsupported logging capability: {log_type}. 
Choose between {LoggerType.list()}\")\n+ if issubclass(type(log_type), GeneralTracker):\n+ loggers.append(log_type)\n+ else:\n+ log_type = LoggerType(log_type)\n+ if log_type not in loggers:\n+ if log_type in get_available_trackers():\n+ tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)]\n+ if getattr(tracker_init, \"requires_logging_directory\"):\n+ if logging_dir is None:\n+ raise ValueError(\n+ f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\n+ )\n+ loggers.append(log_type)\n+ else:\n+ logger.info(f\"Tried adding logger {log_type}, but package is unavailable in the system.\")\n+\n+ return loggers\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex b3974afe2..c5b940a46 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -103,6 +103,12 @@ def test_log(self):\n self.assertEqual(iteration, values[\"iteration\"])\n self.assertEqual(my_text, values[\"my_text\"])\n \n+ def test_logging_dir(self):\n+ with self.assertRaisesRegex(ValueError, \"Logging with `tensorboard` requires a `logging_dir`\"):\n+ _ = Accelerator(log_with=\"tensorboard\")\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ _ = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n+\n \n @mock.patch.dict(os.environ, {\"WANDB_MODE\": \"offline\"})\n class WandBTrackingTest(TempDirTestCase, MockingTestCase):\n@@ -237,6 +243,8 @@ class MyCustomTracker(GeneralTracker):\n \"some_string\",\n ]\n \n+ requires_logging_directory = False\n+\n def __init__(self, dir: str):\n self.f = open(f\"{dir}/log.csv\", \"w+\")\n self.writer = csv.DictWriter(self.f, fieldnames=self._col_names)\n", "code_comments": [{"body": "Why not use a class attribute directly? We could set a default then (I don't think this actually defaults to False).", "diff_hunk": "@@ -54,6 +54,14 @@ class GeneralTracker(object, metaclass=ABCMeta):\n A base Tracker class to be used for all logging integration implementations.\n \"\"\"\n \n+ @abstractproperty\n+ def requires_logging_directory(self):\n+ \"\"\"\n+ Whether the logger requires a directory to store their logs. Should either return `True` or `False`. The\n+ default behavior is `False`\n+ \"\"\"\n+ pass", "from_author": false}, {"body": "Very nice!", "diff_hunk": "@@ -644,12 +653,13 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n if issubclass(type(tracker), GeneralTracker):\n # Custom trackers are already initialized\n self.trackers.append(tracker)\n- elif str(tracker).lower() == \"tensorboard\" and is_tensorboard_available():\n- self.trackers.append(TensorBoardTracker(project_name, self.logging_dir))\n- elif str(tracker).lower() == \"wandb\" and is_wandb_available():\n- self.trackers.append(WandBTracker(project_name))\n- elif str(tracker).lower() == \"comet_ml\" and is_comet_ml_available():\n- self.trackers.append(CometMLTracker(project_name))\n+ else:", "from_author": false}, {"body": "This won't right. (This is also evidence of me going back and forth with it). Having it like this IMO forces implementations to think about how they want the behavior to go, and have it explicit in the source code for users interested in the code to see. 
\r\n\r\nFor example, forcing it to be an abstract property/function, W&B could eventually make it a conditional where if it's in `offline_mode` (by checking the env var), then it would be True, otherwise False.\r\n\r\nBut, I'm not against just making it a normal class attr directly", "diff_hunk": "@@ -54,6 +54,14 @@ class GeneralTracker(object, metaclass=ABCMeta):\n A base Tracker class to be used for all logging integration implementations.\n \"\"\"\n \n+ @abstractproperty\n+ def requires_logging_directory(self):\n+ \"\"\"\n+ Whether the logger requires a directory to store their logs. Should either return `True` or `False`. The\n+ default behavior is `False`\n+ \"\"\"\n+ pass", "from_author": true}, {"body": "In that case, you should remove the default behavior mention in the docstring, as there is no default :-)", "diff_hunk": "@@ -54,6 +54,14 @@ class GeneralTracker(object, metaclass=ABCMeta):\n A base Tracker class to be used for all logging integration implementations.\n \"\"\"\n \n+ @abstractproperty\n+ def requires_logging_directory(self):\n+ \"\"\"\n+ Whether the logger requires a directory to store their logs. Should either return `True` or `False`. The\n+ default behavior is `False`\n+ \"\"\"\n+ pass", "from_author": false}, {"body": "Much cleaner!", "diff_hunk": "@@ -132,32 +129,13 @@ def __init__(\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n- logging_dir: Optional[Union[str, os.PathLike]] = \"\",\n+ logging_dir: Optional[Union[str, os.PathLike]] = None,\n dispatch_batches: Optional[bool] = None,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n- loggers = []\n- if log_with is not None:\n- if not isinstance(log_with, (list, tuple)):\n- log_with = [log_with]\n- logger.debug(f\"{log_with}\")\n- if \"all\" in log_with or LoggerType.ALL in log_with:\n- loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n- else:\n- for log_type in log_with:\n- if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):\n- raise ValueError(\n- f\"Unsupported logging capability: {log_type}. 
Choose between {LoggerType.list()}\"\n- )\n- if issubclass(type(log_type), GeneralTracker):\n- loggers.append(log_type)\n- else:\n- log_type = LoggerType(log_type)\n- if log_type not in loggers:\n- loggers.append(log_type)\n- self.log_with = loggers\n self.logging_dir = logging_dir\n+ self.log_with = filter_trackers(log_with, self.logging_dir)", "from_author": false}], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}, {"body": "@sgugger pinging for a rereview just to make sure the refactor seems sound to you \ud83d\ude04 ", "from_author": true}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/316", "pr_id": 910332479}, {"diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex e18686e12..514b3a4f4 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -440,7 +440,10 @@ def _prepare_deepspeed(self, *args):\n batch_size_per_device * deepspeed_plugin.gradient_accumulation_steps * self.num_processes\n )\n \n- result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+ result = [\n+ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\n+ for obj in args\n+ ]\n \n model = None\n optimizer = None\n", "code_comments": [], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/315", "pr_id": 908519146}, {"diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex c213a9167..5020c6cc4 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -14,4 +14,17 @@ jobs:\n - name: Install Python dependencies\n run: pip install setuptools==59.5.0; pip install -e .[test,test_trackers]\n - name: Run Tests\n- run: make test\n\\ No newline at end of file\n+ run: make test\n+ \n+ test_examples:\n+ runs-on: ubuntu-latest\n+ steps:\n+ - uses: actions/checkout@v2\n+ - name: Set up Python 3.6\n+ uses: actions/setup-python@v2\n+ with:\n+ python-version: 3.6\n+ - name: Install Python dependencies\n+ run: pip install setuptools==59.5.0; pip install -e .[test] tensorboard\n+ - name: Run Tests\n+ run: make test_examples\n\\ No newline at end of file\ndiff --git a/Makefile b/Makefile\nindex e5b557f10..db7893565 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -25,4 +25,7 @@ style:\n \t\n # Run tests for the library\n test:\n-\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/\n\\ No newline at end of file\n+\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/ --ignore=./tests/test_examples.py\n+\n+test_examples:\n+\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/test_examples.py\ndiff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 35205107d..e595a8db7 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -115,14 +115,17 @@ def training_function(config, args):\n \n # New Code #\n # Parse out whether we are saving every epoch or after a certain number of batches\n- if args.checkpointing_steps == \"epoch\":\n- checkpointing_steps = args.checkpointing_steps\n- elif args.checkpointing_steps.isdigit():\n- checkpointing_steps = int(args.checkpointing_steps)\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ 
elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n+ )\n else:\n- raise ValueError(\n- f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n- )\n+ checkpointing_steps = None\n \n set_seed(seed)\n \n@@ -162,21 +165,29 @@ def training_function(config, args):\n \n # New Code #\n # We need to keep track of how many total steps we have iterated over\n- if isinstance(checkpointing_steps, int):\n- overall_step = 0\n+ overall_step = 0\n \n # We need to load the checkpoint back in before training here with `load_state`\n # The total number of epochs is adjusted based on where the state is being loaded from,\n # as we assume continuation of the same training script\n if args.resume_from_checkpoint:\n- accelerator.print(f\"Resuming from checkpoint: {args.resume_from_checkpoint}\")\n- accelerator.load_state(args.resume_from_checkpoint)\n-\n- if \"epoch\" in args.resume_from_checkpoint:\n- num_epochs -= int(args.resume_from_checkpoint.replace(\"epoch_\", \"\"))\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ path = os.path.basename(args.resume_from_checkpoint)\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ # Extract `epoch_{i}` or `step_{i}`\n+ training_difference = os.path.splitext(path)[0]\n+\n+ if \"epoch\" in training_difference:\n+ num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n resume_step = None\n else:\n- resume_step = int(args.resume_from_checkpoint.replace(\"step_\", \"\"))\n+ resume_step = int(training_difference.replace(\"step_\", \"\"))\n num_epochs -= resume_step // len(train_dataloader)\n # If resuming by step, we also need to know exactly how far into the DataLoader we went\n resume_step = (num_epochs * len(train_dataloader)) - resume_step\n@@ -200,6 +211,8 @@ def training_function(config, args):\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n+ # New Code #\n+ overall_step += 1\n \n # New Code #\n # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state`\n@@ -237,7 +250,7 @@ def training_function(config, args):\n # Will contain files: \"pytorch_model.bin\", \"optimizer.bin\", \"scheduler.bin\", and \"random_states.pkl\"\n # If mixed precision was used, will also save a \"scalar.bin\" file\n if checkpointing_steps == \"epoch\":\n- output_dir = f\"epoch_{num_epochs}\"\n+ output_dir = f\"epoch_{epoch}\"\n if args.output_dir is not None:\n output_dir = os.path.join(args.output_dir, output_dir)\n accelerator.save_state(output_dir)\n@@ -258,7 +271,7 @@ def main():\n parser.add_argument(\n \"--checkpointing_steps\",\n type=str,\n- default=\"epoch\",\n+ default=None,\n help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n )\n parser.add_argument(\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex ca11fa04b..170211e5e 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -13,6 +13,8 @@\n # See the 
License for the specific language governing permissions and\n # limitations under the License.\n import argparse\n+import logging\n+import os\n \n import torch\n from torch.utils.data import DataLoader\n@@ -28,6 +30,9 @@\n )\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n ########################################################################\n # This is a fully working simple example to use Accelerate,\n # specifically showcasing the experiment tracking capability,\n@@ -110,7 +115,9 @@ def training_function(config, args):\n # Note: If using a custom `Tracker` class, should be passed in here such as:\n # >>> log_with = [\"all\", MyCustomTrackerClassInstance()]\n if args.with_tracking:\n- accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\", logging_dir=args.logging_dir\n+ )\n else:\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n@@ -158,7 +165,10 @@ def training_function(config, args):\n # New Code #\n # We need to initalize the trackers we use. Overall configurations can also be stored\n if args.with_tracking:\n- accelerator.init_trackers(\"accelerate_glue_with_tracking\", config)\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n # Now we train the model\n for epoch in range(num_epochs):\n@@ -173,7 +183,8 @@ def training_function(config, args):\n outputs = model(**batch)\n loss = outputs.loss\n # New Code #\n- total_loss += loss.detach().float()\n+ if args.with_tracking:\n+ total_loss += loss.detach().float()\n loss = loss / gradient_accumulation_steps\n accelerator.backward(loss)\n if step % gradient_accumulation_steps == 0:\n@@ -202,14 +213,21 @@ def training_function(config, args):\n # New Code #\n # To actually log, we call `Accelerator.log`\n # The values passed can be of `str`, `int`, or `float`\n- accelerator.log(\n- {\"accuracy\": eval_metric[\"accuracy\"], \"f1\": eval_metric[\"f1\"], \"train_loss\": total_loss, \"epoch\": epoch}\n- )\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"accuracy\": eval_metric[\"accuracy\"],\n+ \"f1\": eval_metric[\"f1\"],\n+ \"train_loss\": total_loss,\n+ \"epoch\": epoch,\n+ }\n+ )\n \n # New Code #\n # When a run is finished, you should call `accelerator.end_training()`\n # to close all of the open trackers\n- accelerator.end_training()\n+ if args.with_tracking:\n+ accelerator.end_training()\n \n \n def main():\n@@ -229,6 +247,12 @@ def main():\n action=\"store_true\",\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n )\n+ parser.add_argument(\n+ \"--logging_dir\",\n+ type=str,\n+ default=\"logs\",\n+ help=\"Location on where to store experiment tracking logs`\",\n+ )\n args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex e62568a39..01033498c 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -74,16 +74,12 @@ def __getitem__(self, idx):\n def training_function(config, args):\n # Initialize accelerator\n if args.with_tracking:\n- accelerator = 
Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\", logging_dir=args.logging_dir\n+ )\n else:\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n \n- if hasattr(args.checkpointing_steps, \"isdigit\"):\n- checkpointing_steps = args.checkpointing_steps\n- if args.checkpointing_steps.isdigit():\n- checkpointing_steps = int(args.checkpointing_steps)\n- else:\n- checkpointing_steps = None\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n@@ -93,9 +89,25 @@ def training_function(config, args):\n if not isinstance(image_size, (list, tuple)):\n image_size = (image_size, image_size)\n \n+ # Parse out whether we are saving every epoch or after a certain number of batches\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n+ )\n+ else:\n+ checkpointing_steps = None\n+\n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n- accelerator.init_trackers(\"cv_example\", config)\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n # Grab all the image filenames\n file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(\".jpg\")]\n@@ -163,24 +175,27 @@ def training_function(config, args):\n )\n \n # Potentially load in the weights and states from a previous save\n- state_restored = True\n if args.resume_from_checkpoint:\n if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n accelerator.load_state(args.resume_from_checkpoint)\n- resume_step = None\n+ path = os.path.basename(args.resume_from_checkpoint)\n else:\n # Get the most recent checkpoint\n dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n dirs.sort(key=os.path.getctime)\n path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n- if \"epoch\" in path.name:\n- num_epochs -= int(path.name.replace(\"epoch_\", \"\"))\n- else:\n- resume_step = int(path.name.replace(\"step_\", \"\"))\n- num_epochs -= resume_step // len(train_dataloader)\n- resume_step = (num_epochs * len(train_dataloader)) - resume_step\n- state_restored = False\n+ # Extract `epoch_{i}` or `step_{i}`\n+ training_difference = os.path.splitext(path)[0]\n+\n+ if \"epoch\" in training_difference:\n+ num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n+ resume_step = None\n+ else:\n+ resume_step = int(training_difference.replace(\"step_\", \"\"))\n+ num_epochs -= resume_step // len(train_dataloader)\n+ # If resuming by step, we also need to know exactly how far into the DataLoader we went\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n \n overall_step = 0\n # Now we train the model\n@@ 
-190,8 +205,9 @@ def training_function(config, args):\n total_loss = 0\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n- if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\n- continue\n+ if args.resume_from_checkpoint and epoch == 0:\n+ if resume_step is not None and step < resume_step:\n+ pass\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n inputs = (batch[\"image\"] - mean) / std\n@@ -206,32 +222,40 @@ def training_function(config, args):\n optimizer.zero_grad()\n overall_step += 1\n if isinstance(checkpointing_steps, int):\n+ output_dir = f\"step_{overall_step}\"\n if overall_step % checkpointing_steps == 0:\n- accelerator.save_state(f\"step_{overall_step}\")\n- if state_restored:\n- model.eval()\n- accurate = 0\n- num_elems = 0\n- for step, batch in enumerate(eval_dataloader):\n- # We could avoid this line since we set the accelerator with `device_placement=True`.\n- batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n- inputs = (batch[\"image\"] - mean) / std\n- with torch.no_grad():\n- outputs = model(inputs)\n- predictions = outputs.argmax(dim=-1)\n- accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n- num_elems += accurate_preds.shape[0]\n- accurate += accurate_preds.long().sum()\n-\n- eval_metric = accurate.item() / num_elems\n- # Use accelerator.print to print only on the main process.\n- accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n- if args.with_tracking:\n- accelerator.log(\n- {\"accuracy\": 100 * eval_metric, \"total_loss\": total_loss, \"epoch\": epoch}, step=overall_step\n- )\n- if args.checkpointing_steps == \"epoch\":\n- accelerator.save_state(f\"epoch_{epoch}\")\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+ model.eval()\n+ accurate = 0\n+ num_elems = 0\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n+ inputs = (batch[\"image\"] - mean) / std\n+ with torch.no_grad():\n+ outputs = model(inputs)\n+ predictions = outputs.argmax(dim=-1)\n+ accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n+ num_elems += accurate_preds.shape[0]\n+ accurate += accurate_preds.long().sum()\n+\n+ eval_metric = accurate.item() / num_elems\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\"accuracy\": 100 * eval_metric, \"total_loss\": total_loss, \"epoch\": epoch}, step=overall_step\n+ )\n+ if checkpointing_steps == \"epoch\":\n+ output_dir = f\"epoch_{epoch}\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+\n+ if args.with_tracking:\n+ accelerator.end_training()\n \n \n def main():\n@@ -254,6 +278,12 @@ def main():\n default=None,\n help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.\",\n+ )\n parser.add_argument(\n \"--resume_from_checkpoint\",\n type=str,\n@@ -262,9 +292,15 @@ def main():\n )\n parser.add_argument(\n \"--with_tracking\",\n- required=False,\n+ action=\"store_true\",\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n )\n+ parser.add_argument(\n+ \"--logging_dir\",\n+ type=str,\n+ default=\"logs\",\n+ help=\"Location on where to store experiment tracking logs`\",\n+ )\n args = parser.parse_args()\n config = {\"lr\": 3e-2, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 64, \"image_size\": 224}\n training_function(config, args)\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 57125aa29..a0e8d568c 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -55,14 +55,21 @@\n def training_function(config, args):\n # Initialize accelerator\n if args.with_tracking:\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\", logging_dir=args.logging_dir\n+ )\n else:\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n \n if hasattr(args.checkpointing_steps, \"isdigit\"):\n- checkpointing_steps = args.checkpointing_steps\n- if args.checkpointing_steps.isdigit():\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. 
`{args.checkpointing_steps}` passed.\"\n+ )\n else:\n checkpointing_steps = None\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n@@ -74,7 +81,10 @@ def training_function(config, args):\n \n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n- accelerator.init_trackers(\"nlp_example\", config)\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n datasets = load_dataset(\"glue\", \"mrpc\")\n@@ -143,27 +153,31 @@ def collate_fn(examples):\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n \n+ overall_step = 0\n+\n # Potentially load in the weights and states from a previous save\n- state_restored = True\n if args.resume_from_checkpoint:\n if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n accelerator.load_state(args.resume_from_checkpoint)\n- resume_step = None\n+ path = os.path.basename(args.resume_from_checkpoint)\n else:\n # Get the most recent checkpoint\n dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n dirs.sort(key=os.path.getctime)\n path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n- if \"epoch\" in path.name:\n- num_epochs -= int(path.name.replace(\"epoch_\", \"\"))\n- else:\n- resume_step = int(path.name.replace(\"step_\", \"\"))\n- num_epochs -= resume_step // len(train_dataloader)\n- resume_step = (num_epochs * len(train_dataloader)) - resume_step\n- state_restored = False\n+ # Extract `epoch_{i}` or `step_{i}`\n+ training_difference = os.path.splitext(path)[0]\n+\n+ if \"epoch\" in training_difference:\n+ num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n+ resume_step = None\n+ else:\n+ resume_step = int(training_difference.replace(\"step_\", \"\"))\n+ num_epochs -= resume_step // len(train_dataloader)\n+ # If resuming by step, we also need to know exactly how far into the DataLoader we went\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n \n- overall_step = 0\n # Now we train the model\n for epoch in range(num_epochs):\n model.train()\n@@ -171,8 +185,9 @@ def collate_fn(examples):\n total_loss = 0\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n- if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\n- continue\n+ if args.resume_from_checkpoint and epoch == 0:\n+ if resume_step is not None and step < resume_step:\n+ pass\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n outputs = model(**batch)\n@@ -190,42 +205,51 @@ def collate_fn(examples):\n overall_step += 1\n \n if isinstance(checkpointing_steps, int):\n+ output_dir = f\"step_{overall_step}\"\n if overall_step % checkpointing_steps == 0:\n- accelerator.save_state(f\"step_{overall_step}\")\n- if state_restored:\n- model.eval()\n- for step, batch in enumerate(eval_dataloader):\n- # We could avoid this line since we set the accelerator with `device_placement=True`.\n- batch.to(accelerator.device)\n- with torch.no_grad():\n- outputs = model(**batch)\n- predictions = outputs.logits.argmax(dim=-1)\n- metric.add_batch(\n- predictions=accelerator.gather(predictions),\n- 
references=accelerator.gather(batch[\"labels\"]),\n- )\n-\n- eval_metric = metric.compute()\n- # Use accelerator.print to print only on the main process.\n- accelerator.print(f\"epoch {epoch}:\", eval_metric)\n- if args.with_tracking:\n- accelerator.log(\n- {\n- \"accuracy\": eval_metric[\"accuracy\"],\n- \"f1\": eval_metric[\"f1\"],\n- \"total_loss\": total_loss,\n- \"epoch\": epoch,\n- },\n- step=overall_step,\n- )\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"accuracy\": eval_metric[\"accuracy\"],\n+ \"f1\": eval_metric[\"f1\"],\n+ \"train_loss\": total_loss,\n+ \"epoch\": epoch,\n+ }\n+ )\n+\n+ if checkpointing_steps == \"epoch\":\n+ output_dir = f\"epoch_{epoch}\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n \n- if args.checkpointing_steps == \"epoch\":\n- accelerator.save_state(f\"epoch_{epoch}\")\n+ if args.with_tracking:\n+ accelerator.end_training()\n \n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n@@ -250,9 +274,21 @@ def main():\n )\n parser.add_argument(\n \"--with_tracking\",\n- required=False,\n+ action=\"store_true\",\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.\",\n+ )\n+ parser.add_argument(\n+ \"--logging_dir\",\n+ type=str,\n+ default=\"logs\",\n+ help=\"Location on where to store experiment tracking logs`\",\n+ )\n args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex b14b067be..a80b42d13 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -73,7 +73,7 @@ def __getitem__(self, idx):\n \n def training_function(config, args):\n # Initialize accelerator\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mix_precision)\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mix_precision)\n \n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n@@ -185,7 +185,6 @@ def training_function(config, args):\n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n parser.add_argument(\"--data_dir\", required=True, help=\"The data folder on disk.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n@@ -195,6 +194,12 @@ def main():\n \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n \"and an Nvidia Ampere GPU.\",\n )\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n config = {\"lr\": 3e-2, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 64, \"image_size\": 224}\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 915d35807..87cfd7698 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -49,21 +49,19 @@\n EVAL_BATCH_SIZE = 32\n \n \n-def training_function(config, args):\n- # Initialize accelerator\n- accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n- # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n- lr = config[\"lr\"]\n- num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n- seed = int(config[\"seed\"])\n- batch_size = int(config[\"batch_size\"])\n-\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n datasets = load_dataset(\"glue\", \"mrpc\")\n- metric = load_metric(\"glue\", \"mrpc\")\n-\n- set_seed(seed)\n \n def tokenize_function(examples):\n # max_length=None => use the model max length (it's actually the default)\n@@ -81,12 +79,6 @@ def tokenize_function(examples):\n # transformers library\n tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n \n- # If the batch size is too big we use gradient accumulation\n- gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n- gradient_accumulation_steps = batch_size // 
MAX_GPU_BATCH_SIZE\n- batch_size = MAX_GPU_BATCH_SIZE\n-\n def collate_fn(examples):\n # On TPU it's best to pad everything to the same length or training will be very slow.\n if accelerator.distributed_type == DistributedType.TPU:\n@@ -101,6 +93,29 @@ def collate_fn(examples):\n tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n )\n \n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n \n@@ -148,9 +163,10 @@ def collate_fn(examples):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n metric.add_batch(\n- predictions=accelerator.gather(predictions),\n- references=accelerator.gather(batch[\"labels\"]),\n+ predictions=predictions,\n+ references=references,\n )\n \n eval_metric = metric.compute()\ndiff --git a/setup.py b/setup.py\nindex 0ce1e5af4..7e26759d1 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,12 @@\n extras[\"docs\"] = []\n extras[\"test\"] = [\n \"pytest\",\n- \"pytest-xdist\"\n+ \"pytest-xdist\",\n+ \"pytest-subtests\",\n+ \"datasets\",\n+ \"transformers\",\n+ \"scipy\",\n+ \"sklearn\"\n ]\n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorflow\"]\n extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\ndiff --git a/src/accelerate/test_utils/examples.py b/src/accelerate/test_utils/examples.py\nnew file mode 100644\nindex 000000000..4e4092c0e\n--- /dev/null\n+++ b/src/accelerate/test_utils/examples.py\n@@ -0,0 +1,139 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each\n+`examples/by_feature` example. 
`compare_against_test` is the main function that should be used when testing, while the\n+others are used to either get the code that matters, or to preprocess them (such as stripping comments)\n+\"\"\"\n+\n+import os\n+from typing import List\n+\n+\n+def get_function_contents_by_name(lines: List[str], name: str):\n+ \"\"\"\n+ Extracts a function from `lines` of segmented source code with the name `name`.\n+\n+ Args:\n+ lines (`List[str]`):\n+ Source code of a script seperated by line.\n+ name (`str`):\n+ The name of the function to extract. Should be either `training_function` or `main`\n+ \"\"\"\n+ if name != \"training_function\" and name != \"main\":\n+ raise ValueError(f\"Incorrect function name passed: {name}, choose either 'main' or 'training_function'\")\n+ good_lines, found_start = [], False\n+ for line in lines:\n+ if not found_start and f\"def {name}\" in line:\n+ found_start = True\n+ good_lines.append(line)\n+ continue\n+ if found_start:\n+ if name == \"training_function\" and \"def main\" in line:\n+ return good_lines\n+ if name == \"main\" and \"if __name__\" in line:\n+ return good_lines\n+ good_lines.append(line)\n+\n+\n+def clean_lines(lines: List[str]):\n+ \"\"\"\n+ Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\\n')\n+\n+ Args:\n+ lines (`List[str]`):\n+ Source code of a script seperated by line.\n+ \"\"\"\n+ return [line for line in lines if not line.lstrip().startswith(\"#\") and line != \"\\n\"]\n+\n+\n+def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):\n+ \"\"\"\n+ Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be\n+ used when testing to see if `complete_*_.py` examples have all of the implementations from each of the\n+ `examples/by_feature/*` scripts.\n+\n+ It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code\n+ is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the\n+ `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter.\n+\n+ Args:\n+ base_filename (`str` or `os.PathLike`):\n+ The filepath of a single \"complete\" example script to test, such as `examples/complete_cv_example.py`\n+ feature_filename (`str` or `os.PathLike`):\n+ The filepath of a single feature example script. The contents of this script are checked to see if they\n+ exist in `base_filename`\n+ parser_only (`bool`):\n+ Whether to compare only the `main()` sections in both files, or to compare the contents of\n+ `training_loop()`\n+ secondary_filename (`str`, *optional*):\n+ A potential secondary filepath that should be included in the check. This function extracts the base\n+ functionalities off of \"examples/nlp_example.py\", so if `base_filename` is a script other than\n+ `complete_nlp_example.py`, the template script should be included here. 
Such as `examples/cv_example.py`\n+ \"\"\"\n+ with open(base_filename, \"r\") as f:\n+ base_file_contents = f.readlines()\n+ with open(os.path.abspath(os.path.join(\"examples\", \"nlp_example.py\")), \"r\") as f:\n+ full_file_contents = f.readlines()\n+ with open(feature_filename, \"r\") as f:\n+ feature_file_contents = f.readlines()\n+ if secondary_filename is not None:\n+ with open(secondary_filename, \"r\") as f:\n+ secondary_file_contents = f.readlines()\n+\n+ # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content\n+ if parser_only:\n+ base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, \"main\"))\n+ full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, \"main\"))\n+ feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, \"main\"))\n+ if secondary_filename is not None:\n+ secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, \"main\"))\n+ else:\n+ base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, \"training_function\"))\n+ full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, \"training_function\"))\n+ feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, \"training_function\"))\n+ if secondary_filename is not None:\n+ secondary_file_func = clean_lines(\n+ get_function_contents_by_name(secondary_file_contents, \"training_function\")\n+ )\n+\n+ _dl_line = \"train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\\n\"\n+\n+ # Specific code in our script that differs from the full version, aka what is new\n+ new_feature_code = []\n+ passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement\n+ for i, line in enumerate(feature_file_func):\n+ if i not in passed_idxs:\n+ if (line not in full_file_func) and (line.lstrip() != _dl_line):\n+ new_feature_code.append(line)\n+ passed_idxs.append(i)\n+\n+ # Extract out just the new parts from the full_file_training_func\n+ new_full_example_parts = []\n+ passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement\n+ for i, line in enumerate(base_file_func):\n+ if i not in passed_idxs:\n+ if (line not in full_file_func) and (line.lstrip() != _dl_line):\n+ new_full_example_parts.append(line)\n+ passed_idxs.append(i)\n+\n+ # Finally, get the overall diff\n+ diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts]\n+ if secondary_filename is not None:\n+ diff_from_two = [line for line in full_file_contents if line not in secondary_file_func]\n+ diff_from_example = [line for line in diff_from_example if line not in diff_from_two]\n+\n+ return diff_from_example\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nnew file mode 100644\nindex 000000000..d60d7c08f\n--- /dev/null\n+++ b/tests/test_examples.py\n@@ -0,0 +1,190 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import tempfile\n+import unittest\n+from unittest import mock\n+\n+from torch.utils.data import DataLoader\n+\n+from accelerate import DistributedType\n+from accelerate.test_utils.examples import compare_against_test\n+from datasets import load_dataset\n+from transformers import AutoTokenizer\n+\n+\n+SRC_DIRS = [os.path.abspath(os.path.join(\"examples\", \"by_feature\"))]\n+sys.path.extend(SRC_DIRS)\n+\n+if SRC_DIRS is not None:\n+ import checkpointing\n+ import tracking\n+\n+# DataLoaders built from `test_samples/MRPC` for quick testing\n+# Should mock `{script_name}.get_dataloaders` via:\n+# @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n+\n+\n+def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n+ datasets = load_dataset(\"csv\", data_files=data_files)\n+ label_list = datasets[\"train\"].unique(\"label\")\n+\n+ label_to_id = {v: i for i, v in enumerate(label_list)}\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(\n+ examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None, padding=\"max_length\"\n+ )\n+ if \"label\" in examples:\n+ outputs[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"sentence1\", \"sentence2\", \"label\"],\n+ )\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=2)\n+ eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=1)\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+class ExampleDifferenceTests(unittest.TestCase):\n+ \"\"\"\n+ This TestCase checks that all of the `complete_*` scripts contain all of the\n+ information found in the `by_feature` scripts, line for line. If one fails,\n+ then a complete example does not contain all of the features in the features\n+ scripts, and should be updated.\n+\n+ Each example script should be a single test (such as `test_nlp_example`),\n+ and should run `one_complete_example` twice: once with `parser_only=True`,\n+ and the other with `parser_only=False`. 
This is so that when the test\n+ failures are returned to the user, they understand if the discrepancy lies in\n+ the `main` function, or the `training_loop` function. Otherwise it will be\n+ unclear.\n+\n+ Also, if there are any expected differences between the base script used and\n+ `complete_nlp_example.py` (the canonical base script), these should be included in\n+ `special_strings`. These would be differences in how something is logged, print statements,\n+ etc (such as calls to `Accelerate.log()`)\n+ \"\"\"\n+\n+ def one_complete_example(\n+ self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None\n+ ):\n+ \"\"\"\n+ Tests a single `complete` example against all of the implemented `by_feature` scripts\n+\n+ Args:\n+ complete_file_name (`str`):\n+ The filename of a complete example\n+ parser_only (`bool`):\n+ Whether to look at the main training function, or the argument parser\n+ secondary_filename (`str`, *optional*):\n+ A potential secondary base file to strip all script information not relevant for checking,\n+ such as \"cv_example.py\" when testing \"complete_cv_example.py\"\n+ special_strings (`list`, *optional*):\n+ A list of strings to potentially remove before checking no differences are left. These should be\n+ diffs that are file specific, such as different logging variations between files.\n+ \"\"\"\n+ self.maxDiff = None\n+ by_feature_path = os.path.abspath(os.path.join(\"examples\", \"by_feature\"))\n+ examples_path = os.path.abspath(\"examples\")\n+ for item in os.listdir(by_feature_path):\n+ item_path = os.path.join(by_feature_path, item)\n+ if os.path.isfile(item_path) and \".py\" in item_path:\n+ with self.subTest(\n+ tested_script=complete_file_name,\n+ feature_script=item,\n+ tested_section=\"main()\" if parser_only else \"training_function()\",\n+ ):\n+ diff = compare_against_test(\n+ os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename\n+ )\n+ diff = \"\\n\".join(diff)\n+ if special_strings is not None:\n+ for string in special_strings:\n+ diff = diff.replace(string, \"\")\n+ self.assertEqual(diff, \"\")\n+\n+ def test_nlp_examples(self):\n+ self.one_complete_example(\"complete_nlp_example.py\", True)\n+ self.one_complete_example(\"complete_nlp_example.py\", False)\n+\n+ def test_cv_examples(self):\n+ cv_path = os.path.abspath(os.path.join(\"examples\", \"cv_example.py\"))\n+ special_strings = [\n+ \" \" * 16 + \"{\\n\\n\",\n+ \" \" * 18 + '\"accuracy\": eval_metric[\"accuracy\"],\\n\\n',\n+ \" \" * 18 + '\"f1\": eval_metric[\"f1\"],\\n\\n',\n+ \" \" * 18 + '\"train_loss\": total_loss,\\n\\n',\n+ \" \" * 18 + '\"epoch\": epoch,\\n\\n',\n+ \" \" * 16 + \"}\\n\",\n+ \" \" * 8,\n+ ]\n+ self.one_complete_example(\"complete_cv_example.py\", True, cv_path, special_strings)\n+ self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n+\n+\n+class FeatureExamplesTests(unittest.TestCase):\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_checkpointing_by_epoch(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps epoch\n+ --output_dir {tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(tmpdir, \"epoch_0\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_checkpointing_by_steps(self):\n+ with 
tempfile.TemporaryDirectory() as tmpdir:\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps 2\n+ --output_dir {tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(tmpdir, \"step_2\")))\n+\n+ @mock.patch(\"tracking.get_dataloaders\", mocked_dataloaders)\n+ def test_tracking(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ testargs = f\"\"\"\n+ tracking.py\n+ --with_tracking\n+ --logging_dir {tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ tracking.main()\n+ self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\ndiff --git a/tests/test_samples/MRPC/dev.csv b/tests/test_samples/MRPC/dev.csv\nnew file mode 100644\nindex 000000000..96beccda9\n--- /dev/null\n+++ b/tests/test_samples/MRPC/dev.csv\n@@ -0,0 +1,7 @@\n+label,sentence1,sentence2\n+equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,\"\"\" The foodservice pie business does not fit our long-term growth strategy .\"\n+not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,\"His wife said he was \"\" 100 percent behind George Bush \"\" and looked forward to using his years of training in the war .\"\n+not_equivalent,\"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .\",\"The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .\"\n+equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .\n+not_equivalent,No dates have been set for the civil or the criminal trial .,\"No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty .\"\n+equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .\ndiff --git a/tests/test_samples/MRPC/train.csv b/tests/test_samples/MRPC/train.csv\nnew file mode 100644\nindex 000000000..96beccda9\n--- /dev/null\n+++ b/tests/test_samples/MRPC/train.csv\n@@ -0,0 +1,7 @@\n+label,sentence1,sentence2\n+equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,\"\"\" The foodservice pie business does not fit our long-term growth strategy .\"\n+not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,\"His wife said he was \"\" 100 percent behind George Bush \"\" and looked forward to using his years of training in the war .\"\n+not_equivalent,\"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .\",\"The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .\"\n+equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .\n+not_equivalent,No dates have been set for the civil or the criminal trial .,\"No dates have been set for 
the criminal or civil cases , but Shanley has pleaded not guilty .\"\n+equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .\n", "code_comments": [{"body": "Why not always do this?", "diff_hunk": "@@ -200,6 +200,8 @@ def training_function(config, args):\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n+ if isinstance(checkpointing_steps, int):\n+ overall_step += 1", "from_author": false}, {"body": "It's only needed if we save via checkpointing steps rather than epoch, so didn't want people to assume we *always* need to do that. \r\n\r\n(Which means a comment is needed!)", "diff_hunk": "@@ -200,6 +200,8 @@ def training_function(config, args):\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n+ if isinstance(checkpointing_steps, int):\n+ overall_step += 1", "from_author": true}, {"body": "Yeah I get the variable won't be used if we don't checkpoint with steps, but it doesn't hurt to always have it (and would save one line of code).", "diff_hunk": "@@ -200,6 +200,8 @@ def training_function(config, args):\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n+ if isinstance(checkpointing_steps, int):\n+ overall_step += 1", "from_author": false}, {"body": "```suggestion\r\n# Copyright 2022 The HuggingFace Team. All rights reserved.\r\n```", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.", "from_author": false}, {"body": "A quick intro here would be useful.", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+", "from_author": false}, {"body": "```suggestion\r\ndef get_train_func(lines: list):\r\n```\r\nIf we use type annotations, better use `List[str]` here.", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\n+import logging\n+import os\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_train_func(lines: list):", "from_author": false}, {"body": "Sounds like it could be refactored with the previous one and an extra arg?", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\n+import logging\n+import os\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_train_func(lines: list):\n+ \"\"\"\n+ Finds the main training function from inside segmented source code.\n+\n+ Args:\n+ lines (`list`):\n+ Source code of a script\n+ \"\"\"\n+ good_lines, found_start = [], False\n+ for line in lines:\n+ if not found_start and \"def training_function\" in line:\n+ found_start = True\n+ good_lines.append(line)\n+ continue\n+ if found_start:\n+ if \"def main\" in line:\n+ return good_lines\n+ good_lines.append(line)\n+\n+\n+def get_main_func(lines: list):", "from_author": false}, {"body": "Those do not match the signature. Also this could be expanded a little bit, I'm not sure I fully understand.", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\n+import logging\n+import os\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_train_func(lines: list):\n+ \"\"\"\n+ Finds the main training function from inside segmented source code.\n+\n+ Args:\n+ lines (`list`):\n+ Source code of a script\n+ \"\"\"\n+ good_lines, found_start = [], False\n+ for line in lines:\n+ if not found_start and \"def training_function\" in line:\n+ found_start = True\n+ good_lines.append(line)\n+ continue\n+ if found_start:\n+ if \"def main\" in line:\n+ return good_lines\n+ good_lines.append(line)\n+\n+\n+def get_main_func(lines: list):\n+ \"\"\"\n+ Finds the main function from inside segmented source code\n+\n+ Args:\n+ lines (`list`):\n+ Source code of a script\n+ \"\"\"\n+ good_lines, found_start = [], False\n+ for line in lines:\n+ if not found_start and \"def main\" in line:\n+ found_start = True\n+ good_lines.append(line)\n+ continue\n+ if found_start:\n+ if \"if __name__\" in line:\n+ return good_lines\n+ good_lines.append(line)\n+\n+\n+def clean_lines(lines: list):\n+ \"\"\"\n+ Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\\n')\n+\n+ Args:\n+ lines (`list`):\n+ Source code of a script\n+ \"\"\"\n+ return [line for line in lines if not line.lstrip().startswith(\"#\") and line != \"\\n\"]\n+\n+\n+def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):\n+ \"\"\"\n+ Checks whether the content aligned in `test_filename` is included inside of 
`full_filename`.", "from_author": false}, {"body": "```suggestion\r\n# Copyright 2022 The HuggingFace Team. All rights reserved.\r\n```", "diff_hunk": "@@ -0,0 +1,176 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.", "from_author": false}, {"body": "It seems we always use the method with both flags, should we just remove that arg and put the two tests inside?", "diff_hunk": "@@ -0,0 +1,176 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import tempfile\n+import unittest\n+from unittest import mock\n+\n+from torch.utils.data import DataLoader\n+\n+from accelerate import DistributedType\n+from accelerate.test_utils.examples import compare_against_test\n+from datasets import load_dataset\n+from transformers import AutoTokenizer\n+\n+\n+SRC_DIRS = [os.path.abspath(os.path.join(\"examples\", \"by_feature\"))]\n+sys.path.extend(SRC_DIRS)\n+\n+if SRC_DIRS is not None:\n+ import checkpointing\n+ import tracking\n+\n+# DataLoaders built from `test_samples/MRPC` for quick testing\n+# Should mock `{script_name}.get_dataloaders` via:\n+# @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n+\n+\n+def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n+ datasets = load_dataset(\"csv\", data_files=data_files)\n+ label_list = datasets[\"train\"].unique(\"label\")\n+\n+ label_to_id = {v: i for i, v in enumerate(label_list)}\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(\n+ examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None, padding=\"max_length\"\n+ )\n+ if \"label\" in examples:\n+ outputs[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"sentence1\", \"sentence2\", \"label\"],\n+ )\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=2)\n+ eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=1)\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+class ExampleDifferenceTests(unittest.TestCase):\n+ \"\"\"\n+ This TestCase checks that all of 
the `complete_*` scripts contain all of the\n+ information found in the `by_feature` scripts, line for line.\n+ \"\"\"\n+\n+ def one_complete_example(\n+ self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None\n+ ):\n+ \"\"\"\n+ Tests a single `complete` example against all of the implemented `by_feature` scripts\n+\n+ Args:\n+ complete_file_name (`str`):\n+ The filename of a complete example\n+ parser_only (`bool`):\n+ Whether to look at the main training function, or the argument parser\n+ secondary_filename (`str`, *optional*):\n+ A potential secondary base file to strip all script information not relevant for checking,\n+ such as \"cv_example.py\" when testing \"complete_cv_example.py\"\n+ special_strings (`list`, *optional*):\n+ A list of strings to potentially remove before checking no differences are left. These should be\n+ diffs that are file specific, such as different logging variations between files.\n+ \"\"\"\n+ self.maxDiff = None\n+ by_feature_path = os.path.abspath(os.path.join(\"examples\", \"by_feature\"))\n+ examples_path = os.path.abspath(\"examples\")\n+ for item in os.listdir(by_feature_path):\n+ item_path = os.path.join(by_feature_path, item)\n+ if os.path.isfile(item_path) and \".py\" in item_path:\n+ with self.subTest(\n+ tested_script=complete_file_name,\n+ feature_script=item,\n+ tested_section=\"main()\" if parser_only else \"training_function()\",\n+ ):\n+ diff = compare_against_test(\n+ os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename\n+ )\n+ diff = \"\\n\".join(diff)\n+ if special_strings is not None:\n+ for string in special_strings:\n+ diff = diff.replace(string, \"\")\n+ self.assertEqual(diff, \"\")\n+\n+ def test_nlp_examples(self):\n+ self.one_complete_example(\"complete_nlp_example.py\", True)\n+ self.one_complete_example(\"complete_nlp_example.py\", False)\n+", "from_author": false}, {"body": "I'm making a note for this inside of the documentation, but the reasoning for separation is it lets the test failure be more readable as to what section failed, rather than one complete error.\r\nNotice the `tested_section` part\r\nE.g.:\r\n\r\n```bash\r\n_________ ExampleDifferenceTests.test_cv_example (feature_script='tracking.py', tested_script='complete_cv_example.py', tested_section='training_function()') _________\r\n```\r\nvs:\r\n```bash\r\n_________ ExampleDifferenceTests.test_cv_example (feature_script='tracking.py', tested_script='complete_cv_example.py', tested_section='main()') _________\r\n```\r\n\r\n(This is a `pytest-subtest` hack)", "diff_hunk": "@@ -0,0 +1,176 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import tempfile\n+import unittest\n+from unittest import mock\n+\n+from torch.utils.data import DataLoader\n+\n+from accelerate import DistributedType\n+from accelerate.test_utils.examples import compare_against_test\n+from datasets import load_dataset\n+from transformers import AutoTokenizer\n+\n+\n+SRC_DIRS = [os.path.abspath(os.path.join(\"examples\", \"by_feature\"))]\n+sys.path.extend(SRC_DIRS)\n+\n+if SRC_DIRS is not None:\n+ import checkpointing\n+ import tracking\n+\n+# DataLoaders built from `test_samples/MRPC` for quick testing\n+# Should mock `{script_name}.get_dataloaders` via:\n+# @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n+\n+\n+def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n+ datasets = load_dataset(\"csv\", data_files=data_files)\n+ label_list = datasets[\"train\"].unique(\"label\")\n+\n+ label_to_id = {v: i for i, v in enumerate(label_list)}\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(\n+ examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None, padding=\"max_length\"\n+ )\n+ if \"label\" in examples:\n+ outputs[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"sentence1\", \"sentence2\", \"label\"],\n+ )\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=2)\n+ eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=1)\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+class ExampleDifferenceTests(unittest.TestCase):\n+ \"\"\"\n+ This TestCase checks that all of the `complete_*` scripts contain all of the\n+ information found in the `by_feature` scripts, line for line.\n+ \"\"\"\n+\n+ def one_complete_example(\n+ self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None\n+ ):\n+ \"\"\"\n+ Tests a single `complete` example against all of the implemented `by_feature` scripts\n+\n+ Args:\n+ complete_file_name (`str`):\n+ The filename of a complete example\n+ 
parser_only (`bool`):\n+ Whether to look at the main training function, or the argument parser\n+ secondary_filename (`str`, *optional*):\n+ A potential secondary base file to strip all script information not relevant for checking,\n+ such as \"cv_example.py\" when testing \"complete_cv_example.py\"\n+ special_strings (`list`, *optional*):\n+ A list of strings to potentially remove before checking no differences are left. These should be\n+ diffs that are file specific, such as different logging variations between files.\n+ \"\"\"\n+ self.maxDiff = None\n+ by_feature_path = os.path.abspath(os.path.join(\"examples\", \"by_feature\"))\n+ examples_path = os.path.abspath(\"examples\")\n+ for item in os.listdir(by_feature_path):\n+ item_path = os.path.join(by_feature_path, item)\n+ if os.path.isfile(item_path) and \".py\" in item_path:\n+ with self.subTest(\n+ tested_script=complete_file_name,\n+ feature_script=item,\n+ tested_section=\"main()\" if parser_only else \"training_function()\",\n+ ):\n+ diff = compare_against_test(\n+ os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename\n+ )\n+ diff = \"\\n\".join(diff)\n+ if special_strings is not None:\n+ for string in special_strings:\n+ diff = diff.replace(string, \"\")\n+ self.assertEqual(diff, \"\")\n+\n+ def test_nlp_examples(self):\n+ self.one_complete_example(\"complete_nlp_example.py\", True)\n+ self.one_complete_example(\"complete_nlp_example.py\", False)\n+", "from_author": true}, {"body": "Should go inside triple quotes.", "diff_hunk": "@@ -0,0 +1,143 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each\n+# `examples/by_feature` example. `compare_against_test` is the main function that should be used when testing,\n+# while the others are used to either get the code that matters, or to preprocess them (such as stripping comments)", "from_author": false}], "context": [{"body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false}, {"body": "I've come up with a solution to make sure that also the main `complete_` examples will raise an error if they differ from the bits inside of `by_feature`. Now eventually it would be good to have an exclusion list of files for when we *wouldn't* expect to see some feature added (such as say cross validation). But we'll cross that bridge when we get there. 
In the meantime here is the report generated back to the user from pytest:\r\n\r\n(Note: `pytest-subtests` was added as a dep to make sure we could use `TestCase.subTest` and make this code clean and efficient)\r\n```bash\r\n======================================================================= short test summary info =======================================================================\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_cv_example_body - AssertionError: 14 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_cv_example_body - AssertionError: 27 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_cv_example_parser - AssertionError: 1 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_cv_example_parser - AssertionError: 4 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_nlp_example_parser - AssertionError: 1 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_nlp_example_parser - AssertionError: 1 != 0\r\n=============================================================== 6 failed, 4 passed, 1 warning in 1.64s ================================================================\r\n```\r\n\r\n{{ Removed full trace, see the following message for an example }} ", "from_author": true}, {"body": "Slightly tweaked how they look, I've now included it posting the source code diffs. I believe this is a must as it will tell the user exactly what parts were missing from the full example:\r\n\r\n```bash\r\n============================================================================== FAILURES ===============================================================================\r\n_________ ExampleDifferenceTests.test_cv_example (feature_script='tracking.py', tested_script='complete_cv_example.py', tested_section='training_function()') _________\r\n\r\nself =
Run your *raw* PyTorch training script on any kind of device\n \n \n+
Original training code (CPU or mono-GPU only) | \n- With Accelerate (CPU/GPU/multi-GPUs/TPUs/fp16) | \n-
---|---|
\n-\n-```python\n+```diff\n import torch\n import torch.nn.functional as F\n from datasets import load_dataset\n \n-\n-\n-device = 'cpu'\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+- device = 'cpu'\n++ device = accelerator.device\n \n model = torch.nn.Transformer().to(device)\n-optim = torch.optim.Adam(\n- model.parameters()\n-)\n+optim = torch.optim.Adam(model.parameters())\n \n dataset = load_dataset('my_dataset')\n-data = torch.utils.data.Dataloader(\n- dataset\n-)\n-\n-\n-\n+data = torch.utils.data.Dataloader(dataset)\n \n++ model, optim, data = accelerator.prepare(model, optim, data)\n \n model.train()\n for epoch in range(10):\n@@ -92,166 +79,55 @@ for epoch in range(10):\n optimizer.zero_grad()\n \n output = model(source, targets)\n- loss = F.cross_entropy(\n- output, targets\n- )\n-\n- loss.backward()\n-\n- optimizer.step()\n-```\n-\n- | \n-\n-\n-```python\n- import torch\n- import torch.nn.functional as F\n- from datasets import load_dataset\n-\n-+ from accelerate import Accelerator\n-+ accelerator = Accelerator()\n-+ device = accelerator.device\n-\n- model = torch.nn.Transformer().to(device)\n- optim = torch.optim.Adam(\n- model.parameters()\n- )\n-\n- dataset = load_dataset('my_dataset')\n- data = torch.utils.data.Dataloader(\n- dataset\n- )\n-\n-+ model, optim, data = accelerator.prepare(\n-+ model, optim, data\n-+ )\n-\n- model.train()\n- for epoch in range(10):\n- for source, targets in data:\n- source = source.to(device)\n- targets = targets.to(device)\n-\n- optimizer.zero_grad()\n-\n- output = model(source, targets)\n- loss = F.cross_entropy(\n- output, targets\n- )\n+ loss = F.cross_entropy(output, targets)\n \n + accelerator.backward(loss)\n+- loss.backward()\n \n- optimizer.step()\n+ optimizer.step()\n ```\n \n- | \n-
Original training code (CPU or mono-GPU only) | \n- With Accelerate (CPU/GPU/multi-GPUs/TPUs/fp16) | \n-
---|---|
\n-\n-```python\n+```diff\n import torch\n import torch.nn.functional as F\n from datasets import load_dataset\n \n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+- device = 'cpu'\n \n-\n-device = 'cpu'\n-\n-model = torch.nn.Transformer().to(device)\n-optim = torch.optim.Adam(\n- model.parameters()\n-)\n++ model = torch.nn.Transformer()\n+- model = torch.nn.Transformer().to(device)\n+optim = torch.optim.Adam(model.parameters())\n \n dataset = load_dataset('my_dataset')\n-data = torch.utils.data.Dataloader(\n- dataset\n-)\n-\n-\n-\n+data = torch.utils.data.Dataloader(dataset)\n \n++ model, optim, data = accelerator.prepare(model, optim, data)\n \n model.train()\n for epoch in range(10):\n for source, targets in data:\n- source = source.to(device)\n- targets = targets.to(device)\n+- source = source.to(device)\n+- targets = targets.to(device)\n \n optimizer.zero_grad()\n \n output = model(source, targets)\n- loss = F.cross_entropy(\n- output, targets\n- )\n-\n- loss.backward()\n-\n- optimizer.step()\n-```\n-\n- | \n-\n-\n-```python\n- import torch\n- import torch.nn.functional as F\n- from datasets import load_dataset\n-\n-+ from accelerate import Accelerator\n-+ accelerator = Accelerator()\n--\n-\n-+ model = torch.nn.Transformer()\n- optim = torch.optim.Adam(\n- model.parameters()\n- )\n-\n- dataset = load_dataset('my_dataset')\n- data = torch.utils.data.Dataloader(\n- dataset\n- )\n-\n-+ model, optim, data = accelerator.prepare(\n-+ model, optim, data\n-+ )\n-\n- model.train()\n- for epoch in range(10):\n- for source, targets in data:\n--\n--\n-\n- optimizer.zero_grad()\n-\n- output = model(source, targets)\n- loss = F.cross_entropy(\n- output, targets\n- )\n+ loss = F.cross_entropy(output, targets)\n \n + accelerator.backward(loss)\n+- loss.backward()\n \n- optimizer.step()\n+ optimizer.step()\n ```\n \n- | \n-
Original training code (CPU or mono-GPU only) | \n+With Accelerate for CPU/GPU/multi-GPUs/TPUs/fp16 | \n+
---|---|
\n+\n+```python\n+import torch\n+import torch.nn.functional as F\n+from datasets import load_dataset\n+\n+\n+\n+device = 'cpu'\n+\n+model = torch.nn.Transformer().to(device)\n+optim = torch.optim.Adam(model.parameters())\n+\n+dataset = load_dataset('my_dataset')\n+data = torch.utils.data.Dataloader(dataset)\n+\n+\n+\n+\n+model.train()\n+for epoch in range(10):\n+ for source, targets in data:\n+ source = source.to(device)\n+ targets = targets.to(device)\n+\n+ optimizer.zero_grad()\n+\n+ output = model(source, targets)\n+ loss = F.cross_entropy(output, targets)\n+\n+ loss.backward()\n+\n+ optimizer.step()\n+```\n+\n+ | \n+\n+\n+```python\n+ import torch\n+ import torch.nn.functional as F\n+ from datasets import load_dataset\n+\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n++ device = accelerator.device\n+\n+ model = torch.nn.Transformer().to(device)\n+ optim = torch.optim.Adam(model.parameters())\n+\n+ dataset = load_dataset('my_dataset')\n+ data = torch.utils.data.Dataloader(dataset)\n+\n++ model, optim, data = accelerator.prepare(\n+ model, optim, data)\n+\n+ model.train()\n+ for epoch in range(10):\n+ for source, targets in data:\n+ source = source.to(device)\n+ targets = targets.to(device)\n+\n+ optimizer.zero_grad()\n+\n+ output = model(source, targets)\n+ loss = F.cross_entropy(output, targets)\n+\n++ accelerate.backward(loss)\n+\n+ optimizer.step()\n+```\n+\n+ | \n+
Run your *raw* PyTorch training script on any kind of device\n \n \n-\ud83e\udd17 Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run on your local machine for debugging or your training environment.\n+\ud83e\udd17 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boiler code needed to use multi-GPUs/TPU/fp16.\n+\n+\ud83e\udd17 Accelerate abstracts exactly and only the boiler code related to multi-GPUs/TPU/fp16 and let the rest of your code unchanged.\n+\n+Here is an example:\n+\n+
Original training code (CPU or mono-GPU only) | \n+With Accelerate for CPU/GPU/multi-GPUs/TPUs/fp16 | \n+
---|---|
\n+\n+```python\n+import torch\n+import torch.nn.functional as F\n+from datasets import load_dataset\n+\n+\n+\n+device = 'cpu'\n+\n+model = torch.nn.Transformer().to(device)\n+optim = torch.optim.Adam(model.parameters())\n+\n+dataset = load_dataset('my_dataset')\n+data = torch.utils.data.Dataloader(dataset)\n+\n+\n+\n+\n+model.train()\n+for epoch in range(10):\n+ for source, targets in data:\n+ source = source.to(device)\n+ targets = targets.to(device)\n+\n+ optimizer.zero_grad()\n+\n+ output = model(source, targets)\n+ loss = F.cross_entropy(output, targets)\n+\n+ loss.backward()\n+\n+ optimizer.step()\n+```\n+\n+ | \n+\n+\n+```python\n+ import torch\n+ import torch.nn.functional as F\n+ from datasets import load_dataset\n+\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()", "from_author": false}], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/1", "pr_id": 577481934}]} |
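As an aside on the testing pattern quoted in the review threads above: the example-script tests drive each script's `main()` by patching `sys.argv`, and rely on `unittest.TestCase.subTest` (surfaced by `pytest-subtests`) so that a failure report names the exact script and section that diverged. The sketch below is a minimal, self-contained illustration of that pattern only; `fake_script_main` and its `--output_dir` argument are made up for the illustration and are not part of the accelerate examples.

```python
# Minimal sketch of the argv-patching + subTest pattern used by the example tests.
# `fake_script_main` stands in for an example script's `main()`; it is hypothetical.
import argparse
import sys
import unittest
from unittest import mock


def fake_script_main():
    # Parses sys.argv[1:], exactly like an example script's argument parser would.
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir", default=".")
    args = parser.parse_args()
    return args.output_dir


class ArgvPatchingExample(unittest.TestCase):
    def test_output_dir_flag(self):
        # Each sub-case gets its own entry in the failure report; with pytest-subtests,
        # a failing sub-case shows up as a separate SUBFAIL line naming its parameters.
        for output_dir in ("/tmp/a", "/tmp/b"):
            with self.subTest(output_dir=output_dir):
                testargs = f"script.py --output_dir {output_dir}".split()
                with mock.patch.object(sys, "argv", testargs):
                    self.assertEqual(fake_script_main(), output_dir)


if __name__ == "__main__":
    unittest.main()
```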