applied-ai-018 committed
Commit 3b73534 · verified · 1 Parent(s): 88e20df

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set of changes.
Files changed (50)
  1. bigscience/bigscience/bigscience.py +1 -0
  2. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00294-of-00532.arrow +3 -0
  3. bigscience/evaluation/utilities/convert_results_to_json.py +111 -0
  4. bigscience/inference/README.md +15 -0
  5. bigscience/inference/modeling_gpt2_alibi_prefix_lm.py +1750 -0
  6. bigscience/jz/.gitignore +133 -0
  7. bigscience/jz/README.md +27 -0
  8. bigscience/jz/compute-resources.md +190 -0
  9. bigscience/jz/configs/dec_only_t5/decoder_only_t5-large.json +22 -0
  10. bigscience/jz/configs/dec_only_t5/decoder_only_t5-medium.json +22 -0
  11. bigscience/jz/configs/dec_only_t5/decoder_only_t5-small.json +22 -0
  12. bigscience/jz/envs/README.md +662 -0
  13. bigscience/jz/envs/apex/build.sh +4 -0
  14. bigscience/jz/envs/deepspeed/build.sh +7 -0
  15. bigscience/jz/envs/start-prod +60 -0
  16. bigscience/jz/envs/start-user +59 -0
  17. bigscience/jz/envs/workarounds.md +8 -0
  18. bigscience/jz/frameworks/deepspeed.md +105 -0
  19. bigscience/jz/frameworks/megatron-lm.md +92 -0
  20. bigscience/jz/hpc-specs.md +38 -0
  21. bigscience/jz/scripts/custom_callbacks.py +95 -0
  22. bigscience/jz/scripts/run_clm.py +520 -0
  23. bigscience/jz/scripts/run_clm_prompted.py +534 -0
  24. bigscience/jz/scripts/run_text2text.py +514 -0
  25. bigscience/jz/slurm/README.md +861 -0
  26. bigscience/jz/slurm/hf-ds-gpt2-multi-node.slurm +67 -0
  27. bigscience/jz/slurm/meg-gpt2-multi-node.slurm +86 -0
  28. bigscience/jz/slurm/multi-node-launcher3.slurm +100 -0
  29. bigscience/jz/slurm/openwebtext-jsonl-to-meg-gpt2.slurm +25 -0
  30. bigscience/jz/slurm/openwebtext-jsonl-to-meg-t5.slurm +24 -0
  31. bigscience/jz/slurms_scripts/README.md +16 -0
  32. bigscience/jz/slurms_scripts/multi_node_deconlyt5.slurm +76 -0
  33. bigscience/jz/slurms_scripts/preprocess_deconlyt5.slurm +52 -0
  34. bigscience/jz/tools/diagnostics.md +28 -0
  35. bigscience/jz/tools/google-cloud-sdk.md +57 -0
  36. bigscience/jz/tools/monitoring.md +10 -0
  37. bigscience/jz/tools/tensorboard.md +13 -0
  38. bigscience/tools/README.md +87 -0
  39. bigscience/tools/fixing_checkpoints_for_from_pretrained.sh +21 -0
  40. bigscience/tools/fs-watchdog.py +185 -0
  41. bigscience/tools/fs-watchdog.slurm +23 -0
  42. bigscience/tools/hub-auth.py +23 -0
  43. bigscience/tools/hub-sync.py +295 -0
  44. bigscience/tools/slurm-status.py +181 -0
  45. bigscience/train/tr1-13B-base/README.md +850 -0
  46. bigscience/train/tr1-13B-base/chronicles.md +425 -0
  47. bigscience/train/tr1-13B-base/start-tr1-13B +57 -0
  48. bigscience/train/tr1-13B-base/tr1-13B-hub-sync-logs.slurm +23 -0
  49. bigscience/train/tr1-13B-base/tr1-13B-hub-sync-tensorboard.slurm +23 -0
  50. bigscience/train/tr1-13B-base/tr1-13B-round1.slurm +174 -0
bigscience/bigscience/bigscience.py ADDED
@@ -0,0 +1 @@
1
+ """Main module."""
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00294-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe5725828e3d7908305077200cd3b83eb22986dc47d0263189850b027c1a979d
3
+ size 500890168
bigscience/evaluation/utilities/convert_results_to_json.py ADDED
@@ -0,0 +1,111 @@
1
+ import json
2
+ import math
3
+ import os
4
+ from argparse import ArgumentParser
5
+ from os import listdir
6
+ from os.path import isfile
7
+
8
+ def get_args():
9
+ parser = ArgumentParser()
10
+ # --experiments tr3d-1B3-oscar-checkpoints,tr3e-1B3-c4-checkpoints,tr3m-1B3-pile-checkpoints
11
+ parser.add_argument('--experiment', type=str, required=True,
12
+ help='Experiment we want to download.')
13
+ parser.add_argument('--result-dir', type=str, required=True,
14
+ help='Result directory containing all results, and to store aggregated json results.')
15
+ parser.add_argument('--batch-size', type=int, default=512,
16
+ help='Experiment training batch size.')
17
+ parser.add_argument('--sequence_length', type=int, default=2048,
18
+ help='Experiment training sequence length.')
19
+ parser.add_argument('--rampup-batch-size', type=lambda s: tuple(int(item) for item in s.split(',')), default=(32, 32, 2_000_000),
20
+ help='Experiment training batch size rampup.')
21
+ return parser.parse_args()
22
+
23
+ def checkpoint_step_to_tokens(checkpoint_step, args) -> int:
24
+ def fn(checkpoint_step) -> int:
25
+ if not hasattr(checkpoint_step_to_tokens, "CACHE"):
26
+ checkpoint_step_to_tokens.CACHE = {}
27
+
28
+ BATCH_SIZE=args.batch_size
29
+ SEQUENCE_LENGTH=args.sequence_length
30
+ # Linear increase in terms of samples.
31
+ RAMPUP_BATCH_SIZE = args.rampup_batch_size
32
+
33
+ # Compute RAMPUP checkpoint_step
34
+ if not hasattr(checkpoint_step_to_tokens, "RAMPUP_OFFSET"):
35
+ initial_batch_size, increment_batch_size, sample_limit_for_rampup = RAMPUP_BATCH_SIZE
36
+ number_of_increments = (BATCH_SIZE - initial_batch_size) // increment_batch_size
37
+ assert (BATCH_SIZE - initial_batch_size) % increment_batch_size == 0
38
+
39
+ offset_step = 0
40
+ start_sample = 0
41
+ for incr in range(number_of_increments):
42
+ batch_size = initial_batch_size + incr * increment_batch_size
43
+ end_sample = int(math.ceil((incr + 1) * sample_limit_for_rampup / number_of_increments))
44
+ number_of_step_per_increment = int(math.ceil((end_sample - start_sample) / batch_size))
45
+ checkpoint_step_to_tokens.CACHE.update({
46
+ offset_step + i: (start_sample + i * batch_size) * SEQUENCE_LENGTH
47
+ for i in range(number_of_step_per_increment)
48
+ })
49
+ offset_step += number_of_step_per_increment
50
+ start_sample += number_of_step_per_increment * batch_size
51
+
52
+ checkpoint_step_to_tokens.CACHE[offset_step] = start_sample * SEQUENCE_LENGTH
53
+ checkpoint_step_to_tokens.RAMPUP_OFFSET = offset_step
54
+
55
+ if checkpoint_step in checkpoint_step_to_tokens.CACHE:
56
+ return checkpoint_step_to_tokens.CACHE[checkpoint_step]
57
+
58
+ number_steps_after_rampup = checkpoint_step - checkpoint_step_to_tokens.RAMPUP_OFFSET
59
+ assert number_steps_after_rampup >= 0
60
+
61
+ slope = BATCH_SIZE * SEQUENCE_LENGTH
62
+
63
+ checkpoint_step_to_tokens.CACHE[checkpoint_step] = \
64
+ checkpoint_step_to_tokens.CACHE[checkpoint_step_to_tokens.RAMPUP_OFFSET] + \
65
+ slope * number_steps_after_rampup
66
+ return checkpoint_step_to_tokens.CACHE[checkpoint_step]
67
+ return fn(checkpoint_step)
68
+
69
+ def main():
70
+ args = get_args()
71
+ result_dir = args.result_dir
72
+ experiment = args.experiment
73
+
74
+ results_file_per_checkpoint = [
75
+ file
76
+ for file in listdir(result_dir)
77
+ if isfile(os.path.join(result_dir, file)) and file.startswith(experiment)
78
+ ]
79
+ checkpoint_steps = sorted([int(file.split("_")[-1].split(".json")[0]) for file in results_file_per_checkpoint])
80
+ absolute_paths = [f"{result_dir}/{experiment}_{checkpoint_step}.json" for checkpoint_step in checkpoint_steps]
81
+ # format = "{EXPERIMENT_NAME}_{CHECKPOINT_STEP}.json"
82
+ tokens = [checkpoint_step_to_tokens(checkpoint_step, args) for checkpoint_step in checkpoint_steps]
83
+
84
+ result_json = {}
85
+ for absolute_path in absolute_paths:
86
+ with open(absolute_path, 'r') as fi:
87
+ results = json.load(fi)["results"]
88
+
89
+ for task in results:
90
+ if task not in result_json:
91
+ result_json[task] = {}
92
+
93
+ for metric in results[task]:
94
+ if metric not in result_json[task]:
95
+ result_json[task][metric] = []
96
+
97
+ result_json[task][metric].append(results[task][metric])
98
+
99
+ # check
100
+ for task in result_json:
101
+ assert len(tokens) == len(checkpoint_steps)
102
+ for metric in result_json[task]:
103
+ assert len(result_json[task][metric]) == len(checkpoint_steps)
104
+
105
+ output_path = os.path.join(result_dir, f"{experiment}_agg.json")
106
+ print(f"Printing results to {output_path}")
107
+ with open(output_path, 'w') as fo:
108
+ json.dump({"tokens": tokens, "checkpoints": checkpoint_steps, "results": result_json}, fo, indent=2)
109
+
110
+ if __name__ == "__main__":
111
+ main()
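
For reference, here is a small, hypothetical usage sketch of the `checkpoint_step_to_tokens` helper defined above, assuming the script is importable as a module; the values mirror its defaults (batch size 512, sequence length 2048, rampup `(32, 32, 2_000_000)`):

```python
# Hypothetical usage sketch: map a few checkpoint steps to consumed-token counts
# with the helper above. Assumes convert_results_to_json.py is on PYTHONPATH.
from argparse import Namespace

from convert_results_to_json import checkpoint_step_to_tokens

args = Namespace(batch_size=512, sequence_length=2048,
                 rampup_batch_size=(32, 32, 2_000_000))

# During rampup the mapping comes from the per-increment cache; past the rampup
# offset it grows linearly, batch_size * sequence_length tokens per step.
for step in (0, 1_000, 10_000, 100_000):
    print(step, checkpoint_step_to_tokens(step, args))
```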
bigscience/inference/README.md ADDED
@@ -0,0 +1,15 @@
1
+ # Inference
2
+
3
+ Notes on the plans to do inference with the pre-trained model
4
+
5
+ ## Large Model on limited hardware
6
+
7
+ - inferencing and tinkering on a single host (150-200B model)
8
+
9
+ Solution: We can do this with ZeRO-Infinity. It seems @Shaden Smith already has the code to load the model parameter checkpoints from Megatron+DeepSpeed 3D into Megatron+DeepSpeed ZeRO-Infinity. The remaining work is to add an inference-only mode to ZeRO-Infinity that drops all the non-parameter states.
10
+
11
+ Hardware Requirements: Would require about 500-1000 GB of memory (can be CPU, GPU, or NVMe). A single node with enough CPU or NVMe memory should work here.
12
+
13
+ The single node can be as small as 4x 32GB-V100; it will just be slower than, say, 8x 80GB-A100.
14
+
15
+ Estimated Work: If all works as expected, 1-3 weeks depending on bandwidth availability. Tuning for the best performance might take another week or so, but that won't block the availability of the functionality.
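
As a rough illustration of the memory math: ~200B parameters in fp16 are already ~400 GB of weights, which is roughly where the 500-1000 GB figure lands once working margin is included. Below is a minimal, hypothetical sketch of the kind of ZeRO-3 / ZeRO-Infinity parameter-offload configuration such a single-node setup would rely on; all paths and values are placeholders, and the inference-only mode discussed above is not assumed to exist yet:

```python
# Illustrative only: ZeRO stage 3 with parameter offload to CPU or NVMe, which is the
# mechanism the plan above relies on. Every value here is a placeholder, not a tested recipe.
import deepspeed  # noqa: F401  (used in the commented-out call below)
import torch      # noqa: F401

ds_config = {
    "train_micro_batch_size_per_gpu": 1,  # required key even if we only run forward passes
    "fp16": {"enabled": True},
    "zero_optimization": {
        "stage": 3,
        "offload_param": {
            "device": "nvme",            # or "cpu" if the host has enough RAM
            "nvme_path": "/local_nvme",  # placeholder path
            "pin_memory": True,
        },
    },
}

# `model` would be the converted Megatron+DeepSpeed checkpoint loaded into a torch module;
# producing it is exactly the remaining conversion work described above.
# engine, *_ = deepspeed.initialize(model=model, config_params=ds_config)
# with torch.no_grad():
#     logits = engine(input_ids)
```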
bigscience/inference/modeling_gpt2_alibi_prefix_lm.py ADDED
@@ -0,0 +1,1750 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch OpenAI GPT-2 model with AliBi."""
17
+
18
+ ## integrating some AliBi code from https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/c839a8aa30731f71b3738d56009be9668508e366/megatron/model/transformer.py
19
+ # I am keeping the class names as GPT2 because some of transformers' code, like the pipeline classes, checks class names in order to do things, and
20
+ # creating new classes with different names sometimes breaks things.
21
+
22
+ import math  # needed by _build_alibi_tensor below
+ import os
23
+ import enum
24
+ from dataclasses import dataclass
25
+ from typing import Optional, Tuple
26
+
27
+ import torch
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import CrossEntropyLoss, MSELoss
31
+
32
+ from transformers.activations import ACT2FN
33
+ from transformers.file_utils import (
34
+ ModelOutput,
35
+ add_code_sample_docstrings,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ replace_return_docstrings,
39
+ )
40
+ from transformers.modeling_outputs import (
41
+ BaseModelOutputWithPastAndCrossAttentions,
42
+ CausalLMOutputWithCrossAttentions,
43
+ SequenceClassifierOutputWithPast,
44
+ TokenClassifierOutput,
45
+ )
46
+ from transformers.modeling_utils import (
47
+ Conv1D,
48
+ PreTrainedModel,
49
+ SequenceSummary,
50
+ find_pruneable_heads_and_indices,
51
+ prune_conv1d_layer,
52
+ )
53
+ from transformers.utils import logging
54
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
55
+ from transformers.models.gpt2.configuration_gpt2 import GPT2Config
56
+
57
+ from collections import OrderedDict
58
+ from typing import Any, Mapping, Optional
59
+
60
+ from transformers import PreTrainedTokenizer, TensorType, is_torch_available
61
+
62
+ from transformers.configuration_utils import PretrainedConfig
63
+ from transformers.onnx import OnnxConfigWithPast
64
+
65
+
66
+
67
+ logger = logging.get_logger(__name__)
68
+
69
+ GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
70
+ "gpt2": "https://huggingface.co/gpt2/resolve/main/config.json",
71
+ "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/config.json",
72
+ "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/config.json",
73
+ "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/config.json",
74
+ "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/config.json",
75
+ }
76
+
77
+ PositionEmbeddingType_rotary = 1 # not implemented
78
+ PositionEmbeddingType_absolute = 2
79
+ PositionEmbeddingType_alibi = 3
80
+
81
+
82
+ class GPT2Config(PretrainedConfig):
83
+ """
84
+ This is the configuration class to store the configuration of a :class:`~transformers.GPT2Model` or a
85
+ :class:`~transformers.TFGPT2Model`. It is used to instantiate a GPT-2 model according to the specified arguments,
86
+ defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration
87
+ to that of the GPT-2 `small <https://huggingface.co/gpt2>`__ architecture.
88
+ Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
89
+ outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
90
+ Args:
91
+ vocab_size (:obj:`int`, `optional`, defaults to 50257):
92
+ Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
93
+ :obj:`inputs_ids` passed when calling :class:`~transformers.GPT2Model` or
94
+ :class:`~transformers.TFGPT2Model`.
95
+ n_positions (:obj:`int`, `optional`, defaults to 1024):
96
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
97
+ just in case (e.g., 512 or 1024 or 2048).
98
+ n_ctx (:obj:`int`, `optional`, defaults to 1024):
99
+ Dimensionality of the causal mask (usually same as n_positions).
100
+ n_embd (:obj:`int`, `optional`, defaults to 768):
101
+ Dimensionality of the embeddings and hidden states.
102
+ n_layer (:obj:`int`, `optional`, defaults to 12):
103
+ Number of hidden layers in the Transformer encoder.
104
+ n_head (:obj:`int`, `optional`, defaults to 12):
105
+ Number of attention heads for each attention layer in the Transformer encoder.
106
+ n_inner (:obj:`int`, `optional`, defaults to None):
107
+ Dimensionality of the inner feed-forward layers. :obj:`None` will set it to 4 times n_embd
108
+ activation_function (:obj:`str`, `optional`, defaults to :obj:`"gelu"`):
109
+ Activation function, to be selected in the list :obj:`["relu", "silu", "gelu", "tanh", "gelu_new"]`.
110
+ resid_pdrop (:obj:`float`, `optional`, defaults to 0.1):
111
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
112
+ embd_pdrop (:obj:`int`, `optional`, defaults to 0.1):
113
+ The dropout ratio for the embeddings.
114
+ attn_pdrop (:obj:`float`, `optional`, defaults to 0.1):
115
+ The dropout ratio for the attention.
116
+ layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5):
117
+ The epsilon to use in the layer normalization layers
118
+ initializer_range (:obj:`float`, `optional`, defaults to 0.02):
119
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
120
+ summary_type (:obj:`string`, `optional`, defaults to :obj:`"cls_index"`):
121
+ Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
122
+ and :class:`~transformers.TFGPT2DoubleHeadsModel`.
123
+ Has to be one of the following options:
124
+ - :obj:`"last"`: Take the last token hidden state (like XLNet).
125
+ - :obj:`"first"`: Take the first token hidden state (like BERT).
126
+ - :obj:`"mean"`: Take the mean of all tokens hidden states.
127
+ - :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
128
+ - :obj:`"attn"`: Not implemented now, use multi-head attention.
129
+ summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`):
130
+ Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
131
+ and :class:`~transformers.TFGPT2DoubleHeadsModel`.
132
+ Whether or not to add a projection after the vector extraction.
133
+ summary_activation (:obj:`str`, `optional`):
134
+ Argument used when doing sequence summary. Used in for the multiple choice head in
135
+ :class:`~transformers.GPT2DoubleHeadsModel`.
136
+ Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation.
137
+ summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`):
138
+ Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
139
+ and :class:`~transformers.TFGPT2DoubleHeadsModel`.
140
+ Whether the projection outputs should have :obj:`config.num_labels` or :obj:`config.hidden_size` classes.
141
+ summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1):
142
+ Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
143
+ and :class:`~transformers.TFGPT2DoubleHeadsModel`.
144
+ The dropout ratio to be used after the projection and activation.
145
+ scale_attn_weights (:obj:`bool`, `optional`, defaults to :obj:`True`):
146
+ Scale attention weights by dividing by sqrt(hidden_size).
147
+ use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
148
+ Whether or not the model should return the last key/values attentions (not used by all models).
149
+ Example::
150
+ >>> from transformers import GPT2Model, GPT2Config
151
+ >>> # Initializing a GPT2 configuration
152
+ >>> configuration = GPT2Config()
153
+ >>> # Initializing a model from the configuration
154
+ >>> model = GPT2Model(configuration)
155
+ >>> # Accessing the model configuration
156
+ >>> configuration = model.config
157
+ """
158
+
159
+ model_type = "gpt2"
160
+ keys_to_ignore_at_inference = ["past_key_values"]
161
+ attribute_map = {
162
+ "hidden_size": "n_embd",
163
+ "max_position_embeddings": "n_positions",
164
+ "num_attention_heads": "n_head",
165
+ "num_hidden_layers": "n_layer",
166
+ }
167
+
168
+ def __init__(
169
+ self,
170
+ vocab_size=50257,
171
+ n_positions=1024,
172
+ n_ctx=1024,
173
+ n_embd=768,
174
+ n_layer=12,
175
+ n_head=12,
176
+ n_inner=None,
177
+ activation_function="gelu_new",
178
+ resid_pdrop=0.1,
179
+ embd_pdrop=0.1,
180
+ attn_pdrop=0.1,
181
+ layer_norm_epsilon=1e-5,
182
+ initializer_range=0.02,
183
+ summary_type="cls_index",
184
+ summary_use_proj=True,
185
+ summary_activation=None,
186
+ summary_proj_to_labels=True,
187
+ summary_first_dropout=0.1,
188
+ scale_attn_weights=True,
189
+ use_cache=True,
190
+ bos_token_id=50256,
191
+ eos_token_id=50256,
192
+ position_embedding_type=PositionEmbeddingType_absolute,
193
+ **kwargs
194
+ ):
195
+ self.vocab_size = vocab_size
196
+ self.n_ctx = n_ctx
197
+ self.n_positions = n_positions
198
+ self.n_embd = n_embd
199
+ self.n_layer = n_layer
200
+ self.n_head = n_head
201
+ self.n_inner = n_inner
202
+ self.activation_function = activation_function
203
+ self.resid_pdrop = resid_pdrop
204
+ self.embd_pdrop = embd_pdrop
205
+ self.attn_pdrop = attn_pdrop
206
+ self.layer_norm_epsilon = layer_norm_epsilon
207
+ self.initializer_range = initializer_range
208
+ self.summary_type = summary_type
209
+ self.summary_use_proj = summary_use_proj
210
+ self.summary_activation = summary_activation
211
+ self.summary_first_dropout = summary_first_dropout
212
+ self.summary_proj_to_labels = summary_proj_to_labels
213
+ self.scale_attn_weights = scale_attn_weights
214
+ self.use_cache = use_cache
215
+
216
+ self.bos_token_id = bos_token_id
217
+ self.eos_token_id = eos_token_id
218
+ self.position_embedding_type = position_embedding_type
219
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
220
+
221
+
222
+ class GPT2OnnxConfig(OnnxConfigWithPast):
223
+ @property
224
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
225
+ common_inputs = OrderedDict({"input_ids": {0: "batch"}})
226
+ if self.use_past:
227
+ for i in range(self._config.n_layer * 2):
228
+ common_inputs[f"past_key_values.{i}"] = {0: "batch", 2: "sequence"}
229
+
230
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
231
+ else:
232
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
233
+
234
+ return common_inputs
235
+
236
+ @property
237
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
238
+ common_outputs = OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}})
239
+ if self.use_past:
240
+ for i in range(self._config.n_layer * 2):
241
+ common_outputs[f"present.{i}"] = {0: "batch", 2: "sequence"}
242
+
243
+ return common_outputs
244
+
245
+ return common_outputs
246
+
247
+ def generate_dummy_inputs(
248
+ self,
249
+ tokenizer: PreTrainedTokenizer,
250
+ batch_size: int = -1,
251
+ seq_length: int = -1,
252
+ is_pair: bool = False,
253
+ framework: Optional[TensorType] = None,
254
+ ) -> Mapping[str, Any]:
255
+ common_inputs = super().generate_dummy_inputs(tokenizer, batch_size, seq_length, is_pair, framework)
256
+
257
+ # We need to order the inputs in the way they appear in forward()
258
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
259
+
260
+ # Need to add the past_keys
261
+ if self.use_past:
262
+ if not is_torch_available():
263
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
264
+ else:
265
+ import torch
266
+
267
+ batch = common_inputs["input_ids"].shape[0]
268
+ ordered_inputs["past_key_values"] = [
269
+ (
270
+ torch.zeros((batch, self._config.n_head, 1, self._config.hidden_size // self._config.n_head)),
271
+ torch.zeros((batch, self._config.n_head, 1, self._config.hidden_size // self._config.n_head)),
272
+ )
273
+ for _ in range(self._config.n_layer)
274
+ ]
275
+
276
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
277
+ return ordered_inputs
278
+
279
+
280
+ # need to change the checkpoints to be the bigscience checkpoints
281
+ _CHECKPOINT_FOR_DOC = "gpt2"
282
+ _CONFIG_FOR_DOC = "GPT2Config"
283
+ _TOKENIZER_FOR_DOC = "GPT2Tokenizer"
284
+
285
+ GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
286
+ "gpt2",
287
+ "gpt2-medium",
288
+ "gpt2-large",
289
+ "gpt2-xl",
290
+ "distilgpt2",
291
+ # See all GPT-2 models at https://huggingface.co/models?filter=gpt2
292
+ ]
293
+
294
+
295
+
296
+
297
+ def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
298
+ """Load tf checkpoints in a pytorch model"""
299
+ try:
300
+ import re
301
+
302
+ import tensorflow as tf
303
+ except ImportError:
304
+ logger.error(
305
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
306
+ "https://www.tensorflow.org/install/ for installation instructions."
307
+ )
308
+ raise
309
+ tf_path = os.path.abspath(gpt2_checkpoint_path)
310
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
311
+ # Load weights from TF model
312
+ init_vars = tf.train.list_variables(tf_path)
313
+ names = []
314
+ arrays = []
315
+ for name, shape in init_vars:
316
+ logger.info(f"Loading TF weight {name} with shape {shape}")
317
+ array = tf.train.load_variable(tf_path, name)
318
+ names.append(name)
319
+ arrays.append(array.squeeze())
320
+
321
+ for name, array in zip(names, arrays):
322
+ name = name[6:] # skip "model/"
323
+ name = name.split("/")
324
+ pointer = model
325
+ for m_name in name:
326
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
327
+ scope_names = re.split(r"(\d+)", m_name)
328
+ else:
329
+ scope_names = [m_name]
330
+ if scope_names[0] == "w" or scope_names[0] == "g":
331
+ pointer = getattr(pointer, "weight")
332
+ elif scope_names[0] == "b":
333
+ pointer = getattr(pointer, "bias")
334
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
335
+ pointer = getattr(pointer, scope_names[0])
336
+ pointer = getattr(pointer, "weight")
337
+ else:
338
+ pointer = getattr(pointer, scope_names[0])
339
+ if len(scope_names) >= 2:
340
+ num = int(scope_names[1])
341
+ pointer = pointer[num]
342
+ try:
343
+ assert (
344
+ pointer.shape == array.shape
345
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
346
+ except AssertionError as e:
347
+ e.args += (pointer.shape, array.shape)
348
+ raise
349
+ logger.info(f"Initialize PyTorch weight {name}")
350
+ pointer.data = torch.from_numpy(array)
351
+ return model
352
+
353
+
354
+ class GPT2Attention(nn.Module):
355
+ def __init__(self, config, is_cross_attention=False):
356
+ super().__init__()
357
+
358
+ max_positions = config.max_position_embeddings
359
+ self.register_buffer(
360
+ "bias",
361
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
362
+ 1, 1, max_positions, max_positions
363
+ ),
364
+ )
365
+ self.register_buffer("masked_bias", torch.tensor(-1e4))
366
+
367
+ self.embed_dim = config.hidden_size
368
+ self.num_heads = config.num_attention_heads
369
+ self.head_dim = self.embed_dim // self.num_heads
370
+ self.split_size = self.embed_dim
371
+ if self.head_dim * self.num_heads != self.embed_dim:
372
+ raise ValueError(
373
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
374
+ )
375
+
376
+ self.scale_attn_weights = config.scale_attn_weights
377
+ self.is_cross_attention = is_cross_attention
378
+
379
+ if self.is_cross_attention:
380
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
381
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
382
+ else:
383
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
384
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
385
+
386
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
387
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
388
+
389
+ self.pruned_heads = set()
390
+ self.position_embedding_type = config.position_embedding_type
391
+
392
+ def prune_heads(self, heads):
393
+ if len(heads) == 0:
394
+ return
395
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
396
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
397
+
398
+ # Prune conv1d layers
399
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
400
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
401
+
402
+ # Update hyper params
403
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
404
+ self.num_heads = self.num_heads - len(heads)
405
+ self.pruned_heads = self.pruned_heads.union(heads)
406
+
407
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
408
+
409
+ # [b, np, sq, sk]
410
+ output_size = (query.size(1),
411
+ query.size(2),
412
+ query.size(0),
413
+ key.size(0))
414
+ # preallocating result tensor: [b * np, sq, sk]
415
+ if alibi is None:
416
+ matmul_result = torch.empty(
417
+ output_size[0]*output_size[1],
418
+ output_size[2],
419
+ output_size[3],
420
+ dtype=query_layer.dtype,
421
+ device=torch.cuda.current_device())
422
+ else:
423
+ matmul_result = alibi[:output_size[0]*output_size[1], :, :output_size[3]]
424
+
425
+ # [sq, b, np, hn] -> [sq, b * np, hn]
426
+ query = query.view(output_size[2],
427
+ output_size[0] * output_size[1], -1)
428
+ # [sk, b, np, hn] -> [sk, b * np, hn]
429
+ key = key.view(output_size[3],
430
+ output_size[0] * output_size[1], -1)
431
+ # Raw attention scores. [b * np, sq, sk]
432
+ attn_weights = torch.baddbmm(
433
+ matmul_result,
434
+ query_layer.transpose(0, 1), # [b * np, sq, hn]
435
+ key_layer.transpose(0, 1).transpose(-1, -2), # [b * np, hn, sk]
436
+ beta=0.0 if alibi is None else 1.0, alpha=(1.0/self.norm_factor))
437
+
438
+ #attn_weights = torch.matmul(query, key.transpose(-1, -2))
439
+
440
+ # change view to [b, np, sq, sk]
441
+ attn_weights = attn_weights.view(*output_size)
442
+
443
+ # do we need this scaling. does the alpha do the scaling as above?
444
+ if self.scale_attn_weights:
445
+ attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
446
+
447
+ if not self.is_cross_attention:
448
+ # if only "normal" attention layer implements causal mask
449
+ query_length, key_length = query.size(-2), key.size(-2)
450
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
451
+ attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
452
+
453
+ if attention_mask is not None:
454
+ # Apply the attention mask
455
+ attn_weights = attn_weights + attention_mask
456
+
457
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
458
+ attn_weights = self.attn_dropout(attn_weights)
459
+
460
+ # Mask heads if we want to
461
+ if head_mask is not None:
462
+ attn_weights = attn_weights * head_mask
463
+
464
+ attn_output = torch.matmul(attn_weights, value)
465
+
466
+ return attn_output, attn_weights
467
+
468
+ def _split_heads(self, tensor, num_heads, attn_head_size):
469
+ """
470
+ Splits hidden_size dim into attn_head_size and num_heads
471
+ """
472
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
473
+ tensor = tensor.view(*new_shape)
474
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
475
+
476
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
477
+ """
478
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
479
+ """
480
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
481
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
482
+ return tensor.view(new_shape)
483
+
484
+ def forward(
485
+ self,
486
+ hidden_states,
487
+ layer_past=None,
488
+ attention_mask=None,
489
+ head_mask=None,
490
+ encoder_hidden_states=None,
491
+ encoder_attention_mask=None,
492
+ alibi=None,
493
+ use_cache=False,
494
+ output_attentions=False,
495
+
496
+ ):
497
+ if encoder_hidden_states is not None:
498
+ if not hasattr(self, "q_attn"):
499
+ raise ValueError(
500
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
501
+ "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
502
+ )
503
+
504
+ query = self.q_attn(hidden_states)
505
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
506
+ attention_mask = encoder_attention_mask
507
+ else:
508
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
509
+
510
+ query = self._split_heads(query, self.num_heads, self.head_dim)
511
+ key = self._split_heads(key, self.num_heads, self.head_dim)
512
+ value = self._split_heads(value, self.num_heads, self.head_dim)
513
+
514
+ if layer_past is not None:
515
+ past_key, past_value = layer_past
516
+ key = torch.cat((past_key, key), dim=-2)
517
+ value = torch.cat((past_value, value), dim=-2)
518
+
519
+ if use_cache is True:
520
+ present = (key, value)
521
+ else:
522
+ present = None
523
+
524
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
525
+
526
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
527
+ attn_output = self.c_proj(attn_output)
528
+ attn_output = self.resid_dropout(attn_output)
529
+
530
+ outputs = (attn_output, present)
531
+ if output_attentions:
532
+ outputs += (attn_weights,)
533
+
534
+ return outputs # a, present, (attentions)
535
+
536
+
537
+ class GPT2MLP(nn.Module):
538
+ def __init__(self, intermediate_size, config):
539
+ super().__init__()
540
+ embed_dim = config.hidden_size
541
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
542
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
543
+ self.act = ACT2FN[config.activation_function]
544
+ self.dropout = nn.Dropout(config.resid_pdrop)
545
+
546
+ def forward(self, hidden_states):
547
+ hidden_states = self.c_fc(hidden_states)
548
+ hidden_states = self.act(hidden_states)
549
+ hidden_states = self.c_proj(hidden_states)
550
+ hidden_states = self.dropout(hidden_states)
551
+ return hidden_states
552
+
553
+
554
+ class GPT2Block(nn.Module):
555
+ def __init__(self, config):
556
+ super().__init__()
557
+ hidden_size = config.hidden_size
558
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
559
+
560
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
561
+ self.attn = GPT2Attention(config)
562
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
563
+
564
+ if config.add_cross_attention:
565
+ self.crossattention = GPT2Attention(config, is_cross_attention=True)
566
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
567
+
568
+ self.mlp = GPT2MLP(inner_dim, config)
569
+
570
+ def forward(
571
+ self,
572
+ hidden_states,
573
+ layer_past=None,
574
+ attention_mask=None,
575
+ head_mask=None,
576
+ encoder_hidden_states=None,
577
+ encoder_attention_mask=None,
578
+ alibi=None,
579
+ use_cache=False,
580
+ output_attentions=False,
581
+ ):
582
+ residual = hidden_states
583
+ hidden_states = self.ln_1(hidden_states)
584
+ attn_outputs = self.attn(
585
+ hidden_states,
586
+ layer_past=layer_past,
587
+ attention_mask=attention_mask,
588
+ head_mask=head_mask,
589
+ alibi=alibi,
590
+ use_cache=use_cache,
591
+ output_attentions=output_attentions,
592
+ )
593
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
594
+ outputs = attn_outputs[1:]
595
+ # residual connection
596
+ hidden_states = attn_output + residual
597
+
598
+ if encoder_hidden_states is not None:
599
+ # add one self-attention block for cross-attention
600
+ if not hasattr(self, "crossattention"):
601
+ raise ValueError(
602
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
603
+ "cross-attention layers by setting `config.add_cross_attention=True`"
604
+ )
605
+ residual = hidden_states
606
+ hidden_states = self.ln_cross_attn(hidden_states)
607
+ cross_attn_outputs = self.crossattention(
608
+ hidden_states,
609
+ attention_mask=attention_mask,
610
+ head_mask=head_mask,
611
+ encoder_hidden_states=encoder_hidden_states,
612
+ encoder_attention_mask=encoder_attention_mask,
613
+ alibi=alibi,
614
+ output_attentions=output_attentions,
615
+ )
616
+ attn_output = cross_attn_outputs[0]
617
+ # residual connection
618
+ hidden_states = residual + attn_output
619
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
620
+
621
+ residual = hidden_states
622
+ hidden_states = self.ln_2(hidden_states)
623
+ feed_forward_hidden_states = self.mlp(hidden_states)
624
+ # residual connection
625
+ hidden_states = residual + feed_forward_hidden_states
626
+
627
+ if use_cache:
628
+ outputs = (hidden_states,) + outputs
629
+ else:
630
+ outputs = (hidden_states,) + outputs[1:]
631
+
632
+ return outputs # hidden_states, present, (attentions, cross_attentions)
633
+
634
+
635
+ class GPT2PreTrainedModel(PreTrainedModel):
636
+ """
637
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
638
+ models.
639
+ """
640
+
641
+ config_class = GPT2Config
642
+ load_tf_weights = load_tf_weights_in_gpt2
643
+ base_model_prefix = "transformer"
644
+ is_parallelizable = True
645
+ supports_gradient_checkpointing = True
646
+
647
+ def __init__(self, *inputs, **kwargs):
648
+ super().__init__(*inputs, **kwargs)
649
+
650
+
651
+ def _init_weights(self, module):
652
+ """Initialize the weights."""
653
+ if isinstance(module, (nn.Linear, Conv1D)):
654
+ # Slightly different from the TF version which uses truncated_normal for initialization
655
+ # cf https://github.com/pytorch/pytorch/pull/5617
656
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
657
+ if module.bias is not None:
658
+ module.bias.data.zero_()
659
+ elif isinstance(module, nn.Embedding):
660
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
661
+ if module.padding_idx is not None:
662
+ module.weight.data[module.padding_idx].zero_()
663
+ elif isinstance(module, nn.LayerNorm):
664
+ module.bias.data.zero_()
665
+ module.weight.data.fill_(1.0)
666
+
667
+ def _set_gradient_checkpointing(self, module, value=False):
668
+ if isinstance(module, GPT2Model):
669
+ module.gradient_checkpointing = value
670
+
671
+
672
+
673
+ @dataclass
674
+ class GPT2DoubleHeadsModelOutput(ModelOutput):
675
+ """
676
+ Base class for outputs of models predicting if two sentences are consecutive or not.
677
+
678
+ Args:
679
+ loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
680
+ Language modeling loss.
681
+ mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
682
+ Multiple choice classification loss.
683
+ logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
684
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
685
+ mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
686
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
687
+ past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
688
+ Tuple of length :obj:`config.n_layers`, containing tuples of tensors of shape :obj:`(batch_size, num_heads,
689
+ sequence_length, embed_size_per_head)`).
690
+
691
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
692
+ :obj:`past_key_values` input) to speed up sequential decoding.
693
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
694
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
695
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
696
+
697
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
698
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
699
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
700
+ sequence_length, sequence_length)`.
701
+
702
+ GPT2Attentions weights after the attention softmax, used to compute the weighted average in the
703
+ self-attention heads.
704
+ """
705
+
706
+ loss: Optional[torch.FloatTensor] = None
707
+ mc_loss: Optional[torch.FloatTensor] = None
708
+ logits: torch.FloatTensor = None
709
+ mc_logits: torch.FloatTensor = None
710
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
711
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
712
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
713
+
714
+
715
+ GPT2_START_DOCSTRING = r"""
716
+
717
+ This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
718
+ methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
719
+ pruning heads etc.)
720
+
721
+ This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
722
+ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
723
+ general usage and behavior.
724
+
725
+ Parameters:
726
+ config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
727
+ Initializing with a config file does not load the weights associated with the model, only the
728
+ configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
729
+ weights.
730
+ """
731
+
732
+ GPT2_INPUTS_DOCSTRING = r"""
733
+ Args:
734
+ input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
735
+ :obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
736
+ ``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
737
+ sequence tokens in the vocabulary.
738
+
739
+ If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
740
+ passed as ``input_ids``.
741
+
742
+ Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See
743
+ :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
744
+ details.
745
+
746
+ `What are input IDs? <../glossary.html#input-ids>`__
747
+ past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers`):
748
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
749
+ :obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
750
+ have their past given to this model should not be passed as ``input_ids`` as they have already been
751
+ computed.
752
+ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
753
+ Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
754
+
755
+ - 1 for tokens that are **not masked**,
756
+ - 0 for tokens that are **masked**.
757
+
758
+ `What are attention masks? <../glossary.html#attention-mask>`__
759
+ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
760
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
761
+ 1]``:
762
+
763
+ - 0 corresponds to a `sentence A` token,
764
+ - 1 corresponds to a `sentence B` token.
765
+
766
+ `What are token type IDs? <../glossary.html#token-type-ids>`_
767
+ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
768
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
769
+ config.max_position_embeddings - 1]``.
770
+
771
+ `What are position IDs? <../glossary.html#position-ids>`_
772
+ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
773
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
774
+
775
+ - 1 indicates the head is **not masked**,
776
+ - 0 indicates the head is **masked**.
777
+
778
+ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
779
+ Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
780
+ This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
781
+ vectors than the model's internal embedding lookup matrix.
782
+
783
+ If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
784
+ :obj:`past_key_values`).
785
+ use_cache (:obj:`bool`, `optional`):
786
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
787
+ decoding (see :obj:`past_key_values`).
788
+ output_attentions (:obj:`bool`, `optional`):
789
+ Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
790
+ tensors for more detail.
791
+ output_hidden_states (:obj:`bool`, `optional`):
792
+ Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
793
+ more detail.
794
+ return_dict (:obj:`bool`, `optional`):
795
+ Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
796
+ """
797
+ PARALLELIZE_DOCSTRING = r"""
798
+ This is an experimental feature and is subject to change at a moment's notice.
799
+
800
+ Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
801
+ it will evenly distribute blocks across all devices.
802
+
803
+ Args:
804
+ device_map (:obj:`Dict[int, list]`, optional, defaults to None):
805
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
806
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
807
+ have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
808
+ following number of attention modules:
809
+
810
+ - gpt2: 12
811
+ - gpt2-medium: 24
812
+ - gpt2-large: 36
813
+ - gpt2-xl: 48
814
+
815
+ Example::
816
+
817
+ # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
818
+ model = GPT2LMHeadModel.from_pretrained('gpt2-xl')
819
+ device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
820
+
821
+ 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
822
+ 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
823
+ 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]}
824
+ model.parallelize(device_map)
825
+ """
826
+ DEPARALLELIZE_DOCSTRING = r"""
827
+ Moves the model to cpu from a model parallel state.
828
+
829
+ Example::
830
+
831
+ # On a 4 GPU machine with gpt2-large:
832
+ model = GPT2LMHeadModel.from_pretrained('gpt2-large')
833
+ device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7],
834
+
835
+ 1: [8, 9, 10, 11, 12, 13, 14, 15],
836
+ 2: [16, 17, 18, 19, 20, 21, 22, 23],
837
+ 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}
838
+ model.parallelize(device_map) # Splits the model across several devices
839
+ model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
840
+ """
841
+
842
+
843
+ @add_start_docstrings(
844
+ "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
845
+ GPT2_START_DOCSTRING,
846
+ )
847
+ class GPT2Model(GPT2PreTrainedModel):
848
+ _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
849
+
850
+ def __init__(self, config):
851
+ super().__init__(config)
852
+
853
+ self.embed_dim = config.hidden_size
854
+
855
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
856
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
857
+
858
+ self.drop = nn.Dropout(config.embd_pdrop)
859
+ self.h = nn.ModuleList([GPT2Block(config) for _ in range(config.num_hidden_layers)])
860
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
861
+
862
+ self.init_weights()
863
+
864
+ # Model parallel
865
+ self.model_parallel = False
866
+ self.device_map = None
867
+ self.gradient_checkpointing = False
868
+ config = kwargs.get('config',inputs[0])
869
+ if args.position_embedding_type == PositionEmbeddingType_alibi:
870
+ self.alibi = self._build_alibi_tensor(args.seq_length, args.num_attention_heads, args.micro_batch_size).to(torch.cuda.current_device())
871
+ if args.params_dtype == torch.float16:
872
+ self.alibi = self.alibi.to(torch.float16)
873
+ elif args.params_dtype == torch.bfloat16:
874
+ self.alibi = self.alibi.to(torch.bfloat16)
875
+ else:
876
+ self.alibi = None
877
+
878
+ @staticmethod
879
+ def _build_alibi_tensor(max_seq_len, num_attention_heads, batch_size):
880
+ # Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
881
+ """Returns tensor shaped (batch_size * num_attention_heads, 1, max_seq_len)"""
882
+ def get_slopes(n):
883
+ def get_slopes_power_of_2(n):
884
+ start = (2 ** (-2 ** -(math.log2(n) - 3)))
885
+ ratio = start
886
+ return [start * ratio ** i for i in range(n)]
887
+
888
+ if math.log2(n).is_integer():
889
+ return get_slopes_power_of_2(n)
890
+ else:
891
+ closest_power_of_2 = 2 ** math.floor(math.log2(n))
892
+ return get_slopes_power_of_2(closest_power_of_2) + get_slopes(2 * closest_power_of_2)[0::2][
893
+ :n - closest_power_of_2]
894
+ slopes = torch.Tensor(get_slopes(num_attention_heads))
895
+ alibi = slopes.unsqueeze(1).unsqueeze(1) * torch.arange(max_seq_len).unsqueeze(0).unsqueeze(0).expand(num_attention_heads, -1, -1)
896
+ alibi = alibi.repeat(batch_size, 1, 1)
897
+ return alibi
898
+
899
+
900
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
901
+ def parallelize(self, device_map=None):
902
+ # Check validity of device_map
903
+ self.device_map = (
904
+ get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
905
+ )
906
+ assert_device_map(self.device_map, len(self.h))
907
+ self.model_parallel = True
908
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
909
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
910
+ self.wte = self.wte.to(self.first_device)
911
+ self.wpe = self.wpe.to(self.first_device)
912
+ # Load onto devices
913
+ for k, v in self.device_map.items():
914
+ for block in v:
915
+ cuda_device = "cuda:" + str(k)
916
+ self.h[block] = self.h[block].to(cuda_device)
917
+ # ln_f to last
918
+ self.ln_f = self.ln_f.to(self.last_device)
919
+
920
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
921
+ def deparallelize(self):
922
+ self.model_parallel = False
923
+ self.device_map = None
924
+ self.first_device = "cpu"
925
+ self.last_device = "cpu"
926
+ self.wte = self.wte.to("cpu")
927
+ self.wpe = self.wpe.to("cpu")
928
+ for index in range(len(self.h)):
929
+ self.h[index] = self.h[index].to("cpu")
930
+ self.ln_f = self.ln_f.to("cpu")
931
+ torch.cuda.empty_cache()
932
+
933
+ def get_input_embeddings(self):
934
+ return self.wte
935
+
936
+ def set_input_embeddings(self, new_embeddings):
937
+ self.wte = new_embeddings
938
+
939
+ def _prune_heads(self, heads_to_prune):
940
+ """
941
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
942
+ """
943
+ for layer, heads in heads_to_prune.items():
944
+ self.h[layer].attn.prune_heads(heads)
945
+
946
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
947
+ @add_code_sample_docstrings(
948
+ tokenizer_class=_TOKENIZER_FOR_DOC,
949
+ checkpoint=_CHECKPOINT_FOR_DOC,
950
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
951
+ config_class=_CONFIG_FOR_DOC,
952
+ )
953
+ def forward(
954
+ self,
955
+ input_ids=None,
956
+ past_key_values=None,
957
+ attention_mask=None,
958
+ token_type_ids=None,
959
+ position_ids=None,
960
+ head_mask=None,
961
+ inputs_embeds=None,
962
+ encoder_hidden_states=None,
963
+ encoder_attention_mask=None,
964
+ use_cache=None,
965
+ output_attentions=None,
966
+ output_hidden_states=None,
967
+ return_dict=None,
968
+ prefix_lm_token_id = None
969
+ ):
970
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
971
+ output_hidden_states = (
972
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
973
+ )
974
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
975
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
976
+
977
+ if input_ids is not None and inputs_embeds is not None:
978
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
979
+ elif input_ids is not None:
980
+ input_shape = input_ids.size()
981
+ input_ids = input_ids.view(-1, input_shape[-1])
982
+ batch_size = input_ids.shape[0]
983
+ elif inputs_embeds is not None:
984
+ input_shape = inputs_embeds.size()[:-1]
985
+ batch_size = inputs_embeds.shape[0]
986
+ else:
987
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
988
+
989
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
990
+
991
+ if token_type_ids is not None:
992
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
993
+ if position_ids is not None:
994
+ position_ids = position_ids.view(-1, input_shape[-1])
995
+
996
+ if past_key_values is None:
997
+ past_length = 0
998
+ past_key_values = tuple([None] * len(self.h))
999
+ else:
1000
+ past_length = past_key_values[0][0].size(-2)
1001
+ if position_ids is None:
1002
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
1003
+ position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
1004
+
1005
+ # GPT2Attention mask.
1006
+ if attention_mask is not None:
1007
+ if batch_size <= 0:
1008
+ raise ValueError("batch_size has to be defined and > 0")
1009
+ attention_mask = attention_mask.view(batch_size, -1)
1010
+ # do prefix_lm masking if we have input_ids. We use the prefix_lm_token_id token as the prefix_lm boundary.
1011
+ if prefix_lm_token_id is not None and input_ids is not None:
1012
+ for attention_mask_row, input_ids_row in zip(attention_mask, input_ids): # do this in the bs dimension
1013
+ attention_mask_row[: (input_ids_row == prefix_lm_token_id).nonzero(as_tuple=True)[0], :] = 1.0 # is this right?
1014
+
1015
+ # We create a 3D attention mask from a 2D tensor mask.
1016
+ # Sizes are [batch_size, 1, 1, to_seq_length]
1017
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
1018
+ # this attention mask is more simple than the triangular masking of causal attention
1019
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
1020
+ attention_mask = attention_mask[:, None, None, :]
1021
+
1022
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
1023
+ # masked positions, this operation will create a tensor which is 0.0 for
1024
+ # positions we want to attend and -10000.0 for masked positions.
1025
+ # Since we are adding it to the raw scores before the softmax, this is
1026
+ # effectively the same as removing these entirely.
1027
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
1028
+ attention_mask = (1.0 - attention_mask) * -10000.0
1029
+
1030
+ # If a 2D or 3D attention mask is provided for the cross-attention
1031
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
1032
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
1033
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1034
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1035
+ if encoder_attention_mask is None:
1036
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1037
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1038
+ else:
1039
+ encoder_attention_mask = None
1040
+
1041
+ # Prepare head mask if needed
1042
+ # 1.0 in head_mask indicate we keep the head
1043
+ # attention_probs has shape bsz x n_heads x N x N
1044
+ # head_mask has shape n_layer x batch x n_heads x N x N
1045
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
1046
+
1047
+ if inputs_embeds is None:
1048
+ inputs_embeds = self.wte(input_ids)
1049
+ position_embeds = self.wpe(position_ids)
1050
+ hidden_states = inputs_embeds + position_embeds
1051
+
1052
+ if token_type_ids is not None:
1053
+ token_type_embeds = self.wte(token_type_ids)
1054
+ hidden_states = hidden_states + token_type_embeds
1055
+
1056
+ hidden_states = self.drop(hidden_states)
1057
+
1058
+ output_shape = input_shape + (hidden_states.size(-1),)
1059
+
1060
+ presents = () if use_cache else None
1061
+ all_self_attentions = () if output_attentions else None
1062
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
1063
+ all_hidden_states = () if output_hidden_states else None
1064
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
1065
+
1066
+ # Model parallel
1067
+ if self.model_parallel:
1068
+ torch.cuda.set_device(hidden_states.device)
1069
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
1070
+ if layer_past is not None:
1071
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
1072
+ # Ensure that attention_mask is always on the same device as hidden_states
1073
+ if attention_mask is not None:
1074
+ attention_mask = attention_mask.to(hidden_states.device)
1075
+ if isinstance(head_mask, torch.Tensor):
1076
+ head_mask = head_mask.to(hidden_states.device)
1077
+ if output_hidden_states:
1078
+ all_hidden_states = all_hidden_states + (hidden_states,)
1079
+
1080
+ if self.gradient_checkpointing and self.training:
1081
+
1082
+ if use_cache:
1083
+ logger.warning(
1084
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1085
+ )
1086
+ use_cache = False
1087
+
1088
+ def create_custom_forward(module):
1089
+ def custom_forward(*inputs):
1090
+ # None for past_key_value
1091
+ return module(*inputs, use_cache, output_attentions)
1092
+
1093
+ return custom_forward
1094
+
1095
+ outputs = torch.utils.checkpoint.checkpoint(
1096
+ create_custom_forward(block),
1097
+ hidden_states,
1098
+ None,
1099
+ attention_mask,
1100
+ head_mask[i],
1101
+ encoder_hidden_states,
1102
+ encoder_attention_mask,
1103
+ self.alibi
1104
+ )
1105
+ else:
1106
+ outputs = block(
1107
+ hidden_states,
1108
+ layer_past=layer_past,
1109
+ attention_mask=attention_mask,
1110
+ head_mask=head_mask[i],
1111
+ encoder_hidden_states=encoder_hidden_states,
1112
+ encoder_attention_mask=encoder_attention_mask,
1113
+ use_cache=use_cache,
1114
+ output_attentions=output_attentions,
1115
+ alibi=self.alibi
1116
+ )
1117
+
1118
+ hidden_states = outputs[0]
1119
+ if use_cache is True:
1120
+ presents = presents + (outputs[1],)
1121
+
1122
+ if output_attentions:
1123
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
1124
+ if self.config.add_cross_attention:
1125
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
1126
+
1127
+ # Model Parallel: If it's the last layer for that device, put things on the next device
1128
+ if self.model_parallel:
1129
+ for k, v in self.device_map.items():
1130
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
1131
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
1132
+
1133
+ hidden_states = self.ln_f(hidden_states)
1134
+
1135
+ hidden_states = hidden_states.view(*output_shape)
1136
+ # Add last hidden state
1137
+ if output_hidden_states:
1138
+ all_hidden_states = all_hidden_states + (hidden_states,)
1139
+
1140
+ if not return_dict:
1141
+ return tuple(
1142
+ v
1143
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
1144
+ if v is not None
1145
+ )
1146
+
1147
+ return BaseModelOutputWithPastAndCrossAttentions(
1148
+ last_hidden_state=hidden_states,
1149
+ past_key_values=presents,
1150
+ hidden_states=all_hidden_states,
1151
+ attentions=all_self_attentions,
1152
+ cross_attentions=all_cross_attentions,
1153
+ )
1154
+
1155
+
1156
+ @add_start_docstrings(
1157
+ """
1158
+ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
1159
+ embeddings).
1160
+ """,
1161
+ GPT2_START_DOCSTRING,
1162
+ )
1163
+ class GPT2LMHeadModel(GPT2PreTrainedModel):
1164
+ _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
1165
+
1166
+ def __init__(self, config):
1167
+ super().__init__(config)
1168
+ self.transformer = GPT2Model(config)
1169
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
1170
+
1171
+ self.init_weights()
1172
+
1173
+ # Model parallel
1174
+ self.model_parallel = False
1175
+ self.device_map = None
1176
+
1177
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1178
+ def parallelize(self, device_map=None):
1179
+ self.device_map = (
1180
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1181
+ if device_map is None
1182
+ else device_map
1183
+ )
1184
+ assert_device_map(self.device_map, len(self.transformer.h))
1185
+ self.transformer.parallelize(self.device_map)
1186
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1187
+ self.model_parallel = True
1188
+
1189
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1190
+ def deparallelize(self):
1191
+ self.transformer.deparallelize()
1192
+ self.transformer = self.transformer.to("cpu")
1193
+ self.lm_head = self.lm_head.to("cpu")
1194
+ self.model_parallel = False
1195
+ torch.cuda.empty_cache()
1196
+
1197
+ def get_output_embeddings(self):
1198
+ return self.lm_head
1199
+
1200
+ def set_output_embeddings(self, new_embeddings):
1201
+ self.lm_head = new_embeddings
1202
+
1203
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
1204
+ token_type_ids = kwargs.get("token_type_ids", None)
1205
+ # only last token for inputs_ids if past is defined in kwargs
1206
+ if past:
1207
+ input_ids = input_ids[:, -1].unsqueeze(-1)
1208
+ if token_type_ids is not None:
1209
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
1210
+
1211
+ attention_mask = kwargs.get("attention_mask", None)
1212
+ position_ids = kwargs.get("position_ids", None)
1213
+
1214
+ if attention_mask is not None and position_ids is None:
1215
+ # create position_ids on the fly for batch generation
1216
+ position_ids = attention_mask.long().cumsum(-1) - 1
1217
+ position_ids.masked_fill_(attention_mask == 0, 1)
1218
+ if past:
1219
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1220
+ else:
1221
+ position_ids = None
1222
+ return {
1223
+ "input_ids": input_ids,
1224
+ "past_key_values": past,
1225
+ "use_cache": kwargs.get("use_cache"),
1226
+ "position_ids": position_ids,
1227
+ "attention_mask": attention_mask,
1228
+ "token_type_ids": token_type_ids,
1229
+ }
1230
+
1231
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1232
+ @add_code_sample_docstrings(
1233
+ tokenizer_class=_TOKENIZER_FOR_DOC,
1234
+ checkpoint=_CHECKPOINT_FOR_DOC,
1235
+ output_type=CausalLMOutputWithCrossAttentions,
1236
+ config_class=_CONFIG_FOR_DOC,
1237
+ )
1238
+ def forward(
1239
+ self,
1240
+ input_ids=None,
1241
+ past_key_values=None,
1242
+ attention_mask=None,
1243
+ token_type_ids=None,
1244
+ position_ids=None,
1245
+ head_mask=None,
1246
+ inputs_embeds=None,
1247
+ encoder_hidden_states=None,
1248
+ encoder_attention_mask=None,
1249
+ labels=None,
1250
+ use_cache=None,
1251
+ output_attentions=None,
1252
+ output_hidden_states=None,
1253
+ return_dict=None,
1254
+ ):
1255
+ r"""
1256
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1257
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1258
+ ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
1259
+ ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
1260
+ """
1261
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1262
+
1263
+ transformer_outputs = self.transformer(
1264
+ input_ids,
1265
+ past_key_values=past_key_values,
1266
+ attention_mask=attention_mask,
1267
+ token_type_ids=token_type_ids,
1268
+ position_ids=position_ids,
1269
+ head_mask=head_mask,
1270
+ inputs_embeds=inputs_embeds,
1271
+ encoder_hidden_states=encoder_hidden_states,
1272
+ encoder_attention_mask=encoder_attention_mask,
1273
+ use_cache=use_cache,
1274
+ output_attentions=output_attentions,
1275
+ output_hidden_states=output_hidden_states,
1276
+ return_dict=return_dict,
1277
+ )
1278
+ hidden_states = transformer_outputs[0]
1279
+
1280
+ # Set device for model parallelism
1281
+ if self.model_parallel:
1282
+ torch.cuda.set_device(self.transformer.first_device)
1283
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1284
+
1285
+ lm_logits = self.lm_head(hidden_states)
1286
+
1287
+ loss = None
1288
+ if labels is not None:
1289
+ # Shift so that tokens < n predict n
1290
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1291
+ shift_labels = labels[..., 1:].contiguous()
1292
+ # Flatten the tokens
1293
+ loss_fct = CrossEntropyLoss()
1294
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1295
+
1296
+ if not return_dict:
1297
+ output = (lm_logits,) + transformer_outputs[1:]
1298
+ return ((loss,) + output) if loss is not None else output
1299
+
1300
+ return CausalLMOutputWithCrossAttentions(
1301
+ loss=loss,
1302
+ logits=lm_logits,
1303
+ past_key_values=transformer_outputs.past_key_values,
1304
+ hidden_states=transformer_outputs.hidden_states,
1305
+ attentions=transformer_outputs.attentions,
1306
+ cross_attentions=transformer_outputs.cross_attentions,
1307
+ )
1308
+
1309
+ @staticmethod
1310
+ def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
1311
+ """
1312
+ This function is used to re-order the :obj:`past_key_values` cache if
1313
+ :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
1314
+ called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
1315
+ """
1316
+ return tuple(
1317
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1318
+ for layer_past in past
1319
+ )
1320
+
1321
+
1322
+ @add_start_docstrings(
1323
+ """
1324
+ The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
1325
+ RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
1326
+ input embeddings, the classification head takes as input the input of a specified classification token index in the
1327
+ input sequence).
1328
+ """,
1329
+ GPT2_START_DOCSTRING,
1330
+ )
1331
+ class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
1332
+ _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
1333
+
1334
+ def __init__(self, config):
1335
+ super().__init__(config)
1336
+ config.num_labels = 1
1337
+ self.transformer = GPT2Model(config)
1338
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
1339
+ self.multiple_choice_head = SequenceSummary(config)
1340
+
1341
+ self.init_weights()
1342
+
1343
+ # Model parallel
1344
+ self.model_parallel = False
1345
+ self.device_map = None
1346
+
1347
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1348
+ def parallelize(self, device_map=None):
1349
+ self.device_map = (
1350
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1351
+ if device_map is None
1352
+ else device_map
1353
+ )
1354
+ assert_device_map(self.device_map, len(self.transformer.h))
1355
+ self.transformer.parallelize(self.device_map)
1356
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1357
+ self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device)
1358
+ self.model_parallel = True
1359
+
1360
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1361
+ def deparallelize(self):
1362
+ self.transformer.deparallelize()
1363
+ self.transformer = self.transformer.to("cpu")
1364
+ self.lm_head = self.lm_head.to("cpu")
1365
+ self.multiple_choice_head = self.multiple_choice_head.to("cpu")
1366
+ self.model_parallel = False
1367
+ torch.cuda.empty_cache()
1368
+
1369
+ def get_output_embeddings(self):
1370
+ return self.lm_head
1371
+
1372
+ def set_output_embeddings(self, new_embeddings):
1373
+ self.lm_head = new_embeddings
1374
+
1375
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
1376
+ token_type_ids = kwargs.get("token_type_ids", None)
1377
+ # only last token for inputs_ids if past is defined in kwargs
1378
+ if past:
1379
+ input_ids = input_ids[:, -1].unsqueeze(-1)
1380
+ if token_type_ids is not None:
1381
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
1382
+
1383
+ attention_mask = kwargs.get("attention_mask", None)
1384
+ position_ids = kwargs.get("position_ids", None)
1385
+
1386
+ if attention_mask is not None and position_ids is None:
1387
+ # create position_ids on the fly for batch generation
1388
+ position_ids = attention_mask.long().cumsum(-1) - 1
1389
+ position_ids.masked_fill_(attention_mask == 0, 1)
1390
+ if past:
1391
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1392
+ else:
1393
+ position_ids = None
1394
+
1395
+ return {
1396
+ "input_ids": input_ids,
1397
+ "past_key_values": past,
1398
+ "use_cache": kwargs.get("use_cache"),
1399
+ "position_ids": position_ids,
1400
+ "attention_mask": attention_mask,
1401
+ "token_type_ids": token_type_ids,
1402
+ }
1403
+
1404
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1405
+ @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
1406
+ def forward(
1407
+ self,
1408
+ input_ids=None,
1409
+ past_key_values=None,
1410
+ attention_mask=None,
1411
+ token_type_ids=None,
1412
+ position_ids=None,
1413
+ head_mask=None,
1414
+ inputs_embeds=None,
1415
+ mc_token_ids=None,
1416
+ labels=None,
1417
+ mc_labels=None,
1418
+ use_cache=None,
1419
+ output_attentions=None,
1420
+ output_hidden_states=None,
1421
+ return_dict=None,
1422
+ **kwargs,
1423
+ ):
1424
+ r"""
1425
+ mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):
1426
+ Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -
1427
+ 1[``.
1428
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1429
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1430
+ ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size - 1]`` All labels set to
1431
+ ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size - 1]``
1432
+ mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`):
1433
+ Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
1434
+ num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see
1435
+ `input_ids` above)
1436
+
1437
+ Return:
1438
+
1439
+ Example::
1440
+
1441
+ >>> import torch
1442
+ >>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
1443
+
1444
+ >>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
1445
+ >>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
1446
+
1447
+ >>> # Add a [CLS] to the vocabulary (we should train it also!)
1448
+ >>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})
1449
+
1450
+ >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
1451
+
1452
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
1453
+ >>> encoded_choices = [tokenizer.encode(s) for s in choices]
1454
+ >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
1455
+
1456
+ >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
1457
+ >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
1458
+
1459
+ >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
1460
+ >>> lm_logits = outputs.logits
1461
+ >>> mc_logits = outputs.mc_logits
1462
+
1463
+ """
1464
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1465
+
1466
+ transformer_outputs = self.transformer(
1467
+ input_ids,
1468
+ past_key_values=past_key_values,
1469
+ attention_mask=attention_mask,
1470
+ token_type_ids=token_type_ids,
1471
+ position_ids=position_ids,
1472
+ head_mask=head_mask,
1473
+ inputs_embeds=inputs_embeds,
1474
+ use_cache=use_cache,
1475
+ output_attentions=output_attentions,
1476
+ output_hidden_states=output_hidden_states,
1477
+ return_dict=return_dict,
1478
+ )
1479
+
1480
+ hidden_states = transformer_outputs[0]
1481
+
1482
+ # Set device for model parallelism
1483
+ if self.model_parallel:
1484
+ torch.cuda.set_device(self.transformer.first_device)
1485
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1486
+
1487
+ lm_logits = self.lm_head(hidden_states)
1488
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
1489
+
1490
+ mc_loss = None
1491
+ if mc_labels is not None:
1492
+ loss_fct = CrossEntropyLoss()
1493
+ mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
1494
+ lm_loss = None
1495
+ if labels is not None:
1496
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1497
+ shift_labels = labels[..., 1:].contiguous()
1498
+ loss_fct = CrossEntropyLoss()
1499
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1500
+
1501
+ if not return_dict:
1502
+ output = (lm_logits, mc_logits) + transformer_outputs[1:]
1503
+ if mc_loss is not None:
1504
+ output = (mc_loss,) + output
1505
+ return ((lm_loss,) + output) if lm_loss is not None else output
1506
+
1507
+ return GPT2DoubleHeadsModelOutput(
1508
+ loss=lm_loss,
1509
+ mc_loss=mc_loss,
1510
+ logits=lm_logits,
1511
+ mc_logits=mc_logits,
1512
+ past_key_values=transformer_outputs.past_key_values,
1513
+ hidden_states=transformer_outputs.hidden_states,
1514
+ attentions=transformer_outputs.attentions,
1515
+ )
1516
+
1517
+ @staticmethod
1518
+ def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
1519
+ """
1520
+ This function is used to re-order the :obj:`past_key_values` cache if
1521
+ :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
1522
+ called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
1523
+ """
1524
+ return tuple(
1525
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1526
+ for layer_past in past
1527
+ )
1528
+
1529
+
1530
+ @add_start_docstrings(
1531
+ """
1532
+ The GPT2 Model transformer with a sequence classification head on top (linear layer).
1533
+
1534
+ :class:`~transformers.GPT2ForSequenceClassification` uses the last token in order to do the classification, as
1535
+ other causal models (e.g. GPT-1) do.
1536
+
1537
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1538
+ :obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
1539
+ row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
1540
+ guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take
1541
+ the last value in each row of the batch).
1542
+ """,
1543
+ GPT2_START_DOCSTRING,
1544
+ )
1545
+ class GPT2ForSequenceClassification(GPT2PreTrainedModel):
1546
+ _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
1547
+
1548
+ def __init__(self, config):
1549
+ super().__init__(config)
1550
+ self.num_labels = config.num_labels
1551
+ self.transformer = GPT2Model(config)
1552
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1553
+
1554
+ self.init_weights()
1555
+
1556
+ # Model parallel
1557
+ self.model_parallel = False
1558
+ self.device_map = None
1559
+
1560
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1561
+ @add_code_sample_docstrings(
1562
+ tokenizer_class=_TOKENIZER_FOR_DOC,
1563
+ checkpoint="microsoft/DialogRPT-updown",
1564
+ output_type=SequenceClassifierOutputWithPast,
1565
+ config_class=_CONFIG_FOR_DOC,
1566
+ )
1567
+ def forward(
1568
+ self,
1569
+ input_ids=None,
1570
+ past_key_values=None,
1571
+ attention_mask=None,
1572
+ token_type_ids=None,
1573
+ position_ids=None,
1574
+ head_mask=None,
1575
+ inputs_embeds=None,
1576
+ labels=None,
1577
+ use_cache=None,
1578
+ output_attentions=None,
1579
+ output_hidden_states=None,
1580
+ return_dict=None,
1581
+ ):
1582
+ r"""
1583
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
1584
+ Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
1585
+ config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
1586
+ If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1587
+ """
1588
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1589
+
1590
+ transformer_outputs = self.transformer(
1591
+ input_ids,
1592
+ past_key_values=past_key_values,
1593
+ attention_mask=attention_mask,
1594
+ token_type_ids=token_type_ids,
1595
+ position_ids=position_ids,
1596
+ head_mask=head_mask,
1597
+ inputs_embeds=inputs_embeds,
1598
+ use_cache=use_cache,
1599
+ output_attentions=output_attentions,
1600
+ output_hidden_states=output_hidden_states,
1601
+ return_dict=return_dict,
1602
+ )
1603
+ hidden_states = transformer_outputs[0]
1604
+ logits = self.score(hidden_states)
1605
+
1606
+ if input_ids is not None:
1607
+ batch_size, sequence_length = input_ids.shape[:2]
1608
+ else:
1609
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1610
+
1611
+ assert (
1612
+ self.config.pad_token_id is not None or batch_size == 1
1613
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
1614
+ if self.config.pad_token_id is None:
1615
+ sequence_lengths = -1
1616
+ else:
1617
+ if input_ids is not None:
1618
+ sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
1619
+ else:
1620
+ sequence_lengths = -1
1621
+ logger.warning(
1622
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1623
+ f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1624
+ )
1625
+
1626
+ pooled_logits = logits[range(batch_size), sequence_lengths]
1627
+
1628
+ loss = None
1629
+ if labels is not None:
1630
+ if self.num_labels == 1:
1631
+ # We are doing regression
1632
+ loss_fct = MSELoss()
1633
+ loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
1634
+ else:
1635
+ loss_fct = CrossEntropyLoss()
1636
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1637
+
1638
+ if not return_dict:
1639
+ output = (pooled_logits,) + transformer_outputs[1:]
1640
+ return ((loss,) + output) if loss is not None else output
1641
+
1642
+ return SequenceClassifierOutputWithPast(
1643
+ loss=loss,
1644
+ logits=pooled_logits,
1645
+ past_key_values=transformer_outputs.past_key_values,
1646
+ hidden_states=transformer_outputs.hidden_states,
1647
+ attentions=transformer_outputs.attentions,
1648
+ )
1649
+
1650
+
1651
+ @add_start_docstrings(
1652
+ """
1653
+ GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1654
+ Named-Entity-Recognition (NER) tasks.
1655
+ """,
1656
+ GPT2_START_DOCSTRING,
1657
+ )
1658
+ class GPT2ForTokenClassification(GPT2PreTrainedModel):
1659
+ def __init__(self, config):
1660
+ super().__init__(config)
1661
+ self.num_labels = config.num_labels
1662
+
1663
+ self.transformer = GPT2Model(config)
1664
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1665
+ classifier_dropout = config.classifier_dropout
1666
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1667
+ classifier_dropout = config.hidden_dropout
1668
+ else:
1669
+ classifier_dropout = 0.1
1670
+ self.dropout = nn.Dropout(classifier_dropout)
1671
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1672
+
1673
+ self.init_weights()
1674
+
1675
+ # Model parallel
1676
+ self.model_parallel = False
1677
+ self.device_map = None
1678
+
1679
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1680
+ @add_code_sample_docstrings(
1681
+ tokenizer_class=_TOKENIZER_FOR_DOC,
1682
+ checkpoint="microsoft/DialogRPT-updown",
1683
+ output_type=TokenClassifierOutput,
1684
+ config_class=_CONFIG_FOR_DOC,
1685
+ )
1686
+ def forward(
1687
+ self,
1688
+ input_ids=None,
1689
+ past_key_values=None,
1690
+ attention_mask=None,
1691
+ token_type_ids=None,
1692
+ position_ids=None,
1693
+ head_mask=None,
1694
+ inputs_embeds=None,
1695
+ labels=None,
1696
+ use_cache=None,
1697
+ output_attentions=None,
1698
+ output_hidden_states=None,
1699
+ return_dict=None,
1700
+ ):
1701
+ r"""
1702
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
1703
+ Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
1704
+ config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
1705
+ If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1706
+ """
1707
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1708
+
1709
+ transformer_outputs = self.transformer(
1710
+ input_ids,
1711
+ past_key_values=past_key_values,
1712
+ attention_mask=attention_mask,
1713
+ token_type_ids=token_type_ids,
1714
+ position_ids=position_ids,
1715
+ head_mask=head_mask,
1716
+ inputs_embeds=inputs_embeds,
1717
+ use_cache=use_cache,
1718
+ output_attentions=output_attentions,
1719
+ output_hidden_states=output_hidden_states,
1720
+ return_dict=return_dict,
1721
+ )
1722
+
1723
+ hidden_states = transformer_outputs[0]
1724
+ hidden_states = self.dropout(hidden_states)
1725
+ logits = self.classifier(hidden_states)
1726
+
1727
+ loss = None
1728
+ if labels is not None:
1729
+ loss_fct = CrossEntropyLoss()
1730
+ # Only keep active parts of the loss
1731
+ if attention_mask is not None:
1732
+ active_loss = attention_mask.view(-1) == 1
1733
+ active_logits = logits.view(-1, self.num_labels)
1734
+ active_labels = torch.where(
1735
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
1736
+ )
1737
+ loss = loss_fct(active_logits, active_labels)
1738
+ else:
1739
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1740
+
1741
+ if not return_dict:
1742
+ output = (logits,) + transformer_outputs[2:]
1743
+ return ((loss,) + output) if loss is not None else output
1744
+
1745
+ return TokenClassifierOutput(
1746
+ loss=loss,
1747
+ logits=logits,
1748
+ hidden_states=transformer_outputs.hidden_states,
1749
+ attentions=transformer_outputs.attentions,
1750
+ )
bigscience/jz/.gitignore ADDED
@@ -0,0 +1,133 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .env
106
+ .venv
107
+ env/
108
+ venv/
109
+ ENV/
110
+ env.bak/
111
+ venv.bak/
112
+
113
+ # Spyder project settings
114
+ .spyderproject
115
+ .spyproject
116
+
117
+ # Rope project settings
118
+ .ropeproject
119
+
120
+ # mkdocs documentation
121
+ /site
122
+
123
+ # mypy
124
+ .mypy_cache/
125
+ .dmypy.json
126
+ dmypy.json
127
+
128
+ # Pyre type checker
129
+ .pyre/
130
+
131
+ # Slurm job output and error
132
+ *.err
133
+ *.out
bigscience/jz/README.md ADDED
@@ -0,0 +1,27 @@
1
+ # jay-z
2
+
3
+ Jean Zay aka JZ pronounced "Jay-Z"
4
+
5
+ This section of the repo is all about how things are done on JZ.
6
+
7
+ Main documents:
8
+
9
+ - [Compute Resources](./compute-resources.md)
10
+ - [JZ Specs](./hpc-specs.md)
11
+ - [Framework-specific notes](./frameworks/)
12
+ - [Model-specific Instructions](./archs/)
13
+
14
+ Code:
15
+ - [Work Env and Setup](./envs/README.md)
16
+ - [SLURM scripts](./scripts/)
17
+ - [Config files](./configs/)
18
+
19
+ Tools:
20
+ - [SLURM HowTo](./slurm/)
21
+ - [Various Tools](./tools/)
22
+
23
+ General JZ Docs:
24
+
25
+ - HF Internal: https://github.com/huggingface/conf/wiki/JZ
26
+ - Official: http://www.idris.fr/eng/jean-zay/
27
+ - Collaborative doc: https://jean-zay-doc.readthedocs.io/en/latest/
bigscience/jz/compute-resources.md ADDED
@@ -0,0 +1,190 @@
1
+ # Compute Resources
2
+
3
+ ## Login Instance
4
+
5
+ This is the shell you get into when ssh'ing from outside.
6
+
7
+ - Networked (except ssh to outside)
8
+ - 1 core per user
9
+ - 5 GB of RAM per user
10
+ - 30 min of CPU time per process
11
+
12
+ ## Pre/post processing Instance
13
+
14
+ Activated with `--partition=prepost`
15
+
16
+ - Networked
17
+ - only 4 nodes
18
+ - 2 to 20 hours
19
+ - No limitations of the login shell
20
+ - 1x V100-16GB
21
+ - The computing hours are not deducted from your allocation
22
+
23
+ to request:
24
+ ```
25
+ srun --pty --partition=prepost --account=six@cpu --nodes=1 --ntasks=1 --cpus-per-task=10 --hint=nomultithread --time=1:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
26
+ ```
27
+
28
+ or, to work interactively there, `srun` into the box (though you have no control over which of the 4 you get):
29
+
30
+ ```
31
+ srun -p prepost -A six@cpu --time=20:00:00 --pty bash
32
+ ```
33
+
34
+ To choose a specific box (if some are too overloaded by other users), one can ssh directly to that partition via:
35
+ ```
36
+ ssh jean-zay-pp # from inside
37
+ ssh jean-zay-pp.idris.fr # from outside
38
+ ```
39
+ There are 4 boxes, so `jean-zay-pp1`, ..., `jean-zay-pp4`. It's possible that the higher-numbered boxes have fewer users, but not necessarily.
40
+
41
+ In this case there is no need to do SLURM.
42
+
43
+ But in this approach any running process will be killed after only 30min, just like on the login shell. The only difference seems to be that more CPU usage is allowed here before the process gets killed than on the login shell.
44
+
45
+ Note: `--partition=compil` also has internet access, but you can't ssh there.
46
+
47
+ In general the `compil` partition is usually less busy than `prepost`.
48
+
49
+
50
+ ## GPU Instances
51
+
52
+ - No network to outside world
53
+ - 160 GB of usable memory. The memory allocation is 4 GB per reserved CPU core if hyperthreading is deactivated (`--hint=nomultithread`). So max per node is `--cpus-per-task=40`
54
+
55
+ To select this type of partition use `--account=six@gpu`.
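+
+ For example, a minimal sketch of an interactive request for a whole GPU node with all 40 cores (and thus the full 160 GB of CPU memory) - the `--gres=gpu:4` value is an assumption and should match however many GPUs the target node actually has:
+ ```
+ srun --pty --account=six@gpu --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=1:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
+ ```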
56
+
57
+
58
+ ## CPU Instances
59
+
60
+ - All cpus of the same partition are the same
61
+ - Different partitions are likely to have different cpus
62
+
63
+ For example on `gpu_p1` partitions (4x v100-32gb)
64
+
65
+ ```
66
+ $ lscpu | grep name
67
+ Model name: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
68
+ ```
69
+
70
+ To select this type of partition use `--account=six@cpu`.
71
+
72
+
73
+ ## Quotas
74
+
75
+ Group/project (`six`):
76
+
77
+ - `$six_ALL_CCFRSCRATCH` - 400TB / ??? inodes fastest (full SSD), → files removed after 30 days without access
78
+ - `$six_ALL_CCFRWORK` - 25TB / 500k inodes (slower than SCRATCH) → sources, constantly used input/output files
79
+ - `$six_ALL_CCFRSTORE` - 100TB / 100k inodes (slow) → for long term storage in tar files (very few inodes!)
80
+ - `/gpfsssd/worksf/projects/rech/six/commun/` - 1TB / 3M inodes → for conda and python git clones that take tens of thousands of inodes
81
+
82
+ Personal:
83
+
84
+ - `$HOME` - 3GB / 150k inodes (for small files)
85
+ - `$SCRATCH` - fastest (full SSD), no quota, files removed after 30 days without access
86
+ - `$WORK` - Shared with the `$six_ALL_CCFRWORK` quota, that is `du -sh $six_ALL_CCFRWORK/..`
87
+ - `$STORE` - Shared with the `$six_ALL_CCFRSTORE` quota, that is `du -sh $six_ALL_CCFRSTORE/..`
88
+
89
+ Note that the project's WORK and STORE group quotas include all of the project users' personal WORK and STORE usage, respectively.
90
+
91
+ [Detailed information](http://www.idris.fr/eng/jean-zay/cpu/jean-zay-cpu-calculateurs-disques-eng.html)
92
+
93
+ Checking usage:
94
+ ```
95
+ idrquota -m # $HOME @ user
96
+ idrquota -s -p six # $STORE @ shared (this is updated every 30min)
97
+ idrquota -w -p six # $WORK @ shared
98
+ ```
99
+
100
+
101
+ If you prefer the easy way, here is an alias to add to `~/.bashrc`:
102
+ ```
103
+ alias dfi=' \
104
+ echo \"*** Total \(six\) ***\"; \
105
+ idrquota -w -p six; \
106
+ idrquota -s -p six; \
107
+ echo SCRATCH: $(du -hs /gpfsscratch/rech/six/ | cut -f1) \(out of 400TB\); \
108
+ echo WORKSF: $(du -hs /gpfsssd/worksf/projects/rech/six | cut -f1) \(out of 2TB\); \
109
+ echo WORKSF: $(du -hs --inodes /gpfsssd/worksf/projects/rech/six | cut -f1) inodes \(out of 3M\); \
110
+ echo; \
111
+ echo \"*** Personal ***\"; \
112
+ idrquota -m; \
113
+ echo WORK: $(du -hs $WORK | cut -f1); \
114
+ echo WORK: $(du -hs --inodes $WORK | cut -f1) inodes; \
115
+ echo STORE: $(du -hs $STORE | cut -f1); \
116
+ echo STORE: $(du -hs --inodes $STORE | cut -f1) inodes; \
117
+ echo SCRATCH: $(du -hs $SCRATCH | cut -f1); \
118
+ echo SCRATCH: $(du -hs --inodes $SCRATCH | cut -f1) inodes; \
119
+ '
120
+ ```
121
+ This includes the report on usage of personal WORK and SCRATCH partitions.
122
+
123
+
124
+
125
+ ## Directories
126
+
127
+ - `$six_ALL_CCFRSCRATCH` - for checkpoints - make sure to copy important ones to WORK or tarball to STORE
128
+ - `$six_ALL_CCFRWORK` - for everything else
129
+ - `$six_ALL_CCFRSTORE` - for long term storage in tar files (very few inodes!)
130
+ - `/gpfsssd/worksf/projects/rech/six/commun/` - for conda and python git clones that take tens of thousands of inodes - it's a small partition with a huge number of inodes. 1TB and 3M inodes.
131
+ XXX: update this and above once env var was created.
132
+
133
+
134
+ More specifically:
135
+
136
+ - `$six_ALL_CCFRWORK/cache_dir` - `CACHE_DIR` points here
137
+ - `$six_ALL_CCFRWORK/checkpoints` - symlink to `$six_ALL_CCFRWORK/checkpoints` - point slurm scripts here
138
+ - `$six_ALL_CCFRWORK/code` - clones of repos we use as source (`transformers`, `megatron-lm`, etc.)
139
+ - `$six_ALL_CCFRWORK/conda` - our production conda environment
140
+ - `$six_ALL_CCFRWORK/datasets` - cached datasets (normally under `~/.cache/huggingface/datasets`)
141
+ - `$six_ALL_CCFRWORK/datasets-custom` - Manually created datasets are here (do not delete these - some take many hours to build):
142
+ - `$six_ALL_CCFRWORK/downloads` - (normally under `~/.cache/huggingface/downloads`)
143
+ - `$six_ALL_CCFRWORK/envs` - custom scripts to create easy to use environments
144
+ - `$six_ALL_CCFRWORK/models-custom` - manually created or converted models
145
+ - `$six_ALL_CCFRWORK/modules` - (normally under `~/.cache/huggingface/modules`)
146
+
147
+
148
+
149
+ ## Diagnosing the Lack of Disc Space
150
+
151
+ To help diagnose situations when we are short of disc space, here are some tools:
152
+
153
+ Useful commands:
154
+
155
+ * Get current dir's sub-dir usage breakdown sorted by highest usage first:
156
+ ```
157
+ du -ahd1 | sort -rh
158
+ ```
159
+
160
+ * Check that users don't consume too much of their personal `$WORK` space, which goes towards the total WORK space limit.
161
+
162
+ ```
163
+ du -ahd1 $six_ALL_CCFRWORK/.. | sort -rh
164
+ ```
165
+
166
+
167
+ ## Efficient tar-balling to STORE
168
+
169
+ When short on space you don't want to create large tarballs in the WORK dir, instead tar directly to the destination, e.g.
170
+
171
+ e.g. w/o gzip since we already have arrow binary files
172
+
173
+ ```
174
+ mkdir -p $six_ALL_CCFRSTORE/datasets
175
+ cd $six_ALL_CCFRWORK/datasets
176
+ tar -cvf $six_ALL_CCFRSTORE/datasets/openwebtext.tar openwebtext
177
+ ```
178
+
179
+
180
+ e.g. w/ gzip for non-binary data
181
+ ```
182
+ tar -czvf $six_ALL_CCFRSTORE/datasets/openwebtext.tgz openwebtext
183
+ ```
184
+
185
+ If the archive is large and takes some resources to build, `tar` will get killed on the login instance, in which case you have to use one of the beefier instances, e.g.:
186
+ ```
187
+ srun --pty --nodes=1 --ntasks=1 -A six@cpu --cpus-per-task=40 --hint=nomultithread --time=2:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
188
+ tar ...
189
+ ```
190
+ and if that's not enough, submit it as a SLURM batch job, e.g. along the lines of the sketch below.
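+
+ A minimal sketch of such a batch job, reusing the same paths as above (job name, time limit and output file are arbitrary choices):
+ ```
+ #!/bin/bash
+ #SBATCH --job-name=tar-openwebtext    # arbitrary name
+ #SBATCH --account=six@cpu
+ #SBATCH --nodes=1
+ #SBATCH --ntasks=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --hint=nomultithread
+ #SBATCH --time=10:00:00
+ #SBATCH --output=%x-%j.out
+
+ cd $six_ALL_CCFRWORK/datasets
+ tar -cvf $six_ALL_CCFRSTORE/datasets/openwebtext.tar openwebtext
+ ```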
bigscience/jz/configs/dec_only_t5/decoder_only_t5-large.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "architectures": [
3
+ "DecoderOnlyT5LMHeadModel"
4
+ ],
5
+ "d_ff": 5120,
6
+ "d_kv": 64,
7
+ "d_model": 1280,
8
+ "dropout_rate": 0.1,
9
+ "eos_token_id": 1,
10
+ "initializer_factor": 1.0,
11
+ "is_encoder_decoder": true,
12
+ "layer_norm_epsilon": 1e-06,
13
+ "model_type": "decoder_only_t5",
14
+ "num_heads": 20,
15
+ "num_layers": 36,
16
+ "output_past": true,
17
+ "pad_token_id": 0,
18
+ "relative_attention_num_buckets": 64,
19
+ "task_specific_params": {
20
+ },
21
+ "vocab_size": 32128
22
+ }
bigscience/jz/configs/dec_only_t5/decoder_only_t5-medium.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "architectures": [
3
+ "DecoderOnlyT5LMHeadModel"
4
+ ],
5
+ "d_ff": 4096,
6
+ "d_kv": 64,
7
+ "d_model": 1024,
8
+ "dropout_rate": 0.1,
9
+ "eos_token_id": 1,
10
+ "initializer_factor": 1.0,
11
+ "is_encoder_decoder": true,
12
+ "layer_norm_epsilon": 1e-06,
13
+ "model_type": "decoder_only_t5",
14
+ "num_heads": 16,
15
+ "num_layers": 24,
16
+ "output_past": true,
17
+ "pad_token_id": 0,
18
+ "relative_attention_num_buckets": 64,
19
+ "task_specific_params": {
20
+ },
21
+ "vocab_size": 32128
22
+ }
bigscience/jz/configs/dec_only_t5/decoder_only_t5-small.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "architectures": [
3
+ "DecoderOnlyT5LMHeadModel"
4
+ ],
5
+ "d_ff": 3072,
6
+ "d_kv": 64,
7
+ "d_model": 768,
8
+ "dropout_rate": 0.1,
9
+ "eos_token_id": 1,
10
+ "initializer_factor": 1.0,
11
+ "is_encoder_decoder": false,
12
+ "layer_norm_epsilon": 1e-06,
13
+ "model_type": "decoder_only_t5",
14
+ "num_heads": 12,
15
+ "num_layers": 12,
16
+ "output_past": true,
17
+ "pad_token_id": 0,
18
+ "relative_attention_num_buckets": 64,
19
+ "task_specific_params": {
20
+ },
21
+ "vocab_size": 32128
22
+ }
bigscience/jz/envs/README.md ADDED
@@ -0,0 +1,662 @@
1
+ # Work Environment Info
2
+
3
+
4
+ ## Users and Accounts
5
+
6
+ **Accounts:**
7
+
8
+ - `six` - the BigScience allocation - our main allocation
9
+ - `ajs` - the original dynamic access allocation - use it if you can since we still have resources there, but it gives low scheduling priority, so use it primarily for jobs that can afford to be bumped down in the queue for a few days.
10
+
11
+ To switch to `six` as the main project:
12
+ ```
13
+ idrproj -d six
14
+ ```
15
+ and logout/login.
16
+
17
+ Check which projects one belongs to: `idrproj`
18
+
19
+ **Users:**
20
+
21
+ Use `idracct six` to see which username belongs to which real person.
22
+
23
+
24
+ ## First time setup
25
+
26
+ Make sure that your `~/.bashrc` is executed on login by creating `~/.bash_profile` (if you don't already have one) with the following contents:
27
+
28
+ ```
29
+ # if running bash
30
+ if [ -n "$BASH_VERSION" ]; then
31
+ # include .bashrc if it exists
32
+ if [ -f "$HOME/.bashrc" ]; then
33
+ . "$HOME/.bashrc"
34
+ fi
35
+ fi
36
+ ```
37
+
38
+ It of course could have other contents, but make sure the above is there.
39
+
40
+ Now add this to your `~/.bashrc` and run `bash` for the changes to take effect.
41
+
42
+ ```
43
+ # ~/.bashrc: executed by bash(1) for non-login shells.
44
+ [[ $- != *i* ]] && return
45
+
46
+ # Log in with correct group - relevant to all users as we have multiple groups we belong to
47
+ if [[ $(id -gn) != "six" ]]
48
+ then
49
+ newgrp six
50
+ exit
51
+ fi
52
+
53
+ # start production environment:
54
+ # this loads modules, conda and sets all the relevant env vars
55
+ alias start-prod="source $six_ALL_CCFRWORK/start-prod"
56
+
57
+ # our production conda env is here:
58
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
59
+
60
+ # SLURM / Account specific settings
61
+
62
+ # share dirs/files with the group
63
+ umask 0007
64
+
65
+ # specific caches
66
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
67
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
68
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
69
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
70
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
71
+
72
+ # shortcut
73
+ export PROD=$six_ALL_CCFRWORK
74
+
75
+ # handy shortcuts
76
+ alias myjobs="squeue -u `whoami`"
77
+
78
+ # our shared conda base
79
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
80
+ ```
81
+
82
+ Note: wrt `newgrp six` - if you want to use it elsewhere, outside of `~/.bashrc`, you may use the `newgrp - six` syntax instead, but don't use that form in `~/.bashrc` or it will break many things.
83
+
84
+ Also since most of our work is at `$six_ALL_CCFRWORK` you may want to add symlinks:
85
+ ```
86
+ ln -s $six_ALL_CCFRWORK ~/prod
87
+ ln -s $six_ALL_CCFRSCRATCH ~/prod-scratch
88
+ ln -s $six_ALL_CCFRSTORE ~/prod-store
89
+ ln -s /gpfsssd/worksf/projects/rech/six/commun ~/prod-worksf
90
+ ```
91
+ and then you can quickly `cd` there w/o needing to type too much, and with the `$PROD` shortcut env var you can now do it in one of 2 ways:
92
+ ```
93
+ cd ~/prod
94
+ cd $PROD
95
+ ```
96
+
97
+ Some users prefer to use the env vars, so let's try to not expect the symlinks to be there for everybody.
98
+
99
+ If you intend to use `gsutil`, add the following lines:
100
+
101
+ ```
102
+ if [ -f '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/path.bash.inc' ]; then . '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/path.bash.inc'; fi
103
+ if [ -f '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/completion.bash.inc' ]; then . '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/completion.bash.inc'; fi
104
+ ```
105
+
106
+ Without them, `gsutil` on Jean Zay fails with a hard-to-debug `TypeError: argument should be integer or bytes-like object, not 'str'` error.
107
+
108
+ ## Production environment
109
+
110
+ In order to use the production environment, run:
111
+
112
+ ```
113
+ start-prod
114
+ ```
115
+ which will:
116
+ - setup env vars
117
+ - configure nice git-prompt with lots of useful info built in
118
+ - load the right `module`s
119
+ - activate our custom production conda environment which has everything in it
120
+
121
+ so basically use it when running production scripts.
122
+
123
+ The alias should have been set in `~/.bashrc` as instructed above.
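+
+ In SLURM jobs, where the interactive alias is not available, the same environment can be loaded by sourcing the file directly - a minimal sketch:
+ ```
+ # inside an sbatch script, before running anything:
+ source $six_ALL_CCFRWORK/start-prod
+
+ # or, for an interactive shell on a compute node:
+ srun --pty ... bash --rcfile $six_ALL_CCFRWORK/start-prod
+ ```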
124
+
125
+ Note: the fancy [bash-git-prompt](https://github.com/magicmonty/bash-git-prompt) tells you which conda env you are in, which branch you are in and a ton of useful git info, and it was extended to tell you whether you're on the login instance (prefix `0-1`) or on a GPU instance, where it shows something like `4-40` - the 2 numbers stand for `${SLURM_NNODES}-${SLURM_CPUS_PER_TASK}` - so you know what `srun` configuration you're logged into (the login shell gets no nodes, 0 gpus and 1 cpu, hence `0-1`).
126
+
127
+ The production conda env `hf-prod` is also already set up, so you don't need to do anything, but here are some details on how it was done should you want to know.
128
+
129
+ Our shared production conda env is at `$six_ALL_CCFRWORK/conda`. You can make it visible either with:
130
+ ```
131
+ conda config --append envs_dirs $six_ALL_CCFRWORK/conda
132
+ ```
133
+ which will add this path to `~/.condarc` or use:
134
+ ```
135
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
136
+ ```
137
+ in your `~/.bashrc`.
138
+
139
+ You can use it for anything but please don't install anything into it (unless coordinating with others), as we want this to be a reliable environment for all to share.
140
+
141
+ Additionally you will most likely want to do:
142
+
143
+ ```
144
+ mv ~/.conda ~/.conda-old
145
+ ln -s $six_ALL_CCFRWORK/.conda ~/.conda
146
+ ```
147
+
148
+ because otherwise conda will try to use your HOME dir, which is only 3GB in size. You can then nuke `~/.conda-old` or move it elsewhere.
149
+
150
+
151
+
152
+
153
+ ## Creating production conda env
154
+
155
+ **Do not run any of the instructions in this section**. Please co-ordinate any changes to this environment on #bigscience-jz on slack since many users use it for their experiments. If you want to create your custom conda env, please read the following sections instead.
156
+
157
+ If the production environment got broken, here is how it can be re-built.
158
+
159
+ This should be done on a login instance, since we need the network.
160
+
161
+ ```
162
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
163
+
164
+ conda create -y -n hf-prod python=3.8
165
+ conda activate hf-prod
166
+
167
+ # pt-1.10.1 / cuda 11.3
168
+ conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
169
+ pip install deepspeed
170
+
171
+ cd $six_ALL_CCFRWORK/code/transformers
172
+ pip install -e .[dev]
173
+
174
+ cd $six_ALL_CCFRWORK/code/Megatron-DeepSpeed
175
+ pip install -r requirements.txt
176
+
177
+ cd $six_ALL_CCFRWORK/code/deepspeed
178
+ ./build.sh
179
+
180
+ # to build custom tokenizers make sure that if run on JZ your `~/.cargo/config.toml` contains the following:
181
+ [net]
182
+ git-fetch-with-cli = true
183
+
184
+ # if needed first:
185
+ # git clone https://github.com/huggingface/tokenizers $six_ALL_CCFRWORK/code/tokenizers
186
+ cd $six_ALL_CCFRWORK/code/tokenizers
187
+ git checkout bigscience_fork
188
+ module load rust
189
+ pip install setuptools_rust
190
+ pip install -e bindings/python
191
+ ```
192
+
193
+ while we are going to override some of these with our custom installs, we first install these normally to get all the dependencies right.
194
+
195
+ Then finally, to build apex you need a non-login instance, since the build is very demanding on resources and will get killed on the login instance:
196
+
197
+ ```
198
+ srun --pty -A six@cpu --qos=qos_cpu-dev --nodes=1 --ntasks=1 --cpus-per-task=10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
199
+ cd $six_ALL_CCFRWORK/code/apex
200
+ ./build.sh
201
+ ```
202
+ Note: if using a no-gpu instance to build `apex` it will warn that it can't detect any GPUs but will cross-compile for several archs. But you could also tell it to build for V100 and A100 explicitly by simply adding the desired archs:
203
+
204
+ ```
205
+ TORCH_CUDA_ARCH_LIST="7.0 8.0" pip install ...
206
+ ```
207
+
208
+ ## Personal environment
209
+
210
+ You can use these dirs, which are your private spaces:
211
+
212
+ - `$WORK`
213
+ - `$SCRATCH`
214
+ - `$STORE`
215
+
216
+ So you will probably want to mimic the production env.
217
+
218
+ We also agreed to use
219
+
220
+ ```
221
+ ln -s $WORK ~/user
222
+ ln -s $SCRATCH ~/user-scratch
223
+ ln -s $STORE ~/user-store
224
+ ```
225
+ and then you can quickly `cd` there w/o needing to type too much:
226
+ ```
227
+ cd ~/user
228
+ ```
229
+
230
+ Since we are going to use `~/user/...` in scripts, it now should be possible to re-use our scripts w/o modifying them. To change the script to use the production setup, it'll be just `s/user/prod/`.
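+
+ For example (with `my-script.slurm` standing in for whatever script you are converting):
+ ```
+ sed -i 's|~/user/|~/prod/|g' my-script.slurm
+ ```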
231
+
232
+
233
+
234
+ ## Custom private conda env
235
+
236
+ First follow the instructions for [Production environment](#production-environment), which should have already set up most things and made it very easy to add your custom conda env.
237
+
238
+ If wanting to work with variations of packages, create your own conda env, e.g. env `stas`:
239
+
240
+ ```
241
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
242
+
243
+ conda create -y -n stas python=3.8
244
+ conda activate stas
245
+ conda install pytorch torchvision cudatoolkit=11.3 -c pytorch-lts -c nvidia
246
+ pip install deepspeed
247
+
248
+ cd ~/user/code/transformers
249
+ pip install -e .[dev]
250
+
251
+ cd ~/user/code/Megatron-Deepspeed
252
+ pip install -r requirements.txt
253
+
254
+ cd ~/user/code/deepspeed
255
+ ./build.sh
256
+
257
+ cd ~/user/code/apex
258
+ ./build.sh
259
+ ```
260
+
261
+ See the special note on how to build apex in [Creating production conda env](#creating-production-conda-env).
262
+
263
+
264
+ ## Login node
265
+
266
+ If the login node is heavily used by someone, one can switch to another node
267
+
268
+ `host jean-zay.idris.fr` will tell you which login nodes are currently in the alias
269
+
270
+ If the DNS round robin doesn't send you to another login node, you can target a specific login node (`jean-zayN.idris.fr`, with N from 1 to 5, though some might not be available, so using the alias is always better).
271
+
272
+
273
+ ## Dealing with running out of disc space
274
+
275
+ Find out where disc space is used up:
276
+ ```
277
+ du -ahd1 $six_ALL_CCFRWORK | sort -rh
278
+ du -ahd1 $six_ALL_CCFRSTORE | sort -rh
279
+ ```
280
+
281
+ Find out where inodes are used up:
282
+ ```
283
+ du -ahd1 --inodes $six_ALL_CCFRWORK | sort -rh
284
+ du -ahd1 --inodes $six_ALL_CCFRSTORE | sort -rh
285
+ ```
286
+
287
+ Some busy git clones can be pruned of unused files with `git gc`, e.g. to prune a dir with multiple clones as sub-dirs:
288
+
289
+ ```
290
+ cd $six_ALL_CCFRWORK/code
291
+ du -hs .
292
+ du -hs --inodes .
293
+ find . -mindepth 1 -maxdepth 1 -type d -exec bash -c "cd '{}' && git gc" +
294
+ du -hs .
295
+ du -hs --inodes .
296
+ ```
297
+
298
+ ## Finding things
299
+
300
+ Our WORK is indexed by mlocate, after adding this alias:
301
+ ```
302
+ alias locate="/usr/bin/locate -d $ALL_CCFRWORK/lib/mlocate/work.db:$ALL_CCFRWORK/lib/mlocate/worksf.db"
303
+ ```
304
+ You can now do:
305
+ ```
306
+ locate -i megatron
307
+ ```
308
+ (remove `-i` if you want case-sensitive search)
309
+
310
+ the index is being updated by `$six_ALL_CCFRWORK/bin/mlocate-update` in a crontab job in `$six_ALL_CCFRWORK/cron/cron.daily/mlocate-update.slurm`.
311
+
312
+ For more details on the emulated crontab job see: [crontab](../crontab/README.md).
313
+
314
+
315
+ ## Syncing the perms
316
+
317
+ We use `umask 0007` in `~/.bashrc` so that the shared dirs get `g+rwx` perms and we can all operate on them, but it doesn't always help. When a tarball is extracted it will often retain the original perms on the files, so if those didn't have `w` for the group it'll remain that way. Therefore, occasionally, and especially after installing a new dataset, please run the commands below.
318
+
319
+ We also need `g+s` on dirs, so that new dirs and files created in the sub-dir get created with the same group as the parent dir (e.g. important when `scp`-ing from outside, but also in many other cases).
320
+
321
+ Also note that `chgrp` removes the sgid bit, so it has to be restored immediately afterwards - do not run `chgrp` on its own!
322
+
323
+ For some reason group perms go wrong at times. We need all dirs to be `g+rwxs`, all files to be `g+rw`, and everything to belong to the `six` group, so here is how to fix things back to normal:
324
+
325
+ ```
326
+ find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
327
+ find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
328
+ find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
329
+ find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
330
+ find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
331
+ find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
332
+ find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
333
+ find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
334
+ ```
335
+
336
+ If somehow we lost the sgid bit on some dirs, to restore just those:
337
+ ```
338
+ find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
339
+ find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
340
+ find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
341
+ find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
342
+ ```
343
+ That said, the set of commands above should have already done the right thing, as they include `g+rwxs`.
344
+
345
+
346
+
347
+ ## Activate production script
348
+
349
+ This can be safely added at the beginning of slurm scripts:
350
+
351
+ ```
352
+ source $six_ALL_CCFRWORK/start-prod
353
+ ```
354
+
355
+ And if you made the symlink from your `$HOME`, interactively it's easier to remember to type:
356
+
357
+ ```
358
+ source ~/start-prod
359
+ ```
360
+
361
+
362
+
363
+ ## Building things from source
364
+
365
+
366
+ The building should happen on a beefy instance - otherwise the build processes tend to get killed.
367
+
368
+ Normally use the free `-p compil` partition:
369
+
370
+ ```
371
+ srun --pty -A six@cpu -p compil --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
372
+ ```
373
+
374
+ if it doesn't yield, use the `idrsrv` ones by adding `-c 10` (10 cpu cores):
375
+ ```
376
+ srun --pty -A six@cpu -p compil -c 10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
377
+ ```
378
+
379
+ but if it has to be really fast, use a dedicated instance with pre-allocated cpu cores:
380
+ ```
381
+ srun --pty -A six@cpu --nodes=1 --ntasks=1 --cpus-per-task=10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
382
+ ```
383
+
384
+ same with 1 gpu if the build env requires one (neither `apex` nor `deepspeed` require one):
385
+ ```
386
+ srun --pty -A six@gpu --nodes=1 --ntasks=1 --cpus-per-task=10 --gres=gpu:1 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
387
+ ```
388
+
389
+ `/tmp` is tiny on gpu instances, and apex at least needs a big `/tmp` folder, so point `TMPDIR` at a larger location as shown below.
390
+
391
+
392
+ Quick instructions (detailed listings follow):
393
+
394
+ ```
395
+ export TMPDIR=$six_ALL_CCFRWORK/tmp
396
+ mkdir -p $TMPDIR
397
+
398
+ cd $six_ALL_CCFRWORK/code/deepspeed
399
+ ./build.sh
400
+
401
+ cd $six_ALL_CCFRWORK/code/apex
402
+ ./build.sh
403
+ ```
404
+
405
+
406
+ ### deepspeed
407
+
408
+
409
+ To pre-build deepspeed (as compared to have it built via JIT at runtime):
410
+
411
+ ```
412
+ export TMPDIR=$six_ALL_CCFRWORK/tmp
413
+ mkdir -p $TMPDIR
414
+ cd $six_ALL_CCFRWORK/code/deepspeed
415
+ ./build.sh
416
+ ```
417
+
418
+ what's in the build:
419
+ ```
420
+ $ cat build.sh
421
+ #!/bin/bash
422
+
423
+ rm -rf build
424
+
425
+ time TORCH_CUDA_ARCH_LIST="7.0 8.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log
426
+ ```
427
+
428
+ ### apex
429
+
430
+ To build apex (needed by megatron-lm):
431
+
432
+ build:
433
+ ```
434
+ cd $six_ALL_CCFRWORK/code/apex
435
+ ./build.sh
436
+ ```
437
+
438
+ what's in the build:
439
+ ```
440
+ $ cat build.sh
441
+ #!/bin/bash
442
+
443
+ pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log
444
+ ```
445
+
446
+ Note that since we are using pt/cuda-11.1 and JZ has cuda-11.2, apex won't build unless we skip the version check (which is totally unnecessary - things work just fine), so should you reset the clone and remove the local patch, you can restore it with this diff: https://github.com/NVIDIA/apex/issues/988#issuecomment-726343453
447
+
448
+
449
+
450
+ ## Aliases
451
+
452
+ ```
453
+ # autogenerate the hostfile for deepspeed
454
+ # 1. deals with: SLURM_JOB_NODELIST in either of 2 formats:
455
+ # r10i1n8,r10i2n0
456
+ # r10i1n[7-8]
457
+ # 2. and relies on SLURM_STEP_GPUS=0,1,2... to get how many gpu slots per node
458
+ #
459
+ # usage:
460
+ # makehostfile > hostfile
461
+ function makehostfile() {
462
+ perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"};
463
+ $slots=4 if $slots==0; # workaround 4 gpu machines
464
+ while ($ENV{"SLURM_JOB_NODELIST"} =~ m/(\w+)(?:\[([\d-,]+)\])?,?/msg) {
465
+ $b=$1; $s=$2||q[""]; $s=~s/-/../g;
466
+ print map { "$b$_ slots=$slots\n" } eval $s }'
467
+ }
468
+ ```
469
+
470
+ ```
471
+ # auto-extract the master node's address from: SLURM_JOB_NODELIST which may contain r10i1n3,r10i1n[5-8],r10i1n7
472
+ # so here we want r10i1n3
473
+ function get_master_address() {
474
+ perl -le '$_=$ENV{"SLURM_JOB_NODELIST"}; s/,.*//; s/-.*//; s/\[//; print'
475
+ }
476
+ ```
477
+
478
+ Better solutions for the same tasks as above:
479
+
480
+ ```
481
+ # autogenerate the hostfile for deepspeed
482
+ # 1. deals with: SLURM_JOB_NODELIST in either of 2 formats:
483
+ # r10i1n8,r10i2n0
484
+ # r10i1n[7-8]
485
+ # 2. and relies on SLURM_STEP_GPUS=0,1,2... to get how many gpu slots per node
486
+ #
487
+ # usage:
488
+ # makehostfile > hostfile
489
+ function makehostfile() {
490
+ perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"};
491
+ $slots=8 if $slots==0; # workaround 8 gpu machines
492
+ @nodes = split /\n/, qx[scontrol show hostnames $ENV{"SLURM_JOB_NODELIST"}];
493
+ print map { "$_ slots=$slots\n" } @nodes'
494
+ }
495
+ ```
496
+
497
+ ```
498
+ # auto-extract the master node's address from: SLURM_JOB_NODELIST which may contain r10i1n3,r10i1n[5-8],r10i1n7
499
+ # so here we want r10i1n3
500
+ function get_master_address() {
501
+ echo $(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
502
+ }
503
+ ```
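+
+ A minimal usage sketch from inside a slurm script (the port number is just an arbitrary example):
+
+ ```
+ makehostfile > hostfile
+ MASTER_ADDR=$(get_master_address)
+ MASTER_PORT=6000
+ ```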
504
+
505
+
506
+ ## Troubleshooting
507
+
508
+ ### pip install
509
+
510
+ If `pip` is trying to install into your local `~/.local` folder, it's because that `pip` comes earlier in `$PATH` than
511
+ `$six_ALL_CCFRWORK/conda/hf-prod/bin/` - reorder `$PATH` so that the latter comes first - or better, don't install any python things locally at all and use conda for that. Check with `which pip` - it should point to `$six_ALL_CCFRWORK/conda/hf-prod/bin/pip`.
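+
+ A minimal sketch for checking and fixing the ordering in the current shell (assuming `hf-prod` is the env you want):
+
+ ```
+ which pip python
+ # if either points into ~/.local, push the conda env to the front of PATH for this shell:
+ export PATH=$six_ALL_CCFRWORK/conda/hf-prod/bin:$PATH
+ hash -r   # make bash forget previously looked-up binaries
+ ```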
512
+
513
+
514
+
515
+ ### Running `py-spy` diagnostics on multiple nodes at once
516
+
517
+ To do some monitoring of multiple nodes running an `srun` job:
518
+
519
+ (This is just an example of starting a job; most of the time it'll be running already):
520
+ ```
521
+ cd ~/prod/code/tr8b-104B/bigscience/train/tr11-200B-ml/
522
+
523
+ salloc --partition=gpu_p5 --constraint=a100 --nodes=48 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100
524
+
525
+ bash 200B-n40-bf16-mono.slurm
526
+ ```
527
+
528
+ Then in another shell:
529
+
530
+ ```
531
+ squeue -u `whoami` -o "%.16i %.9P %.26j %.8T %.10M %.8l %.6D %.20S %R"
532
+ srun --overlap --jobid=1729333 --gres=gpu:0 --nodes=48 --tasks-per-node=1 --output=trace-%N.out sh -c 'source $six_ALL_CCFRWORK/start-prod; pgrep -P $(pgrep -o python) | xargs -I {} py-spy dump --pid {}' || echo "failed"
533
+ ```
534
+
535
+ This will create a log file per node, e.g. `trace-jean-zay-iam52.out` which will contain the output of the command on that node.
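+
+ A quick way to skim the per-node dumps (the frame name here is just a hypothetical example of something to search for):
+
+ ```
+ ls trace-*.out | wc -l            # how many nodes reported back
+ grep -l "train_step" trace-*.out  # which nodes mention that frame in their dump
+ ```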
536
+
537
+ Notes:
538
+ - adjust `--jobid` to the desired job (output of `squeue`). If using a job array and the job id looks like `1728318_2` first translate the virtual JobId into an actual JobID:
539
+ ```
540
+ scontrol show job 1728318_2 | perl -nle 'm/JobId=(\d+)/ && print $1'
541
+ ```
542
+ - adjust `--nodes=48` to match the same setting as the original `salloc` or `srun` command
543
+ - `--overlap` allows a new job to run on nodes allocated by another job.
544
+
545
+ `py-spy`-specific notes:
546
+
547
+ - run the command via `sh`. It may be possible to run `bash`, but I run into `py-spy: Permission denied` - it shouldn't need `sudo` but something in my bash dotfile triggers this problem, even though it doesn't happen if I run bash interactively.
548
+ - `pgrep -P $(pgrep -o python)` will give the immediate children of the launcher - 8 processes per node on A100 - which is what we want most of the time.
549
+ - if you want all children and grandchildren (e.g. dataloader helpers) - can be hundreds of processes! then use just `pgrep python`
550
+
551
+
552
+
553
+ #### using ds_ssh
554
+
555
+ It's a bit tricky and doesn't work for `py-spy` (see the notes in the section above - it seems to be related to `bash`'s dotfiles).
556
+
557
+
558
+ ```
559
+ salloc --partition=gpu_p5 --constraint=a100 --nodes=2 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100
560
+ ```
561
+
562
+ ```
563
+ bash 20B-n2-fp16.slurm
564
+ ```
565
+
566
+ ```
567
+ function makehostfile() {
568
+ perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"};
569
+ $slots=8 if $slots==0; # workaround 8 gpu machines
570
+ @nodes = split /\n/, qx[scontrol show hostnames $ENV{"SLURM_JOB_NODELIST"}];
571
+ print map { "$_ slots=$slots\n" } @nodes'
572
+ }
573
+ makehostfile > hostfile
574
+ ```
575
+
576
+ ```
577
+ ds_ssh -f hostfile "source ~/.pdshrc; nvidia-smi"
578
+ ```
579
+
580
+ The tricky part is to get the remote env loaded. I have a mostly-OK hack, but it doesn't work for `py-spy` - something is wrong in the env.
581
+
582
+ So the special env-loading file is:
583
+ ```
584
+ $ cat ~/.pdshrc
585
+
586
+ source /etc/profile.d/z_modules.sh;
587
+
588
+ #source ~/.bashrc
589
+
590
+ module purge
591
+ #module load pytorch-gpu/py3/1.8.1
592
+ module load nvtop git git-lfs github-cli mc
593
+
594
+ # specific caches
595
+
596
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
597
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
598
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
599
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
600
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
601
+
602
+ ### CONDA ###
603
+
604
+ # >>> conda initialize >>>
605
+ # !! Contents within this block are managed by 'conda init' !!
606
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
607
+ if [ $? -eq 0 ]; then
608
+ eval "$__conda_setup"
609
+ else
610
+ if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
611
+ . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
612
+ else
613
+ export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
614
+ fi
615
+ fi
616
+ unset __conda_setup
617
+ # <<< conda initialize <<<
618
+
619
+ conda activate base
620
+ conda activate /gpfswork/rech/six/commun/conda/py38-pt111
621
+ ```
622
+
623
+ `ds_ssh` uses pdsh behind the scenes.
624
+
625
+ Note that `py-spy` works just fine when actually ssh'ed to the compute node:
626
+
627
+ ```
628
+ ps aux | grep python | egrep -v '(srun|grep)' | grep `whoami` | awk '{print $2}' | xargs -I {} py-spy dump --pid {}
629
+ ```
630
+
631
+ #### using pdsh
632
+
633
+ To access just one running node, it's simpler to use `pdsh` directly.
634
+
635
+ ```
636
+ pdsh -w jean-zay-iam01 "source ~/.pdshrc; nvidia-smi"
637
+ ```
638
+
639
+
640
+ ## Older info
641
+
642
+ Probably of no use any longer, but still here in case it is needed (might move to another file).
643
+
644
+ ## Local resources
645
+
646
+ For your own personal explorations you can either create your own `conda` env or use your local python, which has a few issues, but allows you to continue using JZ's pytorch `module`.
647
+
648
+ `pip install` installs into `$HOME/.local/lib/python3.7/site-packages`; however, system-wide packages may take precedence. For example, to do a `develop` install of transformers use this workaround:
649
+ ```
650
+ git clone https://github.com/huggingface/transformers
651
+ cd transformers
652
+ pip install --user --no-use-pep517 -e .
653
+ ```
654
+
655
+ You may still have to override `PYTHONPATH=$WORK/hf/transformers-master/src` (edit to wherever your clone is) if you want to emulate a `develop` build. Test:
656
+ ```
657
+ export PYTHONPATH=$WORK/hf/transformers-master/src
658
+ python -c "import transformers; print(transformers.__version__)"
659
+ # 4.6.0.dev0
660
+ ```
661
+
662
+ See [`envs`](./envs) for instructions on how to build conda and packages
bigscience/jz/envs/apex/build.sh ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log
4
+
bigscience/jz/envs/deepspeed/build.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ rm -rf build
4
+
5
+ time TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_FUSED_LAMB=1 DS_BUILD_TRANSFORMER=1 DS_BUILD_STOCHASTIC_TRANSFORMER=1 DS_BUILD_UTILS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log
6
+
7
+ # time TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_OPS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log
bigscience/jz/envs/start-prod ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This is a python production script for JZ
2
+ #
3
+ # Activate with:
4
+ #
5
+ # source ./start-prod
6
+ #
7
+ #
8
+
9
+ # if this session isn't run via a login shell, which is the case when running a
10
+ # command which is not shell via ssh, the bash function `module` will be missing.
11
+ # so work around it by emulating part of the login shell that loads modules environment
12
+ # if [ -z $(type -t module) ]
13
+ # then
14
+ # . /etc/profile.d/z_modules.sh
15
+ # fi
16
+ module purge
17
+ module load pytorch-gpu/py3/1.8.1
18
+ module load nvtop git git-lfs github-cli mc
19
+
20
+ # git prompt
21
+ export GIT_PROMPT_ONLY_IN_REPO=0;
22
+ export GIT_PROMPT_THEME="JZPRod"
23
+ source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh
24
+
25
+ # We are using common disk spaces for datasets, caches, and experiment dumps:
26
+ #
27
+ #- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and `$six_ALL_CCFRWORK/datasets`
28
+ #- Experiment dumps -> `$six_ALL_CCFRWORK/experiments`
29
+
30
+ # specific caches
31
+
32
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
33
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
34
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
35
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
36
+
37
+ #export PYTHONPATH=$WORK/hf/transformers-master/src
38
+
39
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
40
+
41
+ ### CONDA ###
42
+
43
+ # >>> conda initialize >>>
44
+ # !! Contents within this block are managed by 'conda init' !!
45
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
46
+ if [ $? -eq 0 ]; then
47
+ eval "$__conda_setup"
48
+ else
49
+ if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
50
+ . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
51
+ else
52
+ export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
53
+ fi
54
+ fi
55
+ unset __conda_setup
56
+ # <<< conda initialize <<<
57
+
58
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
59
+ conda activate base
60
+ conda activate hf-prod
bigscience/jz/envs/start-user ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # user env start script
2
+
3
+ # replace stas with the name of your conda env in this script
4
+
5
+ # if this session isn't run via a login shell, which is the case when running a
6
+ # command which is not shell via ssh, the bash function `module` will be missing.
7
+ # so work around it by emulating part of the login shell that loads modules environment
8
+ #if [ -z $(type -t module) ]
9
+ #then
10
+ # . /etc/profile.d/z_modules.sh
11
+ #fi
12
+ module purge
13
+ module load pytorch-gpu/py3/1.8.1
14
+ module load nvtop git git-lfs github-cli mc
15
+
16
+ # git prompt
17
+ export GIT_PROMPT_ONLY_IN_REPO=0;
18
+ export GIT_PROMPT_THEME="JZPRod"
19
+ source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh
20
+
21
+ # We are using common disk spaces for datasets, caches, and experiment dumps:
22
+ #
23
+ #- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and `$ALL_CCFRWORK/datasets`
24
+ #- Experiment dumps -> `$six_ALL_CCFRWORK/EXPERIMENTS`
25
+
26
+ # specific caches
27
+
28
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
29
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
30
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
31
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
32
+
33
+ #export PYTHONPATH=$WORK/hf/transformers-master/src
34
+
35
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
36
+
37
+
38
+
39
+
40
+ ### CONDA ###
41
+
42
+ # >>> conda initialize >>>
43
+ # !! Contents within this block are managed by 'conda init' !!
44
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
45
+ if [ $? -eq 0 ]; then
46
+ eval "$__conda_setup"
47
+ else
48
+ if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
49
+ . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
50
+ else
51
+ export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
52
+ fi
53
+ fi
54
+ unset __conda_setup
55
+ # <<< conda initialize <<<
56
+
57
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
58
+ conda activate base
59
+ conda activate stas
bigscience/jz/envs/workarounds.md ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Workarounds
2
+
3
+ ## Missing certificates
4
+
5
+ Sometimes, some certificates can be missing. It's possible to point to our own local versions of the certificates. You can simply copy them to `$six_ALL_CCFRWORK/etc/ssl/certs/` or any other relevant folder:
6
+ ```bash
7
+ export CURL_CA_BUNDLE=$six_ALL_CCFRWORK/etc/ssl/certs/ca-certificates.crt
8
+ ```
bigscience/jz/frameworks/deepspeed.md ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Deepspeed notes
2
+
3
+ A lot of these collected from chats with Samyam, Shaden and Olatunji
4
+
5
+ ## Should I use the `deepspeed` launcher under slurm?
6
+
7
+ No, it won't work.
8
+
9
+ Instead use:
10
+ ```
11
+ python -u -m torch.distributed.launch \
12
+ --nproc_per_node $GPUS_PER_NODE \
13
+ --nnodes $NNODES \
14
+ --master_addr $MASTER_ADDR \
15
+ --master_port $MASTER_PORT \
16
+ --node_rank $SLURM_PROCID \
17
+ ....
18
+ ```
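+
+ A minimal sketch of deriving these values inside a slurm script (the port is an arbitrary pick, and `GPUS_PER_NODE` has to match the actual hardware):
+
+ ```
+ GPUS_PER_NODE=4
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ ```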
19
+
20
+ ## on 8 gpus I get now: `data_parallel_size: 8, parameter_parallel_size: 8`
21
+
22
+ In this case, seeing that the DP and parameter parallel sizes match means ZeRO will partition across all gpus.
23
+
24
+ ## Memory estimates
25
+
26
+ As each node has about 160GB of memory, the model size you can run with Z2-Offload is about 8-10B parameters per node. Each of those parameters requires 4 bytes each for the fp32 momentum, variance, parameters and gradients, so 16 bytes per parameter, for a total of about 160GB.
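+
+ For example, for a hypothetical 10B-parameter model:
+
+ ```
+ 10B params * 16 bytes/param = 160GB, i.e. about one node's worth of CPU memory
+ ```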
27
+
28
+
29
+ # Pipeline + ZeRO
30
+
31
+ If you're using PP, you'll want to use ZeRO stage 0 or 1. Pipeline parallelism does weird things with gradients that do not play nicely with Z2+. We assert against that when using DS's pipeline parallelism, but I think it's more wild west with Megatron's PP implementation.
32
+
33
+ ```
34
+ train_batch_size=$(($WORLD_SIZE*$MICRO_BATCH_SIZE*$gradient_accumulation_steps))
35
+ ```
36
+
37
+ You want to scale by DP size instead of WORLD_SIZE. Let me write down a bit about batch sizes:
38
+
39
+
40
+ # Megatron + Deepspeed
41
+
42
+
43
+ The `batch_size` in our Megatron scripts is the same thing as micro-batch size. That's the size of each batch of data that comes off the data loader and goes through the various kernels. That's usually what you think of when you talk about batch size (then multiplied by the size of data parallelism)
44
+
45
+ Megatron updated their terminology to match DeepSpeed once they added PP support, which adds the concept of gradient accumulation. Before that, there was no grad accumulation and so the global batch size was assumed to be `DP * batch_size`.
46
+
47
+ So thinking in terms the three axes of parallelism:
48
+
49
+ * Each pipeline processes a `gradient_accumulation_steps` (gas) number of micro-batches per training step. There are as many pipelines as the data parallel dimension, so the global batch size of each training step is `microbatch * gas * DP`
50
+ * Megatron's model parallelism (renamed to tensor model parallelism) is not in the above formula. You can think of it as splitting batches across the MP group.
51
+
52
+ A bit on the various batch size parameters and performance:
53
+
54
+ Increasing micro-batch size increases the arithmetic intensity of individual kernels, increasing throughput and also the memory pressure from activations.
55
+
56
+ Increasing the gradient accumulation steps decreases the bubble overheads of pipeline parallelism. For DeepSpeed's PP algorithm, if you set `gas=8*PP` you should get 90% pipeline efficiency. Theoretical pipeline efficiency is:
57
+
58
+ ```
59
+ efficiency = gas / (gas + PP - 1)
60
+ ```
61
+
62
+ Increasing gas relative to PP will asymptotically approach 100% efficiency as you shrink the pipeline bubble overhead.
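+
+ For example, plugging in a hypothetical `PP=4` with `gas=8*PP=32`:
+
+ ```
+ efficiency = 32 / (32 + 4 - 1) = 32/35 ≈ 0.91
+ ```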
63
+
64
+ PyTorch's PP implementation is based on the GPipe algorithm, which still has a clear divide between forward/backward passes:
65
+
66
+ ![gpipe](images/gpipe.png)
67
+
68
+ Their docs use both chunks/microbatch terminology. I'll use 'mb' for short. The key thing to note is that all the forward passes are done first, then all the backward passes. That means that the pipeline memory overheads (e.g., activations from each mb) are kept around and scale linearly with the number of chunks. Since you increase the number of chunks to decrease PP overheads, you pay a linearly increasing memory cost to improve throughput.
69
+
70
+ DeepSpeed's pipeline parallelism takes another approach, in which the forward/backward passes for different mbs are done in parallel.
71
+
72
+ ![deepspeed pipe](images/deepspeed-pipe.png)
73
+
74
+ After each backward pass completes, the gradient is accumulated into a single gradient buffer and the corresponding activations are freed. The number of mbs in flight at any time is bounded by the dimension of pipeline parallelism, not the number of gradient accumulation steps (same thing as chunks). That means that you can still increase the gas to improve efficiency, but memory overheads stay constant and only scale with the number of pipeline stages.
75
+
76
+ Say you split a model across 20 pipeline stages and want 90% PP efficiency... the GPipe approach will need about 8x more memory for activations because each microbatch has to be kept around until all of the backward passes begin.
77
+
78
+ Activation checkpointing of course reduces activation memory for both, but this applies even with checkpointing each layer. There are also pipeline overheads in which you store the input/output for each mb to pass to the adjacent stages
79
+
80
+ Though let me add, when I'm tuning perf for PP+DP I usually increase the gas first to get rid of the pipeline bubble overhead. Then you can increase the microbatch size to improve efficiency of individual kernels
81
+
82
+
83
+
84
+ ## Tuning experiments
85
+
86
+
87
+ Shaden's approach:
88
+
89
+ - Fix MICRO_BATCH_SIZE=1 until you're set with the model configuration.
90
+ - Use TP_SIZE=GPUS_PER_NODE
91
+ - If using PP, use PP_SIZE=NNODES and PP_CHUNKS at about 8*PP_SIZE. Larger than that won't hurt if you can spare a larger batch size, but there are diminishing returns. PP_CHUNKS=16*PP_SIZE increases efficiency to 94% for example (vs 90%).
92
+ - Increase layers/hidden size until you can't. Load balance is important here: you want the number of layers to be divisible by PP_SIZE, otherwise the entire pipeline slows down.
93
+ . Load balance is important here, you want the number of layers to be divisible by PP_SIZE. Otherwise the entire pipeline slows down
94
+ - You can go back at the end and try to increase MICRO_BATCH_SIZE if you have leftover memory for larger activations. Sometimes I can increase to 2 and get higher throughput
95
+
96
+
97
+ Samyam's approach:
98
+
99
+ - try to tune up the max micro-bs on 1 node model scaled down to a few layers (Same hidden size)
100
+ - experiment in the range of 16 to 64 to get the highest tflops
101
+ - how efficient it's running w/o communications
102
+ - fit on a single node
103
+ - could turn off optimizer step - no communications between gpus
104
+ - one more hyper param to experiment with:
105
+ tiled - turn it on - overlapping communication improvement
bigscience/jz/frameworks/megatron-lm.md ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Megatron-LM Notes and Nuances
2
+
3
+
4
+ ## Configuration
5
+
6
+ - Data Parallel: `data-parallel-size = world_size / (pipeline_model_parallel_size * tensor_model_parallel_size)`
7
+ By default, `pipeline_model_parallel_size=1` and `tensor_model_parallel_size=1`.
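+
+ For example, a hypothetical 64-GPU job with `tensor_model_parallel_size=8` and `pipeline_model_parallel_size=2`:
+
+ ```
+ data-parallel-size = 64 / (2 * 8) = 4
+ ```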
8
+
9
+
10
+ ## Troubleshooting
11
+
12
+ - if megatron hangs in:
13
+
14
+ ```
15
+ >>> done with dataset index builder. Compilation time: 0.107 seconds
16
+ > compiling and loading fused kernels ...
17
+ ```
18
+ do:
19
+ ```
20
+ rm megatron/fused_kernels/build/lock
21
+ ```
22
+ and restart.
23
+
24
+
25
+ ## General Performance Notes
26
+
27
+ NVIDIA paper: https://arxiv.org/abs/2104.04473v2
28
+
29
+ - they used 80GB A100s with 312TFlops/gpu (and achieved about 50% of that, 163TFlops, at the largest model/batch size)
30
+
31
+ - we are using 32GB V100s with 125TFlops/gpu
32
+
33
+ - The DGX-2 clusters used by NVIDIA have 300GBps intra-node connections and 800Gbps inter-node connections
34
+
35
+ - JZ on the other hand has 50GBps intra-node connections and 400Gbps inter-node connections.
36
+
37
+ and the rest of the hardware is less powerful (so if we reach about 35-50TFlops that would be fantastic)
38
+
39
+ Their main scaling table:
40
+
41
+ - model parallel size = tensor model parallel * pipeline model parallel
42
+
43
+ where tensor parallel is 8 at the most
44
+
45
+ So for example for 76B it says MP=32, which means 8 * 4 - so `PP_size=4` and `TP_size=8`
46
+
47
+ Basically use tensor model parallelism within a node, then use pipeline model parallelism for larger models
48
+ - So if MP size <= 8, tensor MP = MP size, pipeline MP = 1
49
+ - Otherwise, tensor MP = 8, pipeline MP = (MP size // 8 )
50
+
51
+ DataParallel isn't in the table, it's:
52
+
53
+ DP = (total number of GPUs // MP size)
54
+
55
+ Here is the main table from the paper with added breakdown of TP/PP/DP:
56
+
57
+ | | | | | | | | | | | | | | |
58
+ | ---: | ----: | -----: | --: | -: | -: | -: | --: | ---: | ---: | -----: | ----: | ----: | -----: |
59
+ | Model | Atten | Hidden | Lay | TP | PP | DP | MP | GPUs | Micro | Global | TFlops | TFlops | PFlops |
60
+ | size | heads | size | ers | | | | | | BS | BS | /GPU | % | Aggreg |
61
+ | 1.7B | 24 | 2304 | 24 | 1 | 1 | 32 | 1 | 32 | 16 | 512 | 137 | 44% | 4.4 |
62
+ | 3.6B | 32 | 3072 | 30 | 2 | 1 | 32 | 2 | 64 | 16 | 512 | 138 | 44% | 8.8 |
63
+ | 7.5B | 32 | 4096 | 36 | 4 | 1 | 32 | 4 | 128 | 16 | 512 | 142 | 46% | 18.2 |
64
+ | 18B | 48 | 6144 | 40 | 8 | 1 | 32 | 8 | 256 | 8 | 1024 | 135 | 43% | 34.6 |
65
+ | 39B | 64 | 8192 | 48 | 8 | 2 | 32 | 16 | 512 | 4 | 1536 | 138 | 44% | 70.8 |
66
+ | 76B | 80 | 10240 | 60 | 8 | 4 | 32 | 32 | 1024 | 2 | 1792 | 140 | 45% | 143.8 |
67
+ | 145B | 96 | 12288 | 80 | 8 | 8 | 24 | 64 | 1536 | 2 | 2304 | 148 | 47% | 227.1 |
68
+ | 310B | 128 | 16384 | 96 | 8 | 16 | 15 | 128 | 1920 | 1 | 2160 | 155 | 50% | 297.4 |
69
+ | 530B | 128 | 20480 | 105 | 8 | 35 | 9 | 280 | 2520 | 1 | 2520 | 163 | 52% | 410.2 |
70
+ | 1T | 160 | 25600 | 128 | 8 | 64 | 6 | 512 | 3072 | 1 | 3072 | 163 | 52% | 502.0 |
71
+ | | | | | | | | | | | | | | |
72
+
73
+
74
+ ## TODO
75
+
76
+ Notes from Jared - to sort:
77
+
78
+ - batch size
79
+
80
+ `--global-batch-size` leads to automatic gradient accumulation. For example, on a 4-gpu node:
81
+
82
+ With only 4-way data parallel, a micro batch size of 16 and a global batch size of 2048, it's going to do gradient accumulation over 32 batches for each iteration.
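+
+ i.e. the number of accumulation steps is just the ratio:
+
+ ```
+ accumulation steps = global-batch-size / (micro-batch-size * DP) = 2048 / (16 * 4) = 32
+ ```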
83
+
84
+ So it's probably best not to use this argument, unless it's thought through.
85
+
86
+ --micro-batch-size is always the smallest "batch size", it's what gets sent through the model.
87
+
88
+ --global-batch-size will default to micro batch size * data parallelism unless specified. With the default value there will be no gradient accumulation. If specified, gradient accumulation will happen to reach the global batch size. The "chunks" you talk about above for PP we see as just gradient accumulation. Without gradient accumulation PP is very inefficient, with no overlap of executing the different stages. So the more micro-batches that get accumulated, or the larger the global batch size, the more efficient PP will be.
89
+ We discussed a lot about how best to expose that in arguments and decided most of the time we care about the micro batch size and the global batch size and don't want to do the math to figure out the number of microbatches done to get to the global batch size. Especially since we will sometimes have a dynamic global batch size
90
+
91
+ So bottom line: under PP, the number of micro-batches == gradient accumulation steps.
92
+ # Megatron-LM notes
bigscience/jz/hpc-specs.md ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Specs of Jean Zay
2
+
3
+ - 261 nodes, with V100 32 GB GPUs: total 1044 GPUs
4
+ - 351 nodes, with V100 16 GB GPUs: total 1404 GPUs
5
+
6
+ ## Disc Partitions
7
+
8
+ - `$HOME` - 3GB for small files
9
+ - `$WORK` - 5TB / 500k inodes → sources, input/output files
10
+ - `$SCRATCH` - fastest (full SSD), 400TB our quota (total 2PB), files auto-removed after 30 days without access
11
+ - `$STORE` - for long term storage in tar files (very few inodes!)
12
+
13
+ ## Shared Filesystem
14
+
15
+ - GPFS filesystem (Spectrum Scale)
16
+
17
+ - `$SCRATCH` - is SSD with theoretical bandwidth of at least 300 GB/s, probably more with the 2PB extension
18
+ - other partitions are slower discs
19
+
20
+ ## Network Topology
21
+
22
+ V100 32GB GPU are `r6i[4-7]n[0-8],r[7-9]i[0-7]n[0-8],r14i7n[0-8]`
23
+
24
+ They are mostly grouped together but that doesn't really mean that the switches are completely independent from the rest of the network.
25
+
26
+ Due to the hypercube topology used on JZ, reaching two nodes on different racks might use intermediate hops on other racks, e.g. communications between nodes on r6 and r7 might go through switches on r3 or r8, depending on the targeted nodes.
27
+
28
+ ## JZ3
29
+
30
+ coming in Jan 2022:
31
+
32
+ - GPUs: 416 A100 80GB GPUs (52 nodes of 8 gpus each)
33
+ - 8 GPUs per node using NVLink 4 inter-gpu connects, 4 OmniPath links
34
+ - CPU: AMD
35
+ - CPU memory: 512GB per node
36
+ - Inter-node connect: Omni-Path Architecture (OPA)
37
+ - NCCL-communications network: a fully dedicated subnet
38
+ - Disc IO network: shared network with other types of nodes
bigscience/jz/scripts/custom_callbacks.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from transformers import TrainerCallback, is_tensorboard_available
4
+ from transformers.integrations import rewrite_logs
5
+
6
+
7
+ class LogFlosCallback(TrainerCallback):
8
+ """
9
+ A :class:`~transformers.TrainerCallback` that adds current flos to every log.
10
+ """
11
+
12
+ def on_log(self, args, state, control, logs=None, **kwargs):
13
+ logs["total_flos"] = state.total_flos
14
+
15
+
16
+ class TensorBoardFloIndexedCallback(TrainerCallback):
17
+ """
18
+ A :class:`~transformers.TrainerCallback` that sends the logs to `TensorBoard
19
+ <https://www.tensorflow.org/tensorboard>`__.
20
+
21
+ Args:
22
+ tb_writer (:obj:`SummaryWriter`, `optional`):
23
+ The writer to use. Will instantiate one if not set.
24
+ """
25
+
26
+ def __init__(self, tb_writer=None):
27
+ has_tensorboard = is_tensorboard_available()
28
+ assert (
29
+ has_tensorboard
30
+ ), "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or install tensorboardX."
31
+ if has_tensorboard:
32
+ try:
33
+ from torch.utils.tensorboard import SummaryWriter # noqa: F401
34
+
35
+ self._SummaryWriter = SummaryWriter
36
+ except ImportError:
37
+ try:
38
+ from tensorboardX import SummaryWriter
39
+
40
+ self._SummaryWriter = SummaryWriter
41
+ except ImportError:
42
+ self._SummaryWriter = None
43
+ else:
44
+ self._SummaryWriter = None
45
+ self.tb_writer = tb_writer
46
+
47
+ def _init_summary_writer(self, args, log_dir=None):
48
+ log_dir = log_dir or args.logging_dir
49
+ if self._SummaryWriter is not None:
50
+ self.tb_writer = self._SummaryWriter(log_dir=log_dir)
51
+
52
+ def on_train_begin(self, args, state, control, **kwargs):
53
+ if not state.is_world_process_zero:
54
+ return
55
+
56
+ log_dir = None
57
+
58
+ if state.is_hyper_param_search:
59
+ trial_name = state.trial_name
60
+ if trial_name is not None:
61
+ log_dir = os.path.join(args.logging_dir, trial_name)
62
+
63
+ self._init_summary_writer(args, log_dir)
64
+
65
+ if self.tb_writer is not None:
66
+ self.tb_writer.add_text("args", args.to_json_string())
67
+ if "model" in kwargs:
68
+ model = kwargs["model"]
69
+ if hasattr(model, "config") and model.config is not None:
70
+ model_config_json = model.config.to_json_string()
71
+ self.tb_writer.add_text("model_config", model_config_json)
72
+ # Version of TensorBoard coming from tensorboardX does not have this method.
73
+ if hasattr(self.tb_writer, "add_hparams"):
74
+ self.tb_writer.add_hparams(args.to_sanitized_dict(), metric_dict={})
75
+
76
+ def on_log(self, args, state, control, logs=None, **kwargs):
77
+ if not state.is_world_process_zero:
78
+ return
79
+
80
+ if self.tb_writer is None:
81
+ self._init_summary_writer(args)
82
+
83
+ if self.tb_writer is not None:
84
+ logs = rewrite_logs(logs)
85
+ self.tb_writer.add_scalar("Conversion/x steps - y flos", state.total_flos, state.global_step)
86
+ self.tb_writer.add_scalar("Conversion/x flos - y steps", state.global_step, state.total_flos)
87
+ for k, v in logs.items():
88
+ if isinstance(v, (int, float)):
89
+ self.tb_writer.add_scalar(f"Flos/{k}", v, state.total_flos)
90
+ self.tb_writer.add_scalar(f"Steps/{k}", v, state.global_step)
91
+ self.tb_writer.flush()
92
+
93
+ def on_train_end(self, args, state, control, **kwargs):
94
+ if self.tb_writer:
95
+ self.tb_writer.close()
bigscience/jz/scripts/run_clm.py ADDED
@@ -0,0 +1,520 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2020 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
18
+
19
+ Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
20
+ https://huggingface.co/models?filter=causal-lm
21
+ """
22
+ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
23
+
24
+ import logging
25
+ import math
26
+ import os
27
+ import sys
28
+ from dataclasses import dataclass, field
29
+ from typing import Optional
30
+
31
+ import torch.distributed
32
+ from datasets import load_dataset
33
+
34
+ import transformers
35
+ from transformers import (
36
+ CONFIG_MAPPING,
37
+ MODEL_FOR_CAUSAL_LM_MAPPING,
38
+ AutoConfig,
39
+ AutoModelForCausalLM,
40
+ AutoTokenizer,
41
+ HfArgumentParser,
42
+ Trainer,
43
+ TrainingArguments,
44
+ default_data_collator,
45
+ set_seed,
46
+ )
47
+ from transformers.testing_utils import CaptureLogger
48
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
49
+ from transformers.utils import check_min_version
50
+
51
+ ### I very much dislike this solution. `run_clm.py` should probably be at the root, or installed as an editable package.
52
+ import os
53
+ currentdir = os.path.dirname(os.path.realpath(__file__))
54
+ parentdir = os.path.dirname(currentdir)
55
+ sys.path.append(parentdir)
56
+ ###
57
+
58
+ from models.decoder_only_t5 import DecoderOnlyT5Config, DecoderOnlyT5LMHeadModel
59
+
60
+ CONFIG_MAPPING["decoder_only_t5"] = DecoderOnlyT5Config
61
+ MODEL_FOR_CAUSAL_LM_MAPPING[DecoderOnlyT5Config] = DecoderOnlyT5LMHeadModel
62
+
63
+ from custom_callbacks import LogFlosCallback, TensorBoardFloIndexedCallback
64
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
65
+
66
+ check_min_version("4.6.0.dev0")
67
+
68
+ logging.basicConfig(
69
+ format="%(asctime)s - %(levelname)s - %(process)d - %(name)s - %(message)s",
70
+ datefmt="%m/%d/%Y %H:%M:%S",
71
+ level=logging.INFO,
72
+ )
73
+ logger = logging.getLogger(__name__)
74
+
75
+
76
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
77
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
78
+
79
+
80
+ @dataclass
81
+ class ModelArguments:
82
+ """
83
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
84
+ """
85
+
86
+ model_name_or_path: Optional[str] = field(
87
+ default=None,
88
+ metadata={
89
+ "help": "The model checkpoint for weights initialization."
90
+ "Don't set if you want to train a model from scratch."
91
+ },
92
+ )
93
+ model_type: Optional[str] = field(
94
+ default=None,
95
+ metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
96
+ )
97
+ config_name: Optional[str] = field(
98
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
99
+ )
100
+ tokenizer_name: Optional[str] = field(
101
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
102
+ )
103
+ cache_dir: Optional[str] = field(
104
+ default=None,
105
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
106
+ )
107
+ use_fast_tokenizer: bool = field(
108
+ default=True,
109
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
110
+ )
111
+ model_revision: str = field(
112
+ default="main",
113
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
114
+ )
115
+ use_auth_token: bool = field(
116
+ default=False,
117
+ metadata={
118
+ "help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
119
+ "with private models)."
120
+ },
121
+ )
122
+
123
+
124
+ @dataclass
125
+ class ConfigArguments:
126
+ """
127
+ Arguments defining the new model we're about to train when training from scratch
128
+ """
129
+
130
+ n_ctx: Optional[int] = field(default=1024, metadata={"help": "Dimensionality of the causal mask"})
131
+ n_embd: Optional[int] = field(
132
+ default=768, metadata={"help": "Dimensionality of the embeddings and hidden states."}
133
+ )
134
+ n_layer: Optional[int] = field(default=12, metadata={"help": "Number of hidden layers."})
135
+ n_head: Optional[int] = field(default=12, metadata={"help": "Number of attention heads for each attention layer."})
136
+ n_inner: Optional[int] = field(default=None, metadata={"help": "Dimensionality of the inner feed-forward layers."})
137
+
138
+
139
+ @dataclass
140
+ class DataTrainingArguments:
141
+ """
142
+ Arguments pertaining to what data we are going to input our model for training and eval.
143
+ """
144
+
145
+ sanity: bool = field(
146
+ default=False, metadata={"help": "Only use fraction of the dataset"}
147
+ )
148
+ dataset_name: Optional[str] = field(
149
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
150
+ )
151
+ dataset_config_name: Optional[str] = field(
152
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
153
+ )
154
+ train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
155
+ validation_file: Optional[str] = field(
156
+ default=None,
157
+ metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
158
+ )
159
+ max_train_samples: Optional[int] = field(
160
+ default=None,
161
+ metadata={
162
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
163
+ "value if set."
164
+ },
165
+ )
166
+ max_val_samples: Optional[int] = field(
167
+ default=None,
168
+ metadata={
169
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
170
+ "value if set."
171
+ },
172
+ )
173
+
174
+ block_size: Optional[int] = field(
175
+ default=None,
176
+ metadata={
177
+ "help": "Optional input sequence length after tokenization. "
178
+ "The training dataset will be truncated in block of this size for training. "
179
+ "Default to the model max input length for single sentence inputs (take into account special tokens)."
180
+ },
181
+ )
182
+ overwrite_cache: bool = field(
183
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
184
+ )
185
+ validation_split_percentage: Optional[int] = field(
186
+ default=5,
187
+ metadata={
188
+ "help": "The percentage of the train set used as validation set in case there's no validation split"
189
+ },
190
+ )
191
+ preprocessing_num_workers: Optional[int] = field(
192
+ default=None,
193
+ metadata={"help": "The number of processes to use for the preprocessing."},
194
+ )
195
+
196
+ def __post_init__(self):
197
+ if self.dataset_name is None and self.train_file is None and self.validation_file is None:
198
+ raise ValueError("Need either a dataset name or a training/validation file.")
199
+ else:
200
+ if self.train_file is not None:
201
+ extension = self.train_file.split(".")[-1]
202
+ assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
203
+ if self.validation_file is not None:
204
+ extension = self.validation_file.split(".")[-1]
205
+ assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
206
+
207
+
208
+ def main():
209
+ # See all possible arguments in src/transformers/training_args.py
210
+ # or by passing the --help flag to this script.
211
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
212
+
213
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, ConfigArguments))
214
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
215
+ # If we pass only one argument to the script and it's the path to a json file,
216
+ # let's parse it to get our arguments.
217
+ model_args, data_args, training_args, config_args = parser.parse_json_file(
218
+ json_file=os.path.abspath(sys.argv[1])
219
+ )
220
+ else:
221
+ model_args, data_args, training_args, config_args = parser.parse_args_into_dataclasses()
222
+
223
+ # Detecting last checkpoint.
224
+ last_checkpoint = None
225
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
226
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
227
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
228
+ raise ValueError(
229
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
230
+ "Use --overwrite_output_dir to overcome."
231
+ )
232
+ elif last_checkpoint is not None:
233
+ logger.info(
234
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
235
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
236
+ )
237
+
238
+ # Setup logging
239
+ logging.basicConfig(
240
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
241
+ datefmt="%m/%d/%Y %H:%M:%S",
242
+ handlers=[logging.StreamHandler(sys.stdout)],
243
+ )
244
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
245
+
246
+ # Log on each process the small summary:
247
+ logger.warning(
248
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
249
+ + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
250
+ )
251
+ # Set the verbosity to info of the Transformers logger (on main process only):
252
+ if is_main_process(training_args.local_rank):
253
+ transformers.utils.logging.set_verbosity_info()
254
+ transformers.utils.logging.enable_default_handler()
255
+ transformers.utils.logging.enable_explicit_format()
256
+ logger.info(f"Training/evaluation parameters {training_args}")
257
+
258
+ # Set seed before initializing model.
259
+ set_seed(training_args.seed)
260
+
261
+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
262
+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
263
+ # (the dataset will be downloaded automatically from the datasets Hub).
264
+ #
265
+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
266
+ # 'text' is found. You can easily tweak this behavior (see below).
267
+ #
268
+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently
269
+ # download the dataset.
270
+ if data_args.dataset_name is not None:
271
+ # Downloading and loading a dataset from the hub.
272
+ datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, keep_in_memory=False, cache_dir=model_args.cache_dir)
273
+ if "validation" not in datasets.keys():
274
+ datasets["validation"] = load_dataset(
275
+ data_args.dataset_name,
276
+ data_args.dataset_config_name,
277
+ split=f"train[:{data_args.validation_split_percentage}%]",
278
+ keep_in_memory=False,
279
+ cache_dir=model_args.cache_dir
280
+ )
281
+ datasets["train"] = load_dataset(
282
+ data_args.dataset_name,
283
+ data_args.dataset_config_name,
284
+ split=f"train[{data_args.validation_split_percentage}%:]",
285
+ keep_in_memory=False,
286
+ cache_dir=model_args.cache_dir
287
+ )
288
+ else:
289
+ data_files = {}
290
+ if data_args.train_file is not None:
291
+ data_files["train"] = data_args.train_file
292
+ if data_args.validation_file is not None:
293
+ data_files["validation"] = data_args.validation_file
294
+ extension = (
295
+ data_args.train_file.split(".")[-1]
296
+ if data_args.train_file is not None
297
+ else data_args.validation_file.split(".")[-1]
298
+ )
299
+ if extension == "txt":
300
+ extension = "text"
301
+ datasets = load_dataset(extension, data_files=data_files, keep_in_memory=False, cache_dir=model_args.cache_dir)
302
+ if data_args.sanity:
303
+ datasets["train"] = datasets["train"].shard(100, index=0, contiguous=True)
304
+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
305
+ # https://huggingface.co/docs/datasets/loading_datasets.html.
306
+
307
+ # Load pretrained model and tokenizer
308
+ #
309
+ # Distributed training:
310
+ # The .from_pretrained methods guarantee that only one local process can concurrently
311
+ # download model & vocab.
312
+
313
+ config_kwargs = {
314
+ "cache_dir": model_args.cache_dir,
315
+ "revision": model_args.model_revision,
316
+ "use_auth_token": True if model_args.use_auth_token else None,
317
+ }
318
+ if model_args.config_name:
319
+ config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
320
+ elif model_args.model_name_or_path:
321
+ config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
322
+ else:
323
+ config = CONFIG_MAPPING[model_args.model_type](**vars(config_args), **config_kwargs)
324
+ logger.warning("You are instantiating a new config instance from scratch.")
325
+
326
+ tokenizer_kwargs = {
327
+ "cache_dir": model_args.cache_dir,
328
+ "use_fast": model_args.use_fast_tokenizer,
329
+ "revision": model_args.model_revision,
330
+ "use_auth_token": True if model_args.use_auth_token else None,
331
+ }
332
+ if model_args.tokenizer_name:
333
+ tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
334
+ elif model_args.model_name_or_path:
335
+ tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
336
+ else:
337
+ raise ValueError(
338
+ "You are instantiating a new tokenizer from scratch. This is not supported by this script."
339
+ "You can do it from another script, save it, and load it from here, using --tokenizer_name."
340
+ )
341
+
342
+ if model_args.model_name_or_path:
343
+ model = AutoModelForCausalLM.from_pretrained(
344
+ model_args.model_name_or_path,
345
+ from_tf=bool(".ckpt" in model_args.model_name_or_path),
346
+ config=config,
347
+ cache_dir=model_args.cache_dir,
348
+ revision=model_args.model_revision,
349
+ use_auth_token=True if model_args.use_auth_token else None,
350
+ )
351
+ else:
352
+ logger.info("Training new model from scratch")
353
+ model = AutoModelForCausalLM.from_config(config)
354
+
355
+ model.resize_token_embeddings(len(tokenizer))
356
+
357
+ # Preprocessing the datasets.
358
+ # First we tokenize all the texts.
359
+ if training_args.do_train:
360
+ column_names = datasets["train"].column_names
361
+ else:
362
+ column_names = datasets["validation"].column_names
363
+ text_column_name = "text" if "text" in column_names else column_names[0]
364
+
365
+ # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
366
+ tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
367
+
368
+ datasets = datasets.shuffle()
369
+ def tokenize_function(examples):
370
+ with CaptureLogger(tok_logger) as cl:
371
+ output = tokenizer(examples[text_column_name])
372
+ # clm input could be much much longer than block_size
373
+ if "Token indices sequence length is longer than the" in cl.out:
374
+ tok_logger.warning(
375
+ "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
376
+ )
377
+ return output
378
+
379
+ # Ensures only the main process does dataset pre-processing; the other ones will load the `map` cache
380
+ if not is_main_process(training_args.local_rank):
381
+ print("waiting for main process to execute mapping")
382
+ torch.distributed.barrier()
383
+
384
+ logger.info("Mapping dataset to tokenized dataset.",)
385
+ tokenized_datasets = datasets.map(
386
+ tokenize_function,
387
+ batched=True,
388
+ num_proc=data_args.preprocessing_num_workers,
389
+ remove_columns=column_names,
390
+ load_from_cache_file=not data_args.overwrite_cache,
391
+ keep_in_memory=False
392
+ )
393
+
394
+ if data_args.block_size is None:
395
+ block_size = tokenizer.model_max_length
396
+ if block_size > 1024:
397
+ logger.warning(
398
+ f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
399
+ "Picking 1024 instead. You can change that default value by passing --block_size xxx."
400
+ )
401
+ block_size = 1024
402
+ else:
403
+ if data_args.block_size > tokenizer.model_max_length:
404
+ logger.warning(
405
+ f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model "
406
+ f"({tokenizer.model_max_length}). Proceeding with block_size={data_args.block_size} anyway."
407
+ )
408
+ # block_size = min(data_args.block_size, tokenizer.model_max_length)
409
+ block_size = data_args.block_size
410
+
411
+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
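+ # For illustration: with block_size=4, a batch whose concatenated `input_ids` hold 10 tokens is split
+ # into chunks [0:4] and [4:8]; the trailing 2 tokens are dropped, and `labels` is a copy of `input_ids`.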
412
+ def group_texts(examples):
413
+ # Concatenate all texts.
414
+ concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
415
+ total_length = len(concatenated_examples[list(examples.keys())[0]])
416
+ # We drop the small remainder; we could add padding instead if the model supported it. You can
417
+ # customize this part to your needs.
418
+ total_length = (total_length // block_size) * block_size
419
+ # Split by chunks of max_len.
420
+ result = {
421
+ k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
422
+ for k, t in concatenated_examples.items()
423
+ }
424
+ result["labels"] = result["input_ids"].copy()
425
+ return result
426
+
427
+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
428
+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
429
+ # to preprocess.
430
+ #
431
+ # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
432
+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
433
+
434
+ logger.info("Chunking tokenized dataset.")
435
+ lm_datasets = tokenized_datasets.map(
436
+ group_texts,
437
+ batched=True,
438
+ num_proc=data_args.preprocessing_num_workers,
439
+ load_from_cache_file=not data_args.overwrite_cache,
440
+ keep_in_memory=False
441
+ )
442
+
443
+ # Now the other ones can catch up.
444
+ if training_args.local_rank != -1 and is_main_process(training_args.local_rank):
445
+ print("loading results from main process")
446
+ torch.distributed.barrier()
447
+
448
+ if training_args.do_train:
449
+ if "train" not in tokenized_datasets:
450
+ raise ValueError("--do_train requires a train dataset")
451
+ train_dataset = lm_datasets["train"]
452
+ if data_args.max_train_samples is not None:
453
+ train_dataset = train_dataset.select(range(data_args.max_train_samples))
454
+
455
+ if training_args.do_eval:
456
+ if "validation" not in tokenized_datasets:
457
+ cutoff = data_args.validation_split_percentage * len(lm_datasets["train"]) // 100
458
+ train_dataset = lm_datasets["train"].select(range(cutoff, len(lm_datasets["train"])))
459
+ eval_dataset = lm_datasets["train"].select(range(cutoff))
460
+ else:
461
+ eval_dataset = lm_datasets["validation"]
462
+ if data_args.max_val_samples is not None:
463
+ eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
464
+
465
+
466
+ # Initialize our Trainer
467
+ trainer = Trainer(
468
+ model=model,
469
+ args=training_args,
470
+ train_dataset=train_dataset if training_args.do_train else None,
471
+ eval_dataset=eval_dataset if training_args.do_eval else None,
472
+ tokenizer=tokenizer,
473
+ # Data collator will default to DataCollatorWithPadding, so we change it.
474
+ data_collator=default_data_collator,
475
+ callbacks=[LogFlosCallback, TensorBoardFloIndexedCallback]
476
+ )
477
+
478
+ # Training
479
+ if training_args.do_train:
480
+ checkpoint = None
481
+ if training_args.resume_from_checkpoint is not None:
482
+ checkpoint = training_args.resume_from_checkpoint
483
+ elif last_checkpoint is not None:
484
+ checkpoint = last_checkpoint
485
+
486
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
487
+ trainer.save_model() # Saves the tokenizer too for easy upload
488
+
489
+ metrics = train_result.metrics
490
+
491
+ max_train_samples = (
492
+ data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
493
+ )
494
+ metrics["train_samples"] = min(max_train_samples, len(train_dataset))
495
+
496
+ trainer.log_metrics("train", metrics)
497
+ trainer.save_metrics("train", metrics)
498
+ trainer.save_state()
499
+
500
+ # Evaluation
501
+ if training_args.do_eval:
502
+ logger.info("*** Evaluate ***")
503
+
504
+ metrics = trainer.evaluate()
505
+
506
+ metrics["eval_samples"] = len(eval_dataset)
507
+ perplexity = math.exp(metrics["eval_loss"])
508
+ metrics["perplexity"] = perplexity
509
+
510
+ trainer.log_metrics("eval", metrics)
511
+ trainer.save_metrics("eval", metrics)
512
+
513
+
514
+ def _mp_fn(index):
515
+ # For xla_spawn (TPUs)
516
+ main()
517
+
518
+
519
+ if __name__ == "__main__":
520
+ main()
bigscience/jz/scripts/run_clm_prompted.py ADDED
@@ -0,0 +1,534 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2020 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Prompted version of run_clm.
18
+ """
19
+ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
20
+
21
+ import logging
22
+ import math
23
+ import os
24
+ import sys
25
+ from dataclasses import dataclass, field
26
+ import torch
27
+ from typing import Optional, Dict, List, Union
28
+
29
+ from datasets import load_dataset, load_from_disk
30
+
31
+ import transformers
32
+ from transformers import (
33
+ CONFIG_MAPPING,
34
+ MODEL_FOR_CAUSAL_LM_MAPPING,
35
+ AutoConfig,
36
+ AutoModelForCausalLM,
37
+ AutoTokenizer,
38
+ HfArgumentParser,
39
+ Trainer,
40
+ TrainingArguments,
41
+ default_data_collator,
42
+ set_seed,
43
+ )
44
+ from transformers.testing_utils import CaptureLogger
45
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
46
+ from transformers.utils import check_min_version
47
+ from transformers.file_utils import PaddingStrategy
48
+ from transformers.tokenization_utils_base import PreTrainedTokenizerBase
49
+
50
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
51
+ check_min_version("4.6.0.dev0")
52
+
53
+ logging.basicConfig(
54
+ format="%(asctime)s - %(levelname)s - %(process)d - %(name)s - %(message)s",
55
+ datefmt="%m/%d/%Y %H:%M:%S",
56
+ level=logging.INFO,
57
+ )
58
+ logger = logging.getLogger(__name__)
59
+
60
+
61
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
62
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
63
+
64
+ @dataclass
65
+ class MyDataCollatorWithPadding:
66
+ """
67
+ Custom version of `DataCollatorWithPadding`.
68
+ """
69
+
70
+ tokenizer: PreTrainedTokenizerBase
71
+ padding: Union[bool, str, PaddingStrategy] = True
72
+ max_length: Optional[int] = None
73
+ pad_to_multiple_of: Optional[int] = None
74
+
75
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
76
+ batch = self.tokenizer.pad(
77
+ features,
78
+ padding=self.padding,
79
+ max_length=self.max_length,
80
+ pad_to_multiple_of=self.pad_to_multiple_of,
81
+ )
82
+ if "label" in batch:
83
+ batch["labels"] = batch["label"]
84
+ del batch["label"]
85
+ if "label_ids" in batch:
86
+ batch["labels"] = batch["label_ids"]
87
+ del batch["label_ids"]
88
+ # Padding labels
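+ # Labels are padded with -100, the index ignored by PyTorch's cross-entropy loss, so the padded
+ # positions do not contribute to the loss.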
89
+ max_l = len(batch["input_ids"][0])
90
+ result = []
91
+ for i in batch["labels"]:
92
+ result.append(i + [-100]*(max_l - len(i)))
93
+ batch["labels"] = result
94
+ for k, v in batch.items():
95
+ batch[k] = torch.tensor(v)
96
+ return batch
97
+
98
+ @dataclass
99
+ class ModelArguments:
100
+ """
101
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
102
+ """
103
+
104
+ model_name_or_path: Optional[str] = field(
105
+ default=None,
106
+ metadata={
107
+ "help": "The model checkpoint for weights initialization."
108
+ "Don't set if you want to train a model from scratch."
109
+ },
110
+ )
111
+ model_type: Optional[str] = field(
112
+ default=None,
113
+ metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
114
+ )
115
+ config_name: Optional[str] = field(
116
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
117
+ )
118
+ tokenizer_name: Optional[str] = field(
119
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
120
+ )
121
+ cache_dir: Optional[str] = field(
122
+ default=None,
123
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
124
+ )
125
+ use_fast_tokenizer: bool = field(
126
+ default=True,
127
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
128
+ )
129
+ model_revision: str = field(
130
+ default="main",
131
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
132
+ )
133
+ use_auth_token: bool = field(
134
+ default=False,
135
+ metadata={
136
+ "help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
137
+ "with private models)."
138
+ },
139
+ )
140
+
141
+
142
+ @dataclass
143
+ class DataTrainingArguments:
144
+ """
145
+ Arguments pertaining to what data we are going to input our model for training and eval.
146
+ """
147
+
148
+ dataset_name: Optional[str] = field(
149
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
150
+ )
151
+ dataset_config_name: Optional[str] = field(
152
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
153
+ )
154
+ train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
155
+ validation_file: Optional[str] = field(
156
+ default=None,
157
+ metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
158
+ )
159
+ max_train_samples: Optional[int] = field(
160
+ default=None,
161
+ metadata={
162
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
163
+ "value if set."
164
+ },
165
+ )
166
+ max_val_samples: Optional[int] = field(
167
+ default=None,
168
+ metadata={
169
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
170
+ "value if set."
171
+ },
172
+ )
173
+
174
+ block_size: Optional[int] = field(
175
+ default=None,
176
+ metadata={
177
+ "help": "Optional input sequence length after tokenization. "
178
+ "The training dataset will be truncated in block of this size for training. "
179
+ "Default to the model max input length for single sentence inputs (take into account special tokens)."
180
+ },
181
+ )
182
+ overwrite_cache: bool = field(
183
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
184
+ )
185
+ validation_split_percentage: Optional[int] = field(
186
+ default=5,
187
+ metadata={
188
+ "help": "The percentage of the train set used as validation set in case there's no validation split"
189
+ },
190
+ )
191
+ preprocessing_num_workers: Optional[int] = field(
192
+ default=None,
193
+ metadata={"help": "The number of processes to use for the preprocessing."},
194
+ )
195
+
196
+ def __post_init__(self):
197
+ if self.dataset_name is None and self.train_file is None and self.validation_file is None:
198
+ raise ValueError("Need either a dataset name or a training/validation file.")
199
+ else:
200
+ if self.train_file is not None:
201
+ extension = self.train_file.split(".")[-1]
202
+ assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
203
+ if self.validation_file is not None:
204
+ extension = self.validation_file.split(".")[-1]
205
+ assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
206
+
207
+
208
+ def main():
209
+ # See all possible arguments in src/transformers/training_args.py
210
+ # or by passing the --help flag to this script.
211
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
212
+
213
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
214
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
215
+ # If we pass only one argument to the script and it's the path to a json file,
216
+ # let's parse it to get our arguments.
217
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
218
+ else:
219
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
220
+
221
+ # Detecting last checkpoint.
222
+ last_checkpoint = None
223
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
224
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
225
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
226
+ raise ValueError(
227
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
228
+ "Use --overwrite_output_dir to overcome."
229
+ )
230
+ elif last_checkpoint is not None:
231
+ logger.info(
232
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
233
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
234
+ )
235
+
236
+ # Setup logging
237
+ logging.basicConfig(
238
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
239
+ datefmt="%m/%d/%Y %H:%M:%S",
240
+ handlers=[logging.StreamHandler(sys.stdout)],
241
+ )
242
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
243
+
244
+ # Log on each process the small summary:
245
+ logger.warning(
246
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
247
+ + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
248
+ )
249
+ # Set the verbosity to info of the Transformers logger (on main process only):
250
+ if is_main_process(training_args.local_rank):
251
+ transformers.utils.logging.set_verbosity_info()
252
+ transformers.utils.logging.enable_default_handler()
253
+ transformers.utils.logging.enable_explicit_format()
254
+ logger.info(f"Training/evaluation parameters {training_args}")
255
+
256
+ # Set seed before initializing model.
257
+ set_seed(training_args.seed)
258
+
259
+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
260
+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
261
+ # (the dataset will be downloaded automatically from the datasets Hub).
262
+ #
263
+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
264
+ # 'text' is found. You can easily tweak this behavior (see below).
265
+ #
266
+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently
267
+ # download the dataset.
268
+ # if data_args.dataset_name is not None:
269
+ # # Downloading and loading a dataset from the hub.
270
+ # datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
271
+ # if "validation" not in datasets.keys():
272
+ # datasets["validation"] = load_dataset(
273
+ # data_args.dataset_name,
274
+ # data_args.dataset_config_name,
275
+ # split=f"train[:{data_args.validation_split_percentage}%]",
276
+ # )
277
+ # datasets["train"] = load_dataset(
278
+ # data_args.dataset_name,
279
+ # data_args.dataset_config_name,
280
+ # split=f"train[{data_args.validation_split_percentage}%:]",
281
+ # )
282
+ # else:
283
+ # data_files = {}
284
+ # if data_args.train_file is not None:
285
+ # data_files["train"] = data_args.train_file
286
+ # if data_args.validation_file is not None:
287
+ # data_files["validation"] = data_args.validation_file
288
+ # extension = (
289
+ # data_args.train_file.split(".")[-1]
290
+ # if data_args.train_file is not None
291
+ # else data_args.validation_file.split(".")[-1]
292
+ # )
293
+ # if extension == "txt":
294
+ # extension = "text"
295
+ # datasets = load_dataset(extension, data_files=data_files)
296
+ datasets = load_from_disk(dataset_path=data_args.dataset_name)
297
+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
298
+ # https://huggingface.co/docs/datasets/loading_datasets.html.
299
+
300
+ # Load pretrained model and tokenizer
301
+ #
302
+ # Distributed training:
303
+ # The .from_pretrained methods guarantee that only one local process can concurrently
304
+ # download model & vocab.
305
+
306
+ config_kwargs = {
307
+ "cache_dir": model_args.cache_dir,
308
+ "revision": model_args.model_revision,
309
+ "use_auth_token": True if model_args.use_auth_token else None,
310
+ }
311
+ if model_args.config_name:
312
+ config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
313
+ elif model_args.model_name_or_path:
314
+ config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
315
+ else:
316
+ config = CONFIG_MAPPING[model_args.model_type]()
317
+ logger.warning("You are instantiating a new config instance from scratch.")
318
+
319
+ tokenizer_kwargs = {
320
+ "cache_dir": model_args.cache_dir,
321
+ "use_fast": model_args.use_fast_tokenizer,
322
+ "revision": model_args.model_revision,
323
+ "use_auth_token": True if model_args.use_auth_token else None,
324
+ }
325
+ if model_args.tokenizer_name:
326
+ tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
327
+ elif model_args.model_name_or_path:
328
+ tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
329
+ else:
330
+ raise ValueError(
331
+ "You are instantiating a new tokenizer from scratch. This is not supported by this script."
332
+ "You can do it from another script, save it, and load it from here, using --tokenizer_name."
333
+ )
334
+ if tokenizer.pad_token_id is None and tokenizer.eos_token_id is not None:
335
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{tokenizer.eos_token_id}.")
336
+ tokenizer.pad_token = tokenizer.eos_token
337
+
338
+ if model_args.model_name_or_path:
339
+ model = AutoModelForCausalLM.from_pretrained(
340
+ model_args.model_name_or_path,
341
+ from_tf=bool(".ckpt" in model_args.model_name_or_path),
342
+ config=config,
343
+ cache_dir=model_args.cache_dir,
344
+ revision=model_args.model_revision,
345
+ use_auth_token=True if model_args.use_auth_token else None,
346
+ )
347
+ else:
348
+ logger.info("Training new model from scratch")
349
+ model = AutoModelForCausalLM.from_config(config)
350
+
351
+ model.resize_token_embeddings(len(tokenizer))
352
+
353
+ # Preprocessing the datasets.
354
+ # First we tokenize all the texts.
355
+ if training_args.do_train:
356
+ column_names = datasets["train"].column_names
357
+ else:
358
+ column_names = datasets["validation"].column_names
359
+ text_column_name = "text" if "text" in column_names else column_names[0]
360
+
361
+ def tokenize_function(examples):
362
+ def tok_f_ids(string):
363
+ return tokenizer(string, return_attention_mask=False)["input_ids"]
364
+
365
+ texts, texts_a, texts_b = [], [], []
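+ # For prompted instances, `texts_a` holds the tokenized context (`input <eos> prompt <eos>`) and
+ # `texts_b` the tokenized output; plain unprompted texts go into `texts` and are chunked as in run_clm.py.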
366
+
367
+ unprompted_texts = examples["text"]
368
+ prompting_instances = examples["prompting_instances"]
369
+
370
+ for ump_text, ppt_instances in zip(unprompted_texts, prompting_instances):
371
+ if ppt_instances:
372
+ for i, p, o in zip(ppt_instances["input"], ppt_instances["prompt"], ppt_instances["output"]):
373
+ texts.append([])
374
+ texts_a.append(
375
+ tok_f_ids(i) \
376
+ + [tokenizer.eos_token_id] \
377
+ + tok_f_ids(p) \
378
+ + [tokenizer.eos_token_id]
379
+ )
380
+ texts_b.append(tok_f_ids(o))
381
+ else:
382
+ texts.append(tok_f_ids(ump_text))
383
+ texts_a.append([])
384
+ texts_b.append([])
385
+ return {
386
+ "text_full": texts,
387
+ "text_a": texts_a,
388
+ "text_b": texts_b,
389
+ }
390
+
391
+ datasets = datasets.shuffle()
392
+ logger.info("Mapping dataset to tokenized dataset.",)
393
+ tokenized_datasets = datasets.map(
394
+ tokenize_function,
395
+ batched=True,
396
+ num_proc=data_args.preprocessing_num_workers,
397
+ remove_columns=column_names,
398
+ load_from_cache_file=not data_args.overwrite_cache,
399
+ )
400
+
401
+ if data_args.block_size is None:
402
+ block_size = tokenizer.model_max_length
403
+ if block_size > 1024:
404
+ logger.warning(
405
+ f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
406
+ "Picking 1024 instead. You can change that default value by passing --block_size xxx."
407
+ )
408
+ block_size = 1024
409
+ else:
410
+ if data_args.block_size > tokenizer.model_max_length:
411
+ logger.warning(
412
+ f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
413
+ f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
414
+ )
415
+ block_size = min(data_args.block_size, tokenizer.model_max_length)
416
+
417
+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
418
+ def group_texts(examples):
419
+ texts = examples["text_full"]
420
+ texts_a = examples["text_a"]
421
+ texts_b = examples["text_b"]
422
+
423
+ result = {
424
+ "input_ids": [],
425
+ "labels": [],
426
+ "attention_mask": [],
427
+ "length": [],
428
+ }
429
+ n = int(block_size/2)
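+ # For prompted examples, the context is truncated to its last n tokens and the output to its first n
+ # tokens so the pair fits in block_size; context positions get label -100 so only the output is scored,
+ # and outputs shorter than 20 tokens are skipped.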
430
+ for t, t_a, t_b in zip(texts, texts_a, texts_b):
431
+ if t == []:
432
+ cut_t_a = t_a[-n:]
433
+ cut_t_b = t_b[:n]
434
+ if len(cut_t_b) < 20:
435
+ continue
436
+ result["input_ids"].append(cut_t_a + cut_t_b)
437
+ result["labels"].append([-100]*len(cut_t_a) + cut_t_b)
438
+ else:
439
+ total_length = len(t)
440
+ total_length = (total_length // block_size) * block_size
441
+ for i in range(0, total_length, block_size):
442
+ sub_seq = t[i : i + block_size]
443
+ result["input_ids"].append(sub_seq)
444
+ result["labels"].append(sub_seq)
445
+ for i in result["labels"]:
446
+ result["attention_mask"].append([1]*len(i))
447
+ result["length"].append(len(i))
448
+ return result
449
+
450
+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
451
+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
452
+ # to preprocess.
453
+ #
454
+ # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
455
+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
456
+
457
+ logger.info("Chunking tokenized dataset.")
458
+ lm_datasets = tokenized_datasets.map(
459
+ group_texts,
460
+ batched=True,
461
+ num_proc=data_args.preprocessing_num_workers,
462
+ remove_columns=tokenized_datasets["train"].column_names,
463
+ load_from_cache_file=not data_args.overwrite_cache,
464
+ )
465
+
466
+ if training_args.do_train:
467
+ if "train" not in tokenized_datasets:
468
+ raise ValueError("--do_train requires a train dataset")
469
+ train_dataset = lm_datasets["train"]
470
+ if data_args.max_train_samples is not None:
471
+ train_dataset = train_dataset.select(range(data_args.max_train_samples))
472
+
473
+ if training_args.do_eval:
474
+ if "validation" not in tokenized_datasets:
475
+ raise ValueError("--do_eval requires a validation dataset")
476
+ eval_dataset = lm_datasets["validation"]
477
+ if data_args.max_val_samples is not None:
478
+ eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
479
+
480
+ # Initialize our Trainer
481
+ trainer = Trainer(
482
+ model=model,
483
+ args=training_args,
484
+ train_dataset=train_dataset if training_args.do_train else None,
485
+ eval_dataset=eval_dataset if training_args.do_eval else None,
486
+ tokenizer=tokenizer,
487
+ # Data collator will default to DataCollatorWithPadding, so we change it.
488
+ data_collator=MyDataCollatorWithPadding(tokenizer=tokenizer, padding=True),
489
+ )
490
+
491
+ # Training
492
+ if training_args.do_train:
493
+ if last_checkpoint is not None:
494
+ checkpoint = last_checkpoint
495
+ elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
496
+ checkpoint = model_args.model_name_or_path
497
+ else:
498
+ checkpoint = None
499
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
500
+ trainer.save_model() # Saves the tokenizer too for easy upload
501
+
502
+ metrics = train_result.metrics
503
+
504
+ max_train_samples = (
505
+ data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
506
+ )
507
+ metrics["train_samples"] = min(max_train_samples, len(train_dataset))
508
+
509
+ trainer.log_metrics("train", metrics)
510
+ trainer.save_metrics("train", metrics)
511
+ trainer.save_state()
512
+
513
+ # Evaluation
514
+ if training_args.do_eval:
515
+ logger.info("*** Evaluate ***")
516
+
517
+ metrics = trainer.evaluate()
518
+
519
+ max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
520
+ metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
521
+ perplexity = math.exp(metrics["eval_loss"])
522
+ metrics["perplexity"] = perplexity
523
+
524
+ trainer.log_metrics("eval", metrics)
525
+ trainer.save_metrics("eval", metrics)
526
+
527
+
528
+ def _mp_fn(index):
529
+ # For xla_spawn (TPUs)
530
+ main()
531
+
532
+
533
+ if __name__ == "__main__":
534
+ main()
bigscience/jz/scripts/run_text2text.py ADDED
@@ -0,0 +1,514 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2020 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tune a text-to-text model (T5, BART, ...) on a text file or dataset.
18
+ """
19
+
20
+ import logging
21
+ import math
22
+ import os
23
+ import sys
24
+ from dataclasses import dataclass, field
25
+ from typing import Optional
26
+
27
+ import torch.distributed
28
+ from datasets import load_dataset
29
+
30
+ import transformers
31
+ from transformers import (
32
+ CONFIG_MAPPING,
33
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
34
+ AutoConfig,
35
+ AutoModelForSeq2SeqLM,
36
+ AutoTokenizer,
37
+ HfArgumentParser,
38
+ Trainer,
39
+ TrainingArguments,
40
+ default_data_collator,
41
+ set_seed,
42
+ )
43
+ from transformers.testing_utils import CaptureLogger
44
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
45
+ from transformers.utils import check_min_version
46
+
47
+ ### I very much dislike this solution. `run_clm.py` should probably be at the root, or install as an editable package.
48
+ import os
49
+ currentdir = os.path.dirname(os.path.realpath(__file__))
50
+ parentdir = os.path.dirname(currentdir)
51
+ sys.path.append(parentdir)
52
+ ###
53
+
54
+ from custom_callbacks import LogFlosCallback, TensorBoardFloIndexedCallback
55
+
56
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
57
+ check_min_version("4.6.0.dev0")
58
+
59
+ logging.basicConfig(
60
+ format="%(asctime)s - %(levelname)s - %(process)d - %(name)s - %(message)s",
61
+ datefmt="%m/%d/%Y %H:%M:%S",
62
+ level=logging.INFO,
63
+ )
64
+ logger = logging.getLogger(__name__)
65
+
66
+
67
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys())
68
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
69
+
70
+
71
+ @dataclass
72
+ class ModelArguments:
73
+ """
74
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
75
+ """
76
+
77
+ model_name_or_path: Optional[str] = field(
78
+ default=None,
79
+ metadata={
80
+ "help": "The model checkpoint for weights initialization."
81
+ "Don't set if you want to train a model from scratch."
82
+ },
83
+ )
84
+ model_type: Optional[str] = field(
85
+ default=None,
86
+ metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
87
+ )
88
+ config_name: Optional[str] = field(
89
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
90
+ )
91
+ tokenizer_name: Optional[str] = field(
92
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
93
+ )
94
+ cache_dir: Optional[str] = field(
95
+ default=None,
96
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
97
+ )
98
+ use_fast_tokenizer: bool = field(
99
+ default=True,
100
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
101
+ )
102
+ model_revision: str = field(
103
+ default="main",
104
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
105
+ )
106
+ use_auth_token: bool = field(
107
+ default=False,
108
+ metadata={
109
+ "help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
110
+ "with private models)."
111
+ },
112
+ )
113
+
114
+
115
+ @dataclass
116
+ class ConfigArguments:
117
+ """
118
+ Arguments defining the new model we're about to train when training from scratch
119
+ """
120
+
121
+ n_ctx: Optional[int] = field(default=1024, metadata={"help": "Dimensionality of the causal mask"})
122
+ n_embd: Optional[int] = field(
123
+ default=768, metadata={"help": "Dimensionality of the embeddings and hidden states."}
124
+ )
125
+ n_layer: Optional[int] = field(default=12, metadata={"help": "Number of hidden layers."})
126
+ n_head: Optional[int] = field(default=12, metadata={"help": "Number of attention heads for each attention layer."})
127
+ n_inner: Optional[int] = field(default=None, metadata={"help": "Dimensionality of the inner feed-forward layers."})
128
+
129
+
130
+ @dataclass
131
+ class DataTrainingArguments:
132
+ """
133
+ Arguments pertaining to what data we are going to input our model for training and eval.
134
+ """
135
+
136
+ sanity: bool = field(
137
+ default=False, metadata={"help": "Only use fraction of the dataset"}
138
+ )
139
+ dataset_name: Optional[str] = field(
140
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
141
+ )
142
+ dataset_config_name: Optional[str] = field(
143
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
144
+ )
145
+ train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
146
+ validation_file: Optional[str] = field(
147
+ default=None,
148
+ metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
149
+ )
150
+ max_train_samples: Optional[int] = field(
151
+ default=None,
152
+ metadata={
153
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
154
+ "value if set."
155
+ },
156
+ )
157
+ max_val_samples: Optional[int] = field(
158
+ default=None,
159
+ metadata={
160
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
161
+ "value if set."
162
+ },
163
+ )
164
+
165
+ block_size: Optional[int] = field(
166
+ default=None,
167
+ metadata={
168
+ "help": "Optional input sequence length after tokenization. "
169
+ "The training dataset will be truncated in block of this size for training. "
170
+ "Default to the model max input length for single sentence inputs (take into account special tokens)."
171
+ },
172
+ )
173
+ overwrite_cache: bool = field(
174
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
175
+ )
176
+ validation_split_percentage: Optional[int] = field(
177
+ default=5,
178
+ metadata={
179
+ "help": "The percentage of the train set used as validation set in case there's no validation split"
180
+ },
181
+ )
182
+ preprocessing_num_workers: Optional[int] = field(
183
+ default=None,
184
+ metadata={"help": "The number of processes to use for the preprocessing."},
185
+ )
186
+
187
+ def __post_init__(self):
188
+ if self.dataset_name is None and self.train_file is None and self.validation_file is None:
189
+ raise ValueError("Need either a dataset name or a training/validation file.")
190
+ else:
191
+ if self.train_file is not None:
192
+ extension = self.train_file.split(".")[-1]
193
+ assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
194
+ if self.validation_file is not None:
195
+ extension = self.validation_file.split(".")[-1]
196
+ assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
197
+
198
+
199
+ def main():
200
+ # See all possible arguments in src/transformers/training_args.py
201
+ # or by passing the --help flag to this script.
202
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
203
+
204
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, ConfigArguments))
205
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
206
+ # If we pass only one argument to the script and it's the path to a json file,
207
+ # let's parse it to get our arguments.
208
+ model_args, data_args, training_args, config_args = parser.parse_json_file(
209
+ json_file=os.path.abspath(sys.argv[1])
210
+ )
211
+ else:
212
+ model_args, data_args, training_args, config_args = parser.parse_args_into_dataclasses()
213
+
214
+ # Detecting last checkpoint.
215
+ last_checkpoint = None
216
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
217
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
218
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
219
+ raise ValueError(
220
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
221
+ "Use --overwrite_output_dir to overcome."
222
+ )
223
+ elif last_checkpoint is not None:
224
+ logger.info(
225
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
226
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
227
+ )
228
+
229
+ # Setup logging
230
+ logging.basicConfig(
231
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
232
+ datefmt="%m/%d/%Y %H:%M:%S",
233
+ handlers=[logging.StreamHandler(sys.stdout)],
234
+ )
235
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
236
+
237
+ # Log on each process the small summary:
238
+ logger.warning(
239
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
240
+ + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
241
+ )
242
+ # Set the verbosity to info of the Transformers logger (on main process only):
243
+ if is_main_process(training_args.local_rank):
244
+ transformers.utils.logging.set_verbosity_info()
245
+ transformers.utils.logging.enable_default_handler()
246
+ transformers.utils.logging.enable_explicit_format()
247
+ logger.info(f"Training/evaluation parameters {training_args}")
248
+
249
+ # Set seed before initializing model.
250
+ set_seed(training_args.seed)
251
+
252
+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
253
+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
254
+ # (the dataset will be downloaded automatically from the datasets Hub).
255
+ #
256
+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
257
+ # 'text' is found. You can easily tweak this behavior (see below).
258
+ #
259
+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently
260
+ # download the dataset.
261
+ if data_args.dataset_name is not None:
262
+ # Downloading and loading a dataset from the hub.
263
+ datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, keep_in_memory=False, cache_dir=model_args.cache_dir)
264
+ if "validation" not in datasets.keys():
265
+ datasets["validation"] = load_dataset(
266
+ data_args.dataset_name,
267
+ data_args.dataset_config_name,
268
+ split=f"train[:{data_args.validation_split_percentage}%]",
269
+ keep_in_memory=False,
270
+ cache_dir=model_args.cache_dir
271
+ )
272
+ datasets["train"] = load_dataset(
273
+ data_args.dataset_name,
274
+ data_args.dataset_config_name,
275
+ split=f"train[{data_args.validation_split_percentage}%:]",
276
+ keep_in_memory=False,
277
+ cache_dir=model_args.cache_dir
278
+ )
279
+ else:
280
+ data_files = {}
281
+ if data_args.train_file is not None:
282
+ data_files["train"] = data_args.train_file
283
+ if data_args.validation_file is not None:
284
+ data_files["validation"] = data_args.validation_file
285
+ extension = (
286
+ data_args.train_file.split(".")[-1]
287
+ if data_args.train_file is not None
288
+ else data_args.validation_file.split(".")[-1]
289
+ )
290
+ if extension == "txt":
291
+ extension = "text"
292
+ datasets = load_dataset(extension, data_files=data_files, keep_in_memory=False, cache_dir=model_args.cache_dir)
293
+ if data_args.sanity:
294
+ datasets["train"] = datasets["train"].shard(100, index=0, contiguous=True)
295
+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
296
+ # https://huggingface.co/docs/datasets/loading_datasets.html.
297
+
298
+ # Load pretrained model and tokenizer
299
+ #
300
+ # Distributed training:
301
+ # The .from_pretrained methods guarantee that only one local process can concurrently
302
+ # download model & vocab.
303
+
304
+ config_kwargs = {
305
+ "cache_dir": model_args.cache_dir,
306
+ "revision": model_args.model_revision,
307
+ "use_auth_token": True if model_args.use_auth_token else None,
308
+ }
309
+ if model_args.config_name:
310
+ config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
311
+ elif model_args.model_name_or_path:
312
+ config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
313
+ else:
314
+ config = CONFIG_MAPPING[model_args.model_type](**vars(config_args), **config_kwargs)
315
+ logger.warning("You are instantiating a new config instance from scratch.")
316
+
317
+ tokenizer_kwargs = {
318
+ "cache_dir": model_args.cache_dir,
319
+ "use_fast": model_args.use_fast_tokenizer,
320
+ "revision": model_args.model_revision,
321
+ "use_auth_token": True if model_args.use_auth_token else None,
322
+ }
323
+ if model_args.tokenizer_name:
324
+ tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
325
+ elif model_args.model_name_or_path:
326
+ tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
327
+ else:
328
+ raise ValueError(
329
+ "You are instantiating a new tokenizer from scratch. This is not supported by this script."
330
+ "You can do it from another script, save it, and load it from here, using --tokenizer_name."
331
+ )
332
+
333
+ if model_args.model_name_or_path:
334
+ model = AutoModelForSeq2SeqLM.from_pretrained(
335
+ model_args.model_name_or_path,
336
+ from_tf=bool(".ckpt" in model_args.model_name_or_path),
337
+ config=config,
338
+ cache_dir=model_args.cache_dir,
339
+ revision=model_args.model_revision,
340
+ use_auth_token=True if model_args.use_auth_token else None,
341
+ )
342
+ else:
343
+ logger.info("Training new model from scratch")
344
+ model = AutoModelForSeq2SeqLM.from_config(config)
345
+
346
+ model.resize_token_embeddings(len(tokenizer))
347
+
348
+ # Preprocessing the datasets.
349
+ # First we tokenize all the texts.
350
+ if training_args.do_train:
351
+ column_names = datasets["train"].column_names
352
+ else:
353
+ column_names = datasets["validation"].column_names
354
+ text_column_name = "text" if "text" in column_names else column_names[0]
355
+
356
+ # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
357
+ tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
358
+
359
+ datasets = datasets.shuffle()
360
+ def tokenize_function(examples):
361
+ with CaptureLogger(tok_logger) as cl:
362
+ output = tokenizer(examples[text_column_name])
363
+ # clm input could be much much longer than block_size
364
+ if "Token indices sequence length is longer than the" in cl.out:
365
+ tok_logger.warning(
366
+ "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
367
+ )
368
+ return output
369
+
370
+ # Ensures only the main process does dataset pre-processing; the other ones will load the `map` cache
371
+ if not is_main_process(training_args.local_rank):
372
+ print("waiting for main process to execute mapping")
373
+ torch.distributed.barrier()
374
+
375
+ logger.info("Mapping dataset to tokenized dataset.",)
376
+ tokenized_datasets = datasets.map(
377
+ tokenize_function,
378
+ batched=True,
379
+ num_proc=data_args.preprocessing_num_workers,
380
+ remove_columns=column_names,
381
+ load_from_cache_file=not data_args.overwrite_cache,
382
+ keep_in_memory=False
383
+ )
384
+
385
+ if data_args.block_size is None:
386
+ block_size = tokenizer.model_max_length
387
+ if block_size > 1024:
388
+ logger.warning(
389
+ f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
390
+ "Picking 1024 instead. You can change that default value by passing --block_size xxx."
391
+ )
392
+ block_size = 1024
393
+ else:
394
+ if data_args.block_size > tokenizer.model_max_length:
395
+ logger.warning(
396
+ f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
397
+ f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
398
+ )
399
+ block_size = min(data_args.block_size, tokenizer.model_max_length)
400
+
401
+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
402
+ def group_texts(examples):
403
+ # Concatenate all texts.
404
+ concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
405
+ total_length = len(concatenated_examples[list(examples.keys())[0]])
406
+ # We drop the small remainder; we could add padding instead if the model supported it. You can
407
+ # customize this part to your needs.
408
+ total_length = (total_length // (2 * block_size)) * 2 * block_size
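+ # Each window of 2*block_size tokens is split in two: the first block_size tokens become the model
+ # inputs and the following block_size tokens become the labels (the target continuation).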
409
+ # Split by chunks of max_len.
410
+ result = {
411
+ k: [t[i : i + block_size] for i in range(0, total_length, 2*block_size)]
412
+ for k, t in concatenated_examples.items()
413
+ }
414
+ result["labels"] = [
415
+ concatenated_examples['input_ids'][i : i + block_size]
416
+ for i in range(block_size, total_length, 2*block_size)
417
+ ]
418
+ return result
419
+
420
+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
421
+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
422
+ # to preprocess.
423
+ #
424
+ # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
425
+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
426
+
427
+ logger.info("Chunking tokenized dataset.")
428
+ lm_datasets = tokenized_datasets.map(
429
+ group_texts,
430
+ batched=True,
431
+ num_proc=data_args.preprocessing_num_workers,
432
+ load_from_cache_file=not data_args.overwrite_cache,
433
+ keep_in_memory=False
434
+ )
435
+
436
+ # Now the other ones can catch up.
437
+ if training_args.local_rank != -1 and is_main_process(training_args.local_rank):
438
+ print("loading results from main process")
439
+ torch.distributed.barrier()
440
+
441
+ if training_args.do_train:
442
+ if "train" not in tokenized_datasets:
443
+ raise ValueError("--do_train requires a train dataset")
444
+ train_dataset = lm_datasets["train"]
445
+ if data_args.max_train_samples is not None:
446
+ train_dataset = train_dataset.select(range(data_args.max_train_samples))
447
+
448
+ if training_args.do_eval:
449
+ if "validation" not in tokenized_datasets:
450
+ cutoff = data_args.validation_split_percentage * len(lm_datasets["train"]) // 100
451
+ train_dataset = lm_datasets["train"].select(range(cutoff, len(lm_datasets["train"])))
452
+ eval_dataset = lm_datasets["train"].select(range(cutoff))
453
+ else:
454
+ eval_dataset = lm_datasets["validation"]
455
+ if data_args.max_val_samples is not None:
456
+ eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
457
+
458
+
459
+ # Initialize our Trainer
460
+ trainer = Trainer(
461
+ model=model,
462
+ args=training_args,
463
+ train_dataset=train_dataset if training_args.do_train else None,
464
+ eval_dataset=eval_dataset if training_args.do_eval else None,
465
+ tokenizer=tokenizer,
466
+ # Data collator will default to DataCollatorWithPadding, so we change it.
467
+ data_collator=default_data_collator,
468
+ callbacks=[LogFlosCallback, TensorBoardFloIndexedCallback]
469
+ )
470
+
471
+ # Training
472
+ if training_args.do_train:
473
+ checkpoint = None
474
+ if training_args.resume_from_checkpoint is not None:
475
+ checkpoint = training_args.resume_from_checkpoint
476
+ elif last_checkpoint is not None:
477
+ checkpoint = last_checkpoint
478
+
479
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
480
+ trainer.save_model() # Saves the tokenizer too for easy upload
481
+
482
+ metrics = train_result.metrics
483
+
484
+ max_train_samples = (
485
+ data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
486
+ )
487
+ metrics["train_samples"] = min(max_train_samples, len(train_dataset))
488
+
489
+ trainer.log_metrics("train", metrics)
490
+ trainer.save_metrics("train", metrics)
491
+ trainer.save_state()
492
+
493
+ # Evaluation
494
+ if training_args.do_eval:
495
+ logger.info("*** Evaluate ***")
496
+
497
+ metrics = trainer.evaluate()
498
+
499
+ max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
500
+ metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
501
+ perplexity = math.exp(metrics["eval_loss"])
502
+ metrics["perplexity"] = perplexity
503
+
504
+ trainer.log_metrics("eval", metrics)
505
+ trainer.save_metrics("eval", metrics)
506
+
507
+
508
+ def _mp_fn(index):
509
+ # For xla_spawn (TPUs)
510
+ main()
511
+
512
+
513
+ if __name__ == "__main__":
514
+ main()
bigscience/jz/slurm/README.md ADDED
@@ -0,0 +1,861 @@
1
+ # SLURM How To
2
+
3
+
4
+ ## Partitions
5
+
6
+ All types of nodes have 40 CPU cores per node, unless specified differently.
7
+
8
+ GPU-nodes: `--account=six@gpu`
9
+
10
+ - `-p gpu_p1`: 4x v100-32GB
11
+ - `-p gpu_p2`: 8x v100-32GB
12
+ - `-p gpu_p3`: 4x v100-16GB
13
+ - `-p gpu_p4`: 8x A100-40GB / 48 CPU cores (only 3 nodes)
14
+ - `-p prepost`: 1x V100-16GB + network
15
+
16
+ Combos:
17
+
18
+ - `-p gpu_p13` - all 4x nodes combined - i.e. when either 16GB or 32GB will do
19
+
20
+ CPU-only nodes: `--account=six@cpu`
21
+
22
+ - `-p cpu_p1`: up to 100h: this is the default partition for `--account=six@cpu`.
23
+ The default limit is only 20h; add `--qos=qos_cpu-t4` to use up to 100h (only available if no more than 4 nodes are used).
24
+
25
+ **Important: having `#SBATCH --gres=gpu:0` in a slurm file forces a GPU allocation anyway, ignoring the account specification, so remove such lines.**
26
+
27
+ Time spent on the following CPU-only partitions isn't deducted from our allocation:
28
+
29
+ - `-p prepost`: up to 20h - for pre/post-processing + has internet!
30
+ - `-p visu`: up to 4h - for visualization
31
+ - `-p archive`: up to 20h - for archiving
32
+ - `-p compil`: up to 20h - for compilation + has internet!
33
+
34
+
35
+ **Constraints**:
36
+
37
+ - `-C v100-16g` # to select nodes having v100 GPUs with 16 GB of memory (same as `-p gpu_p3`)
38
+ - `-C v100-32g` # to select nodes having v100 GPUs with 32 GB of memory (same as `-p gpu_p1`)
39
+
40
+ If your job can run on both types of GPUs, we recommend not specifying any constraint, as it will reduce the waiting time before resources become available for execution.
41
+
42
+ Special reservation constraint - if a special reservation is made, e.g., `huggingface1`, activate it with: `--reservation=huggingface1`.
43
+
44
+ **Long running jobs**:
45
+
46
+ Normal GPU jobs can run for at most `--time=20:00:00`; for longer jobs (up to 100h) use `--qos=qos_gpu-t4`, which is limited to 16 GPUs.
47
+
48
+ Note: the given node could already be heavily used by other users.
49
+
50
+ Normal CPU jobs can run for at most `--time=100:00:00` (only on `-p cpu_p1`; other partitions are limited to 20h).
51
+
52
+ Full details per partition type:
53
+
54
+ - CPU: http://www.idris.fr/eng/jean-zay/cpu/jean-zay-cpu-exec_partition_slurm-eng.html and
55
+ http://www.idris.fr/eng/jean-zay/cpu/jean-zay-cpu-exec_alloc-mem-eng.html
56
+ - GPU: http://www.idris.fr/eng/jean-zay/gpu/jean-zay-gpu-exec_partition_slurm-eng.html
57
+
58
+
59
+ To see all available partitions and their total/idle status:
60
+
61
+ ```
62
+ sinfo
63
+ ```
64
+
65
+ ## Priorities
66
+
67
+ - `--qos=qos_gpu-t3` 20h / 512gpus (default priority)
68
+ - `--qos=qos_gpu-t4` 100h / 16gpus - long running slow jobs - e.g. preprocessing
69
+ - `--qos=qos_gpu-dev` 2h / 32gpus - this is for getting an allocation much faster - for dev work!
70
+
71
+
72
+ Full info: http://www.idris.fr/eng/jean-zay/gpu/jean-zay-gpu-exec_partition_slurm-eng.html
73
+
74
+
75
+ **Important**: when running non-primary training jobs please use `--nice=10000` in the slurm instructions to allow the main job to get the highest priority. But only if you're using `-C v100-32g` (`-p gpu_p1`); for other types of nodes there is no need to.
76
+
77
+ Detailed explanation: using `--nice=10000` for the test jobs should work fine as long as they use the same QoS as the production jobs (`qos_gpu-t3`; with `qos_gpu-dev` the test jobs would always have a higher priority). The nice value is chosen so that it always cancels the age factor. Since the fairshare is common to all your jobs, this should be enough to ensure that jobs with `--nice=10000` always have a lower priority than your other jobs with the same QoS. And since the age factor is only about 3% of the priority, it shouldn't hurt the priority much compared to other users.
78
+
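+ As a sketch, a non-primary (test) job on the 32GB nodes could therefore combine the flags discussed above like this (illustrative values, adjust to your job):
+
+ ```
+ #SBATCH --account=six@gpu
+ #SBATCH -C v100-32g
+ #SBATCH --qos=qos_gpu-t3
+ #SBATCH --nice=10000
+ ```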
79
+
80
+ **How the job priority is computed**
81
+
82
+ Currently on Jean Zay:
83
+
84
+ 1. 69.4% of the priority depends directly on the chosen QoS
85
+ 2. 27.8% is the "fairshare" (see `idr_compuse` for the value)
86
+ 3. and only 2.8% is the job age in queue
87
+
88
+
89
+
90
+ ## Consumption report
91
+
92
+
93
+ Run:
94
+ ```
95
+ idr_compuse
96
+ ```
97
+
98
+ This provides a report on how heavily we use our allocations. When they are over-consumed we get a lower priority in the scheduler.
99
+
100
+
101
+ ## Wait time for resource granting
102
+
103
+ ```
104
+ squeue -u `whoami` --start
105
+ ```
106
+ will show when any pending jobs are scheduled to start.
107
+
108
+ They may start sooner if others cancel their reservations before the end of the reservation.
109
+
110
+
111
+
112
+ ## Request allocation via dependency
113
+
114
+ To schedule a new job when one of the currently scheduled jobs ends (regardless of whether it is still running or hasn't started yet), use the dependency mechanism, by telling `sbatch` to start the new job once the currently running job succeeds, using:
115
+
116
+ ```
117
+ sbatch --dependency=CURRENTLY_RUNNING_JOB_ID tr1-13B-round1.slurm
118
+ ```
119
+
120
+ Using `--dependency` may lead to shorter wait times than using `--begin`, since if the time passed to `--begin` allows for even a few minutes of delay after the last job stops, the scheduler may already start some other jobs, even if their priority is lower than our job's. That's because the scheduler ignores any jobs with `--begin` until the specified time arrives.
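+
+ A minimal sketch of chaining several runs this way (using the same script name as above; `--parsable` makes `sbatch` print just the job id so it can be captured):
+
+ ```
+ JOBID=$(sbatch --parsable tr1-13B-round1.slurm)
+ # the next run starts only once the previous one has ended (whatever its exit status)
+ sbatch --dependency=afterany:$JOBID tr1-13B-round1.slurm
+ ```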
121
+
122
+
123
+ ## Make allocations at a scheduled time
124
+
125
+ To postpone making the allocation for a given time, use:
126
+ ```
127
+ salloc --begin HH:MM MM/DD/YY
128
+ ```
129
+
130
+ Same for `sbatch`.
131
+
132
+ It will simply put the job into the queue at the requested time, as if you were to execute this command at this time. If resources are available at that time, the allocation will be given right away. Otherwise it'll be queued up.
133
+
134
+ Sometimes the relative begin time is useful. And other formats can be used. Examples:
135
+
136
+ ```
137
+ --begin now+2hours
138
+ --begin=16:00
139
+ --begin=now+1hour
140
+ --begin=now+60 # seconds by default
141
+ --begin=2010-01-20T12:34:00
142
+ ```
143
+
144
+ The time units can be `seconds` (default), `minutes`, `hours`, `days`, or `weeks`.
145
+
146
+ ## Preallocated node without the 60min time limit
147
+
148
+ This is very useful for running repetitive interactive experiments, so that one doesn't need to wait for a new allocation each time. The strategy is to allocate the resources once for an extended period of time and then run interactive `srun` jobs using this allocation.
149
+
150
+ set `--time` to the desired window (e.g. 6h):
151
+ ```
152
+ salloc --account=six@gpu --nodes=1 --ntasks-per-node=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash
153
+ salloc: Pending job allocation 1732778
154
+ salloc: job 1732778 queued and waiting for resources
155
+ salloc: job 1732778 has been allocated resources
156
+ salloc: Granted job allocation 1732778
157
+ ```
158
+ now use this reserved node to run a job multiple times, by passing the job id of `salloc`:
159
+ ```
160
+ srun --jobid $SLURM_JOBID --pty bash --rcfile $six_ALL_CCFRWORK/start-prod
161
+ ```
162
+ if run from inside the `bash` shell started via `salloc`. It can also be started from another shell, but then `--jobid` has to be set explicitly.
163
+
164
+ if this `srun` job times out or is exited manually, you can re-start it again on this same reserved node.
165
+
166
+ `srun` can, of course, call the real training command directly and not just `bash`.
167
+
168
+ Important: when allocating a single node, the allocated shell is not on the node (it never is). You have to find out the hostname of the node (it's reported when the allocation is granted, or via `squeue`) and `ssh` to it.
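+
+ For example, a quick way to look up the allocated node and jump onto it (a sketch - the job id and hostname are just placeholders):
+
+ ```
+ squeue -j 1732778 -o "%N"   # prints the allocated node(s), e.g. r10i6n5
+ ssh r10i6n5
+ ```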
169
+
170
+ When finished, to release the resources, either exit the shell started in `salloc` or `scancel JOBID`.
171
+
172
+ This reserved node will be counted towards hours usage the whole time it's allocated, so release as soon as done with it.
173
+
174
+ To get just a CPU-only instance:
175
+
176
+ ```
177
+ salloc --account=six@cpu --nodes=1 --ntasks=1 --cpus-per-task=10 --hint=nomultithread --time=6:00:00 bash
178
+ ```
179
+ edit `--cpus-per-task` if more cpu cores are needed.
180
+
181
+ Actually, if this is just one node, then it's even easier to not use `salloc` but to use `srun` in the first place, which will both allocate and give you the shell to use:
182
+ ```
183
+ srun --account=six@gpu --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
184
+ ```
185
+
186
+ And to use a cpu-only node:
187
+ ```
188
+ srun --account=six@cpu --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
189
+ ```
190
+ The `--rcfile` part is optional - use it if you want to pre-run something.
191
+
192
+
193
+ With A100s, it's:
194
+
195
+ w/o gpus:
196
+ ```
197
+ srun --pty --partition=gpu_p5 --constraint=a100 --nodes=1 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:0 --time=6:00:00 --account=six@a100 bash --rcfile $six_ALL_CCFRWORK/start-prod
198
+ ```
199
+ w/ gpus:
200
+ ```
201
+ srun --pty --partition=gpu_p5 --constraint=a100 --nodes=1 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time=6:00:00 --account=six@a100 bash --rcfile $six_ALL_CCFRWORK/start-prod
202
+ ```
203
+
204
+
205
+ ## Re-use allocation
206
+
207
+ e.g. when wanting to run various jobs on an identical node allocation.
208
+
209
+ In one shell:
210
+ ```
211
+ salloc --account=six@gpu --constraint=v100-32g --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=3:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
212
+ echo $SLURM_JOBID
213
+ ```
214
+
215
+ In another shell:
216
+ ```
217
+ export SLURM_JOBID=<JOB ID FROM ABOVE>
218
+ srun --jobid=$SLURM_JOBID ...
219
+ ```
220
+
221
+ You may need to set `--gres=gpu:0` to run some diagnostics job on the nodes. For example, let's check shared memory of all the hosts:
222
+ ```
223
+ srun --jobid 631078 --gres=gpu:0 bash -c 'echo $(hostname) $(df -h | grep shm)'
224
+ ```
225
+
226
+ ## Signal the running jobs to finish
227
+
228
+ Since each SLURM run has a limited time span, it can be configured to send a signal of choice to the program a desired amount of time before the end of the allocated time.
229
+ ```
230
+ --signal=[[R][B]:]<sig_num>[@<sig_time>]
231
+ ```
232
+ TODO: need to experiment with this to help training finish gracefully and not start a new cycle after saving the last checkpoint.
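+
+ A possible sketch of how this could be wired up (untested here; the signal choice, timing, script name and stop mechanism are hypothetical): ask SLURM to send `SIGUSR1` to the batch shell 10 minutes before the time limit and trap it to request a graceful stop:
+
+ ```
+ #SBATCH --signal=B:USR1@600    # send SIGUSR1 to the batch shell 600s before the end
+
+ trap 'echo "time limit approaching"; touch stop-training' USR1
+
+ srun python train.py &         # run in the background so the trap can fire
+ wait
+ ```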
233
+
234
+
235
+
236
+ ## Detailed job info
237
+
238
+ While most useful information is present in various `SLURM_*` env vars, sometimes the info is missing. In such cases use:
239
+ ```
240
+ scontrol show -d job $SLURM_JOB_ID
241
+ ```
242
+ and then parse out what's needed.
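+
+ For example, to pull out a single field (the field picked here is just an illustration):
+
+ ```
+ scontrol show -d job $SLURM_JOB_ID | grep -oP 'NumNodes=\K\S+'
+ ```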
243
+
244
+
245
+ For a job that finished its run use:
246
+ ```
247
+ sacct -j JOBID
248
+ ```
249
+
250
+ e.g. with more details, depending on the partition:
251
+ ```
252
+ sacct -u `whoami` -A six@a100 -ojobid,start,end,state,exitcode --format nodelist%300 -j JOBID
253
+ sacct -u `whoami` -A six@gpu -ojobid,start,end,state,exitcode --format nodelist%300 -j JOBID
254
+ ```
255
+
256
+
257
+
258
+ ## Show my jobs
259
+
260
+ ```
261
+ squeue -u `whoami`
262
+ ```
263
+
264
+
265
+ by job id:
266
+ ```
267
+ squeue -j JOBID
268
+ ```
269
+
270
+ group's jobs (this probably won't include the non-account partitions - including all of the group's users, as shown further below, is probably better):
271
+
272
+ ```
273
+ squeue --account=six@gpu,six@cpu
274
+ ```
275
+
276
+ group's jobs including all `six`'s users:
277
+
278
+ ```
279
+ squeue --user=$(getent group six | cut -d: -f4)
280
+
281
+ ```
282
+
283
+ ## Aliases
284
+
285
+ Handy aliases
286
+
287
+ ```
288
+ alias myjobs="squeue -u `whoami`"
289
+ alias groupjobs="squeue --user=$(getent group six | cut -d: -f4)"
290
+ alias myjobs-pending="squeue -u `whoami` --start"
291
+ alias idle-nodes="sinfo -p gpu_p13 -o '%A'"
292
+ ```
293
+
294
+ more informative all-in-one versions that include the projected start time for pending jobs and the requested time limit:
295
+
296
+ ```
297
+ alias myjobs='squeue -u `whoami` -o "%.16i %.9P %.26j %.8T %.10M %.8l %.6D %.20S %R"'
298
+ alias groupjobs='squeue -u $(getent group six | cut -d: -f4) -o "%.16i %u %.9P %.26j %.8T %.10M %.8l %.6D %.20S %R"'
299
+ ```
300
+
301
+
302
+
303
+ ## Zombies
304
+
305
+ If there are any zombies left behind across nodes, send one command to kill them all.
306
+
307
+ ```
308
+ srun pkill python
309
+ ```
310
+
311
+ ## Detailed Access to SLURM Accounting
312
+
313
+ `sacct` displays accounting data for all jobs and job steps in the Slurm job accounting log or Slurm database.
314
+
315
+ So this is a great tool for analysing past events.
316
+
317
+ For example, to see which nodes were used to run recent gpu jobs:
318
+
319
+ ```
320
+ sacct -u `whoami` -A six@gpu -ojobid,start,end,state,exitcode --format nodelist%300
321
+ ```
322
+
323
+ `%300` here tells it to use a 300 char width for the output, so that it's not truncated.
324
+
325
+ See `man sacct` for more fields and details about them.
326
+
327
+
328
+
329
+ ## Queue
330
+
331
+
332
+ ### Cancel job
333
+
334
+ To cancel a job:
335
+ ```
336
+ scancel [jobid]
337
+ ```
338
+
339
+ To cancel all of your jobs:
340
+ ```
341
+ scancel -u <userid>
342
+ ```
343
+
344
+ To cancel all of your jobs on a specific partition:
345
+ ```
346
+ scancel -u <userid> -p <partition>
347
+ ```
348
+
349
+ ### Tips
350
+
351
+ - if you see that the `salloc`'ed interactive job is scheduled to run much later than you need, try cancelling the job and asking for a shorter period - often there might be a closer window for a shorter time allocation.
352
+
353
+
354
+ ## Logging
355
+
356
+ If we need to separate logs into different log files per node, add `%N` (the short hostname) so that we have:
357
+
358
+ ```
359
+ #SBATCH --output=%x-%j-%N.out
360
+ ```
361
+
362
+ That way we can tell if a specific node misbehaves - e.g. has a corrupt GPU. This is because currently pytorch doesn't log which node / gpu rank triggered an exception.
363
+
364
+ Hoping it'll be a built-in feature of pytorch https://github.com/pytorch/pytorch/issues/63174 and then one won't need to make things complicated on the logging side.
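+
+ With per-node log files in place, locating the offending node is then a simple grep away (the job name/id here are made up):
+
+ ```
+ grep -li "cuda error" tr1-13B-1234567-*.out   # lists only the log files that contain the error
+ ```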
365
+
366
+
367
+ ## Show the state of nodes
368
+ ```
369
+ sinfo -p PARTITION
370
+ ```
371
+
372
+ A very useful command is:
373
+ ```
374
+ sinfo -s
375
+ ```
376
+
377
+ and look for the main stat, e.g.:
378
+
379
+ ```
380
+ NODES(A/I/O/T) "allocated/idle/other/total".
381
+ 597/0/15/612
382
+ ```
383
+ So here 597 out of 612 nodes are allocated, 0 are idle and 15 are unavailable for other reasons.
384
+
385
+ ```
386
+ sinfo -p gpu_p1 -o "%A"
387
+ ```
388
+
389
+ gives:
390
+ ```
391
+ NODES(A/I)
392
+ 236/24
393
+ ```
394
+
395
+ so you can see if any nodes are available on the 4x v100-32g partition (`gpu_p1`)
396
+
397
+ To check each specific partition:
398
+
399
+ ```
400
+ sinfo -p gpu_p1 -o "%A"
401
+ sinfo -p gpu_p2 -o "%A"
402
+ sinfo -p gpu_p3 -o "%A"
403
+ sinfo -p gpu_p13 -o "%A"
404
+ ```
405
+
406
+ See the table at the top of this document for which partition is which.
407
+
408
+
409
+ ## Job arrays
410
+
411
+
412
+ To run a sequence of jobs, so that the next slurm job is scheduled as soon as the currently running one is over (after at most 20h), we use a job array.
413
+
414
+ Let's start with just 10 such jobs:
415
+
416
+ ```
417
+ sbatch --array=1-10%1 array-test.slurm
418
+ ```
419
+
420
+ `%1` limits the number of simultaneously running tasks from this job array to 1. Without it, it will try to run all the jobs at once, which we may sometimes want (in which case remove `%1`), but when training we need one job at a time.
421
+
422
+ Alternatively, as always this param can be part of the script:
423
+ ```
424
+ #SBATCH --array=1-10%1
425
+ ```
426
+
427
+ Here is a toy slurm script, which can be used to see how it works:
428
+
429
+ ```
430
+ #!/bin/bash
431
+ #SBATCH --job-name=array-test
432
+ #SBATCH --nodes=1
433
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
434
+ #SBATCH --cpus-per-task=1 # number of cores per tasks
435
+ #SBATCH --hint=nomultithread # we get physical cores not logical
436
+ #SBATCH --time 00:02:00 # maximum execution time (HH:MM:SS)
437
+ #SBATCH --output=%x-%j.out # output file name
438
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
439
+ #SBATCH --account=six@cpu
440
+ #SBATCH -p prepost
441
+
442
+ echo $SLURM_JOB_ID
443
+ echo "I am job ${SLURM_ARRAY_JOB_ID}_${SLURM_ARRAY_TASK_ID}"
444
+ date
445
+ sleep 10
446
+ date
447
+ ```
448
+
449
+ Note: `$SLURM_ARRAY_JOB_ID` identifies the whole array (it matches the `$SLURM_JOB_ID` of the first submitted task, while each task also gets its own `$SLURM_JOB_ID`), and `$SLURM_ARRAY_TASK_ID` is the index of the job within the array.
450
+
451
+ To see the jobs running:
452
+ ```
453
+ $ squeue -u `whoami` -o "%.10i %.9P %.26j %.8T %.10M %.6D %.20S %R"
454
+ JOBID PARTITION NAME STATE TIME NODES START_TIME NODELIST(REASON)
455
+ 591970_[2- prepost array-test PENDING 0:00 1 2021-07-28T20:01:06 (JobArrayTaskLimit)
456
+ ```
457
+ now job 2 is running.
458
+
459
+ To cancel the whole array, cancel the job id as normal (the number before `_`):
460
+ ```
461
+ scancel 591970
462
+ ```
463
+
464
+ To cancel a specific job:
465
+ ```
466
+ scancel 591970_2
467
+ ```
468
+
469
+ If it's important to have the log-file contain the array id, add `%A_%a`:
470
+
471
+ ```
472
+ #SBATCH --output=%x-%j.%A_%a.log
473
+ ```
474
+
475
+ More details https://slurm.schedmd.com/job_array.html
476
+
477
+
478
+ ## Job Array Trains and their Suspend and Release
479
+
480
+ In this recipe we accomplish 2 things:
481
+
482
+ 1. Allow modification to the next job's slurm script
483
+ 2. Allow suspending and resuming job arrays w/o losing the place in the queue when not yet ready to continue running a job
484
+
485
+ SLURM is a very unforgiving environment where a small mistake can cost days of waiting time. But there are strategies to mitigate some of this harshness.
486
+
487
+ SLURM jobs have a concept of "age" in the queue which, besides project priority, governs when a job gets scheduled to run. If you have just scheduled a new job it has no "age" and will normally be put to run last compared to jobs that have entered the queue earlier. Unless of course this new job comes from a high priority project, in which case it'll progress faster.
488
+
489
+ So here is how one can keep the "age" and not lose it when needing to fix something in the running script or for example to switch over to another script.
490
+
491
+ The idea is this:
492
+
493
+ 1. `sbatch` a long job array, e.g., `--array=1-50%1`
494
+ 2. inside the slurm script don't have any code other than `source another-script.slurm` - so now you can modify the target script or symlink to another script before the next job starts
495
+ 3. if you need to stop the job array train - don't cancel it, but suspend it without losing your place in the queue
496
+ 4. when ready to continue - release (unsuspend) the job array - the time while it was suspended is not counted towards its age, but all the previously accumulated age is retained.
497
+
498
+ The only limitation of this recipe is that you can't change the number of nodes, the time, or the hardware and partition constraints once the job array has been launched.
499
+
500
+ Here is an example:
501
+
502
+ Create a job script:
503
+
504
+ ```
505
+ $ cat train-64n.slurm
506
+ #!/bin/bash
507
+ #SBATCH --job-name=tr8-104B
508
+ #SBATCH --constraint=v100-32g
509
+ #SBATCH --nodes=64
510
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
511
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
512
+ #SBATCH --hint=nomultithread # we get physical cores not logical
513
+ #SBATCH --gres=gpu:4 # number of gpus
514
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
515
+ #SBATCH --output=%x-%j.out # output file name
516
+ #SBATCH --account=six@gpu
517
+
518
+ source tr8-104B-64.slurm
519
+ ```
520
+ Start it as:
521
+ ```
522
+ sbatch --array=1-50%1 train-64n.slurm
523
+ ```
524
+
525
+ Now you can easily edit `tr8-104B-64.slurm` before the next job runs and either let the current job finish if desired, or, if you need to abort it, just kill the currently running job, e.g. `1557903_5` (not the whole job array `1557903`), and have the train pick up where it left off, but with the edited script.
526
+
527
+ The nice thing is that this requires no changes to the original script (`tr8-104B-64.slurm` in this example), and the latter can still be started on its own.
528
+
529
+ Now, what if something is wrong and you need 10min or 10h to fix it? In this case we suspend the train using:
530
+
531
+ ```
532
+ scontrol hold <jobid>
533
+ ```
534
+
535
+ with `<jobid>` being either a "normal" job id, the id of a job array, or the id of a job array step,
536
+
537
+ and then when ready to continue release the job:
538
+
539
+ ```
540
+ scontrol release <jobid>
541
+ ```
542
+
543
+
544
+ ## Troubleshooting
545
+
546
+
547
+ ### Kill Switch
548
+
549
+ Since SLURM doesn't allow one user to kill another user's SLURM job or cancel a job array, we need a way to be able to have the program abort itself quickly in situations where one user started a job and has gone away and the group needs to restart it. For example, this is needed when a model gets started by someone in North America, and while they are asleep, someone in Europe may need to handle a problem with the training and can't wait for the submitter of the job to wake up.
550
+
551
+ So we had a kill-switch feature implemented in Megatron-Deepspeed. When a file gets created at a pre-determined location, the software will stop its run. Instead of trying to implement a complex thread that would run on only one of the dozens of nodes, we simply added a check in 2 strategic locations:
552
+
553
+ 1. startup - to deal with job arrays
554
+ 2. before each iteration of the train loop - to deal with the current run
555
+
556
+ Since multiple jobs use the same Megatron-Deepspeed repo clone, this kill switch can't be hardcoded, and thus each job needs to "arm" the kill switch and must use a unique path so that other instances won't get killed unintentionally.
557
+
558
+ To arm:
559
+
560
+ ```
561
+ python pretrain_gpt.py ... --kill-switch-path /tmp/kill-switch-tr11-200B-exp1
562
+ ```
563
+
564
+ To trigger:
565
+ ```
566
+ touch /tmp/kill-switch-tr11-200B-exp1
567
+ ```
568
+
569
+ To deactivate and let new instances of a job run normally:
570
+
571
+ ```
572
+ rm /tmp/kill-switch-tr11-200B-exp1
573
+ ```
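+
+ One simple way to keep the path unique per training is to derive it from the job name - a sketch (the directory and naming scheme here are hypothetical, not necessarily what was actually used):
+
+ ```
+ KILL_SWITCH_PATH=$six_ALL_CCFRWORK/kill-switches/kill-switch-$SLURM_JOB_NAME
+ python pretrain_gpt.py ... --kill-switch-path $KILL_SWITCH_PATH
+ ```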
574
+
575
+ ### Mismatching number of nodes
576
+
577
+ If the pytorch launcher fails it often means that the number of SLURM nodes and the launcher nodes are mismatching, e.g.:
578
+
579
+ ```
580
+ grep -ir nodes= tr123-test.slurm
581
+ #SBATCH --nodes=40
582
+ NNODES=64
583
+ ```
584
+
585
+ This won't work. They have to match.
586
+
587
+ You can add a sanity check to your script:
588
+
589
+ ```
590
+ #!/bin/bash
591
+ #SBATCH --job-name=test-mismatch
592
+ #SBATCH --constraint=v100-16g
593
+ #SBATCH --nodes=2
594
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
595
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
596
+ #SBATCH --hint=nomultithread # we get physical cores not logical
597
+ #SBATCH --gres=gpu:4 # number of gpus
598
+ #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
599
+ #SBATCH --output=%x-%j.out # output file name
600
+ #SBATCH --account=six@gpu
601
+
602
+ [...]
603
+
604
+ NNODES=2
605
+
606
+ # sanity check for having NNODES and `#SBATCH --nodes` match, assuming you use NNODES variable
607
+ if [ "$NNODES" != "$SLURM_NNODES" ]; then
608
+ echo "Misconfigured script: NNODES=$NNODES != SLURM_NNODES=$SLURM_NNODES"
609
+ exit 1
610
+ fi
611
+
612
+ [...]
613
+ ```
614
+
615
+ or you could just do:
616
+
617
+ ```bash
618
+ #SBATCH --nodes=2
619
+ [...]
620
+ NNODES=$SLURM_NNODES
621
+ ```
622
+
623
+ and then it will always be correct
624
+
625
+
626
+
627
+ ### Find faulty nodes and exclude them
628
+
629
+ Sometimes a node is broken, which prevents one from training, especially since restarting the job often hits the same set of nodes. So one needs to be able to isolate the bad node(s) and exclude them from `sbatch`.
630
+
631
+ To find a faulty node, write a small script that reports back the status of the desired check.
632
+
633
+ For example to test if cuda is available on all nodes:
634
+ ```
635
+ python -c 'import torch, socket; print(f"{socket.gethostname()}: {torch.cuda.is_available()}")'
636
+ ```
637
+
638
+ and to only report the nodes that fail:
639
+ ```
640
+ python -c 'import torch, socket; torch.cuda.is_available() or print(f"Broken node: {socket.gethostname()}") '
641
+ ```
642
+
643
+ Of course, the issue could be different - e.g. gpu can't allocate memory, so change the test script to do a small allocation on cuda. Here is one way:
644
+
645
+ ```
646
+ python -c "import torch; torch.ones(1000,1000).cuda()"
647
+ ```
648
+
649
+ But since we need to run the test script on all nodes and not just the first node, the slurm script needs to run it via `srun`. So our first diagnostics script can be written as:
650
+
651
+ ```
652
+ srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
653
+ ```
654
+
655
+ I slightly changed it, due to an issue with quotes.
656
+
657
+ You can always convert the one liner into a real script and then there is no issue with quotes.
658
+
659
+ ```
660
+ $ cat << EOT >> test-nodes.py
661
+ #!/usr/bin/env python
662
+ import torch, socket
663
+ print(socket.gethostname(), torch.cuda.is_available())
664
+ EOT
665
+ $ chmod a+x ./test-nodes.py
666
+ ```
667
+
668
+ Now let's create a driver slurm script. Ask for just a few minutes for this test so that SLURM schedules it faster:
669
+ ```
670
+ #!/bin/bash
671
+ #SBATCH --job-name=test-nodes
672
+ #SBATCH --partition=gpu_p13
673
+ #SBATCH --nodes=4
674
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
675
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
676
+ #SBATCH --hint=nomultithread # we get physical cores not logical
677
+ #SBATCH --gres=gpu:4 # number of gpus
678
+ #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
679
+ #SBATCH --output=%x-%j.out # output file name
680
+ #SBATCH --account=six@gpu
681
+
682
+ source $six_ALL_CCFRWORK/start-prod
683
+ srun --jobid $SLURM_JOBID ./test-nodes.py
684
+ ```
685
+ Once it runs, check the logs to see if any reported `False` - those are the nodes you want to exclude.
686
+
687
+ Now once the faulty node(s) are found, feed them to `sbatch`:
688
+ ```
689
+ sbatch --exclude=hostname1,hostname2 ...
690
+ ```
691
+ and `sbatch` will exclude the bad nodes from the allocation.
692
+
693
+ Additionally please report the faulty nodes to `[email protected]` so that they reboot the machine.
694
+
695
+ Here are a few more situations and how to find the bad nodes in those cases:
696
+
697
+ ### Broken NCCL
698
+
699
+ If you're testing something that requires distributed setup, it's a bit more complex. Here is a slurm script that tests that NCCL works. It sets up NCCL and checks that barrier works:
700
+
701
+ ```
702
+ #!/bin/bash
703
+ #SBATCH --job-name=test-nodes-nccl
704
+ #SBATCH --partition=gpu_p13
705
+ #SBATCH --nodes=2
706
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
707
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
708
+ #SBATCH --hint=nomultithread # we get physical cores not logical
709
+ #SBATCH --gres=gpu:4 # number of gpus
710
+ #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
711
+ #SBATCH --output=%x-%j.out # output file name
712
+ #SBATCH --account=six@gpu
713
+
714
+ source $six_ALL_CCFRWORK/start-prod
715
+
716
+ NNODES=2
717
+
718
+ GPUS_PER_NODE=4
719
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
720
+ MASTER_PORT=6000
721
+
722
+ export LAUNCHER="python -u -m torch.distributed.launch \
723
+ --nproc_per_node $GPUS_PER_NODE \
724
+ --nnodes $NNODES \
725
+ --master_addr $MASTER_ADDR \
726
+ --master_port $MASTER_PORT \
727
+ "
728
+
729
+ export SCRIPT=test-nodes-nccl.py
730
+
731
+ cat << EOT > $SCRIPT
732
+ #!/usr/bin/env python
733
+ import torch.distributed as dist
734
+ import torch
735
+ import socket
736
+ import os
737
+ import fcntl
738
+
739
+ def printflock(*msgs):
740
+ """ print """
741
+ with open(__file__, "r") as fh:
742
+ fcntl.flock(fh, fcntl.LOCK_EX)
743
+ try:
744
+ print(*msgs)
745
+ finally:
746
+ fcntl.flock(fh, fcntl.LOCK_UN)
747
+
748
+ local_rank = int(os.environ["LOCAL_RANK"])
749
+ torch.cuda.set_device(local_rank)
750
+ dist.init_process_group("nccl")
751
+ header = f"{socket.gethostname()}-{local_rank}"
752
+ try:
753
+ dist.barrier()
754
+ printflock(f"{header}: NCCL {torch.cuda.nccl.version()} is OK")
755
+ except:
756
+ printflock(f"{header}: NCCL {torch.cuda.nccl.version()} is broken")
757
+ raise
758
+ EOT
759
+
760
+ echo $LAUNCHER --node_rank $SLURM_PROCID $SCRIPT
761
+
762
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $SCRIPT'
763
+ ```
764
+ The script uses `printflock` to solve the interleaved print outputs issue.
765
+
766
+
767
+ ### GPU Memory Check
768
+
769
+
770
+ This tests if each GPU on the allocated nodes can successfully allocate 77GB (e.g. to test 80GB A100s - one has to subtract a few GBs for cuda kernels).
771
+
772
+
773
+ ```python
774
+ import torch, os
775
+ import time
776
+ import socket
777
+ hostname = socket.gethostname()
778
+
779
+ local_rank = int(os.environ["LOCAL_RANK"])
780
+
781
+ gbs = 77
782
+ try:
783
+ torch.ones((gbs*2**28)).cuda(local_rank).contiguous() # alloc on cpu, then move to gpu
784
+ print(f"{local_rank} {hostname} is OK")
785
+ except:
786
+ print(f"{local_rank} {hostname} failed to allocate {gbs}GB DRAM")
787
+ pass
788
+
789
+ time.sleep(5)
790
+
791
+
792
+ ```
793
+
794
+
795
+ ### Broken Network
796
+
797
+ Yet another issue with a node is when its network is broken and other nodes fail to connect to it.
798
+
799
+ You're likely to experience it with an error similar to:
800
+ ```
801
+ work = default_pg.barrier(opts=opts)
802
+ RuntimeError: NCCL error in: /opt/conda/conda-bld/pytorch_1616554793803/work/torch/lib/c10d/ProcessGroupNCCL.cpp:825, unhandled system error, NCCL version 2.7.8
803
+ ncclSystemError: System call (socket, malloc, munmap, etc) failed.
804
+ ```
805
+ Here is how to debug this issue:
806
+
807
+ 1. Add:
808
+ ```
809
+ export NCCL_DEBUG=INFO
810
+ ```
811
+ before the `srun` command and re-run your slurm script.
812
+
813
+ 2. Now study the logs. If you find:
814
+ ```
815
+ r11i6n2:486514:486651 [1] include/socket.h:403 NCCL WARN Connect to 10.148.3.247<56821> failed : Connection refused
816
+ ```
817
+ Let's see which node refuses to accept connections. We get the IP address from the error above and reverse resolve it to its name:
818
+ ```
819
+ nslookup 10.148.3.247
820
+ 247.3.148.10.in-addr.arpa name = r10i6n5.ib0.xa.idris.fr.
821
+ ```
822
+
823
+ Add `--exclude=r10i6n5` to your `sbatch` command and report it to JZ admins.
824
+
825
+
826
+ ### Run py-spy or any other monitor program across all nodes
827
+
828
+ When dealing with hanging, here is how to automatically log `py-spy` traces for each process.
829
+
830
+ Of course, this same process can be used to run some command on all nodes of a given job, i.e. it can be used to run something during the normal run - e.g. dump the memory usage of each process via `nvidia-smi` or whatever other program needs to be run.
831
+
832
+
833
+
834
+ ```
835
+ cd ~/prod/code/tr8b-104B/bigscience/train/tr11-200B-ml/
836
+
837
+ salloc --partition=gpu_p5 --constraint=a100 --nodes=40 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100
838
+
839
+ bash 200B-n40-bf16-mono.slurm
840
+ ```
841
+
842
+ In another shell get the JOBID for the above `salloc`:
843
+ ```
844
+ squeue -u `whoami` -o "%.16i %.9P %.26j %.8T %.10M %.8l %.6D %.20S %R"
845
+ ```
846
+ adjust the jobid per the above and the node count (XXX: probably can remove `--nodes=40` altogether and rely on the `salloc` config):
847
+ ```
848
+ srun --jobid=2180718 --gres=gpu:0 --nodes=40 --tasks-per-node=1 --output=trace-%N.out sh -c 'ps aux | grep python | egrep -v "grep|srun" | grep `whoami` | awk "{print \$2}" | xargs -I {} py-spy dump --native --pid {}' || echo "failed"
849
+ ```
850
+ now all `py-spy` traces go into the `trace-$nodename.out` files under `cwd`.
851
+
852
+ The key is to use `--gres=gpu:0`, otherwise the 2nd `srun` will block waiting for the first one to release the gpus.
853
+
854
+ Also the assumption is that some conda env that has `py-spy` installed got activated in `~/.bashrc`. If yours doesn't already do that, add the instruction to load the env to the above command, before the `py-spy` command - it'll fail to find it otherwise.
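+
+ For example, a variant of the command above that loads the shared env first (a sketch, assuming `start-prod` provides `py-spy`):
+
+ ```
+ srun --jobid=2180718 --gres=gpu:0 --nodes=40 --tasks-per-node=1 --output=trace-%N.out \
+     sh -c 'source $six_ALL_CCFRWORK/start-prod; ps aux | grep python | egrep -v "grep|srun" | grep `whoami` | awk "{print \$2}" | xargs -I {} py-spy dump --native --pid {}'
+ ```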
855
+
856
+ Don't forget to manually release the allocation when this process is done.
857
+
858
+
859
+ ## TODO
860
+
861
+ absorb more goodies from here: https://ubccr.freshdesk.com/support/solutions/articles/5000686861-how-do-i-check-the-status-of-my-job-s-
bigscience/jz/slurm/hf-ds-gpt2-multi-node.slurm ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --job-name=hf_ds_gpt2_multi_node
3
+ #SBATCH --nodes=2
4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --gres=gpu:4 # number of gpus
8
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
9
+ #SBATCH --output=%x-%j.out # output file name
10
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
11
+ #SBATCH --account=six@gpu
12
+
13
+ GPUS_PER_NODE=4
14
+ NNODES=$SLURM_JOB_NUM_NODES
15
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
16
+
17
+ set -x -e
18
+
19
+ source $six_ALL_CCFRWORK/start-prod
20
+
21
+ cd $six_ALL_CCFRWORK/code/transformers
22
+ export PYTHONPATH=$six_ALL_CCFRWORK/code/transformers
23
+
24
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
25
+ MASTER_PORT=13370
26
+
27
+ export LAUNCHER=" \
28
+ python -u -m torch.distributed.launch \
29
+ --nproc_per_node $GPUS_PER_NODE \
30
+ --nnodes $NNODES \
31
+ --master_addr $MASTER_ADDR \
32
+ --master_port $MASTER_PORT \
33
+ "
34
+
35
+ MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m
36
+ DATASET="stas/openwebtext-10k"
37
+
38
+ export CMD=" \
39
+ `pwd`/examples/pytorch/language-modeling/run_clm.py \
40
+ --model_name_or_path $MODEL \
41
+ --dataset_name $DATASET \
42
+ --output_dir output_dir \
43
+ --overwrite_output_dir \
44
+ --do_train \
45
+ --do_eval \
46
+ --max_train_samples 1000 \
47
+ --max_eval_samples 200 \
48
+ --per_device_train_batch_size 4 \
49
+ --per_device_eval_batch_size 4 \
50
+ --num_train_epochs 1 \
51
+ --warmup_steps 8 \
52
+ --block_size 64 \
53
+ --fp16 \
54
+ --report_to none \
55
+ --deepspeed tests/deepspeed/ds_config_zero2.json \
56
+ "
57
+
58
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
59
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
60
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
61
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
62
+ export PYTHONPATH=src
63
+ export HF_DATASETS_OFFLINE=1
64
+ export TRANSFORMERS_OFFLINE=1
65
+
66
+ # to debug - add echo (it exits and prints what it would have launched)
67
+ srun bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
bigscience/jz/slurm/meg-gpt2-multi-node.slurm ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --job-name=meg_gpt2_multi_node
3
+ #SBATCH --nodes=2
4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --gres=gpu:4 # number of gpus
8
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
9
+ #SBATCH --output=%x-%j.out # output file name
10
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
11
+ #SBATCH --account=six@gpu
12
+
13
+ GPUS_PER_NODE=4
14
+ NNODES=$SLURM_JOB_NUM_NODES
15
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
16
+
17
+ set -x -e
18
+
19
+ source $six_ALL_CCFRWORK/start-prod
20
+
21
+ cd $six_ALL_CCFRWORK/code/megatron-lm
22
+
23
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
24
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
25
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
26
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
27
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2
28
+
29
+ MASTER_ADDR=`hostname`
30
+ MASTER_PORT=13370
31
+
32
+ # --train-iters 100000 \
33
+ # --lr-decay-iters 320000 \
34
+ GPT_ARGS=" \
35
+ --num-layers 24 \
36
+ --hidden-size 1024 \
37
+ --num-attention-heads 16 \
38
+ --seq-length 1024 \
39
+ --max-position-embeddings 1024 \
40
+ --micro-batch-size 4 \
41
+ --global-batch-size 16 \
42
+ --lr 0.00015 \
43
+ --lr-decay-style cosine \
44
+ --min-lr 1.0e-5 \
45
+ --finetune \
46
+ --train-iters 1000 \
47
+ --lr-decay-iters 800 \
48
+ --lr-warmup-fraction .01 \
49
+ --weight-decay 1e-2 \
50
+ --clip-grad 1.0 \
51
+ --vocab-file $VOCAB_FILE \
52
+ --merge-file $MERGE_FILE \
53
+ --fp16 \
54
+ --checkpoint-activations \
55
+ "
56
+
57
+ OUTPUT_ARGS=" \
58
+ --log-interval 10 \
59
+ --save-interval 500 \
60
+ --eval-interval 100 \
61
+ --eval-iters 10 \
62
+ "
63
+
64
+ export LAUNCHER="python -u -m torch.distributed.launch \
65
+ --nproc_per_node $GPUS_PER_NODE \
66
+ --nnodes $NNODES \
67
+ --master_addr $MASTER_ADDR \
68
+ --master_port $MASTER_PORT \
69
+ "
70
+
71
+ export CMD=" \
72
+ `pwd`/pretrain_gpt.py \
73
+ --tensor-model-parallel-size 2 \
74
+ --pipeline-model-parallel-size 2 \
75
+ $GPT_ARGS \
76
+ $OUTPUT_ARGS \
77
+ --save $SAVE_CHECKPOINT_PATH \
78
+ --load $CHECKPOINT_PATH \
79
+ --data-path $DATA_PATH \
80
+ --data-impl mmap \
81
+ --split 949,50,1 \
82
+ --distributed-backend nccl \
83
+ "
84
+
85
+ # to debug - add echo (it exits and prints what it would have launched)
86
+ srun bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
bigscience/jz/slurm/multi-node-launcher3.slurm ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ # This version I haven't quite figured out - the job hangs on the master host - probably misconfigured megatron-lm launching command
3
+ # this script I found here https://www.glue.umd.edu/hpcc/help/software/pytorch.html
4
+ # did some mods to it
5
+
6
+ #SBATCH --job-name=megatron_multi_node
7
+ #SBATCH --nodes=2
8
+ #SBATCH --ntasks-per-node=4
9
+ #SBATCH --hint=nomultithread
10
+ #SBATCH --gres=gpu:4
11
+ #SBATCH --time 00:30:00
12
+ #SBATCH --output=%x_%j.out
13
+ #SBATCH --output=%x-%j.out
14
+ #SBATCH --account=six@gpu
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-prod
19
+
20
+ cd $six_ALL_CCFRWORK/code/megatron-lm
21
+
22
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release/
23
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
24
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
25
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
26
+ SAVE_CHECKPOINT_PATH=data/checkpoints
27
+
28
+ GPUS_PER_NODE=4
29
+ NNODES=2
30
+
31
+ MASTER_ADDR=`/bin/hostname -s`
32
+ SLAVES=`scontrol show hostnames $SLURM_JOB_NODELIST | grep -v $MASTER_ADDR`
33
+ #Make sure this node (MASTER) comes first
34
+ HOSTLIST="$MASTER_ADDR $SLAVES"
35
+
36
+ MASTER_PORT=12345
37
+ #`ss -tan | awk '{print $4}' | cut -d':' -f2 | \
38
+ # grep "[2-9][0-9]\{3,3\}" | grep -v "[0-9]\{5,5\}" | \
39
+ # sort | uniq | shuf | head -1`
40
+
41
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
42
+
43
+
44
+ # --train-iters 100000 \
45
+ # --lr-decay-iters 320000 \
46
+ GPT_ARGS=" \
47
+ --num-layers 24 \
48
+ --hidden-size 1024 \
49
+ --num-attention-heads 16 \
50
+ --seq-length 1024 \
51
+ --max-position-embeddings 1024 \
52
+ --micro-batch-size 4 \
53
+ --global-batch-size 16 \
54
+ --lr 0.00015 \
55
+ --lr-decay-style cosine \
56
+ --min-lr 1.0e-5 \
57
+ --finetune \
58
+ --train-iters 1000 \
59
+ --lr-decay-iters 800 \
60
+ --lr-warmup-fraction .01 \
61
+ --weight-decay 1e-2 \
62
+ --clip-grad 1.0 \
63
+ --vocab-file $VOCAB_FILE \
64
+ --merge-file $MERGE_FILE \
65
+ --fp16 \
66
+ "
67
+
68
+ OUTPUT_ARGS=" \
69
+ --log-interval 10 \
70
+ --save-interval 500 \
71
+ --eval-interval 100 \
72
+ --eval-iters 10 \
73
+ --checkpoint-activations \
74
+ "
75
+
76
+ #Launch the pytorch processes, first on master (first in $HOSTLIST) then
77
+ #on the slaves
78
+ NODE_RANK=0
79
+ for node in $HOSTLIST; do
80
+ ssh -q $node \
81
+ python -m torch.distributed.launch \
82
+ --nproc_per_node $GPUS_PER_NODE \
83
+ --nnodes $NNODES \
84
+ --node_rank $NODE_RANK \
85
+ --master_addr $MASTER_ADDR \
86
+ --master_port $MASTER_PORT \
87
+ `pwd`/pretrain_gpt.py \
88
+ --tensor-model-parallel-size 2 \
89
+ --pipeline-model-parallel-size 2 \
90
+ $GPT_ARGS \
91
+ $OUTPUT_ARGS \
92
+ --save $SAVE_CHECKPOINT_PATH \
93
+ --load $CHECKPOINT_PATH \
94
+ --data-path $DATA_PATH \
95
+ --data-impl mmap \
96
+ --split 949,50,1 \
97
+ --distributed-backend nccl
98
+ NODE_RANK=$((NODE_RANK+1))
99
+ done
100
+ wait
bigscience/jz/slurm/openwebtext-jsonl-to-meg-gpt2.slurm ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --job-name=openwebtext-jsonl-to-meg-gpt2 # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=100:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --account=six@cpu
10
+ #SBATCH --partition=cpu_p1
11
+
12
+ set -x -e
13
+
14
+ source $six_ALL_CCFRWORK/start-prod
15
+
16
+ cd $six_ALL_CCFRWORK/code/megatron-lm
17
+ python tools/preprocess_data.py \
18
+ --input $six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl \
19
+ --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2 \
20
+ --vocab data/gpt2-vocab.json \
21
+ --dataset-impl mmap \
22
+ --tokenizer-type GPT2BPETokenizer \
23
+ --merge-file data/gpt2-merges.txt \
24
+ --append-eod \
25
+ --workers 8
bigscience/jz/slurm/openwebtext-jsonl-to-meg-t5.slurm ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --job-name=openwebtext-jsonl-to-meg-t5 # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=100:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --account=six@cpu
10
+ #SBATCH --partition=cpu_p1
11
+
12
+ set -x -e
13
+
14
+ source $six_ALL_CCFRWORK/start-prod
15
+
16
+ cd $six_ALL_CCFRWORK/code/megatron-lm
17
+ python tools/preprocess_data.py \
18
+ --input $six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl \
19
+ --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-t5 \
20
+ --vocab $six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt \
21
+ --dataset-impl mmap \
22
+ --tokenizer-type BertWordPieceLowerCase \
23
+ --split-sentences \
24
+ --workers 8
bigscience/jz/slurms_scripts/README.md ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Slurm scripts
2
+
3
+ Mainly here as an indication. Adapt to the current training.
4
+
5
+ - `cpu.slurm` -> for data preprocessing
6
+ - `gpu.slurm` -> arguments are adapted to maximize the gpu mem of the 8 requested 32GB GPUs
7
+
8
+
9
+
10
+
11
+ We are using common disk spaces for datasets, caches and experiment dumps:
12
+
13
+
14
+ - Experiment dumps -> `$six_ALL_CCFRWORK/experiments`
15
+
16
+ `SCRATCH` disk spaces are wiped regularly (every file that was not accessed in the past 30 days gets deleted), so we have GCS buckets (https://console.cloud.google.com/storage/browser/bigscience-experiments and https://console.cloud.google.com/storage/browser/bigscience-datasets) as shared storage that is accessible from JZ and from other instances too.
bigscience/jz/slurms_scripts/multi_node_deconlyt5.slurm ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --job-name=deconlyt5
3
+ #SBATCH --qos=qos_gpu-t4
4
+ #SBATCH --nodes=32
5
+ #SBATCH --ntasks-per-node=1 # number of MP tasks
6
+ #SBATCH --gres=gpu:8 # number of GPUs per node
7
+ #SBATCH -C v100-32g
8
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
9
+ #SBATCH --hint=nomultithread # we get physical cores not logical
10
+ #SBATCH --time=50:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%j.out # output file name
12
+ #SBATCH --error=%j.out # error file name (same to watch just one file)
13
+ #SBATCH --account=six@gpu
14
+ #SBATCH --mail-type=ALL
15
+
16
+ GPUS_PER_NODE=8
17
+ NNODES=$SLURM_JOB_NUM_NODES
18
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
19
+
20
+ set -x -e
21
+
22
+ source $six_ALL_CCFRWORK/start-prod
23
+
24
+ cd $six_ALL_CCFRWORK/code/transformers
25
+ export PYTHONPATH=$six_ALL_CCFRWORK/code/transformers
26
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
27
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
28
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
29
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
30
+ export PYTHONPATH=src
31
+ export HF_DATASETS_OFFLINE=1
32
+ export TRANSFORMERS_OFFLINE=1
33
+
34
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
35
+ MASTER_PORT=13370
36
+
37
+ export LAUNCHER=" \
38
+ python -u -m torch.distributed.launch \
39
+ --nproc_per_node $GPUS_PER_NODE \
40
+ --nnodes $NNODES \
41
+ --master_addr $MASTER_ADDR \
42
+ --master_port $MASTER_PORT \
43
+ "
44
+
45
+ DATASET=openwebtext
46
+ LOGG_FREQUENCY=125
47
+ SAVE_FREQUENCY=250
48
+ EVAL_FREQUENCY=1000
49
+ SERIALIZATION_DIR=${ALL_CCFRSCRATCH}/experiments/dec_only_t5-xl-multinode
50
+ LOGGING_DIR=${ALL_CCFRSCRATCH}/tensorboard/dec_only_t5-xl-multinode
51
+
52
+ export CMD=" \
53
+ ${SCRATCH}/code/bigscience/jz/scripts/run_clm.py \
54
+ --deepspeed ${six_ALL_CCFRWORK/code/bigscience/jz/configs/deepspeed/ds_zero3.json \
55
+ --model_type decoder_only_t5 \
56
+ --tokenizer_name t5-small \
57
+ --config_name ${six_ALL_CCFRWORK/code/bigscience/jz/configs/dec_only_t5/decoder_only_t5-xl.json \
58
+ --dataset_name ${DATASET} --block_size 1024 \
59
+ --preprocessing_num_workers 76 \
60
+ --do_train --do_eval \
61
+ --max_steps 34000 \
62
+ --per_device_train_batch_size 1 --gradient_accumulation_steps 2 \
63
+ --per_device_eval_batch_size 1 \
64
+ --learning_rate 6e-4 \
65
+ --adam_beta1 0.9 --adam_beta2 0.95 --weight_decay 0.1 \
66
+ --warmup_steps 800 \
67
+ --max_grad_norm 1.0 \
68
+ --output_dir ${SERIALIZATION_DIR} --overwrite_output_dir \
69
+ --report_to tensorboard \
70
+ --logging_strategy steps --logging_first_step --logging_dir ${LOGGING_DIR} --logging_steps ${LOGG_FREQUENCY} \
71
+ --eval_steps ${EVAL_FREQUENCY} --evaluation_strategy steps --max_val_samples 10000 \
72
+ --save_strategy steps --save_steps ${SAVE_FREQUENCY} --save_total_limit 200
73
+ "
74
+
75
+ # to debug - add echo (it exits and prints what it would have launched)
76
+ srun bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
bigscience/jz/slurms_scripts/preprocess_deconlyt5.slurm ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --job-name=preprocessdeconlyt5
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --constraint=v100-16g
5
+ #SBATCH --gres=gpu:1 # number of GPUs per node
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --qos=qos_gpu-t4 # t4 enables 100H trainings
9
+ #SBATCH --time=40:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@gpu # It's kind of stupid but we don't have pure CPU allocation with eha.
13
+ #SBATCH --mail-type=ALL
14
+
15
+ set -x -e
16
+
17
+ source $six_ALL_CCFRWORK/start-prod
18
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
19
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
20
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
21
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
22
+ export HF_DATASETS_OFFLINE=1
23
+ export TRANSFORMERS_OFFLINE=1
24
+
25
+ DATASET=openwebtext
26
+ LOGG_FREQUENCY=500
27
+ SAVE_FREQUENCY=500
28
+ EVAL_FREQUENCY=100000
29
+ SERIALIZATION_DIR=${eha_ALL_CCFRSCRATCH}/experiments/t5openwebtextpreprocess
30
+ LOGGING_DIR=${eha_ALL_CCFRSCRATCH}/tensorboard/t5openwebtextpreprocess
31
+
32
+ python ${six_ALL_CCFRWORK}/code/bigscience/jz/scripts/run_clm.py \
33
+ --model_type decoder_only_t5 \
34
+ --tokenizer_name t5-small \
35
+ --config_name ${six_ALL_CCFRWORK}/code/bigscience/jz/configs/dec_only_t5/decoder_only_t5-tiny.json \
36
+ --dataset_name ${DATASET} --block_size 1024 \
37
+ --preprocessing_num_workers 76 \
38
+ --do_train --do_eval \
39
+ --max_steps 1 \
40
+ --max_val_samples 10 \
41
+ --per_device_train_batch_size 1 --gradient_accumulation_steps 1 \
42
+ --per_device_eval_batch_size 1 \
43
44
+ --learning_rate 6e-4 \
45
+ --adam_beta1 0.9 --adam_beta2 0.95 --weight_decay 0.1 \
46
+ --warmup_steps 800 \
47
+ --max_grad_norm 1.0 \
48
+ --output_dir ${SERIALIZATION_DIR} --overwrite_output_dir \
49
+ --report_to tensorboard \
50
+ --logging_strategy steps --logging_first_step --logging_dir ${LOGGING_DIR} --logging_steps ${LOGG_FREQUENCY} \
51
+ --eval_steps ${EVAL_FREQUENCY} --evaluation_strategy steps \
52
+ --save_strategy steps --save_steps ${SAVE_FREQUENCY} --save_total_limit 200
bigscience/jz/tools/diagnostics.md ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Tools for diagnostics of training problems
2
+
3
+
4
+ ## Hanging processes
5
+
6
+
7
+ To track down the culprit of a hang, dump the stack traces of the training processes:
8
+ ```
9
+ pgrep -f pretrain_gpt | xargs -i /path/to/py-spy dump --pid {} > /networked/path/unique/for/node
10
+ ```
11
+
12
+ Given the dumps of a hung 3D trainer, the node with issues usually gets stuck in a different part of the training pipeline. Pipelines with no issues will be waiting at an all-reduce before the step, whereas the problematic pipeline usually hangs somewhere in the training microbatches. We often see the pipeline-adjacent processes stuck on a pipe send/recv from the problematic node(s).
13
+
14
+ If `py-spy` isn't already installed, do:
15
+ ```
16
+ pip install py-spy
17
+ ```
18
+
19
+
20
+ ## Malfunctioning GPUs
21
+
22
+ Usually these require a reboot, as once a problem happens at the hardware level, recovery is not possible without one.
23
+
24
+ For example, if a GPU can't allocate memory because it has a hardware issue, a simple test could be:
25
+
26
+ ```
27
+ python -c "import torch; torch.ones(1).cuda()"
28
+ ```
bigscience/jz/tools/google-cloud-sdk.md ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # google-cloud-sdk
2
+
3
+ Installed in `$six_ALL_CCFRWORK/lib/google-cloud-sdk` following the linux installation instructions [here](https://cloud.google.com/sdk/docs/install?hl=en).
4
+
5
+ To activate add to your `~/.bashrc`:
6
+
7
+ ```
8
+ if [ -f '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/path.bash.inc' ]; then . '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/path.bash.inc'; fi
9
+ if [ -f '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/completion.bash.inc' ]; then . '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/completion.bash.inc'; fi
10
+
11
+ ```
12
+
13
+ and restart `bash`.
14
+
15
+ # Downloading from the `bigscience` bucket
16
+
17
+ Go to the location to download, e.g.:
18
+ `https://console.cloud.google.com/storage/browser/bigscience/mc4_preprocessing?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22))`
19
+
20
+ Select the dirs to download and click on `Download`, and it will give instructions to download all the dirs using `gsutil`, e.g.:
21
+
22
+ ```
23
+ gsutil -m cp -r \
24
+ "gs://bigscience/mc4_sampled_raw/am/" \
25
+ "gs://bigscience/mc4_sampled_raw/ar/" \
26
+ .
27
+ ```
28
+
29
+ To debug add `-d`.
30
+
31
+ To download a single file, go to the file's page, e.g.:
32
+
33
+ https://console.cloud.google.com/storage/browser/_details/bigscience/mc4_preprocessing/en/train_text_document_1.bin
34
+
35
+ and it'll have the `gsutil URI` entry, in this case: `gs://bigscience/mc4_preprocessing/en/train_text_document_1.bin` which you then feed to `gsutil`:
36
+
37
+ ```
38
+ gsutil -m cp "gs://bigscience/mc4_preprocessing/en/train_text_document_1.bin" .
39
+ ```
40
+
41
+ rsync might be a better way to sync files when they are large and the client keeps on crashing, example:
42
+ ```
43
+ gsutil -m rsync -r "gs://bigscience/mc4_preprocessing" mc4_preprocessing
44
+ ```
45
+ note that `gsutil` keeps track of what it failed to do and tries to re-do it: even if you manually fetched a large file and inserted it into the right location, it'll ignore its appearance, delete it and attempt to fetch it anew. Not a really great `rsync` feature, if you're used to the normal `rsync(1)` tool.
46
+
47
+ ## moving multiple folders
48
+
49
+
50
+ `gsutil mv` is supposed to support globbing, but it doesn't. so here is a poor man's workaround:
51
+
52
+ e.g. to move `"gs://bigscience-backups/tr1-13B/global_step*"` to `"gs://bigscience-backups/tr1-13B/checkpoints-bak/"`
53
+
54
+ ```
55
+ for x in `gsutil ls "gs://bigscience-backups/tr1-13B"`; do y=$(basename -- "$x");echo gsutil mv ${x} gs://bigscience-backups/tr1-13B/checkpoints-bak/${y}; done > cmd
56
+ ```
57
+ edit `cmd` to your liking to remove any folders that shouldn't be moved. Surely this can be further improved to filter out the wanted pattern, but the principle is clear.
bigscience/jz/tools/monitoring.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Monitoring
2
+
3
+ ## nvtop
4
+
5
+ A nice alternative to `watch -n1 nvidia-smi`
6
+
7
+ ```
8
+ module load nvtop
9
+ nvtop
10
+ ```
bigscience/jz/tools/tensorboard.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Tensorboard
2
+
3
+ Jean Zay has a specific procedure to check tensorboard logs detailed [here](http://www.idris.fr/eng/jean-zay/pre-post/jean-zay-jupyter-notebook-eng.html). It essentially boils down to:
4
+ ```bash
5
+ module load tensorflow-gpu/py3/2.3.0 # You can use your own env or other JZ existing envs
6
+ jupyter tensorboard enable --user
7
+ idrjup
8
+ ```
9
+ Please note that you need to connect from the declared IP address.
10
+
11
+ # Potential errors
12
+
13
+ On Jupyter, if you run into an *Invalid credentials* error, or a *Jupyter tensorboard extension error*, as suggested by Rémi Lacroix, you can remove the `~/.jupyter` folder (command: `rm -rf ~/.jupyter`) and restart the procedure from scratch. In particular, make sure you re-activate the tensorboard plugin for your user: `jupyter tensorboard enable --user`. It generally fixes that kind of problem.
bigscience/tools/README.md ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Instrumenting your run
2
+ We assume you're following the structure of the [arch-and-scaling template](https://github.com/bigscience-workshop/bigscience/blob/master/train/arch-and-scaling-template.slurm)
3
+ Go to https://huggingface.co/ and create two models (currently, under your icon on the top right/new model)
4
+ - <YOUR_MODEL_NAME>-checkpoints
5
+ - <YOUR_MODEL_NAME>-logs
6
+ In your output path (`DATA_OUTPUT_PATH` in the arch-and-scaling template), `git clone` the logs repo and rename the folder to `logs` (`mv <YOUR_MODEL_NAME>-logs logs`).
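+
+ A minimal sketch of those steps (assuming, as in the examples below, that the repos live under the `bigscience` org - substitute your own names):
+
+ ```
+ cd $DATA_OUTPUT_PATH
+ git clone https://huggingface.co/bigscience/<YOUR_MODEL_NAME>-logs
+ mv <YOUR_MODEL_NAME>-logs logs
+ ```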
7
+
8
+ ## How to synch your logs with the hub
9
+ `python tools/hub-sync.py --repo-path <DATA_OUTPUT_PATH>/logs/tensorboard/ --patterns "*tfevent*"`
10
+
11
+ ## How to synch your checkpoints with the hub
12
+ Latest version of what was used in [training 1](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr1-13B-base).
13
+
14
+ Go to your `checkpoints` folder, which should contain a bunch of `global_stepXXXXXX` folders. Open a long running interactive shell:
15
+ ```
16
+ srun -p compil --cpus-per-task=40 -A six@cpu --time=6:00:00 --pty bash
17
+ ```
18
+ then convert:
19
+
20
+ ```
21
+ time find * -maxdepth 0 -type d -name "global_step*" -exec $six_ALL_CCFRWORK/code/Megatron-DeepSpeed/tools/convert_checkpoint/deepspeed_to_transformers.py --input_folder {} --output_folder ../hf-fixed/{} \;
22
+ ```
23
+ to prepare the target dir:
24
+
25
+ ```
26
+ #git -c http.extraHeader="Authorization: Basic " clone https://huggingface.co/bigscience/<YOUR_REPO>/
27
+ cd YOUR_REPO
28
+ huggingface-cli lfs-enable-largefiles .
29
+ git config --unset user.email
30
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
31
+ ```
32
+ We are going to put each checkpoint into its own branch with the same name.
33
+ - If you have added tokenizer files:
34
+
35
+ ```
36
+ mv ../hf-fixed/global_step* .
37
+ time find * -maxdepth 0 -type d -name "global_step*" -exec git checkout main \; -exec git checkout -b {} \; -exec mv {}/config.json . \; -exec mv {}/pytorch_model.bin . \; -exec git add config.json pytorch_model.bin <TOKENIZER_FILES> \; -exec git commit -m "add {}" \; -exec git push --set-upstream origin {} \; -exec mv config.json {}/ \; -exec mv pytorch_model.bin {}/ \;
38
+ git checkout main
39
+ ```
40
+ - If you just want to add the checkpoints, without tokenizer files:
41
+
42
+ ```
43
+ mv ../hf-fixed/global_step* .
44
+ time find * -maxdepth 0 -type d -name "global_step*" -exec git checkout main \; -exec git checkout -b {} \; -exec mv {}/config.json . \; -exec mv {}/pytorch_model.bin . \; -exec git add config.json pytorch_model.bin \; -exec git commit -m "add {}" \; -exec git push --set-upstream origin {} \; -exec mv config.json {}/ \; -exec mv pytorch_model.bin {}/ \;
45
+ git checkout main
46
+ ```
47
+ - If you want to add tokenizer files later:
48
+
49
+ ```
50
+ time find * -maxdepth 0 -type d -name "global_step*" -exec git checkout main \; -exec git checkout {} \; -exec git add <TOKENIZER_FILES> \; -exec git commit -m "add {}" \; -exec git push --set-upstream origin {} \;
51
+ git checkout main
52
+ ```
53
+ ## Fast branch switching in case you messed up and want to fix all your checkpoints
54
+ What you want is `export GIT_LFS_SKIP_SMUDGE=1`.
55
+ Here's an example that changes the activation function in the `config.json` files for each branch:
56
+ ```
57
+ export GIT_LFS_SKIP_SMUDGE=1
58
+ git clone https://huggingface.co/bigscience/tr3e-1B3-c4-checkpoints
59
+ cd tr3e-1B3-c4-checkpoints
60
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
61
+ set +H
62
+ git branch -a | sort -V | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; perl -pi -e "s/gelu(?!_)/gelu_fast/" $1/config.json; git commit -m "gelu_fast is the correct activation_function" .; git push --set-upstream origin $1]'
63
+ export GIT_LFS_SKIP_SMUDGE=0
64
+ ```
65
+ And an example that fixes checkpoints in the old format (contained within a `global_step` subfolder, no tokenizer files) to be compatible with `from_pretrained`:
66
+ ```
67
+ export GIT_LFS_SKIP_SMUDGE=1
68
+ my_callback () {
69
+ INDEX=${1}
70
+ BRANCH=${2}
71
+ if [[ $BRANCH == origin/global_step* ]];
72
+ then
73
+ git checkout "${BRANCH:7}"
74
+ git mv "${BRANCH:7}"/* .
75
+ cp ../gpt2_tokenizer/tokenizer.json .
76
+ git add tokenizer.json
77
+ git commit -m "fixed checkpoints to be from_pretrained-compatible"
78
+ git push
79
+ fi
80
+ }
81
+ get_branches () {
82
+ git branch --all --format='%(refname:short)'
83
+ }
84
+ # mapfile -t -C my_callback -c 1 BRANCHES < <( get_branches ) # if you want the branches that were sent to mapfile in a new array as well
85
+ # echo "${BRANCHES[@]}"
86
+ mapfile -t -C my_callback -c 1 < <( get_branches )
87
+ ```
bigscience/tools/fixing_checkpoints_for_from_pretrained.sh ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ my_callback () {
2
+ INDEX=${1}
3
+ BRANCH=${2}
4
+ if [[ $BRANCH == origin/global_step* ]];
5
+ then
6
+ git checkout "${BRANCH:7}"
7
+ git mv "${BRANCH:7}"/* .
8
+ cp ../gpt2_tokenizer/tokenizer.json .
9
+ git add tokenizer.json
10
+ git commit -m "fixed checkpoints to be from_pretrained-compatible"
11
+ git push
12
+ fi
13
+ }
14
+ get_branches () {
15
+ git branch --all --format='%(refname:short)'
16
+ }
17
+ # mapfile -t -C my_callback -c 1 BRANCHES < <( get_branches ) # if you want the branches that were sent to mapfile in a new array as well
18
+ # echo "${BRANCHES[@]}"
19
+
20
+ export GIT_LFS_SKIP_SMUDGE=1
21
+ mapfile -t -C my_callback -c 1 < <( get_branches )
bigscience/tools/fs-watchdog.py ADDED
@@ -0,0 +1,185 @@
1
+ #!/usr/bin/env python
2
+
3
+ #
4
+ # This tool alerts on the status of the filesystem - when it's getting close to running out of disk space or inodes on various partitions at JZ
5
+ #
6
+ # Example:
7
+ #
8
+ # fs-watchdog.py
9
+ #
10
+
11
+ import argparse
12
+ import re
13
+ import smtplib
14
+ import socket
15
+ import subprocess
16
+ import sys
17
+
18
+ SLURM_GROUP_NAME = "six"
19
+
20
+ # this needs to be an actual email subscribed to [email protected]
21
+ FROM_ADDR = "[email protected]"
22
+ TO_ADDRS = ["[email protected]", "[email protected]"] # wants a list
23
+
24
+ def send_email(subject, body):
25
+ message = f"""\
26
+ From: {FROM_ADDR}
27
+ To: {", ".join(TO_ADDRS)}
28
+ Subject: {subject}
29
+
30
+ {body}
31
+ """
32
+
33
+ server = smtplib.SMTP("localhost")
34
+ #server.set_debuglevel(3) # uncomment if need to debug
35
+ server.sendmail(FROM_ADDR, TO_ADDRS, message)
36
+ server.quit()
37
+
38
+ def send_email_alert(msg):
39
+
40
+ subject = f"[ALERT] JZ filesystem is getting close to being full"
41
+ body = f"""
42
+ ***ALERT: One or more partitions at JZ are getting close to being full! Alert someone at Eng WG***
43
+
44
+ {msg}
45
+
46
+ Please reply to this email once the issue has been taken care of, or if you are in the process of doing so, in case new alerts are sent again.
47
+
48
+ If unsure what to do, please post in the #bigscience-engineering slack channel.
49
+
50
+ """
51
+
52
+ send_email(subject, body)
53
+
54
+ def check_running_on_jean_zay():
55
+ fqdn = socket.getfqdn()
56
+ # sometimes it gives fqdn, other times it doesn't, so try to use both patterns
57
+ if not ("idris.fr" in fqdn or "idrsrv" in fqdn):
58
+ raise ValueError("This script relies on JZ's specific environment and won't work elsewhere. "
59
+ f"You're attempting to run it on '{fqdn}'.")
60
+
61
+ def run_cmd(cmd, check=True):
62
+ try:
63
+ git_status = subprocess.run(
64
+ cmd,
65
+ stderr=subprocess.PIPE,
66
+ stdout=subprocess.PIPE,
67
+ check=check,
68
+ encoding="utf-8",
69
+ ).stdout.strip()
70
+ except subprocess.CalledProcessError as exc:
71
+ raise EnvironmentError(exc.stderr)
72
+
73
+ return git_status
74
+
75
+
76
+ def get_args():
77
+ parser = argparse.ArgumentParser()
78
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
79
+ parser.add_argument("--no-email", action='store_true', help="do not email alerts")
80
+ return parser.parse_args()
81
+
82
+ def main():
83
+
84
+ check_running_on_jean_zay()
85
+ args = get_args()
86
+
87
+ alerts = []
88
+ def analyse_partition_bytes(partition_name, partition_path, hard_limit_bytes, alert_bytes_threshold):
89
+ soft_limit_bytes = hard_limit_bytes * alert_bytes_threshold
90
+ cmd = f"du -bs {partition_path}"
91
+ response = run_cmd(cmd.split(), check=False) # du could report partial errors for wrong perms
92
+ size_bytes = int(response.split()[0])
93
+ if args.debug:
94
+ print(f"{partition_name} bytes: {size_bytes}")
95
+
96
+ if size_bytes > soft_limit_bytes:
97
+ current_usage_percent = 100*size_bytes/hard_limit_bytes
98
+ alerts.append(f"{partition_name} is at {current_usage_percent:.2f}% bytes usage ({size_bytes/2**30:.2f}GB/{hard_limit_bytes/2**30:.2f}GB)")
99
+ alerts.append("")
100
+
101
+ def analyse_partition_inodes(partition_name, partition_path, hard_limit_inodes, alert_inodes_threshold):
102
+ soft_limit_inodes = hard_limit_inodes * alert_inodes_threshold
103
+ cmd = f"du -s -BK --inodes {partition_path}"
104
+ response = run_cmd(cmd.split(), check=False) # du could report partial errors for wrong perms
105
+ size_inodes = int(response.split()[0])
106
+ if args.debug:
107
+ print(f"{partition_name} Inodes: {size_inodes}")
108
+
109
+ if size_inodes > soft_limit_inodes:
110
+ current_usage_percent = 100*size_inodes/hard_limit_inodes
111
+ alerts.append(f"{partition_name} is at {current_usage_percent:.2f}% inodes usage ({size_inodes/2**10:.2f}K/{hard_limit_inodes/2**10:.2f}K)")
112
+ alerts.append("")
113
+
114
+ def analyse_partition_idrquota(partition_name, partition_flag, alert_bytes_threshold, alert_inodes_threshold):
115
+ cmd = f"idrquota {partition_flag} -p {SLURM_GROUP_NAME}"
116
+ response = run_cmd(cmd.split())
117
+ match = re.findall(r' \(([\d\.]+)%\)', response)
118
+ if match:
119
+ bytes_percent, inodes_percent = [float(x) for x in match]
120
+ else:
121
+ raise ValueError(f"{cmd} failed")
122
+ if args.debug:
123
+ print(f"{partition_name} bytes: {bytes_percent}%")
124
+ print(f"{partition_name} inodes: {inodes_percent}%")
125
+
126
+ msg = []
127
+ if bytes_percent/100 > alert_bytes_threshold:
128
+ msg.append(f"{partition_name} is at {bytes_percent:.2f}% bytes usage")
129
+
130
+ if inodes_percent/100 > alert_inodes_threshold:
131
+ msg.append(f"{partition_name} is at {inodes_percent:.2f}% inodes usage")
132
+
133
+ if len(msg) > 0:
134
+ alerts.extend(msg)
135
+ alerts.append(response)
136
+ alerts.append("")
137
+
138
+ def analyse_shared_disk(partition_name, alert_bytes_threshold):
139
+ partition_name_2_disk = {
140
+ "SCRATCH": "gpfsssd",
141
+ "WORK": "gpfsdswork",
142
+ "STORE": "gpfsdsstore"
143
+ }
144
+ cmd = "df"
145
+ response = run_cmd(cmd.split())
146
+ disk_metas = response.split("\n")
147
+ column_names = disk_metas[0].split()
148
+ disk_meta = [disk_meta_.split() for disk_meta_ in disk_metas if disk_meta_.startswith(partition_name_2_disk[partition_name])][0]
149
+ disk_meta = {column_name: value for column_name, value in zip(column_names, disk_meta)}
150
+
151
+ # default `df` counts uses 1024-byte units, and `1024 == 2 ** 10`
152
+ available_disk_left = int(disk_meta["Available"]) * 2 ** 10
153
+ if available_disk_left < alert_bytes_threshold:
154
+ alerts.append(f"Shared {partition_name} has {available_disk_left/2**40:.2f}TB left")
155
+ alerts.append("")
156
+
157
+ # WORK and STORE partitions stats can be accessed much faster through `idrquota`, and it already
158
+ # includes the quota info
159
+ analyse_partition_idrquota(partition_name="WORK", partition_flag="-w", alert_bytes_threshold=0.85, alert_inodes_threshold=0.85)
160
+ analyse_partition_idrquota(partition_name="STORE", partition_flag="-s", alert_bytes_threshold=0.85, alert_inodes_threshold=0.85)
161
+
162
+ # SCRATCH - check only bytes w/ a hard quota of 400TB - alert on lower threshold than other
163
+ # partitions due to it filling up at a faster rate (dumping huge checkpoints)
164
+ analyse_partition_bytes(partition_name="SCRATCH", partition_path="/gpfsssd/scratch/rech/six/", hard_limit_bytes=400*2**40, alert_bytes_threshold=0.75)
165
+ # Actually SCRATCH is shared with everyone and we should monitor the output of `df -h | grep gpfsssd`
166
+ # Check that there's still 100TB left
167
+ analyse_shared_disk("SCRATCH", 100 * 2 ** 40)
168
+
169
+ # WORKSF - check both bytes and inodes w/ hard quotas of 2TB / 3M
170
+ analyse_partition_bytes(partition_name="WORKSF", partition_path="/gpfsssd/worksf/projects/rech/six/", hard_limit_bytes=2*2**40, alert_bytes_threshold=0.85)
171
+ analyse_partition_inodes(partition_name="WORKSF", partition_path="/gpfsssd/worksf/projects/rech/six/", hard_limit_inodes=3*10**6, alert_inodes_threshold=0.85)
172
+
173
+ if len(alerts) > 0 :
174
+ print(f"[ALERT] JZ filesystem is getting close to being full")
175
+ msg = "\n".join(alerts)
176
+ print(msg)
177
+
178
+ if not args.no_email:
179
+ send_email_alert(msg)
180
+ else:
181
+ print("All partitions are in a good standing")
182
+
183
+ if __name__ == "__main__":
184
+
185
+ main()
bigscience/tools/fs-watchdog.slurm ADDED
@@ -0,0 +1,23 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=fs-watchdog # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --hint=nomultithread # we get physical cores not logical
6
+ #SBATCH --time=2:00:00 # maximum execution time (HH:MM:SS)
7
+ #SBATCH --output=%x-%j.out # output file name
8
+ #SBATCH --partition=compil
9
+ #SBATCH --account=six@cpu
10
+
11
+ set -e
12
+
13
+ echo "START TIME: $(date)"
14
+
15
+ source $six_ALL_CCFRWORK/start-prod
16
+
17
+ echo "running partition watchdog"
18
+
19
+ BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience
20
+
21
+ $BIG_SCIENCE_REPO_PATH/tools/fs-watchdog.py
22
+
23
+ echo "END TIME: $(date)"
bigscience/tools/hub-auth.py ADDED
@@ -0,0 +1,23 @@
1
+ #!/usr/bin/env python
2
+
3
+ # creates a local auth token file which can then be safely used by other programs without leaking
4
+ # the password in public git
5
+
6
+ import getpass
7
+ import json
8
+ from pathlib import Path
9
+ from huggingface_hub import HfApi
10
+
11
+ HUB_DATA_PATH_SHARED = "/gpfsdswork/projects/rech/six/commun/auth/.hub_info.json"
12
+ #HUB_DATA_PATH = Path(__file__).resolve().parent / ".hub_info.json"
13
+
14
+ username = input("Hub username: ")
15
+ password = getpass.getpass("Hub password: ")
16
+ email = input("Hub email: ")
17
+ auth_token = HfApi().login(username=username, password=password)
18
+
19
+ data = dict(username=username, email=email, auth_token=auth_token)
20
+ #print(data)
21
+
22
+ with open(HUB_DATA_PATH_SHARED, 'w') as f:
23
+ json.dump(data, f)
bigscience/tools/hub-sync.py ADDED
@@ -0,0 +1,295 @@
1
+ #!/usr/bin/env python
2
+
3
+ #
4
+ # This tool automatically pushes newly added and modified files into the hub repo, if they match the
5
+ # provided one or more patterns.
6
+ #
7
+ # If the program fails to run the first time make sure to run `hub-auth.py` to authenticate and save
8
+ # the token, and user name/email locally which will then be used by this program to alter the config
9
+ # of the target repo to automatically commit as the user you authenticated with. This is needed when
10
+ # pushing as someone else, which is the case here, as we want the software to always work and not
11
+ # depend on the developer's git setup.
12
+ #
13
+ # Example:
14
+ #
15
+ # hub-sync.py --repo-path /hf/Megatron-DeepSpeed-master/output_dir/tensorboard/ --patterns '*tfevents*'
16
+ #
17
+ # multiple patterns can be passed
18
+
19
+ import argparse
20
+ import io
21
+ import json
22
+ import os
23
+ import re
24
+ import subprocess
25
+ import sys
26
+
27
+ from collections import defaultdict
28
+ from fnmatch import fnmatch
29
+ from huggingface_hub import HfApi, HfFolder, Repository
30
+ from pathlib import Path
31
+ from typing import List, Optional, Union
32
+
33
+ # normally using a globally shared hub data, but can override it with the local token if need be
34
+ HUB_DATA_PATH_SHARED = "/gpfsdswork/projects/rech/six/commun/auth/.hub_info.json"
35
+ # for now disabling local, since it leads to outdated auth tokens
36
+ HUB_DATA_PATH_LOCAL = Path(__file__).resolve().parent / ".hub_info.json"
37
+
38
+ HUB_AUTH_TOKEN_PATH = "/gpfsdswork/projects/rech/six/commun/auth/.hub_auth"
39
+
40
+ # map https://git-scm.com/docs/git-status#_short_format
41
+ #
42
+
43
+ # ' ' = unmodified
44
+ # M = modified
45
+ # A = added
46
+ # D = deleted
47
+ # R = renamed
48
+ # C = copied
49
+ # U = updated but unmerged
50
+
51
+ # X Y Meaning
52
+ # -------------------------------------------------
53
+ # [AMD] not updated
54
+ # M [ MD] updated in index
55
+ # A [ MD] added to index
56
+ # D deleted from index
57
+ # R [ MD] renamed in index
58
+ # C [ MD] copied in index
59
+ # [MARC] index and work tree matches
60
+ # [ MARC] M work tree changed since index
61
+ # [ MARC] D deleted in work tree
62
+ # [ D] R renamed in work tree
63
+ # [ D] C copied in work tree
64
+ # -------------------------------------------------
65
+ # D D unmerged, both deleted
66
+ # A U unmerged, added by us
67
+ # U D unmerged, deleted by them
68
+ # U A unmerged, added by them
69
+ # D U unmerged, deleted by us
70
+ # A A unmerged, both added
71
+ # U U unmerged, both modified
72
+ # -------------------------------------------------
73
+ # ? ? untracked
74
+ # ! ! ignored
75
+
76
+ git_status_lookup = {
77
+ "?": "untracked",
78
+ "M": "modified",
79
+ "A": "added",
80
+ "D": "deleted",
81
+ "R": "renamed",
82
+ "C": "copied",
83
+ "U": "updated_unmerged",
84
+ }
85
+
86
+ def get_git_files_by_status(local_dir):
87
+ try:
88
+ git_status = subprocess.run(
89
+ ["git", "status", "-s"],
90
+ stderr=subprocess.PIPE,
91
+ stdout=subprocess.PIPE,
92
+ check=True,
93
+ encoding="utf-8",
94
+ cwd=local_dir,
95
+ ).stdout.strip()
96
+ except subprocess.CalledProcessError as exc:
97
+ raise EnvironmentError(exc.stderr)
98
+
99
+ if len(git_status) == 0:
100
+ return {}
101
+
102
+ file_statuses = [status.strip() for status in git_status.split("\n")]
103
+
104
+ # create a dict of lists for each long key in git_status_lookup
105
+ files = defaultdict(list)
106
+ for l in file_statuses:
107
+ k, v = l.split(' ', 1)
108
+ k = k.strip()[0] # get first column
109
+ # remap to sensible name
110
+ k = git_status_lookup.get(k, "unknown")
111
+ files[k].append(v)
112
+
113
+ #print(files)
114
+
115
+ return files
116
+
117
+
118
+ # XXX: this should be PR'ed into https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/repository.py
119
+ # after adjusting the API self, self.local_dir
120
+ def get_untracked_files(local_dir) -> List[str]:
121
+ """
122
+ Returns a list of untracked files in the working directory
123
+ """
124
+ key = "untracked"
125
+ files_by_status = get_git_files_by_status(local_dir)
126
+ return files_by_status[key] if key in files_by_status else []
127
+
128
+ def get_modified_files(local_dir) -> List[str]:
129
+ """
130
+ Returns a list of modified files in the working directory
131
+ """
132
+ key = "modified"
133
+ files_by_status = get_git_files_by_status(local_dir)
134
+ return files_by_status[key] if key in files_by_status else []
135
+
136
+
137
+ def get_new_and_modified_files(local_dir) -> List[str]:
138
+ """
139
+ Returns a list of untracked and modified files in the working directory recursively.
140
+ It will include relative path for files under sub-dirs that are untracked.
141
+ """
142
+
143
+ try:
144
+ cmd = "git ls-files --modified --others --exclude-standard".split()
145
+ output = subprocess.run(
146
+ cmd,
147
+ stderr=subprocess.PIPE,
148
+ stdout=subprocess.PIPE,
149
+ check=True,
150
+ encoding="utf-8",
151
+ cwd=local_dir,
152
+ ).stdout.strip()
153
+ except subprocess.CalledProcessError as exc:
154
+ raise EnvironmentError(exc.stderr)
155
+
156
+ if len(output) == 0:
157
+ return []
158
+
159
+ return [f.strip() for f in output.split("\n")]
160
+
161
+
162
+ def run_cmd(cmd, local_dir):
163
+ try:
164
+ git_status = subprocess.run(
165
+ cmd,
166
+ stderr=subprocess.PIPE,
167
+ stdout=subprocess.PIPE,
168
+ check=True,
169
+ encoding="utf-8",
170
+ cwd=local_dir,
171
+ ).stdout.strip()
172
+ except subprocess.CalledProcessError as exc:
173
+ raise EnvironmentError(exc.stderr)
174
+
175
+ return git_status
176
+
177
+
178
+ def hub_config_repo(hub_data, local_dir):
179
+
180
+ # if we have the bot user email set, that means we have done this process already
181
+ # but some users don't have any `user.email` set, so recover gracefully if that's the case
182
+ try:
183
+ cmd = f"git config user.email"
184
+ email = run_cmd(cmd.split(), local_dir)
185
+ if len(email) > 0 and email == hub_data['email']:
186
+ return
187
+ except:
188
+ pass
189
+
190
+ print(f"* Detected a new clone. Setting it up for {hub_data['username']}")
191
+
192
+ # to work as another user we need
193
+ # 1. their user.email ( but also user.name is required but can be anything)
194
+ cmd = f"git config user.email {hub_data['email']}"
195
+ run_cmd(cmd.split(), local_dir)
196
+ cmd = f"git config user.name {hub_data['username']}"
197
+ run_cmd(cmd.split(), local_dir)
198
+
199
+ # 2. pre-auth the repo
200
+ # a. get url
201
+ cmd = "git remote get-url origin"
202
+ url = run_cmd(cmd.split(), local_dir)
203
+
204
+ # b. extract just the huggingface.co/app-test-user/test-tensorboard part
205
+ repo_part_url = re.sub(r'https.*(?=huggingface)', '', url, 0, re.M)
206
+ cmd = f"git remote set-url origin --push https://{hub_data['username']}:{hub_data['auth_token']}@{repo_part_url}"
207
+ run_cmd(cmd.split(), local_dir)
208
+
209
+
210
+ def get_hub_data():
211
+ """
212
+ To simplify the setup of different projects we use a common hub info data file at HUB_DATA_PATH_SHARED.
213
+
214
+ But if desired it can be overridden with a local data file at HUB_DATA_PATH_LOCAL
215
+ """
216
+
217
+ # if os.path.isfile(HUB_DATA_PATH_LOCAL):
218
+ # hub_data_path = HUB_DATA_PATH_LOCAL
219
+ if os.path.isfile(HUB_DATA_PATH_SHARED):
220
+ hub_data_path = HUB_DATA_PATH_SHARED
221
+ else:
222
+ raise FileNotFoundError(f"Couldn't locate {HUB_DATA_PATH_SHARED}. "
223
+ "Please run hub-auth.py first")
224
+
225
+ with io.open(hub_data_path, 'r', encoding='utf-8') as f:
226
+ return json.load(f)
227
+
228
+ def get_args():
229
+ parser = argparse.ArgumentParser()
230
+ parser.add_argument("--patterns", nargs='+', default=None, required=True, type=str, help="one or more patterns of files to match to add to the hub - make sure to quote those!")
231
+ parser.add_argument("--repo-path", type=str, required=True, help="path to the already cloned repo")
232
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
233
+ return parser.parse_args()
234
+
235
+ def main():
236
+
237
+ args = get_args()
238
+
239
+ if not (os.path.isdir(args.repo_path) and os.path.isdir(f"{args.repo_path}/.git")):
240
+ raise FileNotFoundError(f"Directory '{args.repo_path}' either doesn't exist or it's not a git clone directory. "
241
+ "Clone the desired repo first to '{args.repo_path}'.")
242
+
243
+ if len(args.patterns) == 0:
244
+ raise ValueError("At least one --pattern is required.")
245
+
246
+ print(f"* Processing {args.repo_path}")
247
+
248
+ if args.debug:
249
+ print(f"Tracking {len(args.patterns)} patterns:")
250
+ print(''.join(f"- {x}\n" for x in args.patterns))
251
+
252
+ hub_data = get_hub_data()
253
+ repo = Repository(args.repo_path)
254
+
255
+ hub_config_repo(hub_data, local_dir=args.repo_path)
256
+
257
+ files_dict = get_git_files_by_status(args.repo_path)
258
+
259
+ # we want untracked and modified files
260
+ uncommitted_files = get_new_and_modified_files(args.repo_path)
261
+
262
+ total_to_commit = 0
263
+ if len(uncommitted_files) > 0:
264
+ print(f"* Found {len(uncommitted_files)} uncommitted files:")
265
+ if args.debug:
266
+ print(''.join(f"- {f}\n" for f in uncommitted_files))
267
+
268
+ for pattern in args.patterns:
269
+
270
+ # *** new and modified files ***
271
+ # check that these are the files that match the pattern passed to git_add
272
+ uncommitted_files_matched = [f for f in uncommitted_files if fnmatch(f, pattern)]
273
+ print(f"* Found {len(uncommitted_files_matched)} uncommitted files matching pattern: {pattern}:")
274
+
275
+ if args.debug:
276
+ print(''.join(f"- {f}\n" for f in uncommitted_files_matched))
277
+
278
+ if len(uncommitted_files_matched) > 0:
279
+ total_to_commit += len(uncommitted_files_matched)
280
+
281
+ # # auto_lfs_track requires huggingface-hub-0.0.15, but transformers forces 0.0.12
282
+ repo.git_add(pattern=pattern) # , auto_lfs_track=True)
283
+ repo.git_commit(commit_message="new data")
284
+
285
+ if total_to_commit:
286
+ print(f"* Pushing {total_to_commit} files")
287
+ repo.git_push()
288
+ print("* Pushed")
289
+ else:
290
+ print("* Detected no new or modified files. Nothing to push.")
291
+
292
+
293
+ if __name__ == "__main__":
294
+
295
+ main()
bigscience/tools/slurm-status.py ADDED
@@ -0,0 +1,181 @@
1
+ #!/usr/bin/env python
2
+
3
+ #
4
+ # This tool reports on the status of the job - whether it's running or scheduled and various other
5
+ # useful data
6
+ #
7
+ # Example:
8
+ #
9
+ # slurm-status.py --job-name tr1-13B-round3
10
+ #
11
+
12
+ import argparse
13
+ import io
14
+ import json
15
+ import os
16
+ import re
17
+ import shlex
18
+ import smtplib
19
+ import socket
20
+ import subprocess
21
+ import sys
22
+ from datetime import datetime, timedelta
23
+
24
+ SLURM_GROUP_NAME = "six"
25
+
26
+ # this needs to be an actual email subscribed to [email protected]
27
+ FROM_ADDR = "[email protected]"
28
+ TO_ADDRS = ["[email protected]", "[email protected]"] # wants a list
29
+
30
+ def send_email(subject, body):
31
+ message = f"""\
32
+ From: {FROM_ADDR}
33
+ To: {", ".join(TO_ADDRS)}
34
+ Subject: {subject}
35
+
36
+ {body}
37
+ """
38
+
39
+ server = smtplib.SMTP("localhost")
40
+ #server.set_debuglevel(3) # uncomment if need to debug
41
+ server.sendmail(FROM_ADDR, TO_ADDRS, message)
42
+ server.quit()
43
+
44
+ def send_email_alert_job_not_scheduled(job_name):
45
+
46
+ subject = f"[ALERT] {job_name} is neither running nor scheduled to run"
47
+ body = f"""
48
+ ***ALERT: {job_name} is neither RUNNING nor SCHEDULED! Alert someone at Eng WG***
49
+
50
+ Please reply to this email once the issue has been taken care of, or if you are in the process of doing so, in case new alerts are sent again.
51
+
52
+ If unsure what to do, please post in the #bigscience-engineering slack channel.
53
+
54
+ *** Useful info ***
55
+
56
+ On call info: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr1-13B-base#on-call
57
+ Training logs: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr1-13B-base#watching-the-training-logs
58
+ Launching training: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr1-13B-base#training-scripts
59
+ """
60
+
61
+ send_email(subject, body)
62
+
63
+ def check_running_on_jean_zay():
64
+ fqdn = socket.getfqdn()
65
+ # sometimes it gives fqdn, other times it doesn't, so try to use both patterns
66
+ if not ("idris.fr" in fqdn or "idrsrv" in fqdn):
67
+ raise ValueError("This script relies on JZ's specific environment and won't work elsewhere. "
68
+ f"You're attempting to run it on '{fqdn}'.")
69
+
70
+ def run_cmd(cmd):
71
+ try:
72
+ git_status = subprocess.run(
73
+ cmd,
74
+ stderr=subprocess.PIPE,
75
+ stdout=subprocess.PIPE,
76
+ check=True,
77
+ encoding="utf-8",
78
+ ).stdout.strip()
79
+ except subprocess.CalledProcessError as exc:
80
+ raise EnvironmentError(exc.stderr)
81
+
82
+ return git_status
83
+
84
+
85
+ def get_slurm_group_status():
86
+ # we need to monitor slurm jobs of the whole group six, since the slurm job could be owned by
87
+ # any user in that group
88
+ cmd = f"getent group {SLURM_GROUP_NAME}"
89
+ getent = run_cmd(cmd.split())
90
+ # sample output: six:*:3015222:foo,bar,tar
91
+ usernames = getent.split(':')[-1]
92
+
93
+ # get all the scheduled and running jobs
94
+ # use shlex to split correctly and not on whitespace
95
+ cmd = f'squeue --user={usernames} -o "%.16i %.9P %.40j %.8T %.10M %.6D %.20S %R"'
96
+ data = run_cmd(shlex.split(cmd))
97
+ lines = [line.strip() for line in data.split("\n")]
98
+ return lines
99
+
100
+
101
+ def get_remaining_time(time_str):
102
+ """
103
+ slurm style time_str = "2021-08-06T15:23:46"
104
+ """
105
+
106
+ delta = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%S") - datetime.now()
107
+ # round microseconds
108
+ delta -= timedelta(microseconds=delta.microseconds)
109
+ return delta
110
+
111
+
112
+ def get_preamble():
113
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
114
+ # add a string that is easy to grep for:
115
+ return f"[{timestamp}] PULSE:"
116
+
117
+
118
+ def process_job(jobid, partition, name, state, time, nodes, start_time, notes):
119
+
120
+ job_on_partition = f"{jobid} on '{partition}' partition"
121
+ preamble = get_preamble()
122
+
123
+ if state == "RUNNING":
124
+ print(f"{preamble} {name} is running for {time} since {start_time} ({job_on_partition} ({notes})")
125
+ elif state == "PENDING":
126
+ if start_time == "N/A":
127
+ if notes == "(JobArrayTaskLimit)":
128
+ print(f"{preamble} {name} is waiting for the previous Job Array job to finish before scheduling a new one ({job_on_partition})")
129
+ elif notes == "(Dependency)":
130
+ print(f"{preamble} {name} is waiting for the previous job to finish before scheduling a new one using the dependency mechanism ({job_on_partition})")
131
+ else:
132
+ print(f"{preamble} {name} is waiting to be scheduled ({job_on_partition})")
133
+ else:
134
+ remaining_wait_time = get_remaining_time(start_time)
135
+ print(f"{preamble} {name} is scheduled to start in {remaining_wait_time} (at {start_time}) ({job_on_partition})")
136
+
137
+ return True
138
+ else:
139
+ # Check that we don't get some 3rd state
140
+ print(f"{preamble} {name} is unknown - fix me: (at {start_time}) ({job_on_partition}) ({notes})")
141
+
142
+
143
+ def get_args():
144
+ parser = argparse.ArgumentParser()
145
+ parser.add_argument("--job-name", type=str, required=True, help="slurm job name")
146
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
147
+ parser.add_argument("--no-email", action='store_true', help="do not email alerts")
148
+ return parser.parse_args()
149
+
150
+
151
+ def main():
152
+
153
+ check_running_on_jean_zay()
154
+
155
+ args = get_args()
156
+ status_lines = get_slurm_group_status()
157
+
158
+ in_the_system = False
159
+ for l in status_lines:
160
+ #print(f"l=[{l}]")
161
+
162
+ # XXX: apparently some jobs can be run w/o name and break the split() call, so match our
163
+ # name first and then split
164
+ if args.job_name in l:
165
+ jobid, partition, name, state, time, nodes, start_time, notes = l.split(None, 7)
166
+ #print("-".join([jobid, partition, name, state, time, nodes, start_time, notes]))
167
+ # XXX: add support for regex matching so partial name can be provided
168
+ if name == args.job_name:
169
+ in_the_system = True
170
+ process_job(jobid, partition, name, state, time, nodes, start_time, notes)
171
+
172
+ if not in_the_system:
173
+ preamble = get_preamble()
174
+ print(f"{preamble} ***ALERT: {args.job_name} is not RUNNING or SCHEDULED! Alert someone at Eng WG***")
175
+ if not args.no_email:
176
+ send_email_alert_job_not_scheduled(args.job_name)
177
+
178
+
179
+ if __name__ == "__main__":
180
+
181
+ main()
bigscience/train/tr1-13B-base/README.md ADDED
@@ -0,0 +1,850 @@
1
+ # Train 1 - 13B - unmodified Megatron gpt2 - baseline
2
+
3
+
4
+ ## Task
5
+
6
+ Auto-regressive objective using regular Megatron-LM GPT2 language model
7
+
8
+ ## Environment
9
+
10
+ To launch the environment use [start-tr1-13B](./start-tr1-13B)
11
+
12
+ ```
13
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
14
+ ```
15
+
16
+ We are using the following branches specific to this training:
17
+
18
+ - `$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B` a frozen `tr1-13B` branch - can cherry pick from `main` if need be.
19
+ - `$six_ALL_CCFRWORK/code/tr1-13B/DeepSpeed-big-science` - a mostly frozen `big-science` branch - under Deepspeed's team control - so it may also require a specific SHA if something gets broken upstream.
20
+
21
+
22
+ How the environment was built:
23
+ ```
24
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
25
+
26
+ conda create -y -n tr1-13B python=3.8
27
+ conda activate tr1-13B
28
+ conda install pytorch==1.8.1 torchvision cudatoolkit=10.2 -c pytorch -y
29
+ pip install deepspeed
30
+ pip install tensorboard
31
+
32
+ mkdir $six_ALL_CCFRWORK/code/tr1-13B
33
+
34
+ cd $six_ALL_CCFRWORK/code/tr1-13B
35
+ git clone https://github.com/bigscience-workshop/bigscience
36
+
37
+ cd $six_ALL_CCFRWORK/code/tr1-13B
38
+ git clone https://github.com/huggingface/transformers
39
+ cd transformers
40
+ pip install -e .
41
+
42
+ cd $six_ALL_CCFRWORK/code/tr1-13B
43
+ git clone https://github.com/bigscience-workshop/Megatron-DeepSpeed Megatron-DeepSpeed-tr1-13B
44
+ cd Megatron-DeepSpeed-tr1-13B
45
+ git checkout tr1-13B
46
+ pip install -r requirements.txt
47
+ pip install -e .
48
+ mkdir data
49
+ cd data
50
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json
51
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt
52
+ ```
53
+
54
+ `apex` and `deepspeed` build require an instance w/ beefy cpu and internet (unless cloned beforehand), so continue on the `prepost` partition:
55
+
56
+ ```
57
+ ssh jean-zay-pp
58
+ conda activate tr1-13B
59
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
60
+
61
+ cd $six_ALL_CCFRWORK/code/tr1-13B
62
+ git clone https://github.com/microsoft/DeepSpeed DeepSpeed-big-science
63
+ cd DeepSpeed-big-science
64
+ git checkout big-science
65
+ rm -rf build
66
+ TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log
67
+
68
+ cd $six_ALL_CCFRWORK/code/tr1-13B
69
+ git clone https://github.com/NVIDIA/apex
70
+ cd apex
71
+ pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log
72
+
73
+ #cp $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B ...
74
+
75
+ ```
76
+
77
+
78
+ ## Architecture
79
+
80
+ Config:
81
+
82
+ ```
83
+ NLAYERS=40
84
+ NHIDDEN=5120
85
+ NHEADS=32
86
+ FFN_HIDDEN_SIZE=20480
87
+
88
+ # --ffn_hidden_size $FFN_HIDDEN_SIZE \
89
+ GPT_ARGS=" \
90
+ --num-layers $NLAYERS \
91
+ --hidden-size $NHIDDEN \
92
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
93
+ --num-attention-heads $NHEADS \
94
+ [...]
95
+ "
96
+ ```
97
+
98
+ Sanity check:
99
+ ```
100
+ $ VOCAB_SIZE=50257 NLAYERS=40 NHIDDEN=5120 NHEADS=32 SEQ_LEN=2048; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')"
101
+ Model size: 13B
102
+ ```
103
+
104
+
105
+
106
+ ## Sequence Length
107
+
108
+ Default Megatron-LM language model with 2048 tokens sequence length
109
+
110
+ ```
111
+ SEQ_LEN=2048
112
+
113
+ --seq-length $SEQ_LEN \
114
+ --max-position-embeddings $SEQ_LEN \
115
+
116
+ ```
117
+
118
+
119
+ ## Global batch size
120
+
121
+ GBS = Global Batch Size
122
+
123
+ Use a schedule:
124
+
125
+ - start from 32k tokens (gbs=16)
126
+ - increase linearly to 2048k (gbs=1024) over 5M samples (for a total of ~10B tokens / 5k steps)
127
+ - then continue at 2048k (gbs=1024) for 145M samples (290B tokens / 145K steps)
128
+
129
+ Total: 300B tokens (150K steps)
130
+
131
+ Note: the training script wasn't updated when we flipped seqlen/gbs from 1024/2048 to 2048/1024, so we are currently planning to train for 300K steps (samples) and 600B tokens. But since longer doesn't impact anything, we will just stop at half the time. I updated the document to use the right 150K number so we don't repeat this mistake in the next training.
132
+
133
+ syntax:
134
+ ```
135
+ --rampup-batch-size <start batch size> <batch size increment> <ramp-up samples>
136
+ ```
137
+
138
+ At seqlen 2048 (2k tokens is bs=1), we get:
139
+
140
+ ```
141
+ --rampup-batch-size 16 16 5_000_000 \
142
+ --global-batch-size 1024 \
143
+ ```
144
+
145
+ This means it will start with global batch size 16 and over 63 (`(1024-16)/16`) intervals will increase the
146
+ batch size by 16 linearly to 1024.
147
+
148
+ 79365 (`5_000_000/63`) is the number of samples before the next GBS increment. That is we run at GBS=16 for 79365 samples, or 4960 steps (`79365/16`). Then we run at GBS=32 for 79365 samples, or 2480 steps. Then 1653 steps at GBS=48, 1240 at GBS=64, etc....
149
+
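+ To sanity-check this arithmetic, here is a quick sketch (not part of the training scripts) that reproduces the per-stage numbers above:
+
+ ```
+ python -c '
+ samples_per_incr = 5_000_000 // 63   # 79365 samples before each GBS increment
+ for gbs in range(16, 65, 16):        # just the first few ramp-up stages
+     print(f"GBS={gbs:<3} -> {samples_per_incr} samples, {samples_per_incr // gbs} steps")
+ '
+ ```
+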
150
+ Notes:
151
+ * `--rampup-batch-size` requires the use of `--train-samples` and can't be used with `--train-iters`.
152
+ * global batch size has to be divisible by micro-batch-size * DP_SIZE
153
+
154
+ Important: the software will fail if GBS is not divisible by `MBS * DP_SIZE`.
155
+ Though Jared's recommendation is to use MBS=1 and then it's much easier to match GBS/DP_SIZE even at GBS=16.
156
+
157
+ `DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE)`
158
+
159
+ Since the increments are in GBS=16, we can use at most DP_SIZE=16, which means that at most we can use 32 nodes (`32*4/(4*2)=16`).
160
+
161
+ Once GBS reaches 1024, we can use up to 8192 GPUs (1024*2*4), so we will be able to switch to 64 nodes or maybe even 128 nodes (4 gpus each). We can't use any number of nodes between 64 and 128 though, because the number has to be 2**X. So 96 nodes won't work, because it has a factor of 3 in it.
162
+
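+ The node-count constraint can be checked quickly - a sketch (not part of the training scripts), assuming 4 GPUs per node, TP=2, PP=4 and MBS=1 as above:
+
+ ```
+ python -c '
+ GBS, GPUS_PER_NODE, TP, PP, MBS = 1024, 4, 2, 4, 1
+ for nnodes in (32, 64, 96, 128):
+     dp = nnodes * GPUS_PER_NODE // (TP * PP)
+     status = "ok" if GBS % (MBS * dp) == 0 else "does not divide GBS"
+     print(f"{nnodes:>3} nodes -> DP_SIZE={dp:<3} {status}")
+ '
+ ```
+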
163
+
164
+
165
+
166
+ ## Checkpoints
167
+
168
+ We need the checkpoints:
169
+
170
+ 1. in order to be able to resume the training when the training is prematurely stopped for whatever reason.
171
+ 2. In addition, a special saving schedule has been requested by the interpretability group.
172
+
173
+ Because there are 3 different schedules, and Megatron-LM has only fixed checkpoint saving schedule, we will need 3 different run scripts, to be launched in a sequence, each starting once the previous has finished.
174
+
175
+ 1. steps 1-100 - 10 checkpoints, interval 10 steps
176
+ 2. steps 101-1000 - 50 checkpoints, interval 18 steps
177
+ 3. steps 1001-150K - 100+ checkpoints, interval 1500 steps
178
+ 4. if still needed, can continue with schedule 3
179
+
180
+ note: the interpretability study doesn't care about checkpoints in the range of 1k-20k, so we only save those to be able to restart the training.
181
+
182
+ It'd have been
183
+ ```
184
+ ROUND=1
185
+ if [[ ${ROUND} == 1 ]]; then TRAIN_ITER=100 SAVE_INTERVAL=10
186
+ elif [[ ${ROUND} == 2 ]]; then TRAIN_ITER=1000 SAVE_INTERVAL=18
187
+ elif [[ ${ROUND} == 3 ]]; then TRAIN_ITER=150000 SAVE_INTERVAL=1500
188
+ else echo "invalid ROUND: $ROUND"
189
+ fi
190
+ --train-iters $TRAIN_ITER \
191
+ --save-interval $SAVE_INTERVAL \
192
+ ```
193
+
194
+ Unfortunately, `--rampup-batch-size` can't work with `--train-iters` and we have to use `--train-samples` instead. It has to stay fixed through all of the trainings and can't be changed, otherwise resuming from checkpoint will break.
195
+
196
+ So the only thing left is to use `--exit-interval` which is in steps.
197
+
198
+ Which gives us the three rounds:
199
+
200
+ ```
201
+ ROUND=1
202
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
203
+ elif [[ ${ROUND} == 2 ]]; then EXIT_INTERVAL=900 SAVE_INTERVAL=18
204
+ elif [[ ${ROUND} == 3 ]]; then SAVE_INTERVAL=1500
205
+ else echo "invalid ROUND: $ROUND"
206
+ fi
207
+
208
+ --train-samples 150_000_000 \
209
+ --exit-interval $EXIT_INTERVAL \
210
+ --save-interval $SAVE_INTERVAL \
211
+ ```
212
+
213
+ `--exit-interval` counts steps only for the current run, regardless of previous steps. So to stop at effective step 1000, in the second round we tell it to exit at 900 (the first round did the first 100).
214
+
215
+ And unfortunately, this proved to be not supported by Megatron-LM either at the moment. There are a few possible ways to approach this:
216
+
217
+ 1. One approach is to simply use 3 independent trainings, using the same `--seed` and `--exit-interval` as above, and moving the checkpoints away after each training.
218
+
219
+ 2.
220
+ XXX: The Megatron code could also be extended to implement `--exit-samples` - a sample-based exit strategy.
221
+
222
+ 3. Yet another approach is to do it manually. Kill the training after 100, and then restart and kill after 900 iterations, while changing the save interval, and manually fixing up the `checkpoints/latest` to point to the correct checkpoint - since the manual killing might have a few extra checkpoints. So the recipe to follow:
223
+
224
+ ```
225
+ ROUND=1
226
+ if [[ ${ROUND} == 1 ]]; then SAVE_INTERVAL=10
227
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=18
228
+ elif [[ ${ROUND} == 3 ]]; then SAVE_INTERVAL=1500
229
+ else echo "invalid ROUND: $ROUND"
230
+ fi
231
+
232
+ --train-samples 150_000_000 \
233
+ --save-interval $SAVE_INTERVAL \
234
+ ```
235
+
236
+ (could also do it with 3 parallel jobs by using the same seed!)
237
+
238
+ ```
239
+ --seed 42
240
+ ```
241
+
242
+ Therefore do this manually:
243
+
244
+ 0.
245
+ * delete the old checkpoints `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints`
246
+
247
+ 1.
248
+
249
+ * set to `ROUND=1`
250
+ * `sbatch tr1-13B-round1.slurm`
251
+ * run for 100+ steps
252
+ * scancel the job
253
+ * clean up `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints` to remove any checkpoints beyond 100
254
+ * make sure `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/latest` contains 100
255
+
256
+
257
+ 2.
258
+
259
+ * set to `ROUND=2`
260
+ * `sbatch tr1-13B-round1.slurm`
261
+ * run for the additional 900+ steps (it's incremental, so the script already knows it started at 100)
262
+ * scancel the job
263
+ * clean up `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints` to remove any checkpoints beyond 1000
264
+ * make sure `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/latest` contains 1000
265
+
266
+
267
+ 3.
268
+
269
+ * set to `ROUND=3`
270
+ * `sbatch tr1-13B-round1.slurm`
271
+ * run normally
272
+
273
+
274
+
275
+ Because it'd be potentially too demanding to export TBs of data and the intended users might not even be able to download all that data, most likely we will need to run the interpretability post-analysis experiments on JZ and send the reports to those who need them.
276
+
277
+ Megatron-LM resumes from the most recent checkpoint by default. Does it need the exact path or does it auto-discover the latest checkpoint by default?
278
+
279
+ ```
280
+ --load path_to_check_point \
281
+ ```
282
+
283
+
284
+ Remi suggests 100TB on SCRATCH shouldn't be a problem.
285
+
286
+
287
+
288
+
289
+
290
+ ## Optimizer
291
+
292
+ - AdamW, β1=0.9, β2=0.999, eps=1e-8
293
+ - learning rate:
294
+ * peak=1e-4
295
+ * warmup over 2000 steps
296
+ * cosine decay for learning rate down to 10% of its value, over 260B tokens (after 260 billion tokens, training continues at 10% of the original learning rate)
297
+ - clipping by global norm of 1 (as in GPT-3)
298
+ - weight decay of 0.1
299
+
300
+ We need lr-decay in samples, so tokens2samples = 260B / 2048 = 126_953_125
301
+
302
+ We need lr-warmup in samples, so doing the math again as in checkpoints
303
+
304
+ 2000=160*12+80
305
+
306
+ so we will get to 2000 in 216_320 samples `16*160*12*(12+1)/2+16*13*80`
307
+
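+ A quick check of the two numbers above (this just reproduces the document's own formulas, it is not derived from the training scripts):
+
+ ```
+ python -c 'print(260_000_000_000 // 2048)'         # 126953125 -> --lr-decay-samples
+ python -c 'print(16*160*12*(12+1)//2 + 16*13*80)'  # 216320    -> --lr-warmup-samples
+ ```
+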
308
+
309
+
310
+ ```
311
+ --optimizer adam \
312
+ --adam-beta1 0.9 \
313
+ --adam-beta2 0.999 \
314
+ --adam-eps 1e-8 \
315
+ --lr 1e-4 \
316
+ --min-lr 1e-5 \
317
+ --lr-decay-style cosine \
318
+ --lr-decay-samples 126_953_125 \
319
+ --lr-warmup-samples 216_320 \
320
+ --clip-grad 1.0 \
321
+ --weight-decay 1e-1 \
322
+ ```
323
+
324
+
325
+ ## Logging
326
+
327
+
328
+ For now enable all tensorboard features, later we might decide to not log it all.
329
+
330
+ We are logging:
331
+
332
+ - lr (enabled by default)
333
+ - bs (enabled)
334
+ - loss (always)
335
+ - loss-scale (log_loss) (enabled by default)
336
+ - grad-norm (always)
337
+ - num-zeros (always)
338
+ - param-norm (always)
339
+ - timers (enabled)
340
+ - validation loss (always)
341
+ - validation ppl (perplexity) (enabled)
342
+
343
+ Almost all of these are also logged against `consumed_train_samples`.
344
+
345
+ XXX: nice to have:
346
+ - throughput - Tflops/gpu or tokens
347
+
348
+
349
+ **Tensorboard config**:
350
+
351
+ ```
352
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
353
+
354
+ --tensorboard-dir $TENSORBOARD_PATH \
355
+ --tensorboard-queue-size 5 \
356
+ --log-timers-to-tensorboard \
357
+ --log-batch-size-to-tensorboard \
358
+ --log-validation-ppl-to-tensorboard \
359
+ ```
360
+
361
+ **CodeCarbon config**:
362
+
363
+ ```
364
+ CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon
365
+
366
+ --codecarbon-dir $CODECARBON_PATH \
367
+ ```
368
+
369
+
370
+
371
+ **Training logs**
372
+
373
+ All training logs are piped into `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs/main_log.txt`.
374
+
375
+
376
+ ## Exporting
377
+
378
+ Before starting training, create cloned git repos where the output data will go.
379
+
380
+ The last 4 should all be git repo clones
381
+ ```
382
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
383
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
384
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
385
+ CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon
386
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
387
+ ```
388
+
389
+ I created 4 repos at https://huggingface.co/bigscience/ and now we can clone those as the dirs data will be output into:
390
+
391
+ ```
392
+ cd $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
393
+ git clone https://huggingface.co/bigscience/tr1-13B-checkpoints checkpoints
394
+ git clone https://huggingface.co/bigscience/tr1-13B-tensorboard tensorboard
395
+ git clone https://huggingface.co/bigscience/tr1-13B-codecarbon codecarbon
396
+ git clone https://huggingface.co/bigscience/tr1-13B-logs logs
397
+ ```
398
+
399
+ If this is your first time running git-lfs on this system, you need to init it once:
400
+ ```
401
+ module load git-lfs
402
+ git lfs install
403
+ ```
404
+
405
+ Most of the data types we are going to sync will be large or huge, and most are already lfs-tracked by default, so no setup is required - except for our log file, which can also grow large, so we need to set it up:
406
+
407
+ ```
408
+ cd logs
409
+ git lfs track "*.txt"
410
+ git commit -m "large text files" .gitattributes
411
+ git push
412
+ ```
413
+
414
+ ### Cronjobs to auto-sync the hub
415
+
416
+ Now we just need a cronjob that automatically does the following for each type of data to export:
417
+
418
+ ```
419
+ cd checkpoints
420
+ git add */*.pt
421
+ git commit -am "new data"
422
+ git push
423
+ ```
424
+
425
+ This job is performed automatically by `hub-sync.py`. For full details see: [Automated upload to the hub](../../data/export.md#automated-upload-to-the-hub).
426
+
427
+ **Weights checkpoints**
428
+
429
+ Currently, we aren't exporting checkpoints.
430
+
431
+ **Tensorboard**
432
+
433
+ Here is the slurm script to sync the tensorboard data: [tr1-13B-hub-sync-tensorboard.slurm](./tr1-13B-hub-sync-tensorboard.slurm)
434
+
435
+ **CodeCarbon**
436
+
437
+ Currently the feature is not enabled, so there is nothing to log.
438
+
439
+ **Log of logs**
440
+
441
+ Let's also create a log of logs. We will pipe all the logs in there and also the various status reports - e.g. while the training is queued in SLURM and not yet running.
442
+
443
+ Here is the slurm script to sync the raw logs data: [tr1-13B-hub-sync-logs.slurm](./tr1-13B-hub-sync-logs.slurm)
444
+
445
+ The main source of logs is the training scripts. The logs are gathered via
446
+ ```
447
+ $CMD ... 2>&1 | tee -a $LOGS_PATH/main_log.txt
448
+ ```
449
+ in the training slurm script.
450
+
451
+ XXX: we could also add various other diagnostics appended to the main log file. e.g. shared memory, etc.
452
+
453
+
454
+
455
+
456
+ ## Deepspeed config
457
+
458
+ Using Deepspeed's activation checkpointing to use a lot less GPU memory
459
+
460
+ ```
461
+ --deepspeed-activation-checkpointing \
462
+ ```
463
+
464
+ Possible extras:
465
+
466
+ - Enabling `"contiguous_memory_optimization": true,` can help to reduce memory fragmentation, but it requiressetting `number_checkpoints`. This should be set to be equal to number of transformer blocks per pipeline stage times the number of pipeline parallel stage. Samyam says: Full disclaimer: I have only used this with ZeRO but not with pipeline parallelism. But by setting the number_checkpoints as described, it should work for PP too. The benefit of using it is usually only apparent when running very close to the memory limit.
467
+
468
+
469
+
470
+ ## Dataset
471
+
472
+ - Full 304.2M version (529GB) : `$six_ALL_CCFRWORK/datasets-custom/oscar-en`
473
+ - Tiny 10K version (56M): `$six_ALL_CCFRWORK/datasets-custom/oscar-en-10k`
474
+
475
+ We are using English-only subset of [the OSCAR dataset](https://huggingface.co/datasets/oscar) with full documents (*not* individual sentences).
476
+
477
+ We have about 300M records in 1.2TB of jsonl data (about 3/4 of which are smaller than 1K tokens), which amounts to about 280B tokens (estimated at about 4.5 chars/word).
478
+
479
+ Megatron's preprocessing tool indexes everything and then at training time the Dataloader serves chunks of the desired fixed sequence length (2048 tokens in our case).
480
+
481
+ For more information on the pre-processing process and various estimations see: [OSCAR](../../data/oscar/README.md).
482
+
483
+
484
+
485
+ ## Dealing with 20h SLURM limit
486
+
487
+ First, let's ensure we save a checkpoint just before SLURM kills the job
488
+
489
+ Let's try 19:50, i.e. 1190 minutes (`60*20-10`):
490
+
491
+ ```
492
+ --exit-duration-in-mins 1190 \
493
+ ```
494
+
495
+ For the bigger models 10min might not be long enough to finish an iteration (assuming the limit hits right as one starts) and write out a checkpoint.
496
+
497
+ Then we need to figure out how to schedule the next slurm job as soon as the currently running one is over in 20h.
498
+
499
+ We will use job arrays to solve this. Let's start with just 10 such jobs:
500
+
501
+ ```
502
+ sbatch --array=1-10%1 tr1-13B-round1.slurm
503
+ ```
504
+
505
+ `%1` limits the number of simultaneously running tasks from this job array to 1, since we want them to run in a sequence.
506
+
507
+ Alternatively, as always this param can be part of the script:
508
+ ```
509
+ #SBATCH --array=1-10%1
510
+ ```
511
+
512
+ ## Crontab
513
+
514
+ JZ doesn't have a user-accessible crontab facility, so we have to emulate it with a self-restarting slurm job that polls some dir for new jobs to run. For full details on how this works please see [Crontab Jobs](../../jz/crontab/).
515
+
516
+ But to use it simply put your slurm scripts into either:
517
+ ```
518
+ $six_ALL_CCFRWORK/cron/cron.hourly
519
+ $six_ALL_CCFRWORK/cron/cron.daily
520
+ ```
521
+
522
+ and the jobs will be run on an hourly or daily basis. This is similar to Linux's `/etc/cron.*` setup, except the jobs aren't guaranteed to start exactly on the hour, but should run around that time.
523
+
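+ For example, to schedule a new hourly job, just drop its slurm script into that dir (the file name here is hypothetical), and presumably remove it again to stop it from being scheduled:
+
+ ```
+ cp my-watchdog.slurm $six_ALL_CCFRWORK/cron/cron.hourly/
+ # ... and later, to stop it:
+ rm $six_ALL_CCFRWORK/cron/cron.hourly/my-watchdog.slurm
+ ```
+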
524
+ Currently we have:
525
+
526
+ ```
527
+ ls -1 $six_ALL_CCFRWORK/cron/cron.hourly/*slurm
528
+ tr1-13B-hub-sync-logs.slurm
529
+ tr1-13B-hub-sync-tensorboard.slurm
530
+ tr1-13B-slurm-status.slurm
531
+ ```
532
+
533
+ The first 2 sync log files to the hub and the last one monitors the health of the training and alerts us to any problems.
534
+
535
+
536
+ ## Estimated run time
537
+
538
+ Best case scenario when training 24/7 on 64 nodes with 4 gpus each:
539
+ ```
540
+ $ python -c 'Btokens=300; Bmodel=13; n_gpus=256; Tflops=45; \
541
+ print(f"{Btokens*1e9*8*Bmodel*1e9/(n_gpus*Tflops*1e12*60*60*24):0.2f} days")'
542
+ 31.35 days
543
+ ```
544
+
545
+ You will find the detailed explanation of the estimation formula [here](../../math/README.md#estimate-model-training-time).
546
+
547
+ The training was much slower in the first 10k steps because of the batch size rampup, where the pipeline was very inefficient.
548
+
549
+ And then we were only able to use 20h slurm jobs, with unpredictable gaps of wait time in between (1-30 hours!), so it's impossible to predict when the training will be finished.
550
+
551
+
552
+ ## Memory usage
553
+
554
+ During training we currently use 256GB (8x 32GB gpus) for each full replica (TP=2 + PP=4), the rest is ZeRO-DP. So if we throw in x times more GPUs we just speed things up by having more 2-node replicas.
555
+ The required memory breakdown:
556
+
557
+ 1. 4B for fp32 weights
558
+ 2. 2B for fp16 weights
559
+ 3. 8B for optimizer states.
560
+ 4. 4B for gradients (we don't save these in the checkpoint)
561
+ 5. plus memory for activations and temps, which total majorly depends on the seqlen and mini batch size - and since we use activation checkpointing this memory need is quite small.
562
+
563
+ Total: 234GB (18*13) plus activations and temps memory. So we are close to 256GB here.
564
+
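+ In other words, roughly 18 bytes per parameter. A quick check of the 234GB figure (using decimal GB, as above):
+
+ ```
+ python -c 'bytes_per_param = 4 + 2 + 8 + 4; print(f"{bytes_per_param * 13}GB")'   # 234GB
+ ```
+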
565
+ Activation memory would have been much much bigger if it weren't for activation checkpointing.
566
+
567
+
568
+ ## Checkpoint Back Up
569
+
570
+ To copy multiple checkpoints excluding optimizer states, first move the desired checkpoints to back up to some dedicated dir, e.g. `tr1-13B-round2/checkpoints`, then copy just the needed files:
571
+
572
+ ```
573
+ srun -p prepost -A six@cpu --time=20:00:00 --pty bash
574
+ mkdir to-upload
575
+ rsync -acvhu --no-compress --info=progress2 --exclude "zero*pt" tr1-13B-round2/checkpoints/ to-upload
576
+ ```
577
+
578
+ then to back those up:
579
+
580
+ ```
581
+ cp -arun $six_ALL_CCFRSCRATCH/checkpoints/to-upload/* $six_ALL_CCFRSTORE/checkpoints/tr1-13B
582
+ ```
583
+
584
+
585
+ **Final checkpoint with optimizer states:**
586
+
587
+ ```
588
+ mkdir $six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim
589
+ cp -arun $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/global_step168000 $six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim/
590
+ ```
591
+
592
+ This is the final checkpoint, that can be resumed from at will:
593
+
594
+ ```
595
+ $six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim/global_step168000
596
+ ```
597
+
598
+ Here is the corresponding log:
599
+ ```
600
+ iteration 168000/ 311541 | consumed samples: 153013584 | elapsed time per iteration (ms): 13248.2 | learning rate: 1.000E-05 | global batch size: 1024 | lm loss: 2.376641E+00 | loss scale: 131072.0 | grad norm: 19767.052 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
601
+ time (ms)
602
+ --------------------------------------------------------------------------------------------------
603
+ validation loss at iteration 168000 | lm loss value: 2.342049E+00 | lm loss PPL: 1.040253E+01 |
604
+ --------------------------------------------------------------------------------------------------
605
+ ```
606
+
607
+ ## Checkpoint Conversion and Upload
608
+
609
+
610
+ **Important**: there was a bug in the converter on the transformers side, so we need this fix:
611
+ https://github.com/huggingface/transformers/pull/13735
612
+ if it's not merged yet, install this branch first. If it's already merged just make sure you use `transformers@master` - XXX: I will update the script to require a specific version once a new version of transformers is released.
613
+
614
+
615
+ Open a long running interactive shell:
616
+ ```
617
+ srun -p compil --cpus-per-task=40 -A six@cpu --time=6:00:00 --pty bash
618
+ ```
619
+ then convert:
620
+
621
+ ```
622
+ cd $six_ALL_CCFRSCRATCH/checkpoints/to-upload
623
+ time find * -maxdepth 0 -type d -name "global_step*" -exec $six_ALL_CCFRWORK/code/Megatron-DeepSpeed/tools/convert_checkpoint/deepspeed_to_transformers.py --input_folder {} --output_folder hf-fixed/{} \;
624
+ ```
625
+
626
+ It takes about 100sec per 26GB checkpoint.
627
+
628
+ The results will be all under `hf/`.
629
+
630
+ Now to uploading to the hub.
631
+
632
+ Prepare the target dir:
633
+
634
+ ```
635
+ #git -c http.extraHeader="Authorization: Basic " clone https://huggingface.co/bigscience/tr1-13B-checkpoints/
636
+
637
+ cd tr1-13B-checkpoints
638
+
639
+
640
+ huggingface-cli lfs-enable-largefiles .
641
+
642
+ git config --unset user.email
643
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
644
+ ```
645
+ We are going to put each checkpoint into its own branch with the same name.
646
+
647
+ ```
648
+ mv ../hf/global_step* .
649
+ time find * -maxdepth 0 -type d -name "global_step*" -exec git checkout main \; -exec git checkout -b {} \; -exec git add {} \; -exec git commit -m "add {}" \; -exec git push --set-upstream origin {} \;
650
+ git checkout main
651
+ ```
652
+
653
+ Fixing up failed pushes / verifying that all pushes went through, re-pushing if needed
654
+
655
+ ```
656
+ git branch | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; git push --set-upstream origin $1]'
657
+ ```
658
+
659
+ If `git push` fails re-run with: `GIT_TRACE=1 GIT_TRANSFER_TRACE=1 GIT_CURL_VERBOSE=1 git push` to see what the actual error is.
660
+
661
+
662
+ OK, the branch-per-checkpoint hub repo proved to be very difficult to upload and even more difficult to use after the upload.
663
+
664
+ So let's try GCS bucket:
665
+
666
+ ```
667
+ gcloud auth login
668
+ gcloud config set project bigscience
669
+ gsutil cp -r hf-fixed/* gs://bigscience-backups/tr1-13B/checkpoints/
670
+
671
+ ```
672
+ or via rsync:
673
+ ```
674
+ gsutil -m rsync -r hf-fixed/* gs://bigscience-backups/tr1-13B/checkpoints/
675
+ ```
676
+
677
+ ```
678
+ start-prod
679
+ cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/
680
+ gsutil -m rsync -r hf-fixed1/* gs://bigscience-backups/tr1-13B/checkpoints/
681
+
682
+ ```
683
+
684
+ or if needed to speed up the upload via multiple parallel copies open 2 `srun` instances and in one:
685
+ ```
686
+ gsutil cp -r hf-fixed1/* gs://bigscience-backups/tr1-13B/checkpoints/
687
+ ```
688
+ and in another:
689
+ ```
690
+ gsutil cp -r hf-fixed2/* gs://bigscience-backups/tr1-13B/checkpoints/
691
+ ```
692
+
693
+ We can't use `rsync` with multiple sources - it can only rsync a single dir.
694
+
695
+ Later we fixed `config.json` to use the correct `gelu_fast` activation and rsynced the GCS bucket.
696
+
697
+ (moved all the hf-fixed sub-dirs into a new folder `checkpoints`)
698
+
699
+ ```
700
+ start-prod
701
+ cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/
702
+ perl -pi -e 's|gelu|gelu_fast|' checkpoints/*/config.json
703
+ gsutil -m rsync -x ".*bin$" -r checkpoints gs://bigscience-backups/tr1-13B/checkpoints
704
+ ```
705
+ This is really fast since we exclude the checkpoint files (`-x ".*bin$"`).
706
+
707
+
708
+ ## Other backups
709
+
710
+ Logs:
711
+
712
+ ```
713
+ mkdir $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/
714
+ tar -zcvf $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/tensorboard.tgz $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tensorboard
715
+ tar -zcvf $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/logs.tgz $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs
716
+ ```
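+ 
+ A quick check that the archives are readable (a sketch):
+ 
+ ```
+ for f in $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/*.tgz; do
+     echo "== $f"; tar -tzf $f | head -3
+ done
+ ```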
717
+
718
+ note: codecarbon wasn't ready during this training, so nothing to back up there.
719
+
720
+
721
+ ## Exports
722
+
723
+ - GCS https://console.cloud.google.com/storage/browser/bigscience
724
+ - The Hub https://huggingface.co/bigscience
725
+
726
+
727
+ ## Training scripts
728
+
729
+ The training script is:
730
+
731
+ - [tr1-13B-round1.slurm](./tr1-13B-round1.slurm)
732
+
733
+ We also have:
734
+
735
+ - [tr1-13B-short.slurm](./tr1-13B-short.slurm)
736
+
737
+ which is a very small model to do quick testing and debug, but otherwise the same as the main script.
738
+
739
+ The scripts are located at:
740
+
741
+ ```
742
+ cd $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base
743
+ ```
744
+
745
+ When no jobs are scheduled, currently we launch the main training script using:
746
+
747
+ ```
748
+ sbatch --array=1-5%1 tr1-13B-round1.slurm
749
+ ```
750
+ This will schedule five 20h trainings which will run one at a time, once the scheduler yields to the request, with an unknown wait time between jobs.
751
+
752
+ If there is a job running already, **do not use the above command** as we can't have 2 trainings overlap. If there is a training already running you can:
753
+
754
+ 1. either tell `sbatch` to start the new job once the currently running job succeeds, using:
755
+
756
+ ```
757
+ sbatch --dependency=CURRENTLY_RUNNING_JOB_ID --array=1-5%1 tr1-13B-round1.slurm
758
+ ```
759
+
760
+ Where `CURRENTLY_RUNNING_JOB_ID` is the ID of the job currently reported as running. For example, if the report of the last job is:
761
+ ```
762
+ [2021-08-16 22:08:01] tr1-13B-round3 is running for 18:15:59 since 2021-08-16T03:52:02 (711114_4 on 'gpu_p13' partition (r7i4n[1-7],r7i7n[1-8],r8i0n0,r8i5n[3-8],r8i6n[0-8],r9i0n8,r9i1n[0-8],r9i2n[7-8],r9i3n[0-8],r9i4n[0-8],r9i5n[0-2])
763
+ ```
764
+ then the currently running job ID is `711114_4` (see the sketch after this list for grabbing it programmatically). You can also gather the same info about the current scheduler status using `squeue`:
765
+
766
+ ```
767
+ squeue --user=$(getent group six | cut -d: -f4) | grep tr1-13B
768
+ ```
769
+
770
+ 2. you could also check how much time is left before the current job finishes (based on the training log files) and then pass that many hours to `sbatch`. For example, if the job has **less** than 2 hours to run, but more than 1 hour, you want to launch it `now+2hours` from now:
771
+
772
+ ```
773
+ sbatch --begin now+2hours --array=1-5%1 tr1-13B-round1.slurm
774
+ ```
775
+
776
+ Using `--dependency` may lead to shorter wait times: if the time passed to `--begin` allows even a few minutes of delay after the last job stops, the scheduler may already have started other jobs, even if their priority is lower than our job's. That's because the scheduler ignores any jobs with `--begin` until the specified time arrives.
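+ 
+ For example, to grab the running job ID programmatically and feed it to `--dependency` (a sketch; `--name=tr1-13B-round3` assumes the job name shown above):
+ 
+ ```
+ JOBID=$(squeue --user=$(getent group six | cut -d: -f4) --name=tr1-13B-round3 --noheader --format=%i | head -1)
+ sbatch --dependency=$JOBID --array=1-5%1 tr1-13B-round1.slurm
+ ```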
777
+
778
+
779
+ ## On Call
780
+
781
+ When a person is on call, they need to watch that the training is either running or scheduled to run. If neither is happening they need to schedule a new training. When this situation occurs the log file will report:
782
+
783
+ ```
784
+ ***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG***
785
+ ```
786
+
787
+ An email alert is sent as well to `[email protected]`.
788
+
789
+
790
+ The next section explains how to watch the logs.
791
+
792
+
793
+ Other than waiting for the watchdog which runs once an hour, one can immediately see if anything is scheduled with:
794
+
795
+ ```
796
+ $six_ALL_CCFRWORK/code/tr1-13B/bigscience/tools/slurm-status.py --job-name tr1-13B-round3
797
+ ```
798
+
799
+ If for some reason the training is not scheduled or running, to schedule a new training:
800
+
801
+ ```
802
+ cd $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base
803
+ sbatch --array=1-5%1 tr1-13B-round1.slurm
804
+ ```
805
+
806
+ This will schedule a job array of 5 jobs of 20h each, so if all goes well, that's at least 4 days of not needing to do anything other than being on the lookout for potential crashes.
807
+
808
+ XXX: need a troubleshooting section, but it belongs elsewhere in the document since it is not specific to this training.
809
+
810
+ 1. if one of the nodes gets a corrupted gpu and the training crashes, there is a risk that the next job in the array will be allocated the same node, in which case it'll crash again. We need a method to identify which node is corrupted and report that to [email protected] so they know to fix it, and to exclude this node from the slurm job by adding a list of nodes to exclude as follows:
811
+
812
+ ```
813
+ sbatch --exclude=r7i5n2,r7i5n6 ...
814
+ ```
815
+ but we currently have no way to identify which node is faulty. This may become possible if we switch to pt-1.9.0 or higher, where torch elastic replaces the usual launcher. Otherwise we have to use dedicated log files per node via: `#SBATCH --output=%x-%j-%N.out`.
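+ 
+ Once per-node log files are in place, something like this could point at the culprit (a sketch, assuming the `%x-%j-%N.out` naming, where `%N` puts the node name into the filename):
+ 
+ ```
+ grep -l "uncorrectable ECC error" tr1-13B-round3-*.out
+ ```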
816
+
817
+
818
+ ## Watching the training logs
819
+
820
+ On JZ:
821
+ ```
822
+ tail -f $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs/main_log.txt
823
+ ```
824
+
825
+ Outside of JZ:
826
+ ```
827
+ perl -e '$u=shift; $b=0; while(1){($e)=qx[curl -sI $u]=~/content-length: (\d+)/; \
828
+ print qx[curl -sr $b-$e -L $u] if $e>$b; $b=$e; sleep 300}' \
829
+ https://huggingface.co/bigscience/tr1-13B-logs/resolve/main/main_log.txt
830
+ ```
831
+ Currently the updates happen hourly, so this is a delayed version of `tail -f`.
832
+
833
+
834
+ ## CodeCarbon
835
+
836
+
837
+ CodeCarbon wasn't ready until the training was over, so we only did an additional 10h run to measure with, and then extrapolated to the whole training.
838
+
839
+ https://huggingface.co/bigscience/tr1-13B-codecarbon
840
+
841
+ This set of records captures the startup time and 2499 iterations in 2 records per gpu, since there was also an intermediate checkpoint saved half-way, and we flush the CC records on each checkpoint save.
842
+
843
+ The training had 168000 iterations, so multiply the reported data by 67. This is quite approximate, since we were using 16 nodes during the ramp-up, then 64, and only for the last 3 weeks 128 nodes.
844
+
845
+ Caveat emptor: I'm not sure whether the CC reports overlap, since each report is per gpu and they may be measuring the same things other than the gpu itself. So this requires research.
846
+
847
+ Each csv file contains a report for a single gpu/process. There are 512 reports.
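+ 
+ As a rough illustration of the extrapolation, assuming the reports contain a numeric `energy_consumed` column in kWh (the column name is an assumption - adjust to the actual CSV header):
+ 
+ ```
+ awk -F, '
+     FNR==1 { c=0; for (i=1; i<=NF; i++) if ($i=="energy_consumed") c=i; next }  # locate the column in each file
+     c      { kwh += $c }                                                        # sum over all per-gpu reports
+     END    { printf "measured: %.1f kWh, extrapolated x67: %.1f kWh\n", kwh, kwh*67 }
+ ' *.csv
+ ```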
848
+
849
+
850
+ ## Extras
bigscience/train/tr1-13B-base/chronicles.md ADDED
@@ -0,0 +1,425 @@
1
+ # tr1-13B Chronicles
2
+
3
+ Notes on the training progress with a particular focus on any encountered problems and their diagnosis and solutions/prevention.
4
+
5
+ To follow the training progress charts, see: [tensorboard](https://huggingface.co/bigscience/tr1-13B-tensorboard/tensorboard).
6
+
7
+ To follow the raw training logs see: [logs](https://huggingface.co/bigscience/tr1-13B-logs/).
8
+
9
+
10
+ ## Round1 SAVE_INTERVAL=10
11
+
12
+ NNODES=16
13
+
14
+ saved checkpoint each 10 steps
15
+
16
+ `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tr1-13B-round1/checkpoints`
17
+
18
+ 10 checkpoints (Every 10 steps 1-100) - 4TB
19
+
20
+ ## Round2 SAVE_INTERVAL=18
21
+
22
+ NNODES=16
23
+
24
+ moved the round1's checkpoints away
25
+
26
+ rerun from scratch with the same seed
27
+
28
+ saved checkpoint each 18 steps
29
+
30
+ `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tr1-13B-round2/checkpoints`
31
+
32
+ 51 checkpoints (Every 18 steps 101-1000) - 20TB
33
+
34
+
35
+ ## Round3 SAVE_INTERVAL=1500 NNODES=16
36
+
37
+ NNODES=16
38
+
39
+ moved the round2's checkpoints away
40
+
41
+ rerun from scratch with the same seed
42
+
43
+ saved checkpoint each 1500 steps
44
+
45
+ I did the full re-run because otherwise I couldn't separate the tensorboard logs - it is not possible to restart from a checkpoint using `TRAIN_ITER` or `EXIT_INTERVAL` which is not fixed.
46
+
47
+ now we started uploading tensorboard logs
48
+
49
+
50
+ ## Round3 SAVE_INTERVAL=1500 NNODES=32
51
+
52
+ Tried to switch to 64 nodes, but the training failed because GBS gets incremented by 16, which limits us to DP_SIZE=16 (with MBS=1), so we can use at most 32 nodes (128 gpus).
53
+
54
+ ```
55
+ DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE)
56
+ 16 = 32*4/(4*2)
57
+ ```
58
+
59
+ will switch to 64 nodes once GBS reaches 1024.
60
+
61
+
62
+ The training then crashed with shared memory error after some 10h+ of training:
63
+ ```
64
+ ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
65
+ ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
66
+ ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
67
+ ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
68
+ Traceback (most recent call last):
69
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 986, in _try_get_data
70
+ Traceback (most recent call last):
71
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 986, in _try_get_data
72
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/queue.py", line 179, in get
73
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/queue.py", line 179, in get
74
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/threading.py", line 306, in wait
75
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/threading.py", line 306, in wait
76
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler
77
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler
78
+ RuntimeError: DataLoader worker (pid 30882) is killed by signal: Bus error. It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit.
79
+ RuntimeError
80
+ The above exception was the direct cause of the following exception:
81
+ : Traceback (most recent call last):
82
+ DataLoader worker (pid 30801) is killed by signal: Bus error. It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit. File "/gpfswork/rech/six/commun/code/Megatron-DeepSpeed/pretrain_gpt.py", line 215, in <module>
83
+ The above exception was the direct cause of the following exception:
84
+ Traceback (most recent call last):
85
+ File "/gpfswork/rech/six/commun/code/Megatron-DeepSpeed/pretrain_gpt.py", line 215, in <module>
86
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
87
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 144, in pretrain
88
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
89
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 144, in pretrain
90
+ iteration = train(forward_step_func,iteration = train(forward_step_func,
91
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 675, in train
92
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 675, in train
93
+ train_step(forward_step_func,
94
+ train_step(forward_step_func, File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 381, in train_step
95
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 381, in train_step
96
+ loss = model[0].train_batch(data_iter=data_iterator)
97
+ loss = model[0].train_batch(data_iter=data_iterator)
98
+ ```
99
+
100
+ Each node has 94GB of /dev/shm, so it's very strange that this happened.
101
+
102
+ ```
103
+ df -h | grep shm
104
+ tmpfs 94G 336K 94G 1% /dev/shm
105
+ ```
106
+ This is after 2h of training on one node. I wonder if the problem was on some specific node.
107
+
108
+ Though Remi checked that all nodes used by the training that crashed had this exact setup, and all reported 1% usage.
109
+
110
+
111
+
112
+ To continually diagnose the running nodes' shm memory usage:
113
+ ```
114
+ for ((;;)) { (srun --jobid 637799 --gres=gpu:0 $six_ALL_CCFRWORK/bin/report_shm_usage | grep -v "1%"); sleep 10; }
115
+ ```
116
+ after adjusting the jobid number.
117
+
118
+ where:
119
+ ```
120
+ cat $six_ALL_CCFRWORK/bin/report_shm_usage
121
+ #!/usr/bin/bash
122
+
123
+ # print shared memory usage with the host
124
+
125
+ echo $(hostname) $(df -h | grep /dev/shm)
126
+ ```
127
+
128
+ The shared memory is used by `DataLoader` workers. We just use the default `args.num_workers==2` and 94GB of shm available on each node is a huge amount of shared memory.
129
+
130
+ And given that we use TP+PP, a single node doesn't have DDP on it, so there is no multiproc on the local host. Currently one full model replica spans 2 full nodes (`TP*PP = 2*4 = 8` gpus), so it's really a single DataLoader call per 2 nodes, i.e. tiny needs.
131
+
132
+ If this happens again, setting `args.num_workers==0` will stop using shared memory, but it'll impact the data loading speed.
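+ 
+ If we ever need that, it would be a one-line change in the slurm script (assuming the setting is exposed as the `--num-workers` argument, as in Megatron-LM):
+ 
+ ```
+ GPT_ARGS="$GPT_ARGS --num-workers 0"   # no DataLoader worker processes, hence no shared memory usage
+ ```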
133
+
134
+ Jared hasn't seen this problem in his experience.
135
+
136
+ So at the moment we don't know what happened.
137
+
138
+ 2 more 20h trainings have been run since then w/o any problems.
139
+
140
+ ## Checking the progress
141
+
142
+ Someone asked when the current training will complete:
143
+
144
+ Let's do math:
145
+
146
+ 1. we are currently going at 784 samples in 32 seconds, or 24.5 samples / sec
147
+ 2. roughly we have 145M samples to go, so at the current speed with 32 nodes, if we manage to get a 20h allocation every 24 hours, we get about 82 days (145_000_000/(20*60*60*24.5)) - see the quick check after this list
148
+ 3. we should reach GBS=1024 hopefully today and then we can crank up to 64 nodes, which should roughly double the speed, so it'll take 41 days to complete if all goes well and we don't sit in the queue for more than 4 hours.
149
+ 4. we can dare to try 128 nodes, which would quadruple the speed and we should be done in about 20 days. It's hard to tell how quickly the SLURM scheduler will provide such a large allocation - if more than half-day of wait time, we are probably better off with 64 nodes.
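+ 
+ A quick arithmetic check of the above estimates (a sketch using `bc`):
+ 
+ ```
+ echo "scale=1; 145000000 / (20*60*60*24.5)" | bc     # ~82 days at 32 nodes
+ echo "scale=1; 145000000 / (20*60*60*24.5*2)" | bc   # ~41 days at 64 nodes
+ echo "scale=1; 145000000 / (20*60*60*24.5*4)" | bc   # ~20.5 days at 128 nodes
+ ```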
150
+
151
+
152
+ ## Round3 SAVE_INTERVAL=1500 NNODES=64
153
+
154
+ Finally GBS is at 1024, so we can do 64 nodes. Clocking about 23-26 secs/iteration - the performance jumps around quite a lot from run to run. But we already know that about JZ - it's very unsteady and depends on network usage by others.
155
+
156
+ Created a dedicated branch `tr1-13B`, which allows further development w/o the risk of breaking the current training.
157
+
158
+ ## A huge lm loss spike
159
+
160
+ The training loss just jumped from ~3 to ~9
161
+ ```
162
+ iteration 29020/ 311541 | consumed samples: 10698064 | elapsed time per iteration (ms): 22306.6 | learning rate: 9.850E-05 | global batch size: 1024 | lm loss: 2.775923E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
163
+ time (ms)
164
+ iteration 29030/ 311541 | consumed samples: 10708304 | elapsed time per iteration (ms): 22336.4 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 2.772822E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
165
+ time (ms)
166
+ iteration 29040/ 311541 | consumed samples: 10718544 | elapsed time per iteration (ms): 22332.6 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 2.768131E+00 | loss scale: 65536.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
167
+ time (ms)
168
+ iteration 29050/ 311541 | consumed samples: 10728784 | elapsed time per iteration (ms): 22148.5 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 7.343709E+00 | loss scale: 8192.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
169
+ time (ms)
170
+ iteration 29060/ 311541 | consumed samples: 10739024 | elapsed time per iteration (ms): 22181.7 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 8.715872E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
171
+ time (ms)
172
+ iteration 29070/ 311541 | consumed samples: 10749264 | elapsed time per iteration (ms): 22107.1 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 7.654131E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
173
+ time (ms)
174
+ iteration 29080/ 311541 | consumed samples: 10759504 | elapsed time per iteration (ms): 22131.2 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 7.192470E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
175
+ time (ms)
176
+ iteration 29090/ 311541 | consumed samples: 10769744 | elapsed time per iteration (ms): 22119.2 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 6.849044E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
177
+ ```
178
+
179
+ You can see the spike at https://huggingface.co/bigscience/tr1-13B-tensorboard/tensorboard
180
+
181
+ It took some 500 iterations to recover.
182
+
183
+ There was a second spike a bit later, half the size of the first one, and this time it recovered very quickly.
184
+
185
+ We discussed why it may have happened, but we don't have any definitive answer.
186
+
187
+
188
+ ## Checkpoint bloat issue
189
+
190
+ We have an issue with per-layer checkpoints that are 10x bigger than they should be. After some research we discovered that `torch.save()` doesn't save just the current view, but the whole tensor with its original tensor storage. That's why we were getting files 10x bigger than the actual data in the per-layer checkpoints.
191
+
192
+ We need to `.clone()` the tensors before saving them, and then the checkpoint for layers is just modelsize*2 bytes. The reason they were bloated is that ZeRO-1 pre-allocates large tensor buffers as a run-time optimization. So this needs to be fixed in Deepspeed's pipe checkpoint saving.
193
+
194
+ We will also write a script to fix the already-saved checkpoints by cloning and re-saving the tensors.
195
+
196
+
197
+ ## old NCCL
198
+
199
+ Discovered that NCCL is statically linked into the distributed pytorch build and it's really old - 2.7.9. Supposedly a newer NCCL should help with OPA interlink performance. But that means we either need to switch to a more recent pytorch or build our own. This is not resolved yet.
200
+
201
+
202
+ ## Watchdog
203
+
204
+ We created a watchdog that reports whether we are running/scheduled and alerts if neither is happening. E.g. a recent entry in the main log file was:
205
+
206
+ ```
207
+ iteration 33240/ 311541 | consumed samples: 15019344 | elapsed time per iteration (ms): 23491.4 | learning rate: 9.702E-05 | global batch size: 1024 | lm loss: 2.722675E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
208
+ time (ms)
209
+ saving checkpoint at iteration 33241 to /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints
210
+ [2021-08-08 01:00:44,221] [INFO] [logging.py:68:log_dist] [Rank 0] Saving model checkpoint: /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints/global_step33241/mp_rank_00_model_states.pt
211
+ successfully saved checkpoint at iteration 33241 to /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints
212
+ time (ms) | save-checkpoint: 57514.53
213
+ [exiting program after 1190.0357275923093 minutes] datetime: 2021-08-08 01:00:51
214
+ [2021-08-08 01:49:40] ***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG***
215
+ [2021-08-08 02:49:44] ***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG***
216
+ [2021-08-08 03:56:54] tr1-13B-round3 is scheduled to start in 3 days, 7:24:19 (at 2021-08-11T11:21:14) (682842_[1-5%1] on 'gpu_p13' partition)
217
+ ```
218
+
219
+ ## NNODES=96
220
+
221
+ We thought that trying more nodes would be a good idea, but 96 nodes proved to be unworkable, since
222
+
223
+ GBS=1024 is not divisible by 384 (96*4), so there is no way to spread data evenly across all replicas.
224
+
225
+ We can only have either 256, 512 or 1024 gpus (64, 128, 256 nodes)
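+ 
+ A small sanity check of the constraint as we understand it (GBS must be divisible by MBS*DP; a sketch):
+ 
+ ```
+ NNODES=96; GPUS_PER_NODE=4; TP=2; PP=4; GBS=1024; MBS=1
+ DP=$(( NNODES * GPUS_PER_NODE / (TP * PP) ))   # 48 replicas
+ if (( GBS % (MBS * DP) == 0 )); then echo "ok: DP=$DP"; else echo "GBS=$GBS is not divisible by MBS*DP=$((MBS * DP))"; fi
+ ```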
226
+
227
+ ## Corrupt GPU crashes the training multiple times
228
+
229
+ One of the array job trainings crashed after many hours of training:
230
+
231
+ ```
232
+ iteration 43680/ 311541 | consumed samples: 25709904 | elapsed time per iteration (ms): 25593.4 | learning rate: 9.135E-05 | global batch size: 1024 | lm loss: 2.635663E+00 | loss scale: 131072.0 | grad norm: 17224.723 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
233
+ time (ms)
234
+ Traceback (most recent call last):
235
+ File "/gpfswork/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/pretrain_gpt.py", line 222, in <module>
236
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
237
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 144, in pretrain
238
+ iteration = train(forward_step_func,
239
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 677, in train
240
+ train_step(forward_step_func,
241
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 381, in train_step
242
+ loss = model[0].train_batch(data_iter=data_iterator)
243
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 291, in train_batch
244
+ self._exec_schedule(sched)
245
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 1237, in _exec_schedule
246
+ self._exec_instr(**cmd.kwargs)
247
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 679, in _exec_backward_pass
248
+ torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, ))
249
+ File "/gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/autograd/__init__.py", line 145, in backward
250
+ Variable._execution_engine.run_backward(
251
+ RuntimeError: transform: failed to synchronize: cudaErrorECCUncorrectable: uncorrectable ECC error encountered
252
+ terminate called after throwing an instance of 'c10::Error'
253
+ what(): CUDA error: uncorrectable ECC error encountered
254
+ Exception raised from create_event_internal at /opt/conda/conda-bld/pytorch_1616554793803/work/c10/cuda/CUDACachingAllocator.cpp:733 (most recent call first):
255
+ frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x42 (0x1500fb4d42f2 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so)
256
+ frame #1: c10::detail::torchCheckFail(char const*, char const*, unsigned int, std::string const&) + 0x5b (0x1500fb4d167b in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so)
257
+ frame #2: c10::cuda::CUDACachingAllocator::raw_delete(void*) + 0x809 (0x1500fb72d219 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
258
+ frame #3: c10::TensorImpl::release_resources() + 0x54 (0x1500fb4bc3a4 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so)
259
+ frame #4: <unknown function> + 0x6e0e5a (0x150152432e5a in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libtorch_python.so)
260
+ frame #5: <unknown function> + 0x6e0ef1 (0x150152432ef1 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libtorch_python.so)
261
+ frame #6: <unknown function> + 0x1a6b5a (0x56434fce9b5a in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
262
+ frame #7: <unknown function> + 0x110b7c (0x56434fc53b7c in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
263
+ frame #8: <unknown function> + 0x1105b9 (0x56434fc535b9 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
264
+ frame #9: <unknown function> + 0x1105a3 (0x56434fc535a3 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
265
+ frame #10: <unknown function> + 0x1105a3 (0x56434fc535a3 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
266
+ frame #11: <unknown function> + 0x177917 (0x56434fcba917 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
267
+ frame #12: PyDict_SetItemString + 0x4c (0x56434fcbd86c in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
268
+ frame #13: PyImport_Cleanup + 0xac (0x56434fd2f0ec in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
269
+ frame #14: Py_FinalizeEx + 0x79 (0x56434fd95589 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
270
+ frame #15: Py_RunMain + 0x1bc (0x56434fd988fc in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
271
+ frame #16: Py_BytesMain + 0x39 (0x56434fd98ce9 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
272
+ frame #17: __libc_start_main + 0xf3 (0x150183467873 in /lib64/libc.so.6)
273
+ frame #18: <unknown function> + 0x1f7847 (0x56434fd3a847 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
274
+ ```
275
+
276
+ Nobody was around to notice, and the slurm scheduler started the next training job in the array, which also crashed, this time right away on:
277
+
278
+ ```
279
+ > initializing tensor model parallel with size 2
280
+ > initializing pipeline model parallel with size 4
281
+ > setting random seeds to 42 ...
282
+ [2021-08-12 08:19:28,225] [INFO] [checkpointing.py:226:model_parallel_cuda_manual_seed] > initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 2760 and data parallel seed: 42
283
+ > compiling dataset index builder ...
284
+ make: Entering directory '/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/data'
285
+ make: Nothing to be done for 'default'.
286
+ make: Leaving directory '/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/data'
287
+ >>> done with dataset index builder. Compilation time: 0.338 seconds
288
+ > compiling and loading fused kernels ...
289
+ Traceback (most recent call last):
290
+ File "/gpfswork/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/pretrain_gpt.py", line 222, in <module>
291
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
292
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 95, in pretrain
293
+ initialize_megatron(extra_args_provider=extra_args_provider,
294
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/initialize.py", line 89, in initialize_megatron
295
+ _compile_dependencies()
296
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/initialize.py", line 140, in _compile_dependencies
297
+ torch.distributed.barrier()
298
+ File "/gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/distributed/distributed_c10d.py", line 2420, in barrier
299
+ work = default_pg.barrier(opts=opts)
300
+ RuntimeError: CUDA error: out of memory
301
+ ```
302
+
303
+ We figured one of the gpus had a hardware problem. So it crashed the first time. And then the scheduler allocated the same node and of course, we crashed again.
304
+
305
+ We contacted JZ admins and indeed one of the nodes was faulty. The next training didn't hit this node and the training continued.
306
+
307
+ Unfortunately we currently don't have a way to correlate the exceptions to the hostname of the node they happened on. It's really important to have this feature available, since without it we can keep on hitting the faulty node and it'll continue crashing the training. If we know the node's hostname we can exclude it with `sbatch --exclude=node1,node2,... `.
308
+
309
+ update: At the moment we have to add `%N` to `#SBATCH --output=%x-%j-%N.out` and then each node will have its own log file, and then we can tell which node has a corrupt GPU.
310
+
311
+ ## Really long wait time to get allocation
312
+
313
+ When a job gets queued we often see a 3-day expected wait time before it starts, but most of the time the job comes through in several hours. Sometimes we have to wait for a really long time, like 30h, with the scheduler bumping our job down multiple times. This is a big problem as it pushes the finish line away continuously. We aren't anywhere close to being able to train 24/7 despite having many hours allocated to us for this project.
314
+
315
+ Another problem is that within a project we don't have a way to give the main training job a higher priority than the other jobs we run in parallel for various experiments and small trainings. There really should be a way for a user to mark a job as high priority amongst all other jobs of the same group, but we didn't find one.
316
+
317
+ ## Test suite added
318
+
319
+ A `Megatron-Deepspeed` test suite was finally added. It was odd Megatron-LM didn't have one in the first place, so we had to create our own.
320
+
321
+ Now need to find some hardware with 2 gpus to create a CI.
322
+
323
+ ## Reduced evaluation iterations
324
+
325
+ Noticed that somehow evaluation was configured to run for 100 iterations; after discussion we reduced it to 5, saving some resources. While validation iterations are much faster than training iterations, running 100 of them wasn't really needed.
326
+
327
+ ## NNODES=128
328
+
329
+ Taking advantage of the August holidays in France, we were able to switch to 128 nodes.
330
+
331
+ Observed a further drop in TFLOPs, since now we had even fewer microbatches to go around. This is because the global BS remained the same (GBS=1024) and we currently use 2 nodes for a single replica (TP=2 * PP=4). So with 128 nodes, we have 64 replicas, which leaves only GAS=16 per replica, and that's too little for an efficient pipeline. The idle bubble is too big.
332
+
333
+ The benchmarking/tune up was done with GAS=128 (GBS=1024/8) and that's where we were getting high TFLops.
334
+
335
+ Nevertheless, the training is going much faster now and we will catch up lost time quickly.
336
+
337
+ ## NCCL experiments
338
+
339
+ It was suggested that newer NCCL will lead to faster inter-node communication.
340
+
341
+
342
+ The hypothesis was that a newer nccl should be faster on JZ, but the short experiments I ran didn't support it. I got the same throughput with:
343
+
344
+ 1. pt=1.8.1, cuda=11.1, nccl=2708
345
+ 2. pt=1.9.0, cuda=11.1, nccl=2708
346
+ 3. pt=1.10.0.dev20210821, cuda=11.3, nccl=(2, 10, 3)
347
+
348
+ The experiment was run on the same 4-node allocation with GBS=64, but otherwise the setup was the same as the current training script. The speed was 17-17.5 secs per iteration. Did about 100 iterations.
349
+ So we will stick to pt=1.8.1 for now until a need arises to change that.
350
+
351
+ ## SLURM Job Arrays and Dependency
352
+
353
+ Switched to using SLURM Job Arrays and Dependency to schedule jobs. Since our account has a huge allocation we were able to start new 20h jobs with no delay.
354
+
355
+ If this approach is not used, even a tiny delay between finishing one job and scheduling the next one often led to 1-30 hours of wait time in the queue. This is because the scheduler was quick to allocate other jobs within the first few seconds of the currently running job finishing.
356
+
357
+ The problem remained when something went wrong - e.g. a mistake in a script or some hardware issue - which would lead to a delay in starting new jobs and a very long wait time.
358
+
359
+ This training was getting its software updated a lot as missing features were added, so it wasn't a super-stable polished production environment.
360
+
361
+ So as long as we had a stable setup using SLURM Job Arrays and Dependency chaining, things went well. When we couldn't use those, SLURM sometimes delayed our training by a lot.
362
+
363
+ Also, since we run secondary trainings, we learned to use `--nice=10000` for those trainings. Without this, all slurm jobs of the same account have the same priority.
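+ 
+ For example (a sketch; the secondary training script name is hypothetical):
+ 
+ ```
+ sbatch --nice=10000 --array=1-5%1 tr-secondary-experiment.slurm
+ ```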
364
+
365
+ ## Added an alert email notification
366
+
367
+ The previously implemented watchdog is now hooked up to email notifications, so if it detects that no job is running or scheduled, it lets the group know.
368
+
369
+ ## Checkpoint bloat fixed
370
+
371
+ The Deepspeed team fixed the bloat in the checkpoints, so new checkpoints were taking 10x less space for layer weights.
372
+
373
+ I then processed all the old checkpoints to remove the bloat using:
374
+
375
+ ```
376
+ srun -p prepost -A six@cpu --time=20:00:00 --pty bash
377
+ wget https://raw.githubusercontent.com/stas00/toolbox/master/pytorch/pt-checkpoint-shrink.py
378
+ chmod a+x pt-checkpoint-shrink.py
379
+ cd checkpoints
380
+ find -type d -name "global_step*" -exec pt-checkpoint-shrink.py --checkpoint_dir {} --patterns "layer*pt" \;
381
+ ```
382
+
383
+ ## CI was added
384
+
385
+ A CI was implemented using on-demand EC2 instances, with the help of https://github.com/machulav/ec2-github-runner
386
+
387
+ Eventually it proved unusable for PRs made from forks, as EC2 needs secrets that github actions won't expose to PRs not originating from the origin repo. So this CI is not very useful.
388
+
389
+
390
+ ## Training completed
391
+
392
+ On Sep 6th we reached 300B tokens and on Sep 7th we stopped the training - it took some ~5 weeks to complete.
393
+
394
+
395
+ ## Checkpoint conversion
396
+
397
+ We still need to figure out how to make the checkpoint available in the HF `transformers` format. This is a work in progress.
398
+
399
+ Update: This has been done. All checkpoints converted to HF format and uploaded to HUB.
400
+
401
+ See [README.md](README.md) for nuances of the conversion.
402
+
403
+ Made a mistake in the activation function setting when writing the HF model after the conversion. It proved to be a complex situation, but it needs to be `gelu_fast` on the HF side since we are using `args.openai_gelu = False; args.bias_gelu_res = True`. So we applied fixes to the models on the Hub using the following:
404
+
405
+ ```
406
+ cd /gpfsssd/scratch/rech/six/commun/experiments/fix-config/
407
+ export GIT_LFS_SKIP_SMUDGE=1
408
+ git clone https://huggingface.co/bigscience/tr3e-1B3-c4-checkpoints
409
+ cd tr3e-1B3-c4-checkpoints
410
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
411
+ set +H
412
+ git branch -a | sort -V | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; perl -pi -e "s/gelu(?!_)/gelu_fast/" $1/config.json; git commit -m "gelu_fast is the correct activation_function" .; git push --set-upstream origin $1]'
413
+ export GIT_LFS_SKIP_SMUDGE=0
414
+ ```
415
+ Note the trick of not checking out LFS files, since we only need to modify `config.json`, which is a normal file - this is thousands of times faster than a normal checkout.
416
+
417
+
418
+
419
+ and for GCS:
420
+ ```
421
+ start-prod
422
+ cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/
423
+ perl -pi -e 's|gelu|gelu_fast|' checkpoints/*/config.json
424
+ gsutil -m rsync -x ".*bin$" -r checkpoints gs://bigscience-backups/tr1-13B/checkpoints
425
+ ```
bigscience/train/tr1-13B-base/start-tr1-13B ADDED
@@ -0,0 +1,57 @@
1
+ # This is a production environment setup script for JZ / tr1-13B training
2
+ #
3
+ # Activate with:
4
+ #
5
+ # source ./start-tr1-13B
6
+ #
7
+ #
8
+
9
+ # # if this session isn't run via a login shell, which is the case when running a
10
+ # # command which is not shell via ssh, the bash function `module` will be missing.
11
+ # # so work around it by emulating part of the login shell that loads modules environment
12
+ # if [ -z $(type -t module) ]
13
+ # then
14
+ # . /etc/profile.d/z_modules.sh
15
+ # fi
16
+ module purge
17
+ module load pytorch-gpu/py3/1.8.1
18
+ module load nvtop git-lfs github-cli mc
19
+
20
+ # git prompt
21
+ export GIT_PROMPT_ONLY_IN_REPO=0;
22
+ export GIT_PROMPT_THEME="JZPRod"
23
+ source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh
24
+
25
+ # We are using common disk spaces for datasets, caches, and experiment dumps:
26
+ #
27
+ #- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and ``$six_ALL_CCFRWORK/datasets`
28
+ #- Experiment dumps -> `$six_ALL_CCFRWORK/experiments`
29
+
30
+ # specific caches
31
+
32
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
33
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
34
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
35
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
36
+
37
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
38
+
39
+ ### CONDA ###
40
+
41
+ # >>> conda initialize >>>
42
+ # !! Contents within this block are managed by 'conda init' !!
43
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
44
+ if [ $? -eq 0 ]; then
45
+ eval "$__conda_setup"
46
+ else
47
+ if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
48
+ . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
49
+ else
50
+ export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
51
+ fi
52
+ fi
53
+ unset __conda_setup
54
+ # <<< conda initialize <<<
55
+
56
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
57
+ conda activate tr1-13B
bigscience/train/tr1-13B-base/tr1-13B-hub-sync-logs.slurm ADDED
@@ -0,0 +1,23 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr1-13B-hub-sync-logs # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1 # number of nodes
5
+ #SBATCH --cpus-per-task=1 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=1:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --partition=compil
10
+ #SBATCH --account=six@cpu
11
+
12
+ echo "START TIME: $(date)"
13
+
14
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
15
+ module load git-lfs
16
+
17
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
18
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
19
+ BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr1-13B/bigscience
20
+
21
+ $BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $LOGS_PATH --patterns '*.txt' -d
22
+
23
+ echo "END TIME: $(date)"
bigscience/train/tr1-13B-base/tr1-13B-hub-sync-tensorboard.slurm ADDED
@@ -0,0 +1,23 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr1-13B-hub-sync-tensorboard # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1 # number of nodes
5
+ #SBATCH --cpus-per-task=1 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=2:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --partition=compil
10
+ #SBATCH --account=six@cpu
11
+
12
+ echo "START TIME: $(date)"
13
+
14
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
15
+ module load git-lfs
16
+
17
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
18
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
19
+ BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr1-13B/bigscience
20
+
21
+ $BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $TENSORBOARD_PATH --patterns '*tfevents*' -d
22
+
23
+ echo "END TIME: $(date)"
bigscience/train/tr1-13B-base/tr1-13B-round1.slurm ADDED
@@ -0,0 +1,174 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr1-13B-round3
3
+ #SBATCH --constraint=v100-32g
4
+ #SBATCH --nodes=128
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+ set -x -e
14
+
15
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
16
+
17
+ echo "START TIME: $(date)"
18
+
19
+ #ROUND=3
20
+
21
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
22
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
23
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
24
+ CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon
25
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
26
+
27
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/
28
+
29
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
30
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
31
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document
32
+
33
+ cd $MEGATRON_DEEPSPEED_REPO
34
+
35
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
36
+ MASTER_PORT=6000
37
+
38
+ GPUS_PER_NODE=4
39
+ NNODES=128 # switch to 128
40
+ TP_SIZE=2 # always fixed to the size of a single node
41
+ PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here
42
+ #DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer
43
+
44
+ # GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size
45
+ # GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed
46
+ MICRO_BATCH_SIZE=1
47
+ GLOBAL_BATCH_SIZE=1024
48
+
49
+ NLAYERS=40
50
+ NHIDDEN=5120
51
+ NHEADS=32
52
+ FFN_HIDDEN_SIZE=20480
53
+ SEQ_LEN=2048
54
+ VOCAB_SIZE=50257
55
+
56
+ SAVE_INTERVAL=1500
57
+
58
+ OPTIMIZER_ARGS=" \
59
+ --optimizer adam \
60
+ --adam-beta1 0.9 \
61
+ --adam-beta2 0.999 \
62
+ --adam-eps 1e-8 \
63
+ --lr 1e-4 \
64
+ --min-lr 1e-5 \
65
+ --lr-decay-style cosine \
66
+ --lr-decay-samples 126_953_125 \
67
+ --lr-warmup-samples 216_320 \
68
+ --clip-grad 1.0 \
69
+ --weight-decay 1e-1 \
70
+ "
71
+
72
+ EXIT_OPTS=" \
73
+ --exit-duration-in-mins 1190 \
74
+ "
75
+
76
+ GPT_ARGS=" \
77
+ --num-layers $NLAYERS \
78
+ --hidden-size $NHIDDEN \
79
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
80
+ --num-attention-heads $NHEADS \
81
+ --seq-length $SEQ_LEN \
82
+ --max-position-embeddings $SEQ_LEN \
83
+ --micro-batch-size $MICRO_BATCH_SIZE \
84
+ --rampup-batch-size 16 16 5_000_000 \
85
+ --global-batch-size $GLOBAL_BATCH_SIZE \
86
+ --train-samples 300_000_000 \
87
+ --vocab-file $VOCAB_FILE \
88
+ --merge-file $MERGE_FILE \
89
+ --loss-scale 12 \
90
+ --clip-grad 1.0 \
91
+ --fp16 \
92
+ --checkpoint-activations \
93
+ --seed 42
94
+ $OPTIMIZER_ARGS \
95
+ $EXIT_OPTS \
96
+ "
97
+
98
+ OUTPUT_ARGS=" \
99
+ --log-interval 10 \
100
+ --save-interval $SAVE_INTERVAL \
101
+ --eval-interval 1000 \
102
+ --eval-iters 5 \
103
+ --codecarbon-dir $CODECARBON_PATH \
104
+ --tensorboard-dir $TENSORBOARD_PATH \
105
+ --tensorboard-queue-size 5 \
106
+ --log-timers-to-tensorboard \
107
+ --log-batch-size-to-tensorboard \
108
+ --log-validation-ppl-to-tensorboard \
109
+ "
110
+
111
+ ZERO_STAGE=1
112
+
113
+ config_json="./ds_config.$SLURM_JOBID.json"
114
+
115
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
116
+ cat <<EOT > $config_json
117
+ {
118
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
119
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
120
+ "gradient_clipping": 1.0,
121
+ "zero_optimization": {
122
+ "stage": $ZERO_STAGE
123
+ },
124
+ "fp16": {
125
+ "enabled": true,
126
+ "loss_scale": 0,
127
+ "loss_scale_window": 500,
128
+ "hysteresis": 2,
129
+ "min_loss_scale": 1,
130
+ "initial_scale_power": 12
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOT
136
+
137
+
138
+ DEEPSPEED_ARGS=" \
139
+ --deepspeed \
140
+ --deepspeed_config ${config_json} \
141
+ --zero-stage ${ZERO_STAGE} \
142
+ --deepspeed-activation-checkpointing \
143
+ "
144
+
145
+ export LAUNCHER="python -u -m torch.distributed.launch \
146
+ --nproc_per_node $GPUS_PER_NODE \
147
+ --nnodes $NNODES \
148
+ --master_addr $MASTER_ADDR \
149
+ --master_port $MASTER_PORT \
150
+ "
151
+
152
+ export CMD=" \
153
+ `pwd`/pretrain_gpt.py \
154
+ --tensor-model-parallel-size $TP_SIZE \
155
+ --pipeline-model-parallel-size $PP_SIZE \
156
+ $GPT_ARGS \
157
+ $OUTPUT_ARGS \
158
+ --save $CHECKPOINT_PATH \
159
+ --load $CHECKPOINT_PATH \
160
+ --data-path $DATA_PATH \
161
+ --data-impl mmap \
162
+ --split 949,50,1 \
163
+ --distributed-backend nccl \
164
+ $DEEPSPEED_ARGS \
165
+ "
166
+
167
+ echo $CMD
168
+
169
+ # to debug - add echo (it exits and prints what it would have launched)
170
+ clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
171
+
172
+ echo "END TIME: $(date)"
173
+
174
+ #