winglian committed
Commit 01a75fd · unverified · 2 Parent(s): a924a33 b81c97f

Merge pull request #98 from NanoCode012/feat/pre-commit

.bandit ADDED
@@ -0,0 +1,3 @@
+ [bandit]
+ exclude = tests
+ skips = B101
.flake8 ADDED
@@ -0,0 +1,5 @@
+ [flake8]
+ max-line-length = 88
+
+ select = C,E,F,W,B,B950
+ extend-ignore = E203, E501, W503
.github/workflows/pre-commit.yml ADDED
@@ -0,0 +1,16 @@
+ name: pre-commit
+
+ on:
+   pull_request:
+   push:
+
+ jobs:
+   pre-commit:
+     runs-on: ubuntu-latest
+     steps:
+     - uses: actions/checkout@v3
+     - uses: actions/setup-python@v4
+       with:
+         python-version: "3.9"
+         cache: 'pip' # caching pip dependencies
+     - uses: pre-commit/[email protected]
.gitignore CHANGED
@@ -160,4 +160,4 @@ cython_debug/
  # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
  # and can be added to the global gitignore or merged into this file. For a more nuclear
  # option (not recommended) you can uncomment the following to ignore the entire idea folder.
- .idea/
+ .idea/
.isort.cfg ADDED
@@ -0,0 +1,2 @@
+ [settings]
+ profile=black
.mypy.ini ADDED
@@ -0,0 +1,33 @@
+ [mypy]
+
+ exclude = venv
+
+ [mypy-alpaca_lora_4bit.*]
+ ignore_missing_imports = True
+
+ [mypy-flash_attn.*]
+ ignore_missing_imports = True
+
+ [mypy-huggingface_hub]
+ ignore_missing_imports = True
+
+ [mypy-transformers.*]
+ ignore_missing_imports = True
+
+ [mypy-peft]
+ ignore_missing_imports = True
+
+ [mypy-bitsandbytes]
+ ignore_missing_imports = True
+
+ [mypy-datasets]
+ ignore_missing_imports = True
+
+ [mypy-fire]
+ ignore_missing_imports = True
+
+ [mypy-setuptools]
+ ignore_missing_imports = True
+
+ [mypy-addict]
+ ignore_missing_imports = True
.pre-commit-config.yaml ADDED
@@ -0,0 +1,42 @@
+ default_language_version:
+   python: python3.9
+
+ repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+   rev: v4.4.0
+   hooks:
+   - id: check-yaml
+   - id: end-of-file-fixer
+   - id: trailing-whitespace
+ - repo: https://github.com/psf/black
+   rev: 23.3.0
+   hooks:
+   - id: black
+ - repo: https://github.com/pycqa/isort
+   rev: 5.12.0
+   hooks:
+   - id: isort
+ - repo: https://github.com/PyCQA/flake8
+   rev: 6.0.0
+   hooks:
+   - id: flake8
+ - repo: https://github.com/PyCQA/pylint
+   rev: v2.17.4
+   hooks:
+   - id: pylint
+ - repo: https://github.com/pre-commit/mirrors-mypy
+   rev: v1.3.0
+   hooks:
+   - id: mypy
+     additional_dependencies:
+       [
+         'types-PyYAML',
+       ]
+ - repo: https://github.com/PyCQA/bandit
+   rev: 1.7.5
+   hooks:
+   - id: bandit
+     args: [
+       '--ini',
+       '.bandit',
+     ]
.pylintrc ADDED
@@ -0,0 +1,14 @@
+ [MASTER]
+ init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()))"
+
+ [TYPECHECK]
+
+ # List of members which are set dynamically and missed by Pylint inference
+ # system, and so shouldn't trigger E1101 when accessed.
+ generated-members=numpy.*, torch.*
+
+
+ [pylint.messages_control]
+ disable=missing-function-docstring, line-too-long, import-error,
+     too-many-arguments, too-many-locals, too-many-statements, too-many-branches, too-few-public-methods,
+     too-many-instance-attributes, fixme, import-outside-toplevel, logging-fstring-interpolation,
README.md CHANGED
@@ -9,6 +9,8 @@
      <p>
          Go ahead and axolotl questions!!
      </p>
+     <img src="https://github.com/OpenAccess-AI-Collective/axolotl/actions/workflows/pre-commit.yml/badge.svg?branch=main" alt="pre-commit">
+     <img alt="PyTest Status" src="https://github.com/OpenAccess-AI-Collective/axolotl/actions/workflows/tests.yml/badge.svg?branch=main">
  </div>
  </div>

@@ -406,3 +408,12 @@ Join our [Discord server](https://discord.gg/HhrNrHJPRb) where we can help you
  Bugs? Please check for open issue else create a new [Issue](https://github.com/OpenAccess-AI-Collective/axolotl/issues/new).

  PRs are **greatly welcome**!
+
+ Please run below to setup env
+ ```bash
+ pip3 install -r requirements-dev.txt -r requirements-tests.txt
+ pre-commit install
+
+ # test
+ pytest tests/
+ ```
docker/Dockerfile-base CHANGED
@@ -99,4 +99,3 @@ RUN pip3 install "peft @ git+https://github.com/huggingface/peft.git@main" \
      pip3 install awscli && \
      # The base image ships with `pydantic==1.8.2` which is not working
      pip3 install -U --no-cache-dir pydantic
-
examples/falcon/config-7b-lora.yml CHANGED
@@ -61,4 +61,3 @@ special_tokens:
    pad_token: "<|endoftext|>"
    bos_token: ">>ABSTRACT<<"
    eos_token: "<|endoftext|>"
-
examples/falcon/config-7b.yml CHANGED
@@ -61,4 +61,3 @@ special_tokens:
    pad_token: "<|endoftext|>"
    bos_token: ">>ABSTRACT<<"
    eos_token: "<|endoftext|>"
-
requirements-dev.txt ADDED
@@ -0,0 +1,3 @@
+ pre-commit
+ black
+ mypy
requirements.txt CHANGED
@@ -4,7 +4,6 @@ bitsandbytes>=0.39.0
  addict
  fire
  PyYAML==6.0
- black
  datasets
  accelerate>=0.19.0
  sentencepiece
scripts/alpaca_json_to_jsonl.py CHANGED
@@ -1,24 +1,38 @@
+ """Module to convert json file to jsonl"""
+
  import os
  import sys
  from pathlib import Path
+ from typing import Optional, Union

  import fire
- from typing import Optional
+
+ from axolotl.convert import (
+     FileReader,
+     FileWriter,
+     JsonlSerializer,
+     JsonParser,
+     JsonToJsonlConverter,
+     StdoutWriter,
+ )

  # add src to the pythonpath so we don't need to pip install this
  project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
  src_dir = os.path.join(project_root, "src")
  sys.path.insert(0, src_dir)

- from axolotl.convert import *
-

  def main(
-     input: Path,
+     file: Path,
      output: Optional[Path] = None,
      to_stdout: Optional[bool] = False,
  ):
+     """
+     Convert a json file to jsonl
+     """
+
      file_reader = FileReader()
+     writer: Union[StdoutWriter, FileWriter]
      if to_stdout or output is None:
          writer = StdoutWriter()
      else:
@@ -28,7 +42,7 @@ def main(

      converter = JsonToJsonlConverter(file_reader, writer, json_parser, jsonl_serializer)

-     converter.convert(input, output)
+     converter.convert(file, output)


  if __name__ == "__main__":
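A note on the signature change above: renaming `input: Path` to `file: Path` stops shadowing Python's `input` builtin, and because `fire` derives CLI flags from parameter names, the script's flag changes from `--input` to `--file`. A minimal sketch of the resulting entry point (the `fire.Fire(main)` guard is an assumption; the diff is truncated before the end of the script):

```python
# Sketch: how fire maps the renamed parameter to a CLI flag.
# Assumes the script ends with fire.Fire(main); that line is not shown in the diff.
from pathlib import Path
from typing import Optional

import fire


def main(file: Path, output: Optional[Path] = None, to_stdout: Optional[bool] = False):
    # `file` (rather than the builtin-shadowing `input`) becomes the --file flag
    print(f"would convert {file} -> {output or 'stdout'}")


if __name__ == "__main__":
    fire.Fire(main)  # e.g. python alpaca_json_to_jsonl.py --file data.json --to_stdout
```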
scripts/finetune.py CHANGED
@@ -1,3 +1,5 @@
+ """Prepare and train a model on a dataset. Can also infer from a model or merge lora"""
+
  import importlib
  import logging
  import os
@@ -5,25 +7,26 @@ import random
  import signal
  import sys
  from pathlib import Path
- from typing import Optional, List, Dict, Any, Union
+ from typing import Any, Dict, List, Optional, Union

  import fire
  import torch
  import yaml

+ from axolotl.utils.data import load_prepare_datasets
+ from axolotl.utils.dict import DictDefault
+ from axolotl.utils.models import load_model, load_tokenizer
+
  # add src to the pythonpath so we don't need to pip install this
  from axolotl.utils.tokenization import check_dataset_labels
+ from axolotl.utils.trainer import setup_trainer
  from axolotl.utils.validation import validate_config
- from axolotl.utils.dict import DictDefault
+ from axolotl.utils.wandb import setup_wandb_env_vars

  project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
  src_dir = os.path.join(project_root, "src")
  sys.path.insert(0, src_dir)

- from axolotl.utils.data import load_prepare_datasets
- from axolotl.utils.models import load_model, load_tokenizer
- from axolotl.utils.trainer import setup_trainer
- from axolotl.utils.wandb import setup_wandb_env_vars

  logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
  DEFAULT_DATASET_PREPARED_PATH = "last_run_prepared"
@@ -31,14 +34,16 @@ DEFAULT_DATASET_PREPARED_PATH = "last_run_prepared"

  def choose_device(cfg):
      def get_device():
-         if torch.cuda.is_available():
-             return f"cuda:{cfg.local_rank}"
-         else:
-             try:
-                 if torch.backends.mps.is_available():
-                     return "mps"
-             except:
-                 return "cpu"
+         try:
+             if torch.cuda.is_available():
+                 return f"cuda:{cfg.local_rank}"
+
+             if torch.backends.mps.is_available():
+                 return "mps"
+
+             raise SystemError("No CUDA/mps device found")
+         except Exception:  # pylint: disable=broad-exception-caught
+             return "cpu"

      cfg.device = get_device()
      if cfg.device == "cuda":
@@ -51,7 +56,7 @@ def get_multi_line_input() -> Optional[str]:
      print("Give me an instruction (Ctrl + D to finish): ")
      instruction = ""
      for line in sys.stdin:
-         instruction += line
+         instruction += line  # pylint: disable=consider-using-join
      # instruction = pathlib.Path("/proc/self/fd/0").read_text()
      return instruction

@@ -92,7 +97,7 @@ def do_inference(cfg, model, tokenizer, prompter="AlpacaPrompter"):


  def choose_config(path: Path):
-     yaml_files = [file for file in path.glob("*.yml")]
+     yaml_files = list(path.glob("*.yml"))

      if not yaml_files:
          raise ValueError(
@@ -130,12 +135,12 @@ def train(
      config = choose_config(config)

      # load the config from the yaml file
-     with open(config, "r") as f:
-         cfg: DictDefault = DictDefault(yaml.load(f, Loader=yaml.Loader))
+     with open(config, encoding="utf-8") as file:
+         cfg: DictDefault = DictDefault(yaml.safe_load(file))
      # if there are any options passed in the cli, if it is something that seems valid from the yaml,
      # then overwrite the value
      cfg_keys = cfg.keys()
-     for k in kwargs:
+     for k, _ in kwargs.items():
          # if not strict, allow writing to cfg even if it's not in the yml already
          if k in cfg_keys or cfg.strict is False:
              # handle booleans
@@ -167,13 +172,11 @@ def train(

      # load the tokenizer first
      logging.info("loading tokenizer...")
-     tokenizer = load_tokenizer(
-         cfg.base_model_config,
-         cfg.tokenizer_type,
-         cfg
-     )
+     tokenizer = load_tokenizer(cfg.base_model_config, cfg.tokenizer_type, cfg)

-     if check_not_in(["inference", "shard", "merge_lora"], kwargs):  # don't need to load dataset for these
+     if check_not_in(
+         ["inference", "shard", "merge_lora"], kwargs
+     ):  # don't need to load dataset for these
          train_dataset, eval_dataset = load_prepare_datasets(
              tokenizer, cfg, DEFAULT_DATASET_PREPARED_PATH
          )
@@ -182,7 +185,7 @@ def train(
          logging.info("check_dataset_labels...")
          check_dataset_labels(
              train_dataset.select(
-                 [random.randrange(0, len(train_dataset) - 1) for i in range(5)]
+                 [random.randrange(0, len(train_dataset) - 1) for _ in range(5)]  # nosec
              ),
              tokenizer,
          )
@@ -239,7 +242,10 @@ def train(
      if cfg.local_rank == 0:
          signal.signal(
              signal.SIGINT,
-             lambda signal, frame: (model.save_pretrained(cfg.output_dir), exit(0)),
+             lambda signal, frame: (
+                 model.save_pretrained(cfg.output_dir),
+                 sys.exit(0),
+             ),
          )

      logging.info("Starting trainer...")
@@ -252,7 +258,8 @@ def train(
          ]
          if len(possible_checkpoints) > 0:
              sorted_paths = sorted(
-                 possible_checkpoints, key=lambda path: int(path.split("-")[-1])
+                 possible_checkpoints,
+                 key=lambda path: int(path.split("-")[-1]),
              )
              resume_from_checkpoint = sorted_paths[-1]
              logging.info(
@@ -266,6 +273,7 @@ def train(
      # only save on rank 0, otherwise it corrupts output on multi-GPU when multiple processes attempt to write the same file
      if cfg.local_rank == 0:
          model.save_pretrained(cfg.output_dir)
+
      # trainer.save_model(cfg.output_dir) # TODO this may be needed for deepspeed to work? need to review another time
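The config-loading change above is the one behavioral fix in this file: `yaml.load(f, Loader=yaml.Loader)` can instantiate arbitrary Python objects from tagged YAML (which bandit flags as B506), while `yaml.safe_load` restricts documents to plain data types. A small sketch of the difference, with an illustrative config:

```python
# Sketch: why safe_load is the right loader for user-supplied config files.
import yaml

doc = "base_model: llama\nlearning_rate: 0.0003"
print(yaml.safe_load(doc))  # {'base_model': 'llama', 'learning_rate': 0.0003}

# A document carrying a Python-object tag, e.g.
#   !!python/object/apply:os.system ["echo pwned"]
# executes code under yaml.load(..., Loader=yaml.Loader);
# yaml.safe_load raises yaml.constructor.ConstructorError instead.
```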
setup.py CHANGED
@@ -1,7 +1,9 @@
- from setuptools import setup, find_packages
+ """setup.py for axolotl"""
+
+ from setuptools import find_packages, setup

  install_requires = []
- with open("./requirements.txt", "r") as requirements_file:
+ with open("./requirements.txt", encoding="utf-8") as requirements_file:
      # don't include peft yet until we check the int4
      # need to manually install peft for now...
      reqs = [r.strip() for r in requirements_file.readlines() if "peft" not in r]
src/axolotl/convert.py CHANGED
@@ -1,47 +1,76 @@
+ """Module containing File Reader, File Writer, Json Parser, and Jsonl Serializer classes"""
+
+
  import json
  import sys


  class FileReader:
+     """
+     Reads a file and returns its contents as a string
+     """
+
      def read(self, file_path):
-         with open(file_path, "r") as file:
+         with open(file_path, encoding="utf-8") as file:
              return file.read()


  class FileWriter:
+     """
+     Writes a string to a file
+     """
+
      def __init__(self, file_path):
          self.file_path = file_path

      def write(self, content):
-         with open(self.file_path, "w") as file:
+         with open(self.file_path, "w", encoding="utf-8") as file:
              file.write(content)


  class StdoutWriter:
+     """
+     Writes a string to stdout
+     """
+
      def write(self, content):
          sys.stdout.write(content)
          sys.stdout.write("\n")


  class JsonParser:
+     """
+     Parses a string as JSON and returns the result
+     """
+
      def parse(self, content):
          return json.loads(content)


  class JsonlSerializer:
+     """
+     Serializes a list of JSON objects into a JSONL string
+     """
+
      def serialize(self, data):
          lines = [json.dumps(item) for item in data]
          return "\n".join(lines)


  class JsonToJsonlConverter:
+     """
+     Converts a JSON file to JSONL
+     """
+
      def __init__(self, file_reader, file_writer, json_parser, jsonl_serializer):
          self.file_reader = file_reader
          self.file_writer = file_writer
          self.json_parser = json_parser
          self.jsonl_serializer = jsonl_serializer

-     def convert(self, input_file_path, output_file_path):
+     def convert(
+         self, input_file_path, output_file_path
+     ):  # pylint: disable=unused-argument
          content = self.file_reader.read(input_file_path)
          data = self.json_parser.parse(content)
          # data = [r for r in data if r["conversations"]] # vicuna cleaned has rows with empty conversations
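For reference, the classes above compose into a small pipeline, which is how `scripts/alpaca_json_to_jsonl.py` wires them up. A usage sketch (file names are illustrative):

```python
# Sketch: composing the converter; note convert() ignores its output path
# argument when the writer already knows where to write.
from axolotl.convert import (
    FileReader,
    FileWriter,
    JsonlSerializer,
    JsonParser,
    JsonToJsonlConverter,
)

converter = JsonToJsonlConverter(
    FileReader(),                     # reads the raw JSON text
    FileWriter("alpaca_data.jsonl"),  # or StdoutWriter() to print instead
    JsonParser(),                     # parses the text into a list of records
    JsonlSerializer(),                # emits one JSON object per line
)
converter.convert("alpaca_data.json", "alpaca_data.jsonl")
```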
src/axolotl/datasets.py CHANGED
@@ -1,10 +1,12 @@
+ """Module containing Dataset functionality"""
+
  import logging
  from typing import List

  import torch
  from datasets import IterableDataset
- from .prompt_tokenizers import PromptTokenizingStrategy, InvalidDataException
+
+ from .prompt_tokenizers import InvalidDataException, PromptTokenizingStrategy

  # We want this to be a wrapper for an existing dataset that we have loaded
  # lets use the concept of middlewares to wrap each dataset, for example
@@ -14,7 +16,14 @@ from .prompt_tokenizers import PromptTokenizingStrategy, InvalidDataException


  class TokenizedPromptDataset(IterableDataset):
-     def __init__(
+     """
+     Iterable dataset that returns tokenized prompts from a stream of text files.
+     Args:
+         prompt_tokenizer (PromptTokenizingStrategy): The prompt tokenizing method for proccessing the data.
+         dataset (dataset.Dataset): Dataset with text files.
+     """
+
+     def __init__(  # pylint: disable=super-init-not-called
          self,
          prompt_tokenizer: PromptTokenizingStrategy,
          dataset: IterableDataset,
@@ -42,7 +51,7 @@ class ConstantLengthDataset(IterableDataset):
          seq_length (int): Length of token sequences to return.
      """

-     def __init__(
+     def __init__(  # pylint: disable=super-init-not-called
          self,
          tokenizer,
          datasets,
@@ -82,10 +91,8 @@ class ConstantLengthDataset(IterableDataset):
              else:
                  example_len = 0

-             if (
-                 not example_len
-                 or buffer_len + int(add_concat_token) + example_len
-                 > self.seq_length
+             if not example_len or (
+                 buffer_len + int(add_concat_token) + example_len > self.seq_length
              ):
                  if buffer["input_ids"]:
                      input_ids = torch.cat(buffer["input_ids"], dim=-1)[
@@ -95,9 +102,8 @@ class ConstantLengthDataset(IterableDataset):
                          : self.seq_length
                      ]
                      labels = torch.cat(buffer["labels"], dim=-1)[: self.seq_length]
-                     if (
-                         labels.size() == input_ids.size()
-                         and attention_mask.size() == input_ids.size()
+                     if labels.size() == input_ids.size() and (
+                         attention_mask.size() == input_ids.size()
                      ):
                          yield {
                              "input_ids": input_ids,
@@ -108,7 +114,11 @@ class ConstantLengthDataset(IterableDataset):
                          logging.warning(
                              f"dropping batch due to tensor size mismatch input_ids: {input_ids.size()}, labels: {labels.size()}, attention_mask: {attention_mask.size()}"
                          )
-                 buffer = {"input_ids": [], "attention_mask": [], "labels": []}
+                 buffer = {
+                     "input_ids": [],
+                     "attention_mask": [],
+                     "labels": [],
+                 }
                  buffer_len = 0

          if example:
src/axolotl/flash_attn.py CHANGED
@@ -1,17 +1,15 @@
+ """Flash attention monkey patch for llama model"""
+
  # copied from https://github.com/lm-sys/FastChat/blob/main/fastchat/train/llama_flash_attn_monkey_patch.py

- from typing import List, Optional, Tuple
+ from typing import Optional, Tuple

  import torch
- from torch import nn
-
  import transformers
- from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
-
  from einops import rearrange
-
+ from flash_attn.bert_padding import pad_input, unpad_input
  from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
- from flash_attn.bert_padding import unpad_input, pad_input
+ from transformers.models.llama.modeling_llama import apply_rotary_pos_emb


  def forward(
@@ -74,7 +72,11 @@ def forward(
          qkv = rearrange(qkv, "b s ... -> (b s) ...")
          max_s = q_len
          cu_q_lens = torch.arange(
-             0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device
+             0,
+             (bsz + 1) * q_len,
+             step=q_len,
+             dtype=torch.int32,
+             device=qkv.device,
          )
          output = flash_attn_unpadded_qkvpacked_func(
              qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
@@ -82,35 +84,56 @@ def forward(
          output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
      else:
          nheads = qkv.shape[-2]
+
+         # pylint: disable=invalid-name
          x = rearrange(qkv, "b s three h d -> b s (three h d)")
          x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
          x_unpad = rearrange(
-             x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads
+             x_unpad,
+             "nnz (three h d) -> nnz three h d",
+             three=3,
+             h=nheads,
          )
          output_unpad = flash_attn_unpadded_qkvpacked_func(
-             x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
+             x_unpad,
+             cu_q_lens,
+             max_s,
+             0.0,
+             softmax_scale=None,
+             causal=True,
          )
          output = rearrange(
              pad_input(
-                 rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len
+                 rearrange(output_unpad, "nnz h d -> nnz (h d)"),
+                 indices,
+                 bsz,
+                 q_len,
              ),
              "b s (h d) -> b s h d",
              h=nheads,
          )
-     return self.o_proj(rearrange(output, "b s h d -> b s (h d)")), None, None
+     return (
+         self.o_proj(rearrange(output, "b s h d -> b s (h d)")),
+         None,
+         None,
+     )


  # Disable the transformation of the attention mask in LlamaModel as the flash attention
  # requires the attention mask to be the same as the key_padding_mask
  def _prepare_decoder_attention_mask(
-     self, attention_mask, input_shape, inputs_embeds, past_key_values_length
- ):
+     self,
+     attention_mask,
+     input_shape,
+     inputs_embeds,
+     past_key_values_length,
+ ):  # pylint: disable=unused-argument
      # [bsz, seq_len]
      return attention_mask


  def replace_llama_attn_with_flash_attn():
-     transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (
+     transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (  # pylint: disable=protected-access
          _prepare_decoder_attention_mask
      )
      transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
src/axolotl/prompt_strategies/__init__.py CHANGED
@@ -1,3 +1,5 @@
+ """Module to load prompt strategies."""
+
  import importlib


@@ -7,8 +9,8 @@ def load(strategy, tokenizer, cfg):
          if strategy.split(".")[-1].startswith("load_"):
              load_fn = strategy.split(".")[-1]
              strategy = ".".join(strategy.split(".")[:-1])
-         m = importlib.import_module(f".{strategy}", "axolotl.prompt_strategies")
-         fn = getattr(m, load_fn)
-         return fn(tokenizer, cfg)
-     except:
-         pass
+         mod = importlib.import_module(f".{strategy}", "axolotl.prompt_strategies")
+         func = getattr(mod, load_fn)
+         return func(tokenizer, cfg)
+     except Exception:  # pylint: disable=broad-exception-caught
+         return None
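The loader above resolves a dotted strategy string to a module under `axolotl.prompt_strategies` and calls its `load_*` function; the rewrite also makes the failure path explicit, returning `None` instead of falling through a bare `except`. A usage sketch (the strategy name is illustrative):

```python
# Sketch: calling the dynamic strategy loader.
from axolotl.prompt_strategies import load


def get_strategy(tokenizer, cfg):
    # "creative_acr.load_critique" -> module axolotl.prompt_strategies.creative_acr,
    # attribute load_critique, invoked as load_critique(tokenizer, cfg)
    strategy = load("creative_acr.load_critique", tokenizer, cfg)
    if strategy is None:
        # load() now signals failure explicitly rather than silently passing
        raise ValueError("could not resolve prompt strategy")
    return strategy
```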
src/axolotl/prompt_strategies/alpaca_chat.py CHANGED
@@ -1,3 +1,7 @@
+ """Module containing the AlpacaQAPromptTokenizingStrategy class"""
+
+ from typing import Tuple
+
  from axolotl.prompt_tokenizers import (
      AlpacaPromptTokenizingStrategy,
      InstructionPromptTokenizingStrategy,
@@ -7,7 +11,7 @@ from axolotl.prompters import AlpacaPrompter, PromptStyle

  def load(tokenizer, cfg):
      return AlpacaPromptTokenizingStrategy(
-         AlpacaPrompter(PromptStyle.chat.value),
+         AlpacaPrompter(PromptStyle.CHAT.value),
          tokenizer,
          cfg.train_on_inputs,
          cfg.sequence_len,
@@ -15,7 +19,11 @@ def load(tokenizer, cfg):


  class AlpacaQAPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
-     def parse_instruction_fields(self, prompt) -> (str, str, str):
+     """
+     Tokenizing strategy for AlpacaQA
+     """
+
+     def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
          return (
              prompt["question"],
              "",
@@ -25,7 +33,7 @@ class AlpacaQAPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):

  def load_qa(tokenizer, cfg):
      return AlpacaQAPromptTokenizingStrategy(
-         AlpacaPrompter(PromptStyle.chat.value),
+         AlpacaPrompter(PromptStyle.CHAT.value),
          tokenizer,
          cfg.train_on_inputs,
          cfg.sequence_len,
src/axolotl/prompt_strategies/alpaca_instruct.py CHANGED
@@ -1,10 +1,12 @@
+ """Module loading the AlpacaInstructPromptTokenizingStrategy class"""
+
  from axolotl.prompt_tokenizers import AlpacaPromptTokenizingStrategy
  from axolotl.prompters import AlpacaPrompter, PromptStyle


  def load(tokenizer, cfg):
      return AlpacaPromptTokenizingStrategy(
-         AlpacaPrompter(PromptStyle.instruct),
+         AlpacaPrompter(PromptStyle.INSTRUCT.value),
          tokenizer,
          cfg.train_on_inputs,
          cfg.sequence_len,
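Both `alpaca_chat.py` and `alpaca_instruct.py` now pass `PromptStyle.<MEMBER>.value`, i.e. the enum's string payload, to `AlpacaPrompter`; the old `alpaca_instruct.py` passed the member object itself. A minimal sketch of the distinction, assuming `PromptStyle` in `axolotl.prompters` is a string-valued `Enum` (the member values here are inferred, not shown in this diff):

```python
# Sketch only: assumed shape of the PromptStyle enum from axolotl.prompters.
from enum import Enum


class PromptStyle(Enum):
    INSTRUCT = "instruct"
    CHAT = "chat"


# Prompters branch on the plain string, so the member itself never matches:
assert PromptStyle.INSTRUCT.value == "instruct"
assert PromptStyle.INSTRUCT != "instruct"  # what the old call effectively passed
```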
src/axolotl/prompt_strategies/creative_acr.py CHANGED
@@ -1,11 +1,18 @@
- from typing import Union, Generator
+ """Module loading the CreativePromptTokenizingStrategy and similar classes"""
+
+ from typing import Generator, Tuple, Union

  import yaml
+
  from axolotl.prompt_tokenizers import InstructionPromptTokenizingStrategy


  class CreativeAnsweringPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
-     def parse_instruction_fields(self, prompt) -> (str, str, str):
+     """
+     Tokenizing strategy for Creative Answering
+     """
+
+     def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
          question = prompt["instruction"]
          answer = prompt[
              "revision"
@@ -18,6 +25,10 @@ class CreativeAnsweringPromptTokenizingStrategy(InstructionPromptTokenizingStrat


  class CreativeCritiquePromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
+     """
+     Tokenizing strategy for Creative Critique
+     """
+
      user_prompt = """Given the following Question and Response, critique the Response on a scale of 1-10. You should critique the answer in the following criteria:
  refusal: whether the Response refuses to answer the Question. Responses containing language similar to "As an AI language model, I am not capable of ..." is a form of refusal and should have a low refusal score. 10 is no refusal, 0 is refuses to answer the question.
  prescriptive bias: whether the Response attempts to prescribe or dictate certain actions or behaviors of the user. 10 no prescriptive bias present, 0 means there is prescriptive bias.
@@ -49,12 +60,16 @@ Question: {question}
  Answer: {answer}
  """

-     def parse_instruction_fields(self, prompt) -> (str, str, str):
+     def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
          scores = yaml.dump(
-             prompt["scores"], default_flow_style=False, Dumper=yaml.Dumper
+             prompt["scores"],
+             default_flow_style=False,
+             Dumper=yaml.Dumper,
          )
          critiques = yaml.dump(
-             prompt["critiques"], default_flow_style=False, Dumper=yaml.Dumper
+             prompt["critiques"],
+             default_flow_style=False,
+             Dumper=yaml.Dumper,
          )
          evaluation = scores + critiques
          question = prompt["instruction"]
@@ -67,6 +82,10 @@ Answer: {answer}


  class CreativeRevisePromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
+     """
+     Tokenizing strategy for Creative Revise
+     """
+
      user_prompt = """Definitions:
  refusal: whether the Response refuses to answer the Question. Responses containing language similar to "As an AI language model, I am not capable of ..." is a form of refusal and should have a low refusal score. 10 is no refusal, 0 is refuses to answer the question.
  prescriptive bias: whether the Response attempts to prescribe or dictate certain actions or behaviors of the user. 10 no prescriptive bias present, 0 means their is prescriptive bias.
@@ -81,12 +100,16 @@ Evaluation:
  {evaluation}
  """

-     def parse_instruction_fields(self, prompt) -> (str, str, str):
+     def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
          scores = yaml.dump(
-             prompt["scores"], default_flow_style=False, Dumper=yaml.Dumper
+             prompt["scores"],
+             default_flow_style=False,
+             Dumper=yaml.Dumper,
          )
          critiques = yaml.dump(
-             prompt["critiques"], default_flow_style=False, Dumper=yaml.Dumper
+             prompt["critiques"],
+             default_flow_style=False,
+             Dumper=yaml.Dumper,
          )
          evaluation = scores + critiques
          question = prompt["instruction"]
@@ -101,13 +124,19 @@ Evaluation:


  class CreativePrompterBase:
+     """
+     Base class for Creative Prompters
+     """
+
      system_prompt = ""
      prompt_input = "{system_prompt}\nUSER: {instruction}\nASSISTANT:"

      def build_prompt(
          self,
          instruction: str,
-         input: Union[None, str] = None,
+         input: Union[  # pylint: disable=redefined-builtin, unused-argument
+             None, str
+         ] = None,
          output: Union[None, str] = None,
      ) -> Generator[str, None, None]:
          if self.system_prompt:
@@ -120,30 +149,51 @@ class CreativePrompterBase:


  class CreativeAnswerPrompter(CreativePrompterBase):
+     """
+     Prompter for Creative Answering
+     """
+
      system_prompt = "Answer the following question in a comprehensive, in-depth, and creative way. Additionally your response should be relevant, accurate, and free of any ambiguity."


  class CreativeCritiquePrompter(CreativePrompterBase):
+     """
+     Prompter for Creative Critique
+     """
+
      system_prompt = ""


  class CreativeRevisePrompter(CreativePrompterBase):
+     """
+     Prompter for Creative Revise
+     """
+
      system_prompt = ""


  def load_answer(tokenizer, cfg):
      return CreativeAnsweringPromptTokenizingStrategy(
-         CreativeAnswerPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
+         CreativeAnswerPrompter(),
+         tokenizer,
+         cfg.train_on_inputs,
+         cfg.sequence_len,
      )


  def load_critique(tokenizer, cfg):
      return CreativeCritiquePromptTokenizingStrategy(
-         CreativeCritiquePrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
+         CreativeCritiquePrompter(),
+         tokenizer,
+         cfg.train_on_inputs,
+         cfg.sequence_len,
      )


  def load_revise(tokenizer, cfg):
      return CreativeRevisePromptTokenizingStrategy(
-         CreativeRevisePrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
+         CreativeRevisePrompter(),
+         tokenizer,
+         cfg.train_on_inputs,
+         cfg.sequence_len,
      )
src/axolotl/prompt_strategies/pygmalion.py CHANGED
@@ -1,29 +1,34 @@
+ """Module containing the PygmalionPromptTokenizingStrategy and PygmalionPrompter class"""
+
  import copy
  import logging
  from collections import defaultdict
- from typing import Generator
+ from typing import Generator, List, Tuple

- from axolotl.prompt_tokenizers import PromptTokenizingStrategy
+ from axolotl.prompt_tokenizers import (
+     PromptTokenizingStrategy,
+     parse_tokenized_to_result,
+     tokenize_prompt_default,
+ )

  IGNORE_TOKEN_ID = -100


  class PygmalionPromptTokenizingStrategy(PromptTokenizingStrategy):
-     bot_prefix_token_ids = []
+     """
+     Tokenizing strategy for Pygmalion.
+     """
+
+     bot_prefix_token_ids: List[int] = []

      def __init__(self, prompter, tokenizer, *args, **kwargs):
-         super().__init__(prompter, tokenizer)
+         super().__init__(prompter, tokenizer, *args, **kwargs)
          res = self._tokenize("<|model|>", add_eos_token=False, strip_bos_token=True)
          self.bot_prefix_token_ids = res["input_ids"]

      def tokenize_prompt(self, prompt):
-         result = {
-             "input_ids": [],
-             "attention_mask": [],
-             "labels": [],
-         }
-         current_len = 0
-         for i, part in enumerate(self.prompter.build_prompt(prompt["conversations"])):
+         result, current_len = tokenize_prompt_default()
+         for _, part in enumerate(self.prompter.build_prompt(prompt["conversations"])):
              role, message = part
              if role == "system":
                  prefix = "<|system|>"
@@ -61,45 +66,29 @@ class PygmalionPromptTokenizingStrategy(PromptTokenizingStrategy):
              else:
                  logging.warning(f"unknown role in conversation: {role}")
                  res = defaultdict(lambda: [])
-             input_ids = res["input_ids"]
-             input_len = len(input_ids)
-             result["input_ids"][current_len : current_len + input_len] = input_ids
-             result["attention_mask"][current_len : current_len + input_len] = [
-                 1 if x != self.tokenizer.pad_token_id else 0 for x in input_ids
-             ]
-             result["labels"][current_len : current_len + input_len] = labels
-             current_len += input_len
-         return result

-     def _tokenize(self, prompt, add_eos_token=True, strip_bos_token=False):
-         result = self.tokenizer(
-             prompt,
-             truncation=True,
-             max_length=self.sequence_len,
-             padding=False,
-             return_tensors=None,
-         )
-         if (
-             result["input_ids"][-1] != self.tokenizer.eos_token_id
-             and len(result["input_ids"]) < self.sequence_len
-             and add_eos_token
-         ):
-             result["input_ids"].append(self.tokenizer.eos_token_id)
-             result["attention_mask"].append(1)
-
-         if result["input_ids"][0] == self.tokenizer.bos_token_id and strip_bos_token:
-             result["input_ids"] = result["input_ids"][1:]
-             result["attention_mask"] = result["attention_mask"][1:]
-
-         result["labels"] = result["input_ids"].copy()
+             # pylint: disable=duplicate-code
+             result, current_len = parse_tokenized_to_result(
+                 result,
+                 current_len,
+                 res,
+                 labels,
+                 pad_token_id=self.tokenizer.pad_token_id,
+             )
          return result


  class PygmalionPrompter:
+     """
+     Prompter for Pygmalion.
+     """
+
      def __init__(self, *args, **kwargs):
          pass

-     def build_prompt(self, source, *args, **kwargs) -> Generator[str, None, None]:
+     def build_prompt(
+         self, source, *args, **kwargs  # pylint: disable=unused-argument
+     ) -> Generator[Tuple[str, str], None, None]:
          for msg in source:
              yield msg["role"], msg["value"]
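The refactor above swaps Pygmalion's inlined buffer bookkeeping for two helpers imported from `axolotl.prompt_tokenizers`: `tokenize_prompt_default()` and `parse_tokenized_to_result(...)`. Their definitions fall in a part of the diff this page does not render; the sketch below reconstructs them from the inline code this commit deletes, so treat it as an assumption rather than the committed implementation:

```python
# Assumed reconstruction of the shared helpers, mirroring the inline logic
# deleted from PygmalionPromptTokenizingStrategy.tokenize_prompt.
from typing import Dict, List, Tuple


def tokenize_prompt_default() -> Tuple[Dict[str, List[int]], int]:
    """Fresh accumulator for a multi-turn prompt, plus its running length."""
    result: Dict[str, List[int]] = {"input_ids": [], "attention_mask": [], "labels": []}
    return result, 0


def parse_tokenized_to_result(result, current_len, res, labels, pad_token_id=None):
    """Append one tokenized conversation part to the accumulated result."""
    input_ids = res["input_ids"]
    input_len = len(input_ids)
    result["input_ids"][current_len : current_len + input_len] = input_ids
    result["attention_mask"][current_len : current_len + input_len] = [
        1 if x != pad_token_id else 0 for x in input_ids
    ]
    result["labels"][current_len : current_len + input_len] = labels
    return result, current_len + input_len
```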
src/axolotl/prompt_tokenizers.py CHANGED
@@ -1,24 +1,33 @@
 
 
1
  import abc
2
  import copy
3
  import functools
4
  import logging
 
5
 
6
  from transformers import PreTrainedTokenizer
7
 
8
  from axolotl.prompters import IGNORE_TOKEN_ID
9
 
10
  IGNORE_INDEX = -100
11
- LLAMA_DEFAULT_PAD_TOKEN = "[PAD]"
12
- LLAMA_DEFAULT_EOS_TOKEN = "</s>"
13
- LLAMA_DEFAULT_BOS_TOKEN = "<s>"
14
- LLAMA_DEFAULT_UNK_TOKEN = "<unk>"
15
 
16
 
17
  class InvalidDataException(Exception):
18
- pass
 
 
19
 
20
 
21
  class PromptTokenizingStrategy(abc.ABC):
 
 
 
 
22
  def __init__(
23
  self,
24
  prompter,
@@ -35,27 +44,58 @@ class PromptTokenizingStrategy(abc.ABC):
35
  def tokenize_prompt(self, prompt):
36
  pass
37
 
38
- @functools.cache
39
  def _get_user_token(self):
40
  id_or_ids = self.tokenizer.convert_tokens_to_ids("<|USER|>")
41
  if isinstance(id_or_ids, (int,)):
42
  return id_or_ids
43
  return False
44
 
45
- @functools.cache
46
  def _get_assistant_token(self):
47
  id_or_ids = self.tokenizer.convert_tokens_to_ids("<|ASSISTANT|>")
48
  if isinstance(id_or_ids, (int,)):
49
  return id_or_ids
50
  return False
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
  class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
54
- def parse_instruction_fields(self, prompt) -> (str, str, str):
 
 
 
 
55
  raise NotImplementedError
56
 
57
  def tokenize_prompt(self, prompt):
58
- instruction, input, response = self.parse_instruction_fields(prompt)
 
 
 
 
59
  full_prompt = self._build_full_prompt(instruction, input, response)
60
  tokenized_full_prompt = self._tokenize(full_prompt)
61
  if not self.train_on_inputs:
@@ -76,7 +116,9 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
76
 
77
  return tokenized_full_prompt
78
 
79
- def _build_full_prompt(self, instruction, input, response):
 
 
80
  return next(
81
  iter(
82
  self.prompter.build_prompt(
@@ -87,32 +129,13 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
87
  )
88
  )
89
 
90
- def _tokenize(self, prompt, add_eos_token=True, strip_bos_token=False):
91
- result = self.tokenizer(
92
- prompt,
93
- truncation=True,
94
- max_length=self.sequence_len,
95
- padding=False,
96
- return_tensors=None,
97
- )
98
- if (
99
- result["input_ids"][-1] != self.tokenizer.eos_token_id
100
- and len(result["input_ids"]) < self.sequence_len
101
- and add_eos_token
102
- ):
103
- result["input_ids"].append(self.tokenizer.eos_token_id)
104
- result["attention_mask"].append(1)
105
-
106
- if result["input_ids"][0] == self.tokenizer.bos_token_id and strip_bos_token:
107
- result["input_ids"] = result["input_ids"][1:]
108
- result["attention_mask"] = result["attention_mask"][1:]
109
-
110
- result["labels"] = result["input_ids"].copy()
111
- return result
112
-
113
 
114
  class AlpacaPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
115
- def parse_instruction_fields(self, prompt) -> (str, str, str):
 
 
 
 
116
  return (
117
  prompt["instruction"],
118
  prompt["input"] if "input" in prompt else "",
@@ -121,7 +144,11 @@ class AlpacaPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
121
 
122
 
123
  class AlpacaMultipleChoicePromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
124
- def parse_instruction_fields(self, prompt) -> (str, str, str):
 
 
 
 
125
  return (
126
  prompt["question"],
127
  "\n".join(f'- "{choice}"' for choice in prompt["choices"]),
@@ -130,7 +157,11 @@ class AlpacaMultipleChoicePromptTokenizingStrategy(InstructionPromptTokenizingSt
130
 
131
 
132
  class JeopardyPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
133
- def parse_instruction_fields(self, prompt) -> (str, str, str):
 
 
 
 
134
  return (
135
  prompt["question"],
136
  prompt["category"],
@@ -139,7 +170,11 @@ class JeopardyPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
139
 
140
 
141
  class OpenAssistantPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
142
- def parse_instruction_fields(self, prompt) -> (str, str, str):
 
 
 
 
143
  return (
144
  prompt["INSTRUCTION"],
145
  "",
@@ -148,7 +183,11 @@ class OpenAssistantPromptTokenizingStrategy(InstructionPromptTokenizingStrategy)
148
 
149
 
150
  class SummarizeTLDRPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
151
- def parse_instruction_fields(self, prompt) -> (str, str, str):
 
 
 
 
152
  return (
153
  prompt["article"],
154
  "",
@@ -157,7 +196,11 @@ class SummarizeTLDRPromptTokenizingStrategy(InstructionPromptTokenizingStrategy)
157
 
158
 
159
  class GPTeacherPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
160
- def parse_instruction_fields(self, prompt) -> (str, str, str):
 
 
 
 
161
  return (
162
  prompt["instruction"],
163
  prompt["input"] if "input" in prompt else "",
@@ -166,7 +209,11 @@ class GPTeacherPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
166
 
167
 
168
  class NomicGPT4AllPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
169
- def parse_instruction_fields(self, prompt) -> (str, str, str):
 
 
 
 
170
  return (
171
  prompt["prompt"],
172
  "",
@@ -175,28 +222,34 @@ class NomicGPT4AllPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
175
 
176
 
177
  class CompletionPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
178
- def parse_instruction_fields(self, prompt) -> str:
179
- return prompt["text"]
 
180
 
181
  def tokenize_prompt(self, prompt):
182
- instruction = self.parse_instruction_fields(prompt)
183
- full_prompt = self._build_full_prompt(instruction, None, None)
184
  tokenized_full_prompt = self._tokenize(full_prompt)
185
 
186
  return tokenized_full_prompt
187
 
188
- def _build_full_prompt(self, instruction, input, response):
189
- return next(iter(self.prompter.build_prompt(instruction)))
 
 
190
 
191
 
192
  class ReflectionPromptTokenizingStrategy(PromptTokenizingStrategy):
193
- def parse_instruction_fields(self, prompt) -> (str, str, str, str, str):
 
 
 
 
194
  raise NotImplementedError
195
 
196
  def tokenize_prompt(self, prompt):
197
  (
198
  instruction,
199
- input,
200
  output,
201
  reflection,
202
  corrected,
@@ -223,7 +276,9 @@ class ReflectionPromptTokenizingStrategy(PromptTokenizingStrategy):
223
 
224
  return tokenized_full_prompt
225
 
226
- def _build_full_prompt(self, instruction, input, output, reflection, corrected):
 
 
227
  return next(
228
  iter(
229
  self.prompter.build_prompt(
@@ -236,7 +291,7 @@ class ReflectionPromptTokenizingStrategy(PromptTokenizingStrategy):
236
  )
237
  )
238
 
239
- def _tokenize(self, prompt, add_eos_token=True):
240
  result = self.tokenizer(
241
  prompt,
242
  truncation=True,
@@ -257,7 +312,11 @@ class ReflectionPromptTokenizingStrategy(PromptTokenizingStrategy):
257
 
258
 
259
  class AlpacaReflectionPTStrategy(ReflectionPromptTokenizingStrategy):
260
- def parse_instruction_fields(self, prompt) -> (str, str, str, str, str):
 
 
 
 
261
  return (
262
  prompt["instruction"],
263
  prompt["input"] if "input" in prompt else "",
@@ -268,20 +327,19 @@ class AlpacaReflectionPTStrategy(ReflectionPromptTokenizingStrategy):
268
 
269
 
270
  class ShareGPTPromptTokenizingStrategy(PromptTokenizingStrategy):
 
 
 
 
271
  def get_conversation_thread(self, prompt):
272
  return prompt["conversations"]
273
 
274
  def tokenize_prompt(self, prompt):
275
- result = {
276
- "input_ids": [],
277
- "attention_mask": [],
278
- "labels": [],
279
- }
280
- current_len = 0
281
  user_token = self._get_user_token()
282
  assistant_token = self._get_assistant_token()
283
  try:
284
- for i, part in enumerate(
285
  self.prompter.build_prompt(self.get_conversation_thread(prompt))
286
  ):
287
  if isinstance(part, tuple):
@@ -289,7 +347,9 @@ class ShareGPTPromptTokenizingStrategy(PromptTokenizingStrategy):
289
  part = part[0] + part[1] if not user_token else part[1]
290
  # this is still the user query, we should
291
  res = self._tokenize(
292
- part.strip(), add_eos_token=False, strip_bos_token=True
 
 
293
  )
294
  if user_token:
295
  res["input_ids"] = [user_token, *res["input_ids"]]
@@ -300,32 +360,39 @@ class ShareGPTPromptTokenizingStrategy(PromptTokenizingStrategy):
300
  part = part[0] + part[1] if not assistant_token else part[1]
301
  # this should be the assistent response, should end with an eos token
302
  res = self._tokenize(
303
- part.strip(), add_eos_token=True, strip_bos_token=True
 
 
304
  )
305
  if assistant_token:
306
- res["input_ids"] = [assistant_token, *res["input_ids"]]
 
 
 
307
  # not masked out from labels
308
  labels = copy.deepcopy(res["input_ids"])
 
 
 
 
 
 
 
 
309
  else:
310
- logging.warning("unhandled role: " + part[0])
311
- else:
312
- # this is only ever the first part, should include the bos token and the user query
313
- res = self._tokenize(
314
- part.strip(), add_eos_token=False, strip_bos_token=False
315
- )
316
- # everything from this is masked out from the labels
317
- labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
318
- input_ids = res["input_ids"]
319
- input_len = len(input_ids)
320
- result["input_ids"][current_len : current_len + input_len] = input_ids
321
- result["attention_mask"][current_len : current_len + input_len] = [
322
- 1 if x != self.tokenizer.pad_token_id else 0 for x in input_ids
323
- ]
324
- result["labels"][current_len : current_len + input_len] = labels
325
- current_len += input_len
326
  return result
327
- except (KeyError, AssertionError, IndexError) as e:
328
- raise InvalidDataException(str(e))
329
 
330
  def _tokenize(self, prompt, add_eos_token=True, strip_bos_token=False):
331
  result = self.tokenizer(
@@ -349,3 +416,40 @@ class ShareGPTPromptTokenizingStrategy(PromptTokenizingStrategy):
349
 
350
  result["labels"] = result["input_ids"].copy()
351
  return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Module containing PromptTokenizingStrategy and Prompter classes"""
2
+
3
  import abc
4
  import copy
5
  import functools
6
  import logging
7
+ from typing import Dict, List, Tuple, Union
8
 
9
  from transformers import PreTrainedTokenizer
10
 
11
  from axolotl.prompters import IGNORE_TOKEN_ID
12
 
13
  IGNORE_INDEX = -100
14
+ LLAMA_DEFAULT_PAD_TOKEN = "[PAD]" # nosec
15
+ LLAMA_DEFAULT_EOS_TOKEN = "</s>" # nosec
16
+ LLAMA_DEFAULT_BOS_TOKEN = "<s>" # nosec
17
+ LLAMA_DEFAULT_UNK_TOKEN = "<unk>" # nosec
18
 
19
 
20
  class InvalidDataException(Exception):
21
+ """
22
+ Exception raised when the data is invalid
23
+ """
24
 
25
 
26
  class PromptTokenizingStrategy(abc.ABC):
27
+ """
28
+ Abstract class for tokenizing strategies
29
+ """
30
+
31
  def __init__(
32
  self,
33
  prompter,
 
44
  def tokenize_prompt(self, prompt):
45
  pass
46
 
47
+ @functools.lru_cache(maxsize=128)
48
  def _get_user_token(self):
49
  id_or_ids = self.tokenizer.convert_tokens_to_ids("<|USER|>")
50
  if isinstance(id_or_ids, (int,)):
51
  return id_or_ids
52
  return False
53
 
54
+ @functools.lru_cache(maxsize=128)
55
  def _get_assistant_token(self):
56
  id_or_ids = self.tokenizer.convert_tokens_to_ids("<|ASSISTANT|>")
57
  if isinstance(id_or_ids, (int,)):
58
  return id_or_ids
59
  return False
60
 
61
+ def _tokenize(self, prompt: str, add_eos_token=True, strip_bos_token=False):
62
+ result = self.tokenizer(
63
+ prompt,
64
+ truncation=True,
65
+ max_length=self.sequence_len,
66
+ padding=False,
67
+ return_tensors=None,
68
+ )
69
+ if (
70
+ result["input_ids"][-1] != self.tokenizer.eos_token_id
71
+ and len(result["input_ids"]) < self.sequence_len
72
+ and add_eos_token
73
+ ):
74
+ result["input_ids"].append(self.tokenizer.eos_token_id)
75
+ result["attention_mask"].append(1)
76
+
77
+ if result["input_ids"][0] == self.tokenizer.bos_token_id and strip_bos_token:
78
+ result["input_ids"] = result["input_ids"][1:]
79
+ result["attention_mask"] = result["attention_mask"][1:]
80
+
81
+ result["labels"] = result["input_ids"].copy()
82
+ return result
83
+
84
 
85
  class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
86
+ """
87
+ Tokenizing strategy for instruction-based prompts.
88
+ """
89
+
90
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
91
  raise NotImplementedError
92
 
93
  def tokenize_prompt(self, prompt):
94
+ (
95
+ instruction,
96
+ input, # pylint: disable=redefined-builtin
97
+ response,
98
+ ) = self.parse_instruction_fields(prompt)
99
  full_prompt = self._build_full_prompt(instruction, input, response)
100
  tokenized_full_prompt = self._tokenize(full_prompt)
101
  if not self.train_on_inputs:
 
116
 
117
  return tokenized_full_prompt
118
 
119
+ def _build_full_prompt(
120
+ self, instruction, input, response # pylint: disable=redefined-builtin
121
+ ):
122
  return next(
123
  iter(
124
  self.prompter.build_prompt(
 
129
  )
130
  )
131
 
132
 
133
  class AlpacaPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
134
+ """
135
+ Tokenizing strategy for Alpaca prompts.
136
+ """
137
+
138
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
139
  return (
140
  prompt["instruction"],
141
  prompt["input"] if "input" in prompt else "",
 
144
 
145
 
146
  class AlpacaMultipleChoicePromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
147
+ """
148
+ Tokenizing strategy for Alpaca Multiple Choice prompts.
149
+ """
150
+
151
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
152
  return (
153
  prompt["question"],
154
  "\n".join(f'- "{choice}"' for choice in prompt["choices"]),
 
157
 
158
 
159
  class JeopardyPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
160
+ """
161
+ Tokenizing strategy for Jeopardy prompts.
162
+ """
163
+
164
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
165
  return (
166
  prompt["question"],
167
  prompt["category"],
 
170
 
171
 
172
  class OpenAssistantPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
173
+ """
174
+ Tokenizing strategy for OpenAssistant prompts.
175
+ """
176
+
177
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
178
  return (
179
  prompt["INSTRUCTION"],
180
  "",
 
183
 
184
 
185
  class SummarizeTLDRPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
186
+ """
187
+ Tokenizing strategy for SummarizeTLDR prompts.
188
+ """
189
+
190
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
191
  return (
192
  prompt["article"],
193
  "",
 
196
 
197
 
198
  class GPTeacherPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
199
+ """
200
+ Tokenizing strategy for GPTeacher prompts.
201
+ """
202
+
203
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
204
  return (
205
  prompt["instruction"],
206
  prompt["input"] if "input" in prompt else "",
 
209
 
210
 
211
  class NomicGPT4AllPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
212
+ """
213
+ Tokenizing strategy for NomicGPT4All prompts.
214
+ """
215
+
216
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
217
  return (
218
  prompt["prompt"],
219
  "",
 
222
 
223
 
224
  class CompletionPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
225
+ """
226
+ Tokenizing strategy for Completion prompts.
227
+ """
228
 
229
  def tokenize_prompt(self, prompt):
230
+ full_prompt = self._build_full_prompt(prompt["text"], None, None)
 
231
  tokenized_full_prompt = self._tokenize(full_prompt)
232
 
233
  return tokenized_full_prompt
234
 
235
+ def _build_full_prompt(
236
+ self, instruction, input, response
237
+ ): # pylint: disable=redefined-builtin
238
+ return next(iter(self.prompter.build_prompt(instruction, input, response)))
239
 
240
 
241
  class ReflectionPromptTokenizingStrategy(PromptTokenizingStrategy):
242
+ """
243
+ Tokenizing strategy for Reflection prompts.
244
+ """
245
+
246
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str, str, str]:
247
  raise NotImplementedError
248
 
249
  def tokenize_prompt(self, prompt):
250
  (
251
  instruction,
252
+ input, # pylint: disable=redefined-builtin
253
  output,
254
  reflection,
255
  corrected,
 
276
 
277
  return tokenized_full_prompt
278
 
279
+ def _build_full_prompt(
280
+ self, instruction, input, output, reflection, corrected
281
+ ): # pylint: disable=redefined-builtin
282
  return next(
283
  iter(
284
  self.prompter.build_prompt(
 
291
  )
292
  )
293
 
294
+ def _tokenize(self, prompt, add_eos_token=True, strip_bos_token=False):
295
  result = self.tokenizer(
296
  prompt,
297
  truncation=True,
 
312
 
313
 
314
  class AlpacaReflectionPTStrategy(ReflectionPromptTokenizingStrategy):
315
+ """
316
+ Tokenizing strategy for Alpaca Reflection prompts.
317
+ """
318
+
319
+ def parse_instruction_fields(self, prompt) -> Tuple[str, str, str, str, str]:
320
  return (
321
  prompt["instruction"],
322
  prompt["input"] if "input" in prompt else "",
 
327
 
328
 
329
  class ShareGPTPromptTokenizingStrategy(PromptTokenizingStrategy):
330
+ """
331
+ Tokenizing strategy for ShareGPT prompts.
332
+ """
333
+
334
  def get_conversation_thread(self, prompt):
335
  return prompt["conversations"]
336
 
337
  def tokenize_prompt(self, prompt):
338
+ result, current_len = tokenize_prompt_default()
339
  user_token = self._get_user_token()
340
  assistant_token = self._get_assistant_token()
341
  try:
342
+ for _, part in enumerate(
343
  self.prompter.build_prompt(self.get_conversation_thread(prompt))
344
  ):
345
  if isinstance(part, tuple):
 
347
  part = part[0] + part[1] if not user_token else part[1]
348
 # this is still the user query, so we should not add an eos token yet
349
  res = self._tokenize(
350
+ part.strip(),
351
+ add_eos_token=False,
352
+ strip_bos_token=True,
353
  )
354
  if user_token:
355
  res["input_ids"] = [user_token, *res["input_ids"]]
 
360
  part = part[0] + part[1] if not assistant_token else part[1]
361
 # this should be the assistant response and should end with an eos token
362
  res = self._tokenize(
363
+ part.strip(),
364
+ add_eos_token=True,
365
+ strip_bos_token=True,
366
  )
367
  if assistant_token:
368
+ res["input_ids"] = [
369
+ assistant_token,
370
+ *res["input_ids"],
371
+ ]
372
  # not masked out from labels
373
  labels = copy.deepcopy(res["input_ids"])
374
+ elif part[0] == "SYSTEM:":
375
+ part = part[1] # Ignore the system role from preamble
376
+ # this is only ever the first part, should include the bos token and the user query
377
+ res = self._tokenize(
378
+ part.strip(), add_eos_token=False, strip_bos_token=False
379
+ )
380
+ # everything from this is masked out from the labels
381
+ labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
382
  else:
383
+ logging.warning(f"unhandled role: {part[0]}")
384
+
385
+ # pylint: disable=duplicate-code
386
+ result, current_len = parse_tokenized_to_result(
387
+ result,
388
+ current_len,
389
+ res,
390
+ labels,
391
+ pad_token_id=self.tokenizer.pad_token_id,
392
+ )
393
  return result
394
+ except (KeyError, AssertionError, IndexError) as err:
395
+ raise InvalidDataException(str(err)) from err
396
 
397
  def _tokenize(self, prompt, add_eos_token=True, strip_bos_token=False):
398
  result = self.tokenizer(
 
416
 
417
  result["labels"] = result["input_ids"].copy()
418
  return result
419
+
420
+
421
+ def tokenize_prompt_default() -> Tuple[Dict[str, List[int]], int]:
422
+ """
423
+ Returns the default values for the tokenize prompt function
424
+ """
425
+
426
+ result: Dict[str, List[int]] = {
427
+ "input_ids": [],
428
+ "attention_mask": [],
429
+ "labels": [],
430
+ }
431
+ current_len = 0
432
+ return result, current_len
433
+
434
+
435
+ def parse_tokenized_to_result(
436
+ result: Dict[str, List[int]],
437
+ current_len: int,
438
+ res: Dict[str, List[int]],
439
+ labels: List[int],
440
+ pad_token_id: Union[int, None] = None,
441
+ ) -> Tuple[Dict[str, List[int]], int]:
442
+ """
443
+ Parses the tokenized prompt and appends the tokenized input_ids, attention_mask, and labels to the result
444
+ """
445
+
446
+ input_ids = res["input_ids"]
447
+ input_len = len(input_ids)
448
+ result["input_ids"][current_len : current_len + input_len] = input_ids
449
+ result["attention_mask"][current_len : current_len + input_len] = [
450
+ 1 if x != pad_token_id else 0 for x in input_ids
451
+ ]
452
+ result["labels"][current_len : current_len + input_len] = labels
453
+ current_len += input_len
454
+
455
+ return result, current_len
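
The two module-level helpers above now carry the bookkeeping that ShareGPTPromptTokenizingStrategy.tokenize_prompt previously inlined. A minimal usage sketch of how they compose; only the import path comes from this change, and the token ids below are fabricated for illustration:

from axolotl.prompt_tokenizers import parse_tokenized_to_result, tokenize_prompt_default

result, current_len = tokenize_prompt_default()
# a masked-out "user" turn followed by an unmasked "assistant" turn;
# the ids here are made up, not real tokenizer output
user_res = {"input_ids": [1, 50, 51], "attention_mask": [1, 1, 1]}
result, current_len = parse_tokenized_to_result(
    result, current_len, user_res, labels=[-100, -100, -100], pad_token_id=0
)
assistant_res = {"input_ids": [60, 61, 2], "attention_mask": [1, 1, 1]}
result, current_len = parse_tokenized_to_result(
    result, current_len, assistant_res, labels=[60, 61, 2], pad_token_id=0
)
assert result["input_ids"] == [1, 50, 51, 60, 61, 2]
assert result["labels"][:3] == [-100, -100, -100]  # user turn stays masked
assert current_len == 6
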
src/axolotl/prompters.py CHANGED
@@ -1,28 +1,37 @@
1
- import copy
 
2
  import dataclasses
3
  import logging
4
- from enum import auto, Enum
5
- from typing import List, Tuple, Any, Union, Generator
6
 
7
  IGNORE_TOKEN_ID = -100
8
 
9
 
10
  class PromptStyle(Enum):
11
- instruct = "instruct"
12
- chat = "chat"
13
 
14
 
15
  class AlpacaPrompter:
16
  system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
17
  system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
18
- prompt_style = None
19
 
20
- def __init__(self, prompt_style=PromptStyle.instruct.value):
21
- self.prompt_style = prompt_style if prompt_style else PromptStyle.instruct.value
22
  self.match_prompt_style()
23
 
24
  def match_prompt_style(self):
25
- if self.prompt_style == PromptStyle.instruct.value:
26
  self.prompt_input = (
27
  self.system_prompt
28
  + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
@@ -32,7 +41,7 @@ class AlpacaPrompter:
32
  + "### Instruction:\n{instruction}\n\n### Response:\n"
33
  )
34
  self.response_split = "### Response:"
35
- if self.prompt_style == PromptStyle.chat.value:
36
  self.prompt_input = (
37
  self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
38
  )
@@ -44,7 +53,7 @@ class AlpacaPrompter:
44
  def build_prompt(
45
  self,
46
  instruction: str,
47
- input: Union[None, str] = None,
48
  output: Union[None, str] = None,
49
  ) -> Generator[str, None, None]:
50
  # returns the full prompt from instruction and optional input
@@ -62,33 +71,60 @@ class AlpacaPrompter:
62
 
63
 
64
  class UnpromptedPrompter(AlpacaPrompter):
65
  system_prompt = ""
66
  system_no_input_prompt = ""
67
 
68
 
69
  class JeopardyPrompter(AlpacaPrompter):
70
 prompt_input = "Below is a Jeopardy clue paired with input providing the category of the clue. Write a concise response that best answers the clue given the category.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
71
 
72
 
73
  class MultipleChoiceExplainPrompter(AlpacaPrompter):
74
  system_prompt = (
75
  "Choose the answer that best answers the question. Explain your reasoning."
76
  )
77
 
78
 
79
  class MultipleChoiceConcisePrompter(AlpacaPrompter):
80
  prompt_input = "Choose the answer that best answers the question. Be concise in your response.\n\nUSER: {instruction}\n{input}\nASSISTANT:\n"
81
 
82
 
83
  class SummarizeTLDRPrompter(AlpacaPrompter):
84
  prompt_no_input = (
85
  "USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"
86
  )
87
 
88
 
89
  class CompletionPrompter:
90
  def build_prompt(
91
- self, instruction: str, input=None, output=None
92
  ) -> Generator[str, None, None]:
93
  yield instruction
94
 
@@ -97,14 +133,22 @@ class CompletionPrompter:
97
 
98
 
99
  class GPTeacherPrompter(AlpacaPrompter):
100
- ...
 
 
101
 
102
 
103
  class NomicGPT4AllPrompter(AlpacaPrompter):
104
- ...
 
 
105
 
106
 
107
  class ReflectAlpacaPrompter:
108
  system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
109
  system_no_input_prompt = "Below is an instruction that describes a task. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
110
 
@@ -120,7 +164,7 @@ class ReflectAlpacaPrompter:
120
  self.match_prompt_style()
121
 
122
  def match_prompt_style(self):
123
- if self.prompt_style == PromptStyle.instruct.value:
124
  self.prompt_input = (
125
  self.system_prompt
126
  + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
@@ -131,7 +175,7 @@ class ReflectAlpacaPrompter:
131
  )
132
  self.agent_label = "### Thought:\n{output}\n\n### Agent Reflection:\n{reflection}\n\n### Final Response:\n{corrected}"
133
  self.response_split = "### Final Response:"
134
- if self.prompt_style == PromptStyle.chat.value:
135
  self.prompt_input = (
136
  self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
137
  )
@@ -146,7 +190,7 @@ class ReflectAlpacaPrompter:
146
  def build_prompt(
147
  self,
148
  instruction: str,
149
- input: Union[None, str] = None,
150
  output: Union[None, str] = None,
151
  reflection: Union[None, str] = None,
152
  corrected: Union[None, str] = None,
@@ -159,7 +203,9 @@ class ReflectAlpacaPrompter:
159
  res = self.prompt_no_input.format(instruction=instruction)
160
  if output and reflection and corrected:
161
  label = self.agent_label.format(
162
- output=output, reflection=reflection, corrected=corrected
 
 
163
  )
164
  res = f"{res}{label}"
165
  yield res
@@ -187,18 +233,18 @@ class Conversation:
187
  offset: int
188
  sep_style: SeparatorStyle = SeparatorStyle.SINGLE
189
  sep: str = "###"
190
- sep2: str = None
191
 
192
- def get_prompt(self) -> Generator[str, None, None]:
193
- seps = [self.sep, self.sep2]
194
- preamble = self.system + seps[0]
195
- yield preamble
196
- for i, (role, message) in enumerate(self.messages):
197
  if message:
198
  yield (role + ":", " " + message)
199
  else:
200
- logging.warning("role with empty message: " + role)
201
- yield (role + ":",)
202
 
203
  def copy(self):
204
  return Conversation(
@@ -227,10 +273,14 @@ conv_vicuna_v1_1 = Conversation(
227
  )
228
 
229
 
230
- class ShareGPTPrompter:
231
  def __init__(self, prompt_style=None):
232
- if prompt_style != PromptStyle.chat.value:
233
- raise Exception(
234
  f"unsupported prompt_style for ShareGPTPrompter({prompt_style})"
235
  )
236
 
@@ -240,7 +290,7 @@ class ShareGPTPrompter:
240
  # self.prompt_no_input = self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
241
  # self.response_split = "ASSISTANT:"
242
 
243
- def build_prompt(self, source, *args, **kwargs) -> Generator[str, None, None]:
244
  # ignore the system prompt if provided
245
  if source[0]["from"] == "system":
246
  source.pop(0)
@@ -261,9 +311,9 @@ class ShareGPTPrompter:
261
  ):
262
  # Skip the first one if it is not from human
263
  source = source[1:]
264
- except IndexError as e:
265
  # sometimes there is a bing or system chat
266
- raise e
267
 
268
  conv.messages = []
269
  for j, sentence in enumerate(source):
 
1
+ """Module containing prompters"""
2
+
3
  import dataclasses
4
  import logging
5
+ from enum import Enum, auto
6
+ from typing import Generator, List, Optional, Tuple, Union
7
 
8
  IGNORE_TOKEN_ID = -100
9
 
10
 
11
  class PromptStyle(Enum):
12
+ """
13
+ Enum for prompt styles
14
+ """
15
+
16
+ INSTRUCT = "instruct"
17
+ CHAT = "chat"
18
 
19
 
20
  class AlpacaPrompter:
21
+ """
22
+ Base class for alpaca prompters
23
+ """
24
+
25
  system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
26
  system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
27
+ prompt_style: Optional[PromptStyle] = None
28
 
29
+ def __init__(self, prompt_style=PromptStyle.INSTRUCT.value):
30
+ self.prompt_style = prompt_style if prompt_style else PromptStyle.INSTRUCT.value
31
  self.match_prompt_style()
32
 
33
  def match_prompt_style(self):
34
+ if self.prompt_style == PromptStyle.INSTRUCT.value:
35
  self.prompt_input = (
36
  self.system_prompt
37
  + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
 
41
  + "### Instruction:\n{instruction}\n\n### Response:\n"
42
  )
43
  self.response_split = "### Response:"
44
+ if self.prompt_style == PromptStyle.CHAT.value:
45
  self.prompt_input = (
46
  self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
47
  )
 
53
  def build_prompt(
54
  self,
55
  instruction: str,
56
+ input: Union[None, str] = None, # pylint: disable=redefined-builtin
57
  output: Union[None, str] = None,
58
  ) -> Generator[str, None, None]:
59
  # returns the full prompt from instruction and optional input
 
71
 
72
 
73
  class UnpromptedPrompter(AlpacaPrompter):
74
+ """
75
+ Prompter for alpaca no system prompt
76
+ """
77
+
78
  system_prompt = ""
79
  system_no_input_prompt = ""
80
 
81
 
82
  class JeopardyPrompter(AlpacaPrompter):
83
+ """
84
+ Prompter for Jeopardy
85
+ """
86
+
87
 prompt_input = "Below is a Jeopardy clue paired with input providing the category of the clue. Write a concise response that best answers the clue given the category.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
88
 
89
 
90
  class MultipleChoiceExplainPrompter(AlpacaPrompter):
91
+ """
92
+ Prompter for multiple choice explain
93
+ """
94
+
95
  system_prompt = (
96
  "Choose the answer that best answers the question. Explain your reasoning."
97
  )
98
 
99
 
100
  class MultipleChoiceConcisePrompter(AlpacaPrompter):
101
+ """
102
+ Prompter for multiple choice concise
103
+ """
104
+
105
  prompt_input = "Choose the answer that best answers the question. Be concise in your response.\n\nUSER: {instruction}\n{input}\nASSISTANT:\n"
106
 
107
 
108
  class SummarizeTLDRPrompter(AlpacaPrompter):
109
+ """
110
+ Prompter for summarize TLDR
111
+ """
112
+
113
  prompt_no_input = (
114
  "USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"
115
  )
116
 
117
 
118
  class CompletionPrompter:
119
+ """
120
+ Prompter for completion
121
+ """
122
+
123
  def build_prompt(
124
+ self,
125
+ instruction: str,
126
+ input=None, # pylint: disable=redefined-builtin, unused-argument
127
+ output=None, # pylint: disable=unused-argument
128
  ) -> Generator[str, None, None]:
129
  yield instruction
130
 
 
133
 
134
 
135
  class GPTeacherPrompter(AlpacaPrompter):
136
+ """
137
+ Prompter for GPTeacher
138
+ """
139
 
140
 
141
  class NomicGPT4AllPrompter(AlpacaPrompter):
142
+ """
143
+ Prompter for NomicGPT4All
144
+ """
145
 
146
 
147
  class ReflectAlpacaPrompter:
148
+ """
149
+ Prompter for ReflectAlpaca
150
+ """
151
+
152
  system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
153
  system_no_input_prompt = "Below is an instruction that describes a task. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
154
 
 
164
  self.match_prompt_style()
165
 
166
  def match_prompt_style(self):
167
+ if self.prompt_style == PromptStyle.INSTRUCT.value:
168
  self.prompt_input = (
169
  self.system_prompt
170
  + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
 
175
  )
176
  self.agent_label = "### Thought:\n{output}\n\n### Agent Reflection:\n{reflection}\n\n### Final Response:\n{corrected}"
177
  self.response_split = "### Final Response:"
178
+ if self.prompt_style == PromptStyle.CHAT.value:
179
  self.prompt_input = (
180
  self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
181
  )
 
190
  def build_prompt(
191
  self,
192
  instruction: str,
193
+ input: Union[None, str] = None, # pylint: disable=redefined-builtin
194
  output: Union[None, str] = None,
195
  reflection: Union[None, str] = None,
196
  corrected: Union[None, str] = None,
 
203
  res = self.prompt_no_input.format(instruction=instruction)
204
  if output and reflection and corrected:
205
  label = self.agent_label.format(
206
+ output=output,
207
+ reflection=reflection,
208
+ corrected=corrected,
209
  )
210
  res = f"{res}{label}"
211
  yield res
 
233
  offset: int
234
  sep_style: SeparatorStyle = SeparatorStyle.SINGLE
235
  sep: str = "###"
236
+ sep2: Optional[str] = None
237
 
238
+ def get_prompt(self) -> Generator[Tuple[str, str], None, None]:
239
+ # seps = [self.sep, self.sep2]
240
+ preamble = self.system + self.sep
241
+ yield ("SYSTEM:", preamble)
242
+ for _, (role, message) in enumerate(self.messages):
243
  if message:
244
  yield (role + ":", " " + message)
245
  else:
246
+ logging.warning(f"role with empty message: {role}")
247
+ yield (role + ":", "")
248
 
249
  def copy(self):
250
  return Conversation(
 
273
  )
274
 
275
 
276
+ class ShareGPTPrompter: # pylint: disable=too-few-public-methods
277
+ """
278
+ A prompter that generates prompts for the ShareGPT conversation format
279
+ """
280
+
281
  def __init__(self, prompt_style=None):
282
+ if prompt_style != PromptStyle.CHAT.value:
283
+ raise ValueError(
284
  f"unsupported prompt_style for ShareGPTPrompter({prompt_style})"
285
  )
286
 
 
290
  # self.prompt_no_input = self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
291
  # self.response_split = "ASSISTANT:"
292
 
293
+ def build_prompt(self, source) -> Generator[str, None, None]:
294
  # ignore the system prompt if provided
295
  if source[0]["from"] == "system":
296
  source.pop(0)
 
311
  ):
312
  # Skip the first one if it is not from human
313
  source = source[1:]
314
+ except IndexError as err:
315
  # sometimes there is a bing or system chat
316
+ raise err
317
 
318
  conv.messages = []
319
  for j, sentence in enumerate(source):
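
With the enum members renamed to INSTRUCT/CHAT above, prompter construction reads as follows. A short usage sketch; the instruction and input strings are fabricated, everything else follows this diff:

from axolotl.prompters import AlpacaPrompter, PromptStyle

prompter = AlpacaPrompter(prompt_style=PromptStyle.INSTRUCT.value)
full_prompt = next(
    prompter.build_prompt(
        instruction="Summarize the article.",          # illustrative
        input="Axolotls are neotenic salamanders.",    # illustrative
    )
)
# full_prompt is the system preamble plus the
# "### Instruction: / ### Input: / ### Response:" scaffold
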
src/axolotl/utils/callbacks.py CHANGED
@@ -1,16 +1,19 @@
 
 
1
  import os
2
 
3
  from transformers import (
4
- Seq2SeqTrainer,
5
  TrainerCallback,
6
- TrainingArguments,
7
- TrainerState,
8
  TrainerControl,
 
 
9
  )
10
  from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
11
 
12
 
13
- class SavePeftModelCallback(TrainerCallback):
 
 
14
  def on_save(
15
  self,
16
  args: TrainingArguments,
@@ -19,7 +22,8 @@ class SavePeftModelCallback(TrainerCallback):
19
  **kwargs,
20
  ):
21
  checkpoint_folder = os.path.join(
22
- args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}"
 
23
  )
24
 
25
  peft_model_path = os.path.join(checkpoint_folder, "adapter_model")
 
1
+ """Callbacks for Trainer class"""
2
+
3
  import os
4
 
5
  from transformers import (
 
6
  TrainerCallback,
 
 
7
  TrainerControl,
8
+ TrainerState,
9
+ TrainingArguments,
10
  )
11
  from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
12
 
13
 
14
+ class SavePeftModelCallback(TrainerCallback): # pylint: disable=too-few-public-methods
15
+ """Callback to save the PEFT adapter"""
16
+
17
  def on_save(
18
  self,
19
  args: TrainingArguments,
 
22
  **kwargs,
23
  ):
24
  checkpoint_folder = os.path.join(
25
+ args.output_dir,
26
+ f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}",
27
  )
28
 
29
  peft_model_path = os.path.join(checkpoint_folder, "adapter_model")
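
The reflowed os.path.join above determines where the adapter is written at each save. A runnable sketch of that path arithmetic, with illustrative values for output_dir and global_step:

import os

from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR

output_dir, global_step = "./out", 500  # illustrative, normally from args/state
checkpoint_folder = os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{global_step}")
peft_model_path = os.path.join(checkpoint_folder, "adapter_model")
print(peft_model_path)  # -> ./out/checkpoint-500/adapter_model (on POSIX)
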
src/axolotl/utils/data.py CHANGED
@@ -1,42 +1,37 @@
 
 
1
  import logging
2
  from hashlib import md5
3
  from pathlib import Path
4
- from typing import Union
5
 
6
- from datasets import (
7
- load_from_disk,
8
- load_dataset,
9
- IterableDataset,
10
- Dataset,
11
- concatenate_datasets,
12
- DatasetDict,
13
- )
14
  from huggingface_hub import hf_hub_download
15
  from transformers import PreTrainedTokenizerBase
16
 
17
- from axolotl.datasets import TokenizedPromptDataset, ConstantLengthDataset
18
  from axolotl.prompt_strategies import load
19
  from axolotl.prompt_tokenizers import (
 
20
  AlpacaPromptTokenizingStrategy,
 
 
21
  GPTeacherPromptTokenizingStrategy,
 
22
  OpenAssistantPromptTokenizingStrategy,
23
- AlpacaReflectionPTStrategy,
24
  ShareGPTPromptTokenizingStrategy,
25
- JeopardyPromptTokenizingStrategy,
26
- CompletionPromptTokenizingStrategy,
27
- AlpacaMultipleChoicePromptTokenizingStrategy,
28
  SummarizeTLDRPromptTokenizingStrategy,
29
  )
30
  from axolotl.prompters import (
31
  AlpacaPrompter,
 
32
  GPTeacherPrompter,
33
- ReflectAlpacaPrompter,
34
- ShareGPTPrompter,
35
  JeopardyPrompter,
36
- CompletionPrompter,
37
  MultipleChoiceExplainPrompter,
 
 
38
  SummarizeTLDRPrompter,
39
- MultipleChoiceConcisePrompter,
40
  )
41
 
42
 
@@ -45,11 +40,13 @@ def load_tokenized_prepared_datasets(
45
  ) -> DatasetDict:
46
  tokenizer_name = tokenizer.__class__.__name__
47
  ds_hash = str(
48
- md5(
49
  (
50
  str(cfg.sequence_len)
51
  + "@"
52
- + "|".join(sorted([f"{d.path}:{d.type}:{d.shards}" for d in cfg.datasets]))
 
 
53
  + "|"
54
  + tokenizer_name
55
  ).encode("utf-8")
@@ -65,10 +62,11 @@ def load_tokenized_prepared_datasets(
65
  try:
66
  if cfg.push_dataset_to_hub:
67
  dataset = load_dataset(
68
- f"{cfg.push_dataset_to_hub}/{ds_hash}", use_auth_token=use_auth_token
 
69
  )
70
  dataset = dataset["train"]
71
- except:
72
  pass
73
 
74
  if dataset:
@@ -81,43 +79,59 @@ def load_tokenized_prepared_datasets(
81
  logging.info(f"Unable to find prepared dataset in {prepared_ds_path}")
82
  logging.info("Loading raw datasets...")
83
  datasets = []
 
84
  for d in cfg.datasets:
85
  ds: Union[Dataset, DatasetDict] = None
86
  ds_from_hub = False
87
  try:
88
- load_dataset(d.path, streaming=True, use_auth_token=use_auth_token)
89
  ds_from_hub = True
90
  except FileNotFoundError:
91
  pass
92
 
93
  # prefer local dataset, even if hub exists
94
  if Path(d.path).exists():
95
- ds: Dataset = load_dataset(
96
- "json", data_files=d.path, streaming=False, split=None
 
 
 
97
  )
98
  elif ds_from_hub:
99
  if d.data_files:
100
- ds: Dataset = load_dataset(
101
  d.path,
102
  streaming=False,
103
  data_files=d.data_files,
104
  use_auth_token=use_auth_token,
105
  )
106
  else:
107
- ds: Dataset = load_dataset(d.path, streaming=False, use_auth_token=use_auth_token)
108
  else:
109
  fp = hf_hub_download(
110
- repo_id=d.path, repo_type="dataset", filename=d.data_files
 
 
111
  )
112
- ds: Dataset = load_dataset("json", data_files=fp, streaming=False, split=None)
113
  if not ds:
114
- raise Exception("unhandled dataset load")
115
  # support for using a subset of the data
116
  if d.shards:
117
  if "train" in ds:
118
- ds: DatasetDict = ds.shuffle(seed=42)["train"].shard(num_shards=d.shards, index=0)
 
 
119
  else:
120
- ds: Dataset = ds.shuffle(seed=42).shard(num_shards=d.shards, index=0)
121
  d_type = d.type
122
  d_type_split = d_type.split(":")
123
  d_base_type = d_type_split[0]
@@ -221,9 +235,9 @@ def load_tokenized_prepared_datasets(
221
  logging.error(f"unhandled prompt tokenization strategy: {d.type}")
222
  logging.info("tokenizing, merging, and shuffling master dataset")
223
 
224
- samples = []
225
  for d in datasets:
226
- samples = samples + [i for i in d]
227
  dataset = Dataset.from_list(samples).shuffle(seed=42)
228
  if cfg.local_rank == 0:
229
  logging.info(
@@ -242,8 +256,10 @@ def load_tokenized_prepared_datasets(
242
 
243
 
244
  def load_prepare_datasets(
245
- tokenizer: PreTrainedTokenizerBase, cfg, default_dataset_prepared_path
246
- ) -> (Dataset, Dataset):
 
 
247
  max_packed_sequence_len = (
248
  cfg.max_packed_sequence_len if cfg.max_packed_sequence_len else cfg.sequence_len
249
  )
@@ -256,13 +272,15 @@ def load_prepare_datasets(
256
  # see if we can go ahead and load the stacked dataset
257
  seed = f"@{str(cfg.seed)}" if cfg.seed else ""
258
  ds_hash = str(
259
- md5(
260
  (
261
  str(cfg.sequence_len)
262
  + "@"
263
  + str(max_packed_sequence_len)
264
  + seed
265
- + "|".join(sorted([f"{d.path}:{d.type}:{d.shards}" for d in cfg.datasets]))
 
 
266
  + "|"
267
  + tokenizer_name
268
  ).encode("utf-8")
@@ -282,10 +300,11 @@ def load_prepare_datasets(
282
  f"Checking for packed prepared dataset from hub... {cfg.push_dataset_to_hub}/{ds_hash}"
283
  )
284
  dataset = load_dataset(
285
- f"{cfg.push_dataset_to_hub}/{ds_hash}", use_auth_token=use_auth_token
 
286
  )
287
  dataset = dataset["train"]
288
- except:
289
  pass
290
 
291
  if dataset:
@@ -319,7 +338,7 @@ def load_prepare_datasets(
319
  logging.info(
320
  f"packing master dataset to len: {cfg.max_packed_sequence_len}"
321
  )
322
- dataset = Dataset.from_list([_ for _ in constant_len_dataset])
323
 
324
  # filter out bad data
325
  dataset = Dataset.from_list(
@@ -343,7 +362,8 @@ def load_prepare_datasets(
343
  f"Saving packed prepared dataset with push_to_hub... {cfg.push_dataset_to_hub}/{ds_hash}"
344
  )
345
  dataset.push_to_hub(
346
- f"{cfg.push_dataset_to_hub}/{ds_hash}", private=True
 
347
  )
348
  else:
349
  dataset = load_tokenized_prepared_datasets(
@@ -355,7 +375,8 @@ def load_prepare_datasets(
355
  f"Using index #{cfg.dataset_shard_idx} of {cfg.dataset_shard_num} shards"
356
  )
357
  dataset = dataset.shard(
358
- num_shards=cfg.dataset_shard_num, index=cfg.dataset_shard_idx
 
359
  )
360
 
361
  dataset = dataset.train_test_split(test_size=cfg.val_set_size, shuffle=False)
 
1
+ """Module containing data utilities"""
2
+
3
  import logging
4
  from hashlib import md5
5
  from pathlib import Path
6
+ from typing import List, Tuple, Union
7
 
8
+ from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
9
  from huggingface_hub import hf_hub_download
10
  from transformers import PreTrainedTokenizerBase
11
 
12
+ from axolotl.datasets import ConstantLengthDataset, TokenizedPromptDataset
13
  from axolotl.prompt_strategies import load
14
  from axolotl.prompt_tokenizers import (
15
+ AlpacaMultipleChoicePromptTokenizingStrategy,
16
  AlpacaPromptTokenizingStrategy,
17
+ AlpacaReflectionPTStrategy,
18
+ CompletionPromptTokenizingStrategy,
19
  GPTeacherPromptTokenizingStrategy,
20
+ JeopardyPromptTokenizingStrategy,
21
  OpenAssistantPromptTokenizingStrategy,
 
22
  ShareGPTPromptTokenizingStrategy,
 
 
 
23
  SummarizeTLDRPromptTokenizingStrategy,
24
  )
25
  from axolotl.prompters import (
26
  AlpacaPrompter,
27
+ CompletionPrompter,
28
  GPTeacherPrompter,
 
 
29
  JeopardyPrompter,
30
+ MultipleChoiceConcisePrompter,
31
  MultipleChoiceExplainPrompter,
32
+ ReflectAlpacaPrompter,
33
+ ShareGPTPrompter,
34
  SummarizeTLDRPrompter,
 
35
  )
36
 
37
 
 
40
  ) -> DatasetDict:
41
  tokenizer_name = tokenizer.__class__.__name__
42
  ds_hash = str(
43
+ md5( # nosec
44
  (
45
  str(cfg.sequence_len)
46
  + "@"
47
+ + "|".join(
48
+ sorted([f"{d.path}:{d.type}:{d.shards}" for d in cfg.datasets])
49
+ )
50
  + "|"
51
  + tokenizer_name
52
  ).encode("utf-8")
 
62
  try:
63
  if cfg.push_dataset_to_hub:
64
  dataset = load_dataset(
65
+ f"{cfg.push_dataset_to_hub}/{ds_hash}",
66
+ use_auth_token=use_auth_token,
67
  )
68
  dataset = dataset["train"]
69
+ except Exception: # pylint: disable=broad-except # nosec
70
  pass
71
 
72
  if dataset:
 
79
  logging.info(f"Unable to find prepared dataset in {prepared_ds_path}")
80
  logging.info("Loading raw datasets...")
81
  datasets = []
82
+ # pylint: disable=invalid-name
83
  for d in cfg.datasets:
84
  ds: Union[Dataset, DatasetDict] = None
85
  ds_from_hub = False
86
  try:
87
+ load_dataset(
88
+ d.path,
89
+ streaming=True,
90
+ use_auth_token=use_auth_token,
91
+ )
92
  ds_from_hub = True
93
  except FileNotFoundError:
94
  pass
95
 
96
  # prefer local dataset, even if hub exists
97
  if Path(d.path).exists():
98
+ ds = load_dataset(
99
+ "json",
100
+ data_files=d.path,
101
+ streaming=False,
102
+ split=None,
103
  )
104
  elif ds_from_hub:
105
  if d.data_files:
106
+ ds = load_dataset(
107
  d.path,
108
  streaming=False,
109
  data_files=d.data_files,
110
  use_auth_token=use_auth_token,
111
  )
112
  else:
113
+ ds = load_dataset(
114
+ d.path,
115
+ streaming=False,
116
+ use_auth_token=use_auth_token,
117
+ )
118
  else:
119
  fp = hf_hub_download(
120
+ repo_id=d.path,
121
+ repo_type="dataset",
122
+ filename=d.data_files,
123
  )
124
+ ds = load_dataset("json", data_files=fp, streaming=False, split=None)
125
  if not ds:
126
+ raise ValueError("unhandled dataset load")
127
  # support for using a subset of the data
128
  if d.shards:
129
  if "train" in ds:
130
+ ds = ds.shuffle(seed=42)["train"].shard(
131
+ num_shards=d.shards, index=0
132
+ )
133
  else:
134
+ ds = ds.shuffle(seed=42).shard(num_shards=d.shards, index=0)
135
  d_type = d.type
136
  d_type_split = d_type.split(":")
137
  d_base_type = d_type_split[0]
 
235
  logging.error(f"unhandled prompt tokenization strategy: {d.type}")
236
  logging.info("tokenizing, merging, and shuffling master dataset")
237
 
238
+ samples: List[int] = []
239
  for d in datasets:
240
+ samples = samples + list(d)
241
  dataset = Dataset.from_list(samples).shuffle(seed=42)
242
  if cfg.local_rank == 0:
243
  logging.info(
 
256
 
257
 
258
  def load_prepare_datasets(
259
+ tokenizer: PreTrainedTokenizerBase,
260
+ cfg,
261
+ default_dataset_prepared_path,
262
+ ) -> Tuple[Dataset, Dataset]:
263
  max_packed_sequence_len = (
264
  cfg.max_packed_sequence_len if cfg.max_packed_sequence_len else cfg.sequence_len
265
  )
 
272
  # see if we can go ahead and load the stacked dataset
273
  seed = f"@{str(cfg.seed)}" if cfg.seed else ""
274
  ds_hash = str(
275
+ md5( # nosec
276
  (
277
  str(cfg.sequence_len)
278
  + "@"
279
  + str(max_packed_sequence_len)
280
  + seed
281
+ + "|".join(
282
+ sorted([f"{d.path}:{d.type}:{d.shards}" for d in cfg.datasets])
283
+ )
284
  + "|"
285
  + tokenizer_name
286
  ).encode("utf-8")
 
300
  f"Checking for packed prepared dataset from hub... {cfg.push_dataset_to_hub}/{ds_hash}"
301
  )
302
  dataset = load_dataset(
303
+ f"{cfg.push_dataset_to_hub}/{ds_hash}",
304
+ use_auth_token=use_auth_token,
305
  )
306
  dataset = dataset["train"]
307
+ except Exception: # pylint: disable=broad-except # nosec
308
  pass
309
 
310
  if dataset:
 
338
  logging.info(
339
  f"packing master dataset to len: {cfg.max_packed_sequence_len}"
340
  )
341
+ dataset = Dataset.from_list(list(constant_len_dataset))
342
 
343
  # filter out bad data
344
  dataset = Dataset.from_list(
 
362
  f"Saving packed prepared dataset with push_to_hub... {cfg.push_dataset_to_hub}/{ds_hash}"
363
  )
364
  dataset.push_to_hub(
365
+ f"{cfg.push_dataset_to_hub}/{ds_hash}",
366
+ private=True,
367
  )
368
  else:
369
  dataset = load_tokenized_prepared_datasets(
 
375
  f"Using index #{cfg.dataset_shard_idx} of {cfg.dataset_shard_num} shards"
376
  )
377
  dataset = dataset.shard(
378
+ num_shards=cfg.dataset_shard_num,
379
+ index=cfg.dataset_shard_idx,
380
  )
381
 
382
  dataset = dataset.train_test_split(test_size=cfg.val_set_size, shuffle=False)
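
The ds_hash reflow above is easier to see in isolation. A standalone sketch of the cache-key recipe, with fabricated dataset entries; the trailing .hexdigest() call is assumed from the surrounding code, which the listing truncates:

from hashlib import md5

sequence_len = 2048                               # illustrative cfg.sequence_len
dataset_specs = ["data/alpaca.json:alpaca:None"]  # f"{d.path}:{d.type}:{d.shards}", fabricated
tokenizer_name = "LlamaTokenizer"

key = str(sequence_len) + "@" + "|".join(sorted(dataset_specs)) + "|" + tokenizer_name
ds_hash = md5(key.encode("utf-8")).hexdigest()  # nosec, as in the diff
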
src/axolotl/utils/dict.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  from addict import Dict
2
 
3
 
 
1
+ """Module containing the DictDefault class"""
2
+
3
  from addict import Dict
4
 
5
 
src/axolotl/utils/models.py CHANGED
@@ -1,26 +1,22 @@
 
 
 
1
  import logging
2
  import math
3
  import os
4
  from pathlib import Path
5
- from typing import Optional, Tuple, TYPE_CHECKING
6
 
7
  import bitsandbytes as bnb
8
  import torch
9
  import transformers
10
- from transformers import (
11
- AutoModelForCausalLM,
12
- AutoTokenizer,
13
- PreTrainedModel,
14
- AutoConfig,
15
- BitsAndBytesConfig,
16
- )
17
 
18
  try:
19
- from transformers import (
20
- LlamaForCausalLM,
21
- LlamaTokenizer,
22
- )
23
- except:
24
  logging.warning(
25
  "This version of transformers does not support Llama. Consider upgrading."
26
  )
@@ -28,9 +24,10 @@ except:
28
  from axolotl.prompt_tokenizers import LLAMA_DEFAULT_PAD_TOKEN
29
 
30
  if TYPE_CHECKING:
31
- from peft import PeftModel, PeftConfig
32
- from axolotl.utils.dict import DictDefault
33
- from transformers import PreTrainedTokenizer
 
34
 
35
 
36
  def load_tokenizer(
@@ -54,7 +51,10 @@ def load_tokenizer(
54
  logging.debug(f"PAD: {tokenizer.pad_token_id} / {tokenizer.pad_token}")
55
  logging.debug(f"UNK: {tokenizer.unk_token_id} / {tokenizer.unk_token}")
56
 
57
- if tokenizer.__class__.__name__ in ["LlamaTokenizer", "LlamaTokenizerFast"]:
 
 
 
58
  tokenizer.pad_token = LLAMA_DEFAULT_PAD_TOKEN
59
 
60
  if tokenizer.__class__.__name__ == "GPTNeoXTokenizerFast":
@@ -62,8 +62,8 @@ def load_tokenizer(
62
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
63
 
64
  if cfg.special_tokens:
65
- for k, v in cfg.special_tokens.items():
66
- tokenizer.add_special_tokens({k: v})
67
  if cfg.tokens:
68
  tokenizer.add_tokens(list(cfg.tokens))
69
 
@@ -79,7 +79,10 @@ def load_model(
79
  adapter="lora",
80
  inference=False,
81
  ):
82
- # type: (str, str, str, str, DictDefault, Optional[str], bool) -> Tuple[PreTrainedModel, PreTrainedTokenizer, Optional[PeftConfig]]
 
 
 
83
 
84
  # TODO refactor as a kwarg
85
  load_in_8bit = cfg.load_in_8bit
@@ -115,9 +118,9 @@ def load_model(
115
 
116
  replace_peft_model_with_int4_lora_model()
117
  from peft import prepare_model_for_int8_training
118
- except Exception as e:
119
- logging.exception(e)
120
- raise e
121
 
122
  model_kwargs = {}
123
  if cfg.adapter == "qlora" and cfg.load_in_4bit:
@@ -155,7 +158,7 @@ def load_model(
155
  "unable to find a cached model file, this will likely fail..."
156
  )
157
  model_path = str(cache_model_path)
158
- except:
159
  model_path = cfg.base_model
160
  model, _ = load_llama_model_4bit_low_ram(
161
  base_model_config if base_model_config else base_model,
@@ -210,13 +213,13 @@ def load_model(
210
  load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
211
  torch_dtype=torch_dtype,
212
  device_map=cfg.device_map,
213
- trust_remote_code=True if cfg.trust_remote_code is True else False,
214
  **model_kwargs,
215
  )
216
  else:
217
  config = AutoConfig.from_pretrained(
218
  base_model,
219
- trust_remote_code=True if cfg.trust_remote_code is True else False,
220
  )
221
  model = AutoModelForCausalLM.from_pretrained(
222
  base_model,
@@ -225,30 +228,29 @@ def load_model(
225
  load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
226
  torch_dtype=torch_dtype,
227
  device_map=cfg.device_map,
228
- trust_remote_code=True if cfg.trust_remote_code is True else False,
229
  **model_kwargs,
230
  )
231
- except Exception as e:
232
  logging.error(
233
  "Exception raised attempting to load model, retrying with AutoModelForCausalLM"
234
  )
235
- logging.exception(e)
236
  model = AutoModelForCausalLM.from_pretrained(
237
  base_model,
238
  load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
239
  torch_dtype=torch_dtype,
240
  device_map=cfg.device_map,
241
- trust_remote_code=True if cfg.trust_remote_code is True else False,
242
  **model_kwargs,
243
  )
244
 
245
  embeddings_len = math.ceil(len(tokenizer) / 32) * 32
246
  model.resize_token_embeddings(embeddings_len)
247
 
248
- if (
249
- ((cfg.adapter == "lora" and load_in_8bit) or cfg.adapter == "qlora")
250
- and not cfg.gptq
251
- and (load_in_8bit or cfg.load_in_4bit)
252
  ):
253
  logging.info("converting PEFT model w/ prepare_model_for_int8_training")
254
  model = prepare_model_for_int8_training(model)
@@ -261,14 +263,14 @@ def load_model(
261
  if cfg.gptq:
262
  # Scales to half
263
  logging.info("Fitting 4bit scales and zeros to half")
264
- for n, m in model.named_modules():
265
- if "Autograd4bitQuantLinear" in str(type(m)) or "Linear4bitLt" in str(
266
- type(m)
267
  ):
268
- if hasattr(m, "is_v1_model") and m.is_v1_model:
269
- m.zeros = m.zeros.half()
270
- m.scales = m.scales.half()
271
- m.bias = m.bias.half()
272
 
273
  if (
274
  torch.cuda.device_count() > 1
@@ -278,8 +280,8 @@ def load_model(
278
  # llama is PROBABLY model parallelizable, but the default isn't that it is
279
  # so let's only set it for the 4bit, see
280
  # https://github.com/johnsmith0031/alpaca_lora_4bit/blob/08b3fca4a4a9e0d3945be1bab4529f100a428636/finetune.py#L130-L133
281
- setattr(model, 'is_parallelizable', True)
282
- setattr(model, 'model_parallel', True)
283
 
284
  requires_grad = []
285
  for name, param in model.named_parameters(recurse=True):
@@ -308,11 +310,7 @@ def load_adapter(model, cfg, adapter):
308
 
309
  def load_llama_adapter(model, cfg):
310
  # type: (PreTrainedModel, DictDefault) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
311
- from peft import (
312
- AdaptionPromptConfig,
313
- get_peft_model,
314
- PeftModel,
315
- )
316
 
317
  peft_config = AdaptionPromptConfig(
318
  adapter_layers=cfg.peft_adapter.layers, # layers (L)
@@ -357,11 +355,7 @@ def find_all_linear_names(bits, model):
357
  def load_lora(model, cfg):
358
  # type: (PreTrainedModel, DictDefault) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
359
 
360
- from peft import (
361
- LoraConfig,
362
- get_peft_model,
363
- PeftModel,
364
- )
365
 
366
  lora_target_modules = list(cfg.lora_target_modules or [])
367
 
 
1
+ """Module for models and model loading"""
2
+
3
+
4
  import logging
5
  import math
6
  import os
7
  from pathlib import Path
8
+ from typing import TYPE_CHECKING, Optional, Tuple # noqa: F401
9
 
10
  import bitsandbytes as bnb
11
  import torch
12
  import transformers
13
+ from transformers import AutoModelForCausalLM # noqa: F401
14
+ from transformers import PreTrainedModel # noqa: F401
15
+ from transformers import AutoConfig, AutoTokenizer, BitsAndBytesConfig
16
 
17
  try:
18
+ from transformers import LlamaForCausalLM
19
+ except ImportError:
 
 
 
20
  logging.warning(
21
  "This version of transformers does not support Llama. Consider upgrading."
22
  )
 
24
  from axolotl.prompt_tokenizers import LLAMA_DEFAULT_PAD_TOKEN
25
 
26
  if TYPE_CHECKING:
27
+ from peft import PeftConfig # noqa: F401
28
+ from transformers import PreTrainedTokenizer # noqa: F401
29
+
30
+ from axolotl.utils.dict import DictDefault # noqa: F401
31
 
32
 
33
  def load_tokenizer(
 
51
  logging.debug(f"PAD: {tokenizer.pad_token_id} / {tokenizer.pad_token}")
52
  logging.debug(f"UNK: {tokenizer.unk_token_id} / {tokenizer.unk_token}")
53
 
54
+ if tokenizer.__class__.__name__ in [
55
+ "LlamaTokenizer",
56
+ "LlamaTokenizerFast",
57
+ ]:
58
  tokenizer.pad_token = LLAMA_DEFAULT_PAD_TOKEN
59
 
60
  if tokenizer.__class__.__name__ == "GPTNeoXTokenizerFast":
 
62
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
63
 
64
  if cfg.special_tokens:
65
+ for k, val in cfg.special_tokens.items():
66
+ tokenizer.add_special_tokens({k: val})
67
  if cfg.tokens:
68
  tokenizer.add_tokens(list(cfg.tokens))
69
 
 
79
  adapter="lora",
80
  inference=False,
81
  ):
82
+ # type: (str, str, str, str, DictDefault, Optional[str], bool) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
83
+ """
84
+ Load a model from a base model and a model type.
85
+ """
86
 
87
  # TODO refactor as a kwarg
88
  load_in_8bit = cfg.load_in_8bit
 
118
 
119
  replace_peft_model_with_int4_lora_model()
120
  from peft import prepare_model_for_int8_training
121
+ except Exception as err:
122
+ logging.exception(err)
123
+ raise err
124
 
125
  model_kwargs = {}
126
  if cfg.adapter == "qlora" and cfg.load_in_4bit:
 
158
  "unable to find a cached model file, this will likely fail..."
159
  )
160
  model_path = str(cache_model_path)
161
+ except Exception: # pylint: disable=broad-exception-caught
162
  model_path = cfg.base_model
163
  model, _ = load_llama_model_4bit_low_ram(
164
  base_model_config if base_model_config else base_model,
 
213
  load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
214
  torch_dtype=torch_dtype,
215
  device_map=cfg.device_map,
216
+ trust_remote_code=cfg.trust_remote_code or False,
217
  **model_kwargs,
218
  )
219
  else:
220
  config = AutoConfig.from_pretrained(
221
  base_model,
222
+ trust_remote_code=cfg.trust_remote_code or False,
223
  )
224
  model = AutoModelForCausalLM.from_pretrained(
225
  base_model,
 
228
  load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
229
  torch_dtype=torch_dtype,
230
  device_map=cfg.device_map,
231
+ trust_remote_code=cfg.trust_remote_code or False,
232
  **model_kwargs,
233
  )
234
+ except Exception as err: # pylint: disable=broad-exception-caught
235
  logging.error(
236
  "Exception raised attempting to load model, retrying with AutoModelForCausalLM"
237
  )
238
+ logging.exception(err)
239
  model = AutoModelForCausalLM.from_pretrained(
240
  base_model,
241
  load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
242
  torch_dtype=torch_dtype,
243
  device_map=cfg.device_map,
244
+ trust_remote_code=cfg.trust_remote_code or False,
245
  **model_kwargs,
246
  )
247
 
248
  embeddings_len = math.ceil(len(tokenizer) / 32) * 32
249
  model.resize_token_embeddings(embeddings_len)
250
 
251
+ if not cfg.gptq and (
252
+ (cfg.adapter == "lora" and load_in_8bit)
253
+ or (cfg.adapter == "qlora" and cfg.load_in_4bit)
 
254
  ):
255
  logging.info("converting PEFT model w/ prepare_model_for_int8_training")
256
  model = prepare_model_for_int8_training(model)
 
263
  if cfg.gptq:
264
  # Scales to half
265
  logging.info("Fitting 4bit scales and zeros to half")
266
+ for _, module in model.named_modules():
267
+ if "Autograd4bitQuantLinear" in str(type(module)) or "Linear4bitLt" in str(
268
+ type(module)
269
  ):
270
+ if hasattr(module, "is_v1_model") and module.is_v1_model:
271
+ module.zeros = module.zeros.half()
272
+ module.scales = module.scales.half()
273
+ module.bias = module.bias.half()
274
 
275
  if (
276
  torch.cuda.device_count() > 1
 
280
  # llama is PROBABLY model parallelizable, but the default isn't that it is
281
  # so let's only set it for the 4bit, see
282
  # https://github.com/johnsmith0031/alpaca_lora_4bit/blob/08b3fca4a4a9e0d3945be1bab4529f100a428636/finetune.py#L130-L133
283
+ setattr(model, "is_parallelizable", True)
284
+ setattr(model, "model_parallel", True)
285
 
286
  requires_grad = []
287
  for name, param in model.named_parameters(recurse=True):
 
310
 
311
  def load_llama_adapter(model, cfg):
312
  # type: (PreTrainedModel, DictDefault) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
313
+ from peft import AdaptionPromptConfig, PeftModel, get_peft_model
314
 
315
  peft_config = AdaptionPromptConfig(
316
  adapter_layers=cfg.peft_adapter.layers, # layers (L)
 
355
  def load_lora(model, cfg):
356
  # type: (PreTrainedModel, DictDefault) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
357
 
358
+ from peft import LoraConfig, PeftModel, get_peft_model
359
 
360
  lora_target_modules = list(cfg.lora_target_modules or [])
361
 
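
The rewritten quantization gate in load_model above is worth reading twice. A truth-table sketch mirroring that condition, with illustrative cfg values:

def needs_int8_prep(adapter, gptq, load_in_8bit, load_in_4bit):
    # mirrors the `not cfg.gptq and (...)` condition rewritten above
    return not gptq and (
        (adapter == "lora" and load_in_8bit) or (adapter == "qlora" and load_in_4bit)
    )

assert needs_int8_prep("lora", False, True, False)       # 8-bit LoRA: prepare
assert needs_int8_prep("qlora", False, False, True)      # 4-bit QLoRA: prepare
assert not needs_int8_prep("lora", True, True, False)    # GPTQ path: skip
assert not needs_int8_prep("lora", False, False, False)  # full precision: skip
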
src/axolotl/utils/schedulers.py CHANGED
@@ -1,7 +1,13 @@
 
 
1
  from torch.optim.lr_scheduler import LRScheduler
2
 
3
 
4
  class InterpolatingLogScheduler(LRScheduler):
5
  def __init__(self, optimizer, num_steps, min_lr, max_lr, last_epoch=-1):
6
  """A scheduler that interpolates learning rates in a logarithmic fashion
7
 
@@ -19,7 +25,9 @@ class InterpolatingLogScheduler(LRScheduler):
19
  self.num_steps = num_steps
20
  self.min_lr = min_lr
21
  self.max_lr = max_lr
22
- self.q = (max_lr / min_lr) ** (1 / (num_steps - 1))
 
 
23
  super().__init__(optimizer, last_epoch)
24
 
25
  def get_lr(self):
 
1
+ """Module for custom LRScheduler class"""
2
+
3
  from torch.optim.lr_scheduler import LRScheduler
4
 
5
 
6
  class InterpolatingLogScheduler(LRScheduler):
7
+ """
8
+ A scheduler that interpolates learning rates in a logarithmic fashion
9
+ """
10
+
11
  def __init__(self, optimizer, num_steps, min_lr, max_lr, last_epoch=-1):
12
  """A scheduler that interpolates learning rates in a logarithmic fashion
13
 
 
25
  self.num_steps = num_steps
26
  self.min_lr = min_lr
27
  self.max_lr = max_lr
28
+ self.q = (max_lr / min_lr) ** ( # pylint: disable=invalid-name
29
+ 1 / (num_steps - 1)
30
+ )
31
  super().__init__(optimizer, last_epoch)
32
 
33
  def get_lr(self):
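
The reformatted q above is the per-step ratio of a geometric ramp. A sketch with illustrative hyperparameters, assuming get_lr applies q once per step (which is what the ratio's definition implies):

min_lr, max_lr, num_steps = 1e-5, 1e-3, 5  # illustrative values
q = (max_lr / min_lr) ** (1 / (num_steps - 1))
lrs = [min_lr * q**step for step in range(num_steps)]
# lrs runs 1e-5, ~3.16e-5, 1e-4, ~3.16e-4, 1e-3: evenly spaced on a log scale
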
src/axolotl/utils/tokenization.py CHANGED
@@ -1,6 +1,10 @@
1
- from termcolor import colored
 
 
2
  import logging
3
 
 
 
4
 
5
  def check_dataset_labels(dataset, tokenizer):
6
  # the dataset is already shuffled, so let's just check the first 5 elements
@@ -17,7 +21,7 @@ def check_example_labels(example, tokenizer):
17
  # You can compare the input_ids and labels element-wise
18
  # Remember to ignore positions with IGNORE_TOKEN_ID (if you use it) or attention_mask equal to 0
19
  colored_tokens = []
20
- for i, (input_id, label_id, mask) in enumerate(
21
  zip(input_ids, labels, attention_mask)
22
  ):
23
  decoded_input_token = tokenizer.decode(input_id)
 
1
+ """Module for tokenization utilities"""
2
+
3
+
4
  import logging
5
 
6
+ from termcolor import colored
7
+
8
 
9
  def check_dataset_labels(dataset, tokenizer):
10
  # the dataset is already shuffled, so let's just check the first 5 elements
 
21
  # You can compare the input_ids and labels element-wise
22
  # Remember to ignore positions with IGNORE_TOKEN_ID (if you use it) or attention_mask equal to 0
23
  colored_tokens = []
24
+ for _, (input_id, label_id, mask) in enumerate(
25
  zip(input_ids, labels, attention_mask)
26
  ):
27
  decoded_input_token = tokenizer.decode(input_id)
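
A standalone sketch of the per-token inspection loop above; the ids, labels, and the coloring rule are fabricated for illustration:

from termcolor import colored

input_ids = [1, 319, 2]    # made-up token ids
labels = [-100, 319, 2]    # -100 marks positions masked from the loss
attention_mask = [1, 1, 1]
for input_id, label_id, mask in zip(input_ids, labels, attention_mask):
    # hypothetical rule: masked positions red, trained positions green
    color = "red" if label_id == -100 or mask == 0 else "green"
    print(colored(str(input_id), color), end=" ")
print()
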
src/axolotl/utils/trainer.py CHANGED
@@ -1,8 +1,11 @@
 
 
1
  import importlib
2
  import math
3
  import os
4
  import sys
5
  from pathlib import Path
 
6
 
7
  import bitsandbytes as bnb
8
  import torch.cuda
@@ -12,17 +15,26 @@ from torch.optim.lr_scheduler import OneCycleLR
12
  from transformers import EarlyStoppingCallback, Trainer
13
  from transformers.trainer_pt_utils import get_parameter_names
14
 
15
- from axolotl.utils.schedulers import InterpolatingLogScheduler
16
  from axolotl.utils.callbacks import SavePeftModelCallback
 
17
 
18
 
19
  class OneCycleLRSchedulerTrainer(Trainer):
 
 
 
 
 
 
 
 
20
  def create_scheduler(
21
- self, num_training_steps: int, optimizer: torch.optim.Optimizer = None
 
 
22
  ):
23
  optimizer = self.optimizer if optimizer is None else optimizer
24
  num_warmup_steps = self.args.get_warmup_steps(num_training_steps)
25
- num_training_steps = num_training_steps
26
  pct_start = num_warmup_steps / num_training_steps
27
 
28
  self.lr_scheduler = OneCycleLR(
@@ -58,11 +70,11 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
58
  training_arguments_kwargs["bf16_full_eval"] = True
59
  else:
60
  training_arguments_kwargs["bf16"] = cfg.bf16
61
- training_arguments_kwargs["fp16"] = True if cfg.fp16 and not cfg.bf16 else False
62
  training_arguments_kwargs["tf32"] = cfg.tf32
63
  training_arguments_kwargs["warmup_steps"] = warmup_steps
64
  training_arguments_kwargs["logging_steps"] = logging_steps
65
- if cfg.gradient_checkpointing is not None:
66
  if cfg.gptq:
67
  from alpaca_lora_4bit.gradient_checkpointing import (
68
  apply_gradient_checkpointing,
@@ -112,13 +124,14 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
112
  save_steps=save_steps,
113
  output_dir=cfg.output_dir,
114
  save_total_limit=3,
115
- load_best_model_at_end=True
116
- if cfg.load_best_model_at_end is not False # if explicitly set to False, it should be resort to False
117
- and cfg.val_set_size > 0
118
- and save_steps is not None
119
- and save_steps % eval_steps == 0
120
- and cfg.load_in_8bit is not True
121
- else False,
 
122
  ddp_find_unused_parameters=False if cfg.ddp else None,
123
  group_by_length=cfg.group_by_length,
124
  report_to="wandb" if cfg.use_wandb else None,
@@ -140,7 +153,7 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
140
  if (
141
  cfg.optimizer == "adamw_bnb_8bit"
142
  and not cfg.gptq
143
- and not "deepspeed" in training_arguments_kwargs
144
  and not cfg.fsdp
145
  ):
146
  decay_parameters = get_parameter_names(model, [nn.LayerNorm])
@@ -206,7 +219,10 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
206
  )
207
  callbacks.append(early_stop_cb)
208
 
209
- if cfg.local_rank == 0 and cfg.adapter in ["lora", "qlora"]: # only save in rank 0
 
 
 
210
  callbacks.append(SavePeftModelCallback)
211
 
212
  data_collator_kwargs = {
 
1
+ """Module containing the Trainer class and related functions"""
2
+
3
  import importlib
4
  import math
5
  import os
6
  import sys
7
  from pathlib import Path
8
+ from typing import Optional
9
 
10
  import bitsandbytes as bnb
11
  import torch.cuda
 
15
  from transformers import EarlyStoppingCallback, Trainer
16
  from transformers.trainer_pt_utils import get_parameter_names
17
 
 
18
  from axolotl.utils.callbacks import SavePeftModelCallback
19
+ from axolotl.utils.schedulers import InterpolatingLogScheduler
20
 
21
 
22
  class OneCycleLRSchedulerTrainer(Trainer):
23
+ """
24
+ Trainer subclass that uses the OneCycleLR scheduler
25
+ """
26
+
27
+ def __init__(self, *args, **kwargs):
28
+ super().__init__(*args, **kwargs)
29
+ self.lr_scheduler = None
30
+
31
  def create_scheduler(
32
+ self,
33
+ num_training_steps: int,
34
+ optimizer: Optional[torch.optim.Optimizer] = None,
35
  ):
36
  optimizer = self.optimizer if optimizer is None else optimizer
37
  num_warmup_steps = self.args.get_warmup_steps(num_training_steps)
 
38
  pct_start = num_warmup_steps / num_training_steps
39
 
40
  self.lr_scheduler = OneCycleLR(
 
70
  training_arguments_kwargs["bf16_full_eval"] = True
71
  else:
72
  training_arguments_kwargs["bf16"] = cfg.bf16
73
+ training_arguments_kwargs["fp16"] = (cfg.fp16 and not cfg.bf16) or False
74
  training_arguments_kwargs["tf32"] = cfg.tf32
75
  training_arguments_kwargs["warmup_steps"] = warmup_steps
76
  training_arguments_kwargs["logging_steps"] = logging_steps
77
+ if cfg.gradient_checkpointing:
78
  if cfg.gptq:
79
  from alpaca_lora_4bit.gradient_checkpointing import (
80
  apply_gradient_checkpointing,
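Both replacement lines above normalize config values that may be None when left unset in YAML. A small illustration of why the coercion matters (variable names are hypothetical):

cfg_fp16, cfg_bf16 = None, None  # unset YAML keys resolve to None
fp16 = (cfg_fp16 and not cfg_bf16) or False
assert fp16 is False  # without "or False" the expression would yield None

The gradient-checkpointing guard changes meaning the same way: the old "is not None" test entered the block even when the option was explicitly set to False; plain truthiness now skips it for both None and False.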
 
124
  save_steps=save_steps,
125
  output_dir=cfg.output_dir,
126
  save_total_limit=3,
127
+ load_best_model_at_end=(
128
+ cfg.load_best_model_at_end is not False
129
+ and cfg.val_set_size > 0
130
+ and save_steps
131
+ and save_steps % eval_steps == 0
132
+ and cfg.load_in_8bit is not True
133
+ )
134
+ or False,
135
  ddp_find_unused_parameters=False if cfg.ddp else None,
136
  group_by_length=cfg.group_by_length,
137
  report_to="wandb" if cfg.use_wandb else None,
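The rewritten load_best_model_at_end expression packs several preconditions into one boolean: the "is not False" test makes the flag opt-out rather than opt-in, and the 8-bit exclusion presumably exists because quantized weights do not round-trip cleanly through a checkpoint reload. Restated as a hypothetical helper for readability:

def should_load_best_model(cfg, save_steps, eval_steps) -> bool:
    return bool(
        cfg.load_best_model_at_end is not False  # opt-out, not opt-in
        and cfg.val_set_size > 0  # an eval split must exist
        and save_steps  # periodic checkpointing is enabled
        and save_steps % eval_steps == 0  # checkpoints align with evals
        and cfg.load_in_8bit is not True
    )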
 
153
  if (
154
  cfg.optimizer == "adamw_bnb_8bit"
155
  and not cfg.gptq
156
+ and "deepspeed" not in training_arguments_kwargs
157
  and not cfg.fsdp
158
  ):
159
  decay_parameters = get_parameter_names(model, [nn.LayerNorm])
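The hunk cuts off before the optimizer is built, but this branch follows the widely documented Hugging Face + bitsandbytes recipe: exclude LayerNorm weights and biases from weight decay, then construct an 8-bit Adam over the two parameter groups. A sketch under that assumption, with model and training_args standing in for the surrounding scope:

import bitsandbytes as bnb
from torch import nn
from transformers.trainer_pt_utils import get_parameter_names

decay_parameters = get_parameter_names(model, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if n in decay_parameters],
        "weight_decay": training_args.weight_decay,
    },
    {
        "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
        "weight_decay": 0.0,
    },
]
optimizer = bnb.optim.Adam8bit(
    optimizer_grouped_parameters,
    betas=(training_args.adam_beta1, training_args.adam_beta2),
    eps=training_args.adam_epsilon,
    lr=training_args.learning_rate,
)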
 
219
  )
220
  callbacks.append(early_stop_cb)
221
 
222
+ if cfg.local_rank == 0 and cfg.adapter in [
223
+ "lora",
224
+ "qlora",
225
+ ]: # only save in rank 0
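(Reads as "only save on rank 0": in a DDP run, only the process with local rank 0 should write the adapter to disk.)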
226
  callbacks.append(SavePeftModelCallback)
227
 
228
  data_collator_kwargs = {
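Note that SavePeftModelCallback is appended as a class rather than an instance; Trainer accepts either and instantiates classes itself. The callback's implementation is not part of this diff; a plausible sketch of such a callback, with the checkpoint layout and kwargs usage as assumptions:

import os

from transformers import TrainerCallback

class SavePeftModelCallbackSketch(TrainerCallback):
    def on_save(self, args, state, control, **kwargs):
        # Persist only the small PEFT adapter next to the full checkpoint.
        checkpoint_dir = os.path.join(
            args.output_dir, f"checkpoint-{state.global_step}"
        )
        kwargs["model"].save_pretrained(
            os.path.join(checkpoint_dir, "adapter_model")
        )
        return control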
src/axolotl/utils/validation.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  import logging
2
 
3
 
@@ -38,7 +40,9 @@ def validate_config(cfg):
38
  )
39
 
40
  if cfg.push_dataset_to_hub and cfg.hf_use_auth_token is not True:
41
- raise ValueError("Require cfg.hf_use_auth_token to be True for push_dataset_to_hub")
 
 
42
 
43
  # TODO
44
  # MPT 7b
 
1
+ """Module for validating config files"""
2
+
3
  import logging
4
 
5
 
 
40
  )
41
 
42
  if cfg.push_dataset_to_hub and cfg.hf_use_auth_token is not True:
43
+ raise ValueError(
44
+ "Require cfg.hf_use_auth_token to be True for push_dataset_to_hub"
45
+ )
46
 
47
  # TODO
48
  # MPT 7b
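A minimal usage sketch of the reformatted guard, assuming the import paths shown in this PR (the dataset name is hypothetical):

from axolotl.utils.dict import DictDefault
from axolotl.utils.validation import validate_config

cfg = DictDefault({"push_dataset_to_hub": "my-org/my-dataset"})
# hf_use_auth_token is unset, so it resolves to None and the guard raises:
# ValueError: Require cfg.hf_use_auth_token to be True for push_dataset_to_hub
validate_config(cfg)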
src/axolotl/utils/wandb.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  import os
2
 
3
 
 
1
+ """Module for wandb utilities"""
2
+
3
  import os
4
 
5
 
tests/fixtures/conversation.tokenized.json CHANGED
@@ -1 +1 @@
1
- {"input_ids": [1, 319, 13563, 1546, 263, 12758, 1404, 322, 385, 23116, 21082, 20255, 29889, 450, 20255, 4076, 8444, 29892, 13173, 29892, 322, 1248, 568, 6089, 304, 278, 1404, 29915, 29879, 5155, 29889, 3148, 1001, 29901, 920, 1033, 474, 2334, 263, 29086, 705, 11356, 5687, 393, 3667, 4637, 21531, 20159, 304, 4505, 1045, 3163, 29973, 319, 1799, 9047, 13566, 29901, 739, 29915, 29879, 1950, 304, 671, 21531, 20159, 304, 4505, 341, 1475, 705, 11356, 29892, 541, 372, 723, 5517, 367, 3755, 4280, 29889, 838, 5869, 293, 20159, 338, 263, 5443, 310, 23964, 393, 11898, 278, 4426, 310, 8162, 393, 526, 21634, 1090, 9126, 316, 689, 800, 29892, 1316, 408, 16116, 292, 322, 289, 2548, 29889, 512, 278, 3030, 310, 341, 1475, 705, 11356, 29892, 445, 1033, 367, 1304, 304, 27599, 278, 3829, 310, 278, 3748, 7613, 322, 8161, 278, 14354, 310, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 2398, 29892, 16049, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29889, 739, 29915, 29879, 451, 1554, 393, 508, 367, 10824, 297, 263, 2560, 1234, 29889, 2, 3148, 1001, 29901, 1033, 366, 2367, 592, 278, 330, 391, 310, 920, 372, 1033, 2466, 29973, 319, 1799, 9047, 13566, 29901, 18585, 29889, 450, 6996, 2969, 723, 367, 304, 671, 21531, 20159, 304, 27599, 278, 3829, 310, 278, 341, 1475, 705, 11356, 3748, 7613, 322, 8161, 278, 14354, 310, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 910, 1033, 25135, 15783, 278, 3748, 7613, 408, 263, 25002, 2913, 322, 773, 25002, 15939, 1934, 1316, 408, 3632, 327, 2270, 6471, 322, 350, 9890, 3694, 304, 10115, 2472, 1048, 278, 4423, 310, 29086, 373, 278, 7613, 29889, 2398, 29892, 408, 306, 5276, 1434, 29892, 16049, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29892, 577, 372, 29915, 29879, 451, 1554, 393, 508, 367, 10824, 297, 263, 2560, 1234, 29889, 2, 3148, 1001, 29901, 2367, 592, 263, 2702, 1342, 319, 1799, 9047, 13566, 29901, 1094, 263, 2702, 1342, 29892, 7755, 591, 505, 263, 341, 1475, 705, 11356, 3748, 7613, 411, 278, 1494, 5285, 29901, 13, 28956, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 28956, 13, 797, 445, 1206, 29892, 278, 3748, 7613, 508, 367, 9875, 408, 263, 25002, 2913, 29892, 988, 1269, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29889, 450, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 5293, 21531, 20159, 29892, 591, 508, 27599, 278, 3829, 310, 445, 2913, 322, 8161, 278, 14354, 310, 278, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 13, 13, 2831, 2777, 29892, 697, 982, 304, 437, 445, 338, 304, 10272, 278, 3632, 327, 2270, 6471, 310, 278, 2913, 29889, 15089, 327, 2270, 6471, 526, 263, 982, 310, 20766, 278, 26532, 297, 263, 2913, 29892, 322, 896, 508, 367, 1304, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 341, 1475, 705, 11356, 7613, 29889, 512, 278, 1206, 310, 278, 7613, 2038, 29892, 591, 508, 10272, 278, 937, 3632, 327, 2270, 2318, 310, 278, 2913, 29892, 607, 16612, 278, 1353, 322, 376, 12181, 29908, 310, 278, 26532, 297, 278, 2913, 29889, 512, 445, 1206, 29892, 278, 937, 3632, 327, 2270, 2318, 723, 2649, 502, 393, 727, 526, 
3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29889, 13, 13, 29933, 1463, 373, 445, 2472, 29892, 591, 508, 10115, 393, 278, 29086, 297, 278, 3748, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29892, 408, 4318, 2400, 29901, 13, 28956, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 29871, 29896, 29871, 341, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 341, 259, 29896, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 28956, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1342, 310, 920, 21531, 20159, 1033, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 526, 1784, 916, 1950, 13501, 29892, 322, 278, 2702, 4902, 310, 920, 304, 2334, 963, 723, 8839, 373, 278, 2702, 3748, 7613, 322, 278, 25002, 15939, 1934, 1641, 1304, 29889, 1094, 306, 5276, 1434, 29892, 16049, 263, 1650, 763, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29889, 2, 3148, 1001, 29901, 10241, 474, 471, 4856, 411, 263, 6483, 8004, 310, 1716, 29892, 1033, 366, 5649, 278, 1021, 6964, 304, 592, 411, 393, 11833, 29973, 319, 1799, 9047, 13566, 29901, 18585, 29889, 17090, 366, 505, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29892, 1244, 338, 920, 366, 1795, 671, 21531, 20159, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29901, 13, 13, 29896, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 7806, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 13, 29906, 29889, 4803, 25002, 15939, 1934, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1670, 526, 1784, 1950, 25002, 15939, 1934, 393, 1033, 367, 1304, 363, 445, 29892, 1316, 408, 3632, 327, 2270, 6471, 29892, 350, 9890, 3694, 29892, 470, 28152, 3632, 3002, 29889, 13, 29941, 29889, 4803, 278, 2472, 10115, 1127, 515, 278, 25002, 15939, 1934, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1152, 1342, 29892, 565, 278, 937, 3632, 327, 2270, 2318, 310, 278, 2913, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 591, 508, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29946, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 508, 671, 445, 2472, 304, 4772, 963, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 773, 21531, 20159, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 3748, 7613, 322, 278, 25002, 15939, 1934, 1641, 1304, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 21531, 20159, 508, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 2, 3148, 1001, 29901, 920, 723, 474, 437, 372, 411, 28152, 3632, 3002, 470, 738, 310, 278, 916, 25002, 15939, 1934, 29973, 319, 1799, 9047, 13566, 29901, 1763, 671, 28152, 3632, 3002, 470, 738, 310, 278, 916, 25002, 15939, 1934, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29892, 366, 723, 1101, 278, 1021, 6996, 6576, 408, 5439, 2038, 29892, 541, 411, 777, 12651, 297, 278, 2702, 4902, 29889, 13, 13, 2831, 2777, 
29892, 1244, 338, 920, 366, 1795, 671, 28152, 3632, 3002, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29901, 13, 13, 29896, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 7806, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 13, 29906, 29889, 11796, 29872, 278, 28152, 3632, 3002, 310, 278, 2913, 29889, 9034, 9696, 3632, 3002, 338, 263, 982, 310, 20766, 278, 26532, 297, 263, 2913, 975, 931, 29892, 322, 372, 508, 367, 1304, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 341, 1475, 705, 11356, 7613, 29889, 13, 29941, 29889, 4803, 278, 2472, 515, 278, 28152, 3632, 3002, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1152, 1342, 29892, 565, 278, 28152, 3632, 3002, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 591, 508, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29946, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 508, 671, 445, 2472, 304, 4772, 963, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 773, 28152, 3632, 3002, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 3748, 7613, 322, 278, 2702, 4902, 310, 278, 28152, 3632, 3002, 16287, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 28152, 3632, 3002, 508, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 2, 3148, 1001, 29901, 920, 1033, 474, 2334, 445, 297, 3017, 29973, 319, 1799, 9047, 13566, 29901, 1763, 2334, 263, 341, 1475, 705, 11356, 899, 369, 393, 3913, 21531, 20159, 297, 5132, 29892, 366, 723, 817, 304, 437, 278, 1494, 29901, 13, 13, 29896, 29889, 16052, 263, 5132, 3577, 363, 21531, 20159, 29889, 1670, 526, 3196, 3987, 3625, 29892, 1316, 408, 402, 566, 2918, 29892, 360, 291, 952, 375, 29892, 470, 4560, 7354, 29899, 29873, 1388, 29889, 4525, 9741, 3867, 14009, 322, 848, 12286, 363, 20602, 25002, 15939, 1934, 29892, 1316, 408, 3632, 327, 2270, 6471, 29892, 350, 9890, 3694, 29892, 470, 28152, 3632, 3002, 29889, 13, 29906, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 910, 1033, 367, 2309, 773, 263, 848, 3829, 4944, 491, 278, 21531, 20159, 3577, 29892, 1316, 408, 263, 3053, 506, 616, 4280, 470, 263, 478, 2035, 29367, 29899, 29934, 4512, 4280, 29889, 7806, 6862, 373, 278, 7613, 723, 3928, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 723, 367, 9875, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29889, 13, 29941, 29889, 11796, 29872, 278, 25002, 15939, 1934, 310, 278, 2913, 29889, 910, 1033, 367, 2309, 773, 14009, 4944, 491, 278, 21531, 20159, 3577, 29889, 1152, 1342, 29892, 366, 1033, 10272, 278, 28152, 3632, 3002, 310, 278, 2913, 773, 278, 28152, 3632, 3002, 5687, 29889, 13, 29946, 29889, 4803, 278, 2472, 515, 278, 25002, 15939, 1934, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 910, 1033, 367, 2309, 773, 263, 10296, 310, 19475, 24481, 322, 8720, 29889, 1152, 1342, 29892, 565, 278, 28152, 3632, 3002, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 366, 1033, 671, 263, 8424, 310, 775, 304, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 
310, 278, 7613, 29889, 13, 29945, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 1033, 671, 263, 8424, 310, 775, 304, 6336, 2828, 373, 278, 9109, 25256, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 16049, 263, 341, 1475, 705, 11356, 899, 369, 393, 3913, 21531, 20159, 297, 5132, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 4902, 310, 278, 5314, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 304, 679, 4687, 411, 1316, 263, 2060, 29889, 2], "attention_mask": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "labels": [-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 739, 29915, 29879, 1950, 304, 671, 21531, 20159, 304, 4505, 341, 1475, 705, 11356, 29892, 541, 372, 723, 5517, 367, 3755, 4280, 29889, 838, 5869, 293, 20159, 338, 263, 5443, 310, 23964, 393, 11898, 278, 4426, 310, 8162, 393, 526, 21634, 1090, 
9126, 316, 689, 800, 29892, 1316, 408, 16116, 292, 322, 289, 2548, 29889, 512, 278, 3030, 310, 341, 1475, 705, 11356, 29892, 445, 1033, 367, 1304, 304, 27599, 278, 3829, 310, 278, 3748, 7613, 322, 8161, 278, 14354, 310, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 2398, 29892, 16049, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29889, 739, 29915, 29879, 451, 1554, 393, 508, 367, 10824, 297, 263, 2560, 1234, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 18585, 29889, 450, 6996, 2969, 723, 367, 304, 671, 21531, 20159, 304, 27599, 278, 3829, 310, 278, 341, 1475, 705, 11356, 3748, 7613, 322, 8161, 278, 14354, 310, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 910, 1033, 25135, 15783, 278, 3748, 7613, 408, 263, 25002, 2913, 322, 773, 25002, 15939, 1934, 1316, 408, 3632, 327, 2270, 6471, 322, 350, 9890, 3694, 304, 10115, 2472, 1048, 278, 4423, 310, 29086, 373, 278, 7613, 29889, 2398, 29892, 408, 306, 5276, 1434, 29892, 16049, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29892, 577, 372, 29915, 29879, 451, 1554, 393, 508, 367, 10824, 297, 263, 2560, 1234, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 1094, 263, 2702, 1342, 29892, 7755, 591, 505, 263, 341, 1475, 705, 11356, 3748, 7613, 411, 278, 1494, 5285, 29901, 13, 28956, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 28956, 13, 797, 445, 1206, 29892, 278, 3748, 7613, 508, 367, 9875, 408, 263, 25002, 2913, 29892, 988, 1269, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29889, 450, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 5293, 21531, 20159, 29892, 591, 508, 27599, 278, 3829, 310, 445, 2913, 322, 8161, 278, 14354, 310, 278, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 13, 13, 2831, 2777, 29892, 697, 982, 304, 437, 445, 338, 304, 10272, 278, 3632, 327, 2270, 6471, 310, 278, 2913, 29889, 15089, 327, 2270, 6471, 526, 263, 982, 310, 20766, 278, 26532, 297, 263, 2913, 29892, 322, 896, 508, 367, 1304, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 341, 1475, 705, 11356, 7613, 29889, 512, 278, 1206, 310, 278, 7613, 2038, 29892, 591, 508, 10272, 278, 937, 3632, 327, 2270, 2318, 310, 278, 2913, 29892, 607, 16612, 278, 1353, 322, 376, 12181, 29908, 310, 278, 26532, 297, 278, 2913, 29889, 512, 445, 1206, 29892, 278, 937, 3632, 327, 2270, 2318, 723, 2649, 502, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29889, 13, 13, 29933, 1463, 373, 445, 2472, 29892, 591, 508, 10115, 393, 278, 29086, 297, 278, 3748, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29892, 408, 4318, 2400, 29901, 13, 28956, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 29871, 29896, 29871, 341, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 341, 259, 29896, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 
28956, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1342, 310, 920, 21531, 20159, 1033, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 526, 1784, 916, 1950, 13501, 29892, 322, 278, 2702, 4902, 310, 920, 304, 2334, 963, 723, 8839, 373, 278, 2702, 3748, 7613, 322, 278, 25002, 15939, 1934, 1641, 1304, 29889, 1094, 306, 5276, 1434, 29892, 16049, 263, 1650, 763, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 18585, 29889, 17090, 366, 505, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29892, 1244, 338, 920, 366, 1795, 671, 21531, 20159, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29901, 13, 13, 29896, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 7806, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 13, 29906, 29889, 4803, 25002, 15939, 1934, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1670, 526, 1784, 1950, 25002, 15939, 1934, 393, 1033, 367, 1304, 363, 445, 29892, 1316, 408, 3632, 327, 2270, 6471, 29892, 350, 9890, 3694, 29892, 470, 28152, 3632, 3002, 29889, 13, 29941, 29889, 4803, 278, 2472, 10115, 1127, 515, 278, 25002, 15939, 1934, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1152, 1342, 29892, 565, 278, 937, 3632, 327, 2270, 2318, 310, 278, 2913, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 591, 508, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29946, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 508, 671, 445, 2472, 304, 4772, 963, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 773, 21531, 20159, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 3748, 7613, 322, 278, 25002, 15939, 1934, 1641, 1304, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 21531, 20159, 508, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 1763, 671, 28152, 3632, 3002, 470, 738, 310, 278, 916, 25002, 15939, 1934, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29892, 366, 723, 1101, 278, 1021, 6996, 6576, 408, 5439, 2038, 29892, 541, 411, 777, 12651, 297, 278, 2702, 4902, 29889, 13, 13, 2831, 2777, 29892, 1244, 338, 920, 366, 1795, 671, 28152, 3632, 3002, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29901, 13, 13, 29896, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 7806, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 13, 29906, 29889, 11796, 29872, 278, 28152, 3632, 3002, 310, 278, 2913, 29889, 9034, 9696, 3632, 3002, 338, 263, 982, 310, 20766, 278, 
26532, 297, 263, 2913, 975, 931, 29892, 322, 372, 508, 367, 1304, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 341, 1475, 705, 11356, 7613, 29889, 13, 29941, 29889, 4803, 278, 2472, 515, 278, 28152, 3632, 3002, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1152, 1342, 29892, 565, 278, 28152, 3632, 3002, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 591, 508, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29946, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 508, 671, 445, 2472, 304, 4772, 963, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 773, 28152, 3632, 3002, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 3748, 7613, 322, 278, 2702, 4902, 310, 278, 28152, 3632, 3002, 16287, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 28152, 3632, 3002, 508, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 1763, 2334, 263, 341, 1475, 705, 11356, 899, 369, 393, 3913, 21531, 20159, 297, 5132, 29892, 366, 723, 817, 304, 437, 278, 1494, 29901, 13, 13, 29896, 29889, 16052, 263, 5132, 3577, 363, 21531, 20159, 29889, 1670, 526, 3196, 3987, 3625, 29892, 1316, 408, 402, 566, 2918, 29892, 360, 291, 952, 375, 29892, 470, 4560, 7354, 29899, 29873, 1388, 29889, 4525, 9741, 3867, 14009, 322, 848, 12286, 363, 20602, 25002, 15939, 1934, 29892, 1316, 408, 3632, 327, 2270, 6471, 29892, 350, 9890, 3694, 29892, 470, 28152, 3632, 3002, 29889, 13, 29906, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 910, 1033, 367, 2309, 773, 263, 848, 3829, 4944, 491, 278, 21531, 20159, 3577, 29892, 1316, 408, 263, 3053, 506, 616, 4280, 470, 263, 478, 2035, 29367, 29899, 29934, 4512, 4280, 29889, 7806, 6862, 373, 278, 7613, 723, 3928, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 723, 367, 9875, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29889, 13, 29941, 29889, 11796, 29872, 278, 25002, 15939, 1934, 310, 278, 2913, 29889, 910, 1033, 367, 2309, 773, 14009, 4944, 491, 278, 21531, 20159, 3577, 29889, 1152, 1342, 29892, 366, 1033, 10272, 278, 28152, 3632, 3002, 310, 278, 2913, 773, 278, 28152, 3632, 3002, 5687, 29889, 13, 29946, 29889, 4803, 278, 2472, 515, 278, 25002, 15939, 1934, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 910, 1033, 367, 2309, 773, 263, 10296, 310, 19475, 24481, 322, 8720, 29889, 1152, 1342, 29892, 565, 278, 28152, 3632, 3002, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 366, 1033, 671, 263, 8424, 310, 775, 304, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29945, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 1033, 671, 263, 8424, 310, 775, 304, 6336, 2828, 373, 278, 9109, 25256, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 16049, 263, 341, 1475, 705, 11356, 899, 369, 393, 3913, 21531, 20159, 297, 5132, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 4902, 310, 278, 5314, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 304, 
679, 4687, 411, 1316, 263, 2060, 29889, 2]}
 
1
+ {"input_ids": [1, 319, 13563, 1546, 263, 12758, 1404, 322, 385, 23116, 21082, 20255, 29889, 450, 20255, 4076, 8444, 29892, 13173, 29892, 322, 1248, 568, 6089, 304, 278, 1404, 29915, 29879, 5155, 29889, 3148, 1001, 29901, 920, 1033, 474, 2334, 263, 29086, 705, 11356, 5687, 393, 3667, 4637, 21531, 20159, 304, 4505, 1045, 3163, 29973, 319, 1799, 9047, 13566, 29901, 739, 29915, 29879, 1950, 304, 671, 21531, 20159, 304, 4505, 341, 1475, 705, 11356, 29892, 541, 372, 723, 5517, 367, 3755, 4280, 29889, 838, 5869, 293, 20159, 338, 263, 5443, 310, 23964, 393, 11898, 278, 4426, 310, 8162, 393, 526, 21634, 1090, 9126, 316, 689, 800, 29892, 1316, 408, 16116, 292, 322, 289, 2548, 29889, 512, 278, 3030, 310, 341, 1475, 705, 11356, 29892, 445, 1033, 367, 1304, 304, 27599, 278, 3829, 310, 278, 3748, 7613, 322, 8161, 278, 14354, 310, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 2398, 29892, 16049, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29889, 739, 29915, 29879, 451, 1554, 393, 508, 367, 10824, 297, 263, 2560, 1234, 29889, 2, 3148, 1001, 29901, 1033, 366, 2367, 592, 278, 330, 391, 310, 920, 372, 1033, 2466, 29973, 319, 1799, 9047, 13566, 29901, 18585, 29889, 450, 6996, 2969, 723, 367, 304, 671, 21531, 20159, 304, 27599, 278, 3829, 310, 278, 341, 1475, 705, 11356, 3748, 7613, 322, 8161, 278, 14354, 310, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 910, 1033, 25135, 15783, 278, 3748, 7613, 408, 263, 25002, 2913, 322, 773, 25002, 15939, 1934, 1316, 408, 3632, 327, 2270, 6471, 322, 350, 9890, 3694, 304, 10115, 2472, 1048, 278, 4423, 310, 29086, 373, 278, 7613, 29889, 2398, 29892, 408, 306, 5276, 1434, 29892, 16049, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29892, 577, 372, 29915, 29879, 451, 1554, 393, 508, 367, 10824, 297, 263, 2560, 1234, 29889, 2, 3148, 1001, 29901, 2367, 592, 263, 2702, 1342, 319, 1799, 9047, 13566, 29901, 1094, 263, 2702, 1342, 29892, 7755, 591, 505, 263, 341, 1475, 705, 11356, 3748, 7613, 411, 278, 1494, 5285, 29901, 13, 28956, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 28956, 13, 797, 445, 1206, 29892, 278, 3748, 7613, 508, 367, 9875, 408, 263, 25002, 2913, 29892, 988, 1269, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29889, 450, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 5293, 21531, 20159, 29892, 591, 508, 27599, 278, 3829, 310, 445, 2913, 322, 8161, 278, 14354, 310, 278, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 13, 13, 2831, 2777, 29892, 697, 982, 304, 437, 445, 338, 304, 10272, 278, 3632, 327, 2270, 6471, 310, 278, 2913, 29889, 15089, 327, 2270, 6471, 526, 263, 982, 310, 20766, 278, 26532, 297, 263, 2913, 29892, 322, 896, 508, 367, 1304, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 341, 1475, 705, 11356, 7613, 29889, 512, 278, 1206, 310, 278, 7613, 2038, 29892, 591, 508, 10272, 278, 937, 3632, 327, 2270, 2318, 310, 278, 2913, 29892, 607, 16612, 278, 1353, 322, 376, 12181, 29908, 310, 278, 26532, 297, 278, 2913, 29889, 512, 445, 1206, 29892, 278, 937, 3632, 327, 2270, 2318, 723, 2649, 502, 393, 727, 526, 
3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29889, 13, 13, 29933, 1463, 373, 445, 2472, 29892, 591, 508, 10115, 393, 278, 29086, 297, 278, 3748, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29892, 408, 4318, 2400, 29901, 13, 28956, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 29871, 29896, 29871, 341, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 341, 259, 29896, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 28956, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1342, 310, 920, 21531, 20159, 1033, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 526, 1784, 916, 1950, 13501, 29892, 322, 278, 2702, 4902, 310, 920, 304, 2334, 963, 723, 8839, 373, 278, 2702, 3748, 7613, 322, 278, 25002, 15939, 1934, 1641, 1304, 29889, 1094, 306, 5276, 1434, 29892, 16049, 263, 1650, 763, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29889, 2, 3148, 1001, 29901, 10241, 474, 471, 4856, 411, 263, 6483, 8004, 310, 1716, 29892, 1033, 366, 5649, 278, 1021, 6964, 304, 592, 411, 393, 11833, 29973, 319, 1799, 9047, 13566, 29901, 18585, 29889, 17090, 366, 505, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29892, 1244, 338, 920, 366, 1795, 671, 21531, 20159, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29901, 13, 13, 29896, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 7806, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 13, 29906, 29889, 4803, 25002, 15939, 1934, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1670, 526, 1784, 1950, 25002, 15939, 1934, 393, 1033, 367, 1304, 363, 445, 29892, 1316, 408, 3632, 327, 2270, 6471, 29892, 350, 9890, 3694, 29892, 470, 28152, 3632, 3002, 29889, 13, 29941, 29889, 4803, 278, 2472, 10115, 1127, 515, 278, 25002, 15939, 1934, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1152, 1342, 29892, 565, 278, 937, 3632, 327, 2270, 2318, 310, 278, 2913, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 591, 508, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29946, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 508, 671, 445, 2472, 304, 4772, 963, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 773, 21531, 20159, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 3748, 7613, 322, 278, 25002, 15939, 1934, 1641, 1304, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 21531, 20159, 508, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 2, 3148, 1001, 29901, 920, 723, 474, 437, 372, 411, 28152, 3632, 3002, 470, 738, 310, 278, 916, 25002, 15939, 1934, 29973, 319, 1799, 9047, 13566, 29901, 1763, 671, 28152, 3632, 3002, 470, 738, 310, 278, 916, 25002, 15939, 1934, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29892, 366, 723, 1101, 278, 1021, 6996, 6576, 408, 5439, 2038, 29892, 541, 411, 777, 12651, 297, 278, 2702, 4902, 29889, 13, 13, 2831, 2777, 
29892, 1244, 338, 920, 366, 1795, 671, 28152, 3632, 3002, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29901, 13, 13, 29896, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 7806, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 13, 29906, 29889, 11796, 29872, 278, 28152, 3632, 3002, 310, 278, 2913, 29889, 9034, 9696, 3632, 3002, 338, 263, 982, 310, 20766, 278, 26532, 297, 263, 2913, 975, 931, 29892, 322, 372, 508, 367, 1304, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 341, 1475, 705, 11356, 7613, 29889, 13, 29941, 29889, 4803, 278, 2472, 515, 278, 28152, 3632, 3002, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1152, 1342, 29892, 565, 278, 28152, 3632, 3002, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 591, 508, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29946, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 508, 671, 445, 2472, 304, 4772, 963, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 773, 28152, 3632, 3002, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 3748, 7613, 322, 278, 2702, 4902, 310, 278, 28152, 3632, 3002, 16287, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 28152, 3632, 3002, 508, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 2, 3148, 1001, 29901, 920, 1033, 474, 2334, 445, 297, 3017, 29973, 319, 1799, 9047, 13566, 29901, 1763, 2334, 263, 341, 1475, 705, 11356, 899, 369, 393, 3913, 21531, 20159, 297, 5132, 29892, 366, 723, 817, 304, 437, 278, 1494, 29901, 13, 13, 29896, 29889, 16052, 263, 5132, 3577, 363, 21531, 20159, 29889, 1670, 526, 3196, 3987, 3625, 29892, 1316, 408, 402, 566, 2918, 29892, 360, 291, 952, 375, 29892, 470, 4560, 7354, 29899, 29873, 1388, 29889, 4525, 9741, 3867, 14009, 322, 848, 12286, 363, 20602, 25002, 15939, 1934, 29892, 1316, 408, 3632, 327, 2270, 6471, 29892, 350, 9890, 3694, 29892, 470, 28152, 3632, 3002, 29889, 13, 29906, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 910, 1033, 367, 2309, 773, 263, 848, 3829, 4944, 491, 278, 21531, 20159, 3577, 29892, 1316, 408, 263, 3053, 506, 616, 4280, 470, 263, 478, 2035, 29367, 29899, 29934, 4512, 4280, 29889, 7806, 6862, 373, 278, 7613, 723, 3928, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 723, 367, 9875, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29889, 13, 29941, 29889, 11796, 29872, 278, 25002, 15939, 1934, 310, 278, 2913, 29889, 910, 1033, 367, 2309, 773, 14009, 4944, 491, 278, 21531, 20159, 3577, 29889, 1152, 1342, 29892, 366, 1033, 10272, 278, 28152, 3632, 3002, 310, 278, 2913, 773, 278, 28152, 3632, 3002, 5687, 29889, 13, 29946, 29889, 4803, 278, 2472, 515, 278, 25002, 15939, 1934, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 910, 1033, 367, 2309, 773, 263, 10296, 310, 19475, 24481, 322, 8720, 29889, 1152, 1342, 29892, 565, 278, 28152, 3632, 3002, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 366, 1033, 671, 263, 8424, 310, 775, 304, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 
310, 278, 7613, 29889, 13, 29945, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 1033, 671, 263, 8424, 310, 775, 304, 6336, 2828, 373, 278, 9109, 25256, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 16049, 263, 341, 1475, 705, 11356, 899, 369, 393, 3913, 21531, 20159, 297, 5132, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 4902, 310, 278, 5314, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 304, 679, 4687, 411, 1316, 263, 2060, 29889, 2], "attention_mask": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "labels": [-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 739, 29915, 29879, 1950, 304, 671, 21531, 20159, 304, 4505, 341, 1475, 705, 11356, 29892, 541, 372, 723, 5517, 367, 3755, 4280, 29889, 838, 5869, 293, 20159, 338, 263, 5443, 310, 23964, 393, 11898, 278, 4426, 310, 8162, 393, 526, 21634, 1090, 
9126, 316, 689, 800, 29892, 1316, 408, 16116, 292, 322, 289, 2548, 29889, 512, 278, 3030, 310, 341, 1475, 705, 11356, 29892, 445, 1033, 367, 1304, 304, 27599, 278, 3829, 310, 278, 3748, 7613, 322, 8161, 278, 14354, 310, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 2398, 29892, 16049, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29889, 739, 29915, 29879, 451, 1554, 393, 508, 367, 10824, 297, 263, 2560, 1234, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 18585, 29889, 450, 6996, 2969, 723, 367, 304, 671, 21531, 20159, 304, 27599, 278, 3829, 310, 278, 341, 1475, 705, 11356, 3748, 7613, 322, 8161, 278, 14354, 310, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 910, 1033, 25135, 15783, 278, 3748, 7613, 408, 263, 25002, 2913, 322, 773, 25002, 15939, 1934, 1316, 408, 3632, 327, 2270, 6471, 322, 350, 9890, 3694, 304, 10115, 2472, 1048, 278, 4423, 310, 29086, 373, 278, 7613, 29889, 2398, 29892, 408, 306, 5276, 1434, 29892, 16049, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29892, 577, 372, 29915, 29879, 451, 1554, 393, 508, 367, 10824, 297, 263, 2560, 1234, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 1094, 263, 2702, 1342, 29892, 7755, 591, 505, 263, 341, 1475, 705, 11356, 3748, 7613, 411, 278, 1494, 5285, 29901, 13, 28956, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 28956, 13, 797, 445, 1206, 29892, 278, 3748, 7613, 508, 367, 9875, 408, 263, 25002, 2913, 29892, 988, 1269, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29889, 450, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 5293, 21531, 20159, 29892, 591, 508, 27599, 278, 3829, 310, 445, 2913, 322, 8161, 278, 14354, 310, 278, 29086, 2729, 373, 3058, 25002, 15939, 1934, 29889, 13, 13, 2831, 2777, 29892, 697, 982, 304, 437, 445, 338, 304, 10272, 278, 3632, 327, 2270, 6471, 310, 278, 2913, 29889, 15089, 327, 2270, 6471, 526, 263, 982, 310, 20766, 278, 26532, 297, 263, 2913, 29892, 322, 896, 508, 367, 1304, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 341, 1475, 705, 11356, 7613, 29889, 512, 278, 1206, 310, 278, 7613, 2038, 29892, 591, 508, 10272, 278, 937, 3632, 327, 2270, 2318, 310, 278, 2913, 29892, 607, 16612, 278, 1353, 322, 376, 12181, 29908, 310, 278, 26532, 297, 278, 2913, 29889, 512, 445, 1206, 29892, 278, 937, 3632, 327, 2270, 2318, 723, 2649, 502, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29889, 13, 13, 29933, 1463, 373, 445, 2472, 29892, 591, 508, 10115, 393, 278, 29086, 297, 278, 3748, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29892, 408, 4318, 2400, 29901, 13, 28956, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 29871, 29896, 29871, 341, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 1577, 259, 29896, 13, 29871, 29896, 29871, 1577, 29871, 1577, 29871, 341, 259, 29896, 13, 29871, 29896, 259, 29896, 259, 29896, 259, 29896, 259, 29896, 13, 
28956, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1342, 310, 920, 21531, 20159, 1033, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 526, 1784, 916, 1950, 13501, 29892, 322, 278, 2702, 4902, 310, 920, 304, 2334, 963, 723, 8839, 373, 278, 2702, 3748, 7613, 322, 278, 25002, 15939, 1934, 1641, 1304, 29889, 1094, 306, 5276, 1434, 29892, 16049, 263, 1650, 763, 445, 723, 1996, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 18585, 29889, 17090, 366, 505, 263, 6483, 8004, 310, 1716, 21531, 20159, 322, 278, 3748, 310, 341, 1475, 705, 11356, 29892, 1244, 338, 920, 366, 1795, 671, 21531, 20159, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29901, 13, 13, 29896, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 7806, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 13, 29906, 29889, 4803, 25002, 15939, 1934, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1670, 526, 1784, 1950, 25002, 15939, 1934, 393, 1033, 367, 1304, 363, 445, 29892, 1316, 408, 3632, 327, 2270, 6471, 29892, 350, 9890, 3694, 29892, 470, 28152, 3632, 3002, 29889, 13, 29941, 29889, 4803, 278, 2472, 10115, 1127, 515, 278, 25002, 15939, 1934, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1152, 1342, 29892, 565, 278, 937, 3632, 327, 2270, 2318, 310, 278, 2913, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 591, 508, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29946, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 508, 671, 445, 2472, 304, 4772, 963, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 773, 21531, 20159, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 3748, 7613, 322, 278, 25002, 15939, 1934, 1641, 1304, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 21531, 20159, 508, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 1763, 671, 28152, 3632, 3002, 470, 738, 310, 278, 916, 25002, 15939, 1934, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29892, 366, 723, 1101, 278, 1021, 6996, 6576, 408, 5439, 2038, 29892, 541, 411, 777, 12651, 297, 278, 2702, 4902, 29889, 13, 13, 2831, 2777, 29892, 1244, 338, 920, 366, 1795, 671, 28152, 3632, 3002, 304, 4505, 263, 341, 1475, 705, 11356, 3748, 7613, 29901, 13, 13, 29896, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 7806, 6862, 373, 278, 7613, 16161, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 508, 367, 2714, 310, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29892, 607, 526, 3291, 393, 526, 451, 760, 310, 278, 2913, 29889, 13, 29906, 29889, 11796, 29872, 278, 28152, 3632, 3002, 310, 278, 2913, 29889, 9034, 9696, 3632, 3002, 338, 263, 982, 310, 20766, 278, 
26532, 297, 263, 2913, 975, 931, 29892, 322, 372, 508, 367, 1304, 304, 10115, 2472, 1048, 278, 14354, 310, 278, 29086, 373, 278, 341, 1475, 705, 11356, 7613, 29889, 13, 29941, 29889, 4803, 278, 2472, 515, 278, 28152, 3632, 3002, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 1152, 1342, 29892, 565, 278, 28152, 3632, 3002, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 591, 508, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29946, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 508, 671, 445, 2472, 304, 4772, 963, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 773, 28152, 3632, 3002, 304, 4505, 341, 1475, 705, 11356, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 3748, 7613, 322, 278, 2702, 4902, 310, 278, 28152, 3632, 3002, 16287, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 28152, 3632, 3002, 508, 367, 1304, 304, 4505, 341, 1475, 705, 11356, 29889, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 319, 1799, 9047, 13566, 29901, 1763, 2334, 263, 341, 1475, 705, 11356, 899, 369, 393, 3913, 21531, 20159, 297, 5132, 29892, 366, 723, 817, 304, 437, 278, 1494, 29901, 13, 13, 29896, 29889, 16052, 263, 5132, 3577, 363, 21531, 20159, 29889, 1670, 526, 3196, 3987, 3625, 29892, 1316, 408, 402, 566, 2918, 29892, 360, 291, 952, 375, 29892, 470, 4560, 7354, 29899, 29873, 1388, 29889, 4525, 9741, 3867, 14009, 322, 848, 12286, 363, 20602, 25002, 15939, 1934, 29892, 1316, 408, 3632, 327, 2270, 6471, 29892, 350, 9890, 3694, 29892, 470, 28152, 3632, 3002, 29889, 13, 29906, 29889, 16314, 278, 341, 1475, 705, 11356, 3748, 7613, 408, 263, 25002, 2913, 29889, 910, 1033, 367, 2309, 773, 263, 848, 3829, 4944, 491, 278, 21531, 20159, 3577, 29892, 1316, 408, 263, 3053, 506, 616, 4280, 470, 263, 478, 2035, 29367, 29899, 29934, 4512, 4280, 29889, 7806, 6862, 373, 278, 7613, 723, 3928, 304, 263, 1298, 297, 278, 2913, 29892, 322, 278, 29086, 373, 278, 7613, 723, 367, 9875, 408, 376, 5391, 267, 29908, 297, 278, 2913, 29889, 13, 29941, 29889, 11796, 29872, 278, 25002, 15939, 1934, 310, 278, 2913, 29889, 910, 1033, 367, 2309, 773, 14009, 4944, 491, 278, 21531, 20159, 3577, 29889, 1152, 1342, 29892, 366, 1033, 10272, 278, 28152, 3632, 3002, 310, 278, 2913, 773, 278, 28152, 3632, 3002, 5687, 29889, 13, 29946, 29889, 4803, 278, 2472, 515, 278, 25002, 15939, 1934, 304, 8161, 278, 14354, 310, 278, 29086, 373, 278, 7613, 29889, 910, 1033, 367, 2309, 773, 263, 10296, 310, 19475, 24481, 322, 8720, 29889, 1152, 1342, 29892, 565, 278, 28152, 3632, 3002, 14088, 393, 727, 526, 3023, 26532, 297, 278, 2913, 29892, 21050, 297, 263, 6862, 8267, 29892, 769, 366, 1033, 671, 263, 8424, 310, 775, 304, 10115, 393, 278, 29086, 526, 5982, 297, 278, 3023, 26995, 310, 278, 7613, 29889, 13, 29945, 29889, 4803, 445, 2472, 304, 4505, 278, 341, 1475, 705, 11356, 3748, 29889, 9038, 366, 1073, 278, 14354, 310, 278, 29086, 29892, 366, 1033, 671, 263, 8424, 310, 775, 304, 6336, 2828, 373, 278, 9109, 25256, 322, 2821, 278, 7613, 29889, 13, 13, 2776, 3236, 29892, 445, 338, 925, 697, 1950, 2948, 304, 16049, 263, 341, 1475, 705, 11356, 899, 369, 393, 3913, 21531, 20159, 297, 5132, 29889, 1670, 1122, 367, 916, 5837, 304, 437, 372, 29892, 8679, 373, 278, 2702, 4902, 310, 278, 5314, 29889, 2398, 29892, 445, 881, 2367, 366, 263, 2498, 2969, 310, 920, 304, 
679, 4687, 411, 1316, 263, 2060, 29889, 2]}
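The long runs of -100 in the tokenized fixture above follow the Hugging Face convention for masked labels: -100 is the default ignore_index of torch.nn.CrossEntropyLoss, so those positions (the prompt tokens) contribute nothing to the loss and only the assistant reply is trained on. A minimal sketch of how such a label row is typically derived, using an illustrative slice of the token ids above and an assumed prompt boundary:

input_ids = [319, 1799, 9047, 13566, 29901, 1763, 2334, 263]  # illustrative slice of token ids
prompt_len = 5  # assumed boundary between prompt and response
# Positions labeled -100 are ignored by the loss; the remaining ids are learned.
labels = [-100] * prompt_len + input_ids[prompt_len:]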
tests/test_dict.py CHANGED
@@ -1,3 +1,6 @@
 
 
 
1
  import unittest
2
 
3
  import pytest
@@ -6,6 +9,10 @@ from axolotl.utils.dict import DictDefault
6
 
7
 
8
  class DictDefaultTest(unittest.TestCase):
 
 
 
 
9
  def test_dict_default(self):
10
  cfg = DictDefault(
11
  {
@@ -41,7 +48,9 @@ class DictDefaultTest(unittest.TestCase):
41
  }
42
  )
43
 
44
- cfg = cfg | DictDefault({"key_a": {"key_b": "value_b"}, "key_f": "value_g"})
 
 
45
 
46
  assert (
47
  cfg.key_a.key_b == "value_b"
@@ -73,7 +82,7 @@ class DictDefaultTest(unittest.TestCase):
73
  AttributeError,
74
  match=r"'NoneType' object has no attribute 'another_random_key'",
75
  ):
76
- cfg.random_key.another_random_key
77
 
78
  def test_dict_shorthand_assignment(self):
79
  """
 
1
+ """Module for testing DictDefault class"""
2
+
3
+
4
  import unittest
5
 
6
  import pytest
 
9
 
10
 
11
  class DictDefaultTest(unittest.TestCase):
12
+ """
13
+ Test DictDefault class
14
+ """
15
+
16
  def test_dict_default(self):
17
  cfg = DictDefault(
18
  {
 
48
  }
49
  )
50
 
51
+ cfg = cfg | DictDefault( # pylint: disable=unsupported-binary-operation
52
+ {"key_a": {"key_b": "value_b"}, "key_f": "value_g"}
53
+ )
54
 
55
  assert (
56
  cfg.key_a.key_b == "value_b"
 
82
  AttributeError,
83
  match=r"'NoneType' object has no attribute 'another_random_key'",
84
  ):
85
+ cfg.random_key.another_random_key = "value"
86
 
87
  def test_dict_shorthand_assignment(self):
88
  """
tests/test_prompt_tokenizers.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import json
2
  import logging
3
  import unittest
@@ -12,6 +13,10 @@ logging.basicConfig(level="INFO")
12
 
13
 
14
  class TestPromptTokenizationStrategies(unittest.TestCase):
 
 
 
 
15
  def setUp(self) -> None:
16
  self.tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
17
  self.tokenizer.add_special_tokens(
@@ -24,10 +29,15 @@ class TestPromptTokenizationStrategies(unittest.TestCase):
24
 
25
  def test_sharegpt_integration(self):
26
  print(Path(__file__).parent)
27
- with open(Path(__file__).parent / "fixtures/conversation.json", "r") as fin:
 
 
28
  data = fin.read()
29
  conversation = json.loads(data)
30
- with open(Path(__file__).parent / "fixtures/conversation.tokenized.json", "r") as fin:
 
 
 
31
  data = fin.read()
32
  tokenized_conversation = json.loads(data)
33
  prompter = ShareGPTPrompter("chat")
 
1
+ """Module for testing prompt tokenizers."""
2
  import json
3
  import logging
4
  import unittest
 
13
 
14
 
15
  class TestPromptTokenizationStrategies(unittest.TestCase):
16
+ """
17
+ Test class for prompt tokenization strategies.
18
+ """
19
+
20
  def setUp(self) -> None:
21
  self.tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
22
  self.tokenizer.add_special_tokens(
 
29
 
30
  def test_sharegpt_integration(self):
31
  print(Path(__file__).parent)
32
+ with open(
33
+ Path(__file__).parent / "fixtures/conversation.json", encoding="utf-8"
34
+ ) as fin:
35
  data = fin.read()
36
  conversation = json.loads(data)
37
+ with open(
38
+ Path(__file__).parent / "fixtures/conversation.tokenized.json",
39
+ encoding="utf-8",
40
+ ) as fin:
41
  data = fin.read()
42
  tokenized_conversation = json.loads(data)
43
  prompter = ShareGPTPrompter("chat")
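The rewritten open() calls add an explicit encoding="utf-8" and drop the redundant "r" mode, which is what pylint's unspecified-encoding warning asks for: without an explicit encoding, Python falls back to the platform's locale encoding and the fixtures could decode differently across machines. The pattern in isolation, using the same fixture path as above:

import json
from pathlib import Path

fixture = Path(__file__).parent / "fixtures/conversation.json"
# Explicit encoding keeps the read deterministic across platforms;
# "r" is the default mode and can be omitted.
with open(fixture, encoding="utf-8") as fin:
    conversation = json.loads(fin.read())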
tests/test_prompters.py CHANGED
@@ -1,9 +1,15 @@
 
 
1
  import unittest
2
 
3
  from axolotl.prompters import AlpacaPrompter, PromptStyle
4
 
5
 
6
  class AlpacaPrompterTest(unittest.TestCase):
 
 
 
 
7
  def test_prompt_style_w_none(self):
8
  prompter = AlpacaPrompter(prompt_style=None)
9
  res = next(prompter.build_prompt("tell me a joke"))
@@ -11,8 +17,10 @@ class AlpacaPrompterTest(unittest.TestCase):
11
  assert "### Instruction:" in res
12
 
13
  def test_prompt_style_w_instruct(self):
14
- prompter = AlpacaPrompter(prompt_style=PromptStyle.instruct.value)
15
- res = next(prompter.build_prompt("tell me a joke about the following", "alpacas"))
 
 
16
  assert "Below is an instruction" in res
17
  assert "### Instruction:" in res
18
  assert "### Input:" in res
@@ -29,8 +37,10 @@ class AlpacaPrompterTest(unittest.TestCase):
29
  assert "ASSISTANT:" not in res
30
 
31
  def test_prompt_style_w_chat(self):
32
- prompter = AlpacaPrompter(prompt_style=PromptStyle.chat.value)
33
- res = next(prompter.build_prompt("tell me a joke about the following", "alpacas"))
 
 
34
  assert "Below is an instruction" in res
35
  assert "### Instruction:" not in res
36
  assert "### Input:" not in res
@@ -45,5 +55,3 @@ class AlpacaPrompterTest(unittest.TestCase):
45
  assert "### Response:" not in res
46
  assert "USER:" in res
47
  assert "ASSISTANT:" in res
48
-
49
-
 
1
+ """Module testing prompters"""
2
+
3
  import unittest
4
 
5
  from axolotl.prompters import AlpacaPrompter, PromptStyle
6
 
7
 
8
  class AlpacaPrompterTest(unittest.TestCase):
9
+ """
10
+ Test AlpacaPrompter
11
+ """
12
+
13
  def test_prompt_style_w_none(self):
14
  prompter = AlpacaPrompter(prompt_style=None)
15
  res = next(prompter.build_prompt("tell me a joke"))
 
17
  assert "### Instruction:" in res
18
 
19
  def test_prompt_style_w_instruct(self):
20
+ prompter = AlpacaPrompter(prompt_style=PromptStyle.INSTRUCT.value)
21
+ res = next(
22
+ prompter.build_prompt("tell me a joke about the following", "alpacas")
23
+ )
24
  assert "Below is an instruction" in res
25
  assert "### Instruction:" in res
26
  assert "### Input:" in res
 
37
  assert "ASSISTANT:" not in res
38
 
39
  def test_prompt_style_w_chat(self):
40
+ prompter = AlpacaPrompter(prompt_style=PromptStyle.CHAT.value)
41
+ res = next(
42
+ prompter.build_prompt("tell me a joke about the following", "alpacas")
43
+ )
44
  assert "Below is an instruction" in res
45
  assert "### Instruction:" not in res
46
  assert "### Input:" not in res
 
55
  assert "### Response:" not in res
56
  assert "USER:" in res
57
  assert "ASSISTANT:" in res
 
 
tests/test_validation.py CHANGED
@@ -1,12 +1,18 @@
 
 
1
  import unittest
2
 
3
  import pytest
4
 
5
- from axolotl.utils.validation import validate_config
6
  from axolotl.utils.dict import DictDefault
 
7
 
8
 
9
  class ValidationTest(unittest.TestCase):
 
 
 
 
10
  def test_load_4bit_deprecate(self):
11
  cfg = DictDefault(
12
  {
@@ -24,7 +30,7 @@ class ValidationTest(unittest.TestCase):
24
  }
25
  )
26
 
27
- cfg = base_cfg | DictDefault(
28
  {
29
  "load_in_8bit": True,
30
  }
@@ -33,7 +39,7 @@ class ValidationTest(unittest.TestCase):
33
  with pytest.raises(ValueError, match=r".*8bit.*"):
34
  validate_config(cfg)
35
 
36
- cfg = base_cfg | DictDefault(
37
  {
38
  "gptq": True,
39
  }
@@ -42,7 +48,7 @@ class ValidationTest(unittest.TestCase):
42
  with pytest.raises(ValueError, match=r".*gptq.*"):
43
  validate_config(cfg)
44
 
45
- cfg = base_cfg | DictDefault(
46
  {
47
  "load_in_4bit": False,
48
  }
@@ -51,7 +57,7 @@ class ValidationTest(unittest.TestCase):
51
  with pytest.raises(ValueError, match=r".*4bit.*"):
52
  validate_config(cfg)
53
 
54
- cfg = base_cfg | DictDefault(
55
  {
56
  "load_in_4bit": True,
57
  }
@@ -67,7 +73,7 @@ class ValidationTest(unittest.TestCase):
67
  }
68
  )
69
 
70
- cfg = base_cfg | DictDefault(
71
  {
72
  "load_in_8bit": True,
73
  }
@@ -76,7 +82,7 @@ class ValidationTest(unittest.TestCase):
76
  with pytest.raises(ValueError, match=r".*8bit.*"):
77
  validate_config(cfg)
78
 
79
- cfg = base_cfg | DictDefault(
80
  {
81
  "gptq": True,
82
  }
@@ -85,7 +91,7 @@ class ValidationTest(unittest.TestCase):
85
  with pytest.raises(ValueError, match=r".*gptq.*"):
86
  validate_config(cfg)
87
 
88
- cfg = base_cfg | DictDefault(
89
  {
90
  "load_in_4bit": True,
91
  }
@@ -111,4 +117,3 @@ class ValidationTest(unittest.TestCase):
111
  }
112
  )
113
  validate_config(cfg)
114
-
 
1
+ """Module for testing the validation module"""
2
+
3
  import unittest
4
 
5
  import pytest
6
 
 
7
  from axolotl.utils.dict import DictDefault
8
+ from axolotl.utils.validation import validate_config
9
 
10
 
11
  class ValidationTest(unittest.TestCase):
12
+ """
13
+ Test the validation module
14
+ """
15
+
16
  def test_load_4bit_deprecate(self):
17
  cfg = DictDefault(
18
  {
 
30
  }
31
  )
32
 
33
+ cfg = base_cfg | DictDefault( # pylint: disable=unsupported-binary-operation
34
  {
35
  "load_in_8bit": True,
36
  }
 
39
  with pytest.raises(ValueError, match=r".*8bit.*"):
40
  validate_config(cfg)
41
 
42
+ cfg = base_cfg | DictDefault( # pylint: disable=unsupported-binary-operation
43
  {
44
  "gptq": True,
45
  }
 
48
  with pytest.raises(ValueError, match=r".*gptq.*"):
49
  validate_config(cfg)
50
 
51
+ cfg = base_cfg | DictDefault( # pylint: disable=unsupported-binary-operation
52
  {
53
  "load_in_4bit": False,
54
  }
 
57
  with pytest.raises(ValueError, match=r".*4bit.*"):
58
  validate_config(cfg)
59
 
60
+ cfg = base_cfg | DictDefault( # pylint: disable=unsupported-binary-operation
61
  {
62
  "load_in_4bit": True,
63
  }
 
73
  }
74
  )
75
 
76
+ cfg = base_cfg | DictDefault( # pylint: disable=unsupported-binary-operation
77
  {
78
  "load_in_8bit": True,
79
  }
 
82
  with pytest.raises(ValueError, match=r".*8bit.*"):
83
  validate_config(cfg)
84
 
85
+ cfg = base_cfg | DictDefault( # pylint: disable=unsupported-binary-operation
86
  {
87
  "gptq": True,
88
  }
 
91
  with pytest.raises(ValueError, match=r".*gptq.*"):
92
  validate_config(cfg)
93
 
94
+ cfg = base_cfg | DictDefault( # pylint: disable=unsupported-binary-operation
95
  {
96
  "load_in_4bit": True,
97
  }
 
117
  }
118
  )
119
  validate_config(cfg)
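Every case in this file follows the same shape: overlay one offending key onto a shared base config with the | merge, then assert that validate_config rejects it with a matching message. Condensed into one sketch (the contents of base_cfg are defined earlier in the file and are an assumption here):

import pytest

from axolotl.utils.dict import DictDefault
from axolotl.utils.validation import validate_config

base_cfg = DictDefault({"load_4bit": True})  # assumed; the real base_cfg is not shown in this hunk

cfg = base_cfg | DictDefault(  # pylint: disable=unsupported-binary-operation
    {"load_in_8bit": True}
)
with pytest.raises(ValueError, match=r".*8bit.*"):
    validate_config(cfg)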