import glob
import json
import os
import random
from concurrent.futures import ProcessPoolExecutor
from functools import partial

import numpy as np
import torch
import torch.distributed as dist
from tqdm import tqdm

from tokenizer import Tokenizer

DATA_CACHE_DIR = "data"


def process_shard(args, vocab_size):
    shard_id, shard = args
    tokenizer_model = get_tokenizer_model_path(vocab_size)
    enc = Tokenizer(tokenizer_model)

    try:
        print(f"Processing shard {shard_id} - {shard}")

        with open(shard, "r") as f:
            data = json.load(f)

        # tokenize every story in the shard into one long stream of tokens
        all_tokens = []
        for example in tqdm(data, position=shard_id):
            text = example["story"].strip()
            tokens = enc.encode(text, bos=True, eos=False)
            all_tokens.extend(tokens)

        # uint16 is enough as long as the vocab size stays below 2**16
        all_tokens = np.array(all_tokens, dtype=np.uint16)

        if vocab_size == 0:
            # default Llama 2 tokenizer: save the .bin next to its .json shard
            tokenized_filename = shard.replace(".json", ".bin")
        else:
            # custom tokenizer: save the .bin under a dedicated tok{N} directory
            bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
            shard_basename = os.path.basename(shard)
            bin_basename = shard_basename.replace(".json", ".bin")
            tokenized_filename = os.path.join(bin_dir, bin_basename)

        # write the raw token bytes to disk
        with open(tokenized_filename, "wb") as f:
            f.write(all_tokens.tobytes())

        # stories are delimited by the BOS token (id 1), so the BOS count gives
        # the number of sequences and hence the average sequence length
        avg_seq_len = all_tokens.size / ((all_tokens == 1).sum())
        print(f"Saved {tokenized_filename}, average seqlen: {avg_seq_len:.2f}")

    except Exception as e:
        print(f"Error processing shard {shard_id}: {e}")


def pretokenize(vocab_size):
    # iterate over all the JSON shards and tokenize each of them
    data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
    shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json")))

    if vocab_size > 0:
        # .bin files for a custom tokenizer go into their own tok{N} directory
        bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
        os.makedirs(bin_dir, exist_ok=True)

    # process all the shards in parallel, one process per shard
    fun = partial(process_shard, vocab_size=vocab_size)
    with ProcessPoolExecutor() as executor:
        executor.map(fun, enumerate(shard_filenames))

    print("Done.")


class PretokDataset(torch.utils.data.IterableDataset):
    """Loads pretokenized examples from disk and yields them as PyTorch tensors."""

    def __init__(self, split, max_seq_len, vocab_size, vocab_source):
        super().__init__()
        self.split = split
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.vocab_source = vocab_source

    def __iter__(self):
        # get worker info within a DataLoader
        worker_info = torch.utils.data.get_worker_info()
        worker_id = worker_info.id if worker_info else 0
        # get DDP rank info
        rank = dist.get_rank() if dist.is_initialized() else 0
        # combine the worker_id and rank into a unique seed for the rng
        seed = 42 + worker_id + 1337 * rank
        rng = random.Random(seed)
        print(f"Created a PretokDataset with rng seed {seed}")
        if self.vocab_source == "llama2":
            # .bin files tokenized with the Llama 2 tokenizer sit next to the .json shards
            bin_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
        elif self.vocab_source == "custom":
            # .bin files for a custom tokenizer live in their tok{N} directory
            bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{self.vocab_size}")
        else:
            raise ValueError(f"Unknown vocab_source: {self.vocab_source}")
        shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin")))
        # train/test split: hold out the first shard as the test set
        shard_filenames = shard_filenames[1:] if self.split == "train" else shard_filenames[:1]
        assert len(shard_filenames) > 0, f"No bin files found in {bin_dir}"
        while True:
            rng.shuffle(shard_filenames)
            for shard in shard_filenames:
                # memory-map the shard to avoid loading it all into RAM
                m = np.memmap(shard, dtype=np.uint16, mode="r")
                num_batches = len(m) // self.max_seq_len
                num_batches -= 1  # drop the last partial batch
                assert num_batches > 0, "this shard is way too small? investigate."
                ixs = list(range(num_batches))
                rng.shuffle(ixs)
                for ix in ixs:
                    start = ix * self.max_seq_len
                    end = start + self.max_seq_len + 1
                    # read max_seq_len + 1 tokens, then shift by one for inputs/targets
                    chunk = torch.from_numpy((m[start:end]).astype(np.int64))
                    x = chunk[:-1]
                    y = chunk[1:]
                    yield x, y
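

# A minimal sketch of direct use (the values are illustrative, not defaults):
#
#   ds = PretokDataset(split="train", max_seq_len=256, vocab_size=0,
#                      vocab_source="llama2")
#   x, y = next(iter(ds))  # int64 tensors of shape (256,); y is x shifted by one
#
# Note the iterator loops over the shards forever; training code decides when
# to stop consuming it.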


def get_tokenizer_model_path(vocab_size):
    """
    Returns the path to the SentencePiece tokenizer model for a given vocab size.
    vocab_size = 0 designates the default Llama 2 tokenizer; in that case,
    None is returned.
    """
    if vocab_size == 0:
        return None
    else:
        return os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}.model")
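

# For example, get_tokenizer_model_path(4096) returns "data/tok4096.model"
# under the default DATA_CACHE_DIR, and get_tokenizer_model_path(0) returns None.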


class Task:

    @staticmethod
    def iter_batches(batch_size, device, num_workers=0, **dataset_kwargs):
        ds = PretokDataset(**dataset_kwargs)
        dl = torch.utils.data.DataLoader(
            ds, batch_size=batch_size, pin_memory=True, num_workers=num_workers
        )
        for x, y in dl:
            # pinned host memory (pin_memory=True) lets these non_blocking
            # host-to-device copies overlap with compute
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)
            yield x, y
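

# A sketch of the intended call pattern (extra kwargs are forwarded to
# PretokDataset; the values below are illustrative):
#
#   batch_iter = Task.iter_batches(
#       batch_size=32, device="cuda", split="train",
#       max_seq_len=256, vocab_size=0, vocab_source="llama2",
#   )
#   x, y = next(batch_iter)  # each of shape (32, 256)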


if __name__ == "__main__":
    pretokenize(vocab_size=0)