# imdb_dutch/src/translate_imdb_flax.py
import functools
import json
import logging
import os
import pprint
from typing import Tuple
from datasets import get_dataset_config_names, load_dataset, get_dataset_split_names
import jax
import numpy as np
from flax import jax_utils
from flax.jax_utils import pad_shard_unpad
from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM
import pandas as pd
logger = logging.getLogger(__name__)
DATASET_NAME = "imdb"
OUTPUT_DIR = "./imdb_dutch"
MODEL_370 = "yhavinga/ul2-large-en-nl"
# BATCH_SIZE = 64
BATCH_SIZE = 32
# BATCH_SIZE = 2
MODEL_MAX_LENGTH = 370
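# Per-chunk word budget used when splitting long reviews (assumes roughly 3 tokens per word).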
MAX_WORDS = int(MODEL_MAX_LENGTH / 3)
END_MARKS = (".", "?", "!", '"', "'", "\n")
class FlaxModel:
def __init__(self, model_name: str, tokenizer_name: str, tokenizer_args={}):
"""
Initializes the FlaxModel with the specified model and tokenizer names, as well as tokenizer arguments.
"""
self.model = FlaxAutoModelForSeq2SeqLM.from_pretrained(
model_name, use_auth_token=True
)
self.model.params = self.model.to_fp32(self.model.params, mask=None)
self.tokenizer_args = {
# "model_max_length": self.model.config.max_length,
**tokenizer_args,
}
self.tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name, use_auth_token=True, **self.tokenizer_args
)
# if not (
# self.model.config.max_length
# == self.tokenizer.model_max_length
# == self.tokenizer_args.get("model_max_length")
# ):
# print(
# f"Warning: model max length {self.model.config.max_length} != tokenizer max length {self.tokenizer.model_max_length} != tokenizer_args max length {tokenizer_args.get('model_max_length')}"
# )
# raise ValueError("Model and tokenizer max_length should be equal")
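        # Replicate the model parameters across all local devices for pmap-based data-parallel generation.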
self.params = jax_utils.replicate(self.model.params)
kwargs = {
"max_length": self.tokenizer.model_max_length,
"length_penalty": 1.0,
"num_beams": 4,
"early_stopping": True,
}
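        # Reshape a batch so the leading axis equals the local device count (pmap helper;
        # translate_batch itself relies on flax's pad_shard_unpad for sharding).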
def shard(xs):
local_device_count = jax.local_device_count()
return jax.tree_map(
lambda x: x.reshape((local_device_count, -1) + x.shape[1:]), xs
)
def generate_step(params, batch):
self.model.params = params
output_ids = self.model.generate(
batch["input_ids"], attention_mask=batch["attention_mask"], **kwargs
)
return output_ids.sequences
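        # Parallelize generate_step over the "batch" axis across all local devices.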
self.p_generate_step = jax.pmap(generate_step, "batch")
@functools.lru_cache()
def translate_batch(self, texts: Tuple[str]):
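        """Translate a tuple of texts as one batch (results cached by lru_cache).

        Returns (translations, overflowed); overflowed is True when the tokenized input
        or the generated output filled the entire model_max_length window.
        """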
overflowed = False
texts = list(texts)
if self.model.config.prefix:
texts = [self.model.config.prefix + x for x in texts]
texts = [x.replace("\n", "<n>").replace("<br />", "<n>") for x in texts]
inputs = self.tokenizer(
texts,
max_length=self.tokenizer_args.get("model_max_length"),
truncation=True,
padding="max_length",
return_tensors="np",
)
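        # If the last input position is not 0 (the pad id) for every example, at least one text
        # was truncated at model_max_length; return empty strings and let the caller split the texts.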
if not np.array_equal(
inputs.data["input_ids"][:, self.tokenizer.model_max_length - 1],
np.zeros(BATCH_SIZE),
):
overflowed = True
return BATCH_SIZE * [""], overflowed
batch = inputs.data
print(f"Batch inputs shape is {batch['input_ids'].shape}")
translated = pad_shard_unpad(self.p_generate_step)(self.params, batch)
predictions = jax.device_get(
translated.reshape(-1, self.tokenizer.model_max_length)
)
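        # Likewise flag outputs that used every position up to max_length: generation was likely cut off.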
if not np.array_equal(
predictions[:, self.tokenizer.model_max_length - 1],
np.zeros(BATCH_SIZE),
):
overflowed = True
output = [
self.tokenizer.decode(t, skip_special_tokens=False) for t in predictions
]
# If there is <extra_id in the output, remove it and everything after it
output = [
x.replace("<pad>", "").replace("</s>", "").split("<extra_id")[0]
for x in output
]
output = [x.replace("<n>", "<br />").strip() for x in output]
return output, overflowed
def split_text(text):
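    """Split text into parts of at most MAX_WORDS words, preferring to break after END_MARKS."""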
text_parts = []
current_part = ""
def split_on_end_marks(text):
sentences = []
current_sentence = ""
for char in text:
if char in END_MARKS:
sentences.append(current_sentence + char)
current_sentence = ""
else:
current_sentence += char
# Add the final sentence if it wasn't ended by an end of line mark
if current_sentence:
sentences.append(current_sentence)
return sentences
text_lines = split_on_end_marks(text)
for line in text_lines:
# If adding the line to the current part would not exceed MAX_WORDS words, add it to the current part
if len((current_part + line).split()) <= MAX_WORDS:
current_part += line
        # If adding the line would exceed MAX_WORDS words, flush the current part and start a new one
else:
if len(current_part) > 0:
text_parts.append(current_part)
while len(line.split()) > MAX_WORDS:
# print(f"Line {line} is longer than MAX_WORDS words")
current_part = " ".join(line.split()[:MAX_WORDS])
text_parts.append(current_part + " ")
line = " ".join(line.split()[MAX_WORDS:])
current_part = line
# Add the final part to the list
text_parts.append(current_part)
text_parts[-1] = text_parts[-1].rstrip()
return text_parts
def test_split_text():
# Test with single line that is less than MAX_WORDS words
text = " ".join([f"n{i}" for i in range(MAX_WORDS - 20)])
a = list(text)
a[150] = END_MARKS[0]
text = "".join(a)
text_parts = split_text(text)
assert text_parts == [text]
# Test with single line that is exactly MAX_WORDS words
text = " ".join([f"n{i}" for i in range(MAX_WORDS)])
a = list(text)
a[10] = END_MARKS[0]
text = "".join(a)
text_parts = split_text(text)
assert text_parts == [text]
# Test with single line that is more than MAX_WORDS words
text = " ".join([f"n{i}" for i in range(MAX_WORDS + 1)])
a = list(text)
a[150] = END_MARKS[0]
text = "".join(a)
text_parts = split_text(text)
assert text_parts == [text[:151], text[151:]]
    # Test with multiple newline-separated lines whose total stays well under MAX_WORDS words
text = "\n".join([f"n{i}" for i in range(10)])
text_parts = split_text(text)
assert text_parts == [text]
# Test with 500 words
text = " ".join([f"n{i}" for i in range(500)])
a = list(text)
a[150] = END_MARKS[0]
a[300] = END_MARKS[0]
a[550] = END_MARKS[0]
a[600] = END_MARKS[0]
a[750] = END_MARKS[0]
a[900] = END_MARKS[0]
a[950] = END_MARKS[0]
a[1000] = END_MARKS[0]
text = "".join(a)
text_parts = split_text(text)
assert all(
[len(x.split()) <= MAX_WORDS for x in text_parts]
), "Not all text parts are less than MAX_WORDS words"
assert "".join(text_parts) == text, "Text parts concatenated != original text"
test_split_text()
def get_file_lines(filename):
"""
Get the number of lines in a file, 0 if the file does not exist.
"""
lines = 0
    if os.path.exists(filename):
        with open(filename, "r") as f:
            lines = len(f.readlines())
        print(f"{filename} already has {lines} lines")
return lines
SEP = "\n"
# SEP="<unk>"
def main():
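    """Translate every config/split of the IMDB dataset to Dutch and write one JSON-lines file per split."""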
os.makedirs(OUTPUT_DIR, exist_ok=True)
model_370 = FlaxModel(
MODEL_370, MODEL_370, tokenizer_args={"model_max_length": MODEL_MAX_LENGTH}
)
for config in get_dataset_config_names(DATASET_NAME):
print(f"Processing config {config}")
ds = load_dataset(DATASET_NAME, config)
# for split in ["validation"]:
for split in get_dataset_split_names(DATASET_NAME, config):
output_file = f"{OUTPUT_DIR}/{DATASET_NAME}_dutch_{config}-{split}.json"
num_examples = len(ds[split])
# fn = partial(encode_in_single_text, validation=(split == "validation"))
# single_text_ds = ds[split].map(fn, num_proc=6).sort("length", reverse=True)
# # fn = partial(batch_single_text_decode, validation=(split == "validation"))
# # decoded_ds = single_text_ds.map(fn, num_proc=6)
#
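            # Resume support: count the lines already written and skip the corresponding batches.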
lines = get_file_lines(output_file)
start_batch_index = lines // BATCH_SIZE
with open(output_file, mode="ab" if lines else "wb") as writer:
for batch_index in range(start_batch_index, num_examples // BATCH_SIZE):
ds_split = ds[split]
batch = ds_split[
batch_index * BATCH_SIZE : (batch_index + 1) * BATCH_SIZE
]
print(
f"Translating batch {batch_index} of {num_examples // BATCH_SIZE}"
)
translated, overflow = model_370.translate_batch(
tuple(batch["text"])
)
translated_batch = [{"text": x} for x in translated]
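                    # When the whole-review batch overflowed, re-translate each review in
                    # sentence-bounded chunks and stitch the translated chunks back together.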
if overflow:
batch_text_splitted = [
split_text(text) for text in batch["text"]
]
max_parts = max(
[len(text) for text in batch_text_splitted]
)
text_translated = [""] * BATCH_SIZE
for part_index in range(max_parts):
text_parts_i = [
text[part_index] if part_index < len(text) else ""
for text in batch_text_splitted
]
(
text_part_translated,
overflow,
) = model_370.translate_batch(tuple(text_parts_i))
if overflow:
print(
f"This shouldn't happen, overflow on a splitted text: {text_parts_i}"
)
                            for bi in range(BATCH_SIZE):
                                if text_parts_i[bi] != "":
                                    text_translated[bi] += " " + text_part_translated[bi]
for bi in range(BATCH_SIZE):
translated_batch[bi]["text"] = text_translated[bi].strip()
# write each object in the batch as a separate line
for bi in range(BATCH_SIZE):
example = {
"text": translated_batch[bi]["text"],
"text_en": batch["text"][bi],
"label": batch["label"][bi],
}
pprint.pprint(example)
writer.write(json.dumps(example).encode("utf-8"))
writer.write("\n".encode("utf-8"))
if __name__ == "__main__":
main()