{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "context:\n", "- All images from sakugabooru has been downloaded, with schema:\n", " - `./data/post_{id}/post_{id}.{ext}`\n", " - `./data/post_{id}/post_{id}.json`\n", "- Now we need to upload them to huggingface by tarring it.\n", "\n", "To install the libraries:\n", "\n", "```bash\n", "pip install unibox hfutils\n", "```" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import os\n", "import tarfile\n", "from pathlib import Path\n", "from typing import List\n", "import unibox as ub" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "get available files:" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " \r" ] } ], "source": [ "# Example usage:\n", "data_dir = \"/rmt/yada/dev/sakuga-scraper/data\"\n", "output_dir = \"/rmt/yada/dev/sakuga-scraper/tars\"\n", "\n", "# Collect all valid media files\n", "all_files = ub.traverses(\"/rmt/yada/dev/sakuga-scraper/data\")" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "all files: 404160\n", "image files: 8680, video files: 155238, json files: 240242\n", "total unique files: 404160\n" ] } ], "source": [ "# Convert extensions to sets for validation\n", "image_extensions = set(ub.IMG_FILES) - {'.gif'}\n", "video_extensions = {'.webm', '.mp4', '.gif'}\n", "\n", "# Categorize files\n", "image_files = [f for f in all_files if Path(f).suffix in image_extensions]\n", "video_files = [f for f in all_files if Path(f).suffix in video_extensions]\n", "json_files = [f for f in all_files if Path(f).suffix == '.json']\n", "\n", "# Recompute lengths\n", "total_unique_files = len(set(image_files + video_files + json_files))\n", "print(f\"all files: {len(all_files)}\\nimage files: {len(image_files)}, video files: {len(video_files)}, json files: {len(json_files)}\\ntotal unique files: {total_unique_files}\")" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(['/rmt/yada/dev/sakuga-scraper/data/post_100058/sankaku_100058.json'],\n", " ['/rmt/yada/dev/sakuga-scraper/data/post_105581/sankaku_105581.jpg'],\n", " ['/rmt/yada/dev/sakuga-scraper/data/post_102570/sankaku_102570.mp4'])" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "json_files[:1], image_files[:1], video_files[:1]" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "((27, 272758), (27, 273264))" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "def determine_tar_info(files: List[str], modulo: int = 10000):\n", " \"\"\"\n", " Determine how many full tars can be created and the current last post number.\n", "\n", " Args:\n", " files (List[str]): List of file paths (images/videos).\n", " modulo (int): Constant range of IDs per tar file (default: 10,000).\n", "\n", " Returns:\n", " tuple: (number_of_full_tars, last_post_id)\n", " \"\"\"\n", " post_ids = []\n", " for file_path in files:\n", " filename = os.path.basename(file_path)\n", " try:\n", " post_id = int(filename.split('_')[1].split('.')[0])\n", " post_ids.append(post_id)\n", " except (IndexError, ValueError):\n", " print(f\"Skipping file with invalid format: {filename}\")\n", " continue\n", "\n", " if not post_ids:\n", " return 0, None\n", "\n", " last_post_id = 
"    number_of_full_tars = (last_post_id + 1) // modulo\n", "\n", "    return number_of_full_tars, last_post_id\n", "\n", "\n", "# (full tar count, last post id) per category\n", "determine_tar_info(image_files), determine_tar_info(video_files)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "do the tarring:" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "We can make 27 full tars, last post id is 273264\n" ] } ], "source": [ "import os\n", "import tarfile\n", "from pathlib import Path\n", "from typing import List\n", "\n", "def generate_tar_from_files(\n", "    files: List[str],\n", "    output_dir: str,\n", "    tar_id: int,\n", "    modulo: int = 10000\n", ") -> List[str]:\n", "    \"\"\"\n", "    Create a tar containing the files (images/videos) whose post IDs fall\n", "    into the ID range selected by tar_id.\n", "\n", "    Args:\n", "        files (List[str]): List of file paths (images/videos).\n", "        output_dir (str): Directory to store the tar files.\n", "        tar_id (int): Index of the ID range to include in the tar.\n", "        modulo (int): Number of post IDs per tar file (default: 10,000).\n", "\n", "    Returns:\n", "        List[str]: List of created tar file paths.\n", "    \"\"\"\n", "    output_dir = Path(output_dir)\n", "    output_dir.mkdir(parents=True, exist_ok=True)\n", "\n", "    # Determine ID range\n", "    range_start = tar_id * modulo\n", "    range_end = (tar_id + 1) * modulo\n", "\n", "    # Filter files by ID range\n", "    files_in_range = []\n", "    for file_path in files:\n", "        filename = os.path.basename(file_path)\n", "        try:\n", "            post_id = int(filename.split('_')[1].split('.')[0])\n", "        except (IndexError, ValueError):\n", "            print(f\"Skipping file with invalid format: {filename}\")\n", "            continue\n", "\n", "        if range_start <= post_id < range_end:\n", "            files_in_range.append(file_path)\n", "\n", "    if not files_in_range:\n", "        print(f\"No files found in range {range_start}-{range_end}.\")\n", "        return []\n", "\n", "    tar_file_path = output_dir / f\"{tar_id}.tar\"\n", "\n", "    # Create the tar (uncompressed, so offset-based partial reads work later)\n", "    with tarfile.open(tar_file_path, \"w\") as tar:\n", "        for file_path in files_in_range:\n", "            tar.add(file_path, arcname=os.path.basename(file_path))\n", "\n", "    print(f\"Created tar: {tar_file_path} with {len(files_in_range)} files.\")\n", "\n", "    return [str(tar_file_path)]\n", "\n", "# Example usage:\n", "# Suppose we have a collection of images & videos:\n", "# files = [\n", "#     \"/path/to/sankaku_16465.jpg\",\n", "#     \"/path/to/sankaku_20000.mp4\",\n", "#     ...\n", "# ]\n", "\n", "# See how many full tars we can make and the highest post ID:\n", "media_files = video_files + image_files\n", "tar_count, last_id = determine_tar_info(media_files)\n", "print(\"We can make\", tar_count, \"full tars, last post id is\", last_id)" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Created tar: output/0.tar with 1827 files.\n", "['output/0.tar']\n" ] } ], "source": [ "# Create the tar for the first batch (tar_id=0):\n", "tars_created = generate_tar_from_files(media_files, output_dir=\"output\", tar_id=0)\n", "print(tars_created)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "make indexes:" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "525bf979329a498b90ce44405f13238a", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Indexing tar file '/rmt/yada/dev/sakuga-scraper/tars/0.tar' ...: 0it [00:00, ?it/s]"
?it/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [ "'/rmt/yada/dev/sakuga-scraper/tars/0.json'" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from hfutils.index.make import tar_create_index\n", "\n", "# Create index for the generated tars\n", "tar_create_index(src_tar_file=\"/rmt/yada/dev/sakuga-scraper/tars/0.tar\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "testing partial tar read:" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "All files verified successfully via local partial-read checks.\n" ] } ], "source": [ "import hashlib\n", "import json\n", "import os\n", "\n", "def test_local_partial_read(tar_path: str, index_json_path: str):\n", " \"\"\"\n", " Perform local partial reads of an uncompressed tar file to verify offsets and sizes\n", " match the metadata in the JSON index.\n", "\n", " :param tar_path: Path to the tar file.\n", " :param index_json_path: Path to the JSON index (the .tar.json file).\n", " \"\"\"\n", " with open(index_json_path, 'r', encoding='utf-8') as jf:\n", " index_data = json.load(jf)\n", "\n", " # Sanity check: Compare tar size\n", " reported_size = index_data.get(\"filesize\", None)\n", " actual_size = os.path.getsize(tar_path)\n", " if reported_size and reported_size != actual_size:\n", " raise ValueError(f\"Tar size mismatch! JSON says {reported_size} bytes, actual is {actual_size} bytes.\")\n", "\n", " with open(tar_path, \"rb\") as tar_f:\n", " for fname, info in index_data[\"files\"].items():\n", " offset = info[\"offset\"]\n", " size = info[\"size\"]\n", " expected_sha256 = info[\"sha256\"]\n", "\n", " # Seek to offset and read exactly `size` bytes\n", " tar_f.seek(offset, 0)\n", " data = tar_f.read(size)\n", " if len(data) != size:\n", " raise ValueError(f\"Read {len(data)} bytes instead of {size} for '{fname}'.\")\n", "\n", " # Compute SHA-256 and compare\n", " actual_sha256 = hashlib.sha256(data).hexdigest()\n", " if actual_sha256 != expected_sha256:\n", " raise ValueError(f\"SHA mismatch for '{fname}'. Expected {expected_sha256}, got {actual_sha256}.\")\n", "\n", " print(\"All files verified successfully via local partial-read checks.\")\n", "\n", "\n", "test_local_partial_read(\"../tars/0.tar\", \"../tars/0.json\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "tarring the entire set:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "0557e54d0bce48f3aba219ebf8b470c5", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/26 [00:00