---
license: cc-by-sa-4.0
dataset_info:
  features:
    - name: video_id
      dtype: string
    - name: chunk_idx
      dtype: int64
    - name: chunk_text
      dtype: string
    - name: video_metadata
      dtype: string
    - name: video_language
      dtype: string
    - name: chunk_media
      dtype: string
  splits:
    - name: shard_0
      num_bytes: 2152532
      num_examples: 694
    - name: shard_1
      num_bytes: 2039321
      num_examples: 628
    - name: shard_10
      num_bytes: 1711625
      num_examples: 502
    - name: shard_100
      num_bytes: 1879092
      num_examples: 608
    - name: shard_1000
      num_bytes: 2554377
      num_examples: 631
    - name: shard_10000
      num_bytes: 1436826
      num_examples: 409
    - name: shard_10001
      num_bytes: 2566374
      num_examples: 919
    - name: shard_10002
      num_bytes: 1441850
      num_examples: 416
    - name: shard_10003
      num_bytes: 1479331
      num_examples: 453
    - name: shard_10004
      num_bytes: 2304946
      num_examples: 665
    - name: shard_10005
      num_bytes: 2326767
      num_examples: 765
    - name: shard_10008
      num_bytes: 2405272
      num_examples: 769
    - name: shard_10006
      num_bytes: 2272052
      num_examples: 667
    - name: shard_10007
      num_bytes: 2369366
      num_examples: 632
    - name: shard_10009
      num_bytes: 2081310
      num_examples: 626
    - name: shard_1001
      num_bytes: 2383462
      num_examples: 664
    - name: shard_10010
      num_bytes: 4633710
      num_examples: 1011
    - name: shard_10011
      num_bytes: 2031992
      num_examples: 572
    - name: shard_10016
      num_bytes: 1524141
      num_examples: 440
    - name: shard_10027
      num_bytes: 2009449
      num_examples: 561
    - name: shard_1004
      num_bytes: 2236232
      num_examples: 679
    - name: shard_10015
      num_bytes: 1936158
      num_examples: 651
    - name: shard_10022
      num_bytes: 1375721
      num_examples: 381
    - name: shard_10020
      num_bytes: 1851431
      num_examples: 572
    - name: shard_10024
      num_bytes: 2066917
      num_examples: 621
    - name: shard_10012
      num_bytes: 2046815
      num_examples: 626
    - name: shard_10013
      num_bytes: 2377377
      num_examples: 691
    - name: shard_10014
      num_bytes: 1775675
      num_examples: 492
    - name: shard_10017
      num_bytes: 3541944
      num_examples: 1225
    - name: shard_1002
      num_bytes: 2343929
      num_examples: 603
    - name: shard_10039
      num_bytes: 2087969
      num_examples: 600
    - name: shard_10033
      num_bytes: 2335915
      num_examples: 676
    - name: shard_10031
      num_bytes: 1783883
      num_examples: 478
    - name: shard_10036
      num_bytes: 1701763
      num_examples: 490
    - name: shard_10026
      num_bytes: 1930478
      num_examples: 585
    - name: shard_10060
      num_bytes: 2259114
      num_examples: 677
    - name: shard_1005
      num_bytes: 2555364
      num_examples: 580
    - name: shard_10035
      num_bytes: 1755575
      num_examples: 572
    - name: shard_10021
      num_bytes: 2182556
      num_examples: 599
    - name: shard_10025
      num_bytes: 1763936
      num_examples: 547
    - name: shard_10057
      num_bytes: 1655171
      num_examples: 514
    - name: shard_10071
      num_bytes: 2342632
      num_examples: 668
    - name: shard_10046
      num_bytes: 1849419
      num_examples: 521
    - name: shard_10082
      num_bytes: 2396177
      num_examples: 690
    - name: shard_10093
      num_bytes: 1926455
      num_examples: 618
  download_size: 120269273
  dataset_size: 95682401
configs:
  - config_name: default
    data_files:
      - split: shard_0
        path: data/shard_0-*
      - split: shard_1
        path: data/shard_1-*
      - split: shard_10
        path: data/shard_10-*
      - split: shard_100
        path: data/shard_100-*
      - split: shard_1000
        path: data/shard_1000-*
      - split: shard_10000
        path: data/shard_10000-*
      - split: shard_10001
        path: data/shard_10001-*
      - split: shard_10002
        path: data/shard_10002-*
      - split: shard_10003
        path: data/shard_10003-*
      - split: shard_10004
        path: data/shard_10004-*
      - split: shard_10005
        path: data/shard_10005-*
      - split: shard_10011
        path: data/shard_10011-*
      - split: shard_10008
        path: data/shard_10008-*
      - split: shard_10010
        path: data/shard_10010-*
      - split: shard_10013
        path: data/shard_10013-*
      - split: shard_10006
        path: data/shard_10006-*
      - split: shard_10012
        path: data/shard_10012-*
      - split: shard_10007
        path: data/shard_10007-*
      - split: shard_10009
        path: data/shard_10009-*
      - split: shard_1001
        path: data/shard_1001-*
      - split: shard_10014
        path: data/shard_10014-*
      - split: shard_10016
        path: data/shard_10016-*
      - split: shard_10015
        path: data/shard_10015-*
      - split: shard_10022
        path: data/shard_10022-*
      - split: shard_10025
        path: data/shard_10025-*
      - split: shard_10020
        path: data/shard_10020-*
      - split: shard_10027
        path: data/shard_10027-*
      - split: shard_10031
        path: data/shard_10031-*
      - split: shard_10024
        path: data/shard_10024-*
      - split: shard_10046
        path: data/shard_10046-*
      - split: shard_1004
        path: data/shard_1004-*
      - split: shard_10039
        path: data/shard_10039-*
      - split: shard_10033
        path: data/shard_10033-*
      - split: shard_10017
        path: data/shard_10017-*
      - split: shard_1002
        path: data/shard_1002-*
      - split: shard_10036
        path: data/shard_10036-*
      - split: shard_1005
        path: data/shard_1005-*
      - split: shard_10026
        path: data/shard_10026-*
      - split: shard_10060
        path: data/shard_10060-*
      - split: shard_10035
        path: data/shard_10035-*
      - split: shard_10021
        path: data/shard_10021-*
      - split: shard_10057
        path: data/shard_10057-*
      - split: shard_10071
        path: data/shard_10071-*
      - split: shard_10082
        path: data/shard_10082-*
      - split: shard_10093
        path: data/shard_10093-*
---

VALID (Video-Audio Large Interleaved Dataset)

Overview

The VALID (Video-Audio Large Interleaved Dataset) is a multimodal dataset comprising approximately 720,000 Creative Commons-licensed videos crawled from YouTube and processed into audio-video-text data records for machine learning research. The dataset provides a unique opportunity for training models to learn relationships between modalities such as video frames, audio clips, and multilingual text, making it suitable for applications like multimodal representation learning.

Features

  • Audio-Video-Text Format: A combination of:
<video>
    <caption><image> the caption </caption>
    <caption><image> the caption </caption>
    <caption><image> the caption </caption>
</video>
<transcript> <audio> multi-lingual transcript </transcript>
English text
  • The non-text multimodal portion begins the data item and can include multiple media: a data item may contain more than one audio clip and more than one video, while others may have only images/videos or only audio paired with English text. Each video contains multiple frames stored as images, with a text caption for each image; standalone images can also be interleaved. Although each audio or video snippet is no more than 10 seconds long, a data item can contain several such snippets (e.g., two 10-second videos), so the corresponding English text may cover a longer span of the source video. (A minimal loading sketch follows this list.)

  • Data Components:

    • Images: PNG format, phashed (perceptually hashed) to ensure variability, with 0–10 images per audio snippet. Each image includes a caption created with Florence-2.
    • Audio: OGG format, multilingual, ~10 seconds per snippet, with shorter sound or music snippets (1–3 seconds) to minimize copyright issues. Each audio snippet is transcribed with Whisper for non-English speech or with the original YouTube ASR for English.
    • Text: Excluding the captions and transcripts, the “text” portion is a concatenation of YouTube’s original English transcripts associated with the above media, amounting to around 1–40 words per data record.
  • Dataset Size:

    • About 15,000,000 images.
    • About 30,000,000 audio snippets.
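
To make the record layout concrete, here is a minimal loading sketch using the Hugging Face datasets library. The field names (video_id, chunk_idx, chunk_text, video_metadata, video_language, chunk_media) come from the dataset metadata above; the choice of shard_0 is purely an example, and the exact encoding of the string-typed fields is an assumption to verify.

```python
# Minimal loading sketch (illustrative only).
from datasets import load_dataset

# Each split is one shard; shard_0 is used here only as an example.
shard = load_dataset("ontocord/VALID", split="shard_0")
print(shard.features)  # video_id, chunk_idx, chunk_text, video_metadata, video_language, chunk_media

example = shard[0]
print(example["video_id"], example["video_language"])
# chunk_text holds the chunk's interleaved text (see the format description above).
print(example["chunk_text"][:300])
# chunk_media and video_metadata are stored as strings; inspect them before
# assuming a particular encoding (e.g., JSON).
print(type(example["chunk_media"]), example["video_metadata"][:200])
```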

File Organization

  • Each data entry follows the <video><image(s)><audio><text> structure as described above.
  • Metadata includes timestamps and alignment between modalities.

Multimodal Details

  • Audio-Video Alignment: Snippets allow learning temporal relationships between audio and visual elements.
  • Text Annotations: Text descriptions, including captions and contextual keywords, provide linguistic alignment.

Preprocessing

  • Phashing for Images: Perceptual hashing is used to filter near-duplicate frames, so the images within the dataset are dynamic rather than static (a minimal sketch follows this list).
  • Audio Snippet Lengths: Music and sound effects are clipped to 1–3 seconds to minimize copyright concerns.
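
As a rough illustration of the phash-based filtering described above (not the exact pipeline used to build VALID), a sketch using the imagehash library might look like this; the threshold value is an arbitrary assumption.

```python
# Illustrative sketch: keep only frames whose perceptual hash differs enough
# from the previously kept frame, so a snippet's images stay "dynamic".
from PIL import Image  # pip install pillow imagehash
import imagehash


def filter_static_frames(frames, max_hamming=4):
    """frames: iterable of PIL.Image; returns frames that differ from the last kept one."""
    kept, last_hash = [], None
    for frame in frames:
        h = imagehash.phash(frame)
        # ImageHash subtraction returns the Hamming distance between hashes.
        if last_hash is None or (h - last_hash) > max_hamming:
            kept.append(frame)
            last_hash = h
    return kept
```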

Licenses

All videos in VALID are CC BY, as declared by their original uploaders on YouTube. We publish snippets of these videos here under those rights and under the principles of fair use. However, we cannot guarantee that the original uploaders had the rights to share the content. [TODO: add an AS-IS, WHERE-IS usage disclaimer]

Intended Uses

  • Primary Use Case: Training models for multimodal understanding, such as contrastive multimodal learning (e.g., CLIP); a minimal sketch of this style of objective follows this list.
  • Not Recommended For: Generation tasks, as the dataset's quality may not meet the requirements of generative models.
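
For orientation only, the sketch below shows the kind of CLIP-style contrastive objective referred to above; the encoders, batching, and temperature value are assumptions, not part of VALID.

```python
# Minimal CLIP-style symmetric contrastive loss over a batch of paired embeddings.
import torch
import torch.nn.functional as F


def clip_style_loss(image_emb, text_emb, temperature=0.07):
    """image_emb, text_emb: (batch, dim) tensors from separate encoders (assumed)."""
    image_emb = F.normalize(image_emb, dim=-1)
    text_emb = F.normalize(text_emb, dim=-1)
    logits = image_emb @ text_emb.t() / temperature      # (batch, batch) similarities
    targets = torch.arange(logits.size(0), device=logits.device)
    # Matched pairs sit on the diagonal; both directions are averaged.
    return (F.cross_entropy(logits, targets) + F.cross_entropy(logits.t(), targets)) / 2
```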

Dataset Limitations

  • Quality: Images and audio are sourced from YouTube and may vary in resolution and clarity.
  • Rights Uncertainty: While videos are marked as CC BY by their third-party uploaders, the underlying rights may not be verifiable.
  • Biases: The dataset's multilingual audio paired with English-only text may introduce linguistic biases, and the broad variety of video topics and sources may introduce content biases.

Ethical Considerations

The dataset was built under the principles of fair use and CC BY-SA licensing. Its creation strives to align with the spirit of the EU AI Act, emphasizing transparency and safety in AI model development. Users must exercise caution and adhere to copyright and licensing rules when using VALID.


Policy for Managing Video Deletion Requests

Our goal is to establish a clear process for removing videos from our dataset when requested by users or required by external factors, while balancing the rights of content owners, compliance with CC-BY licenses, and the community's ability to utilize the dataset for training and research purposes.

  • 1. Respecting Content Owners' Rights: All videos in the dataset are under the CC-BY license. As such, proper attribution will always be maintained as required by the license. If a content owner requests the removal of a video from the dataset, we will balance this request with the community's ability to train on the data, considering the original intent of the CC-BY license.

  • 2. Deletion Request Process:

    • Content owners or users can request the removal of a video by first requesting that it be removed from YouTube: Here and Here.
    • Then verify that the video has been removed from YouTube and provide this feedback to us Here.
    • Requests must demonstrate that the video is no longer publicly available on YouTube.
    • We will remove the confirmed videos in the next release of this dataset.
  • 3. Verification and Balancing Interests: All deletion requests will be verified by checking YouTube to ensure the video is no longer available. We may also remove a video in our sole discretion. Decisions on video removal will take into account:

    • The rights and wishes of content owners, including their ability to remove their videos from public availability.
    • The community's need for robust datasets for training and research.
    • The spirit of the CC-BY license, which permits redistribution and use with proper attribution.

  • 4. Responsibilities for Derivative Datasets: Users creating derivative datasets must ensure compliance by deleting videos listed in delete_these_videos.json (a minimal filtering sketch follows this list).

  • 5. Proactive Deletion: Videos may be removed proactively under the following circumstances:

    • Requests from the hosting provider (e.g., Hugging Face).
    • Legal requirements or enforcement actions.
    • Internal decisions.

  • 6. Community Considerations:

    • The community is encouraged to respect the balance between individual content owners’ wishes and the public benefit derived from open access datasets.
    • Efforts will be made to keep the dataset robust while honoring legitimate requests for content removal.

  • 7. Updates: Users are encouraged to check delete_these_videos.json from time to time to ensure their copy of the dataset is up to date.
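
For users maintaining derivative datasets, a minimal filtering sketch is shown below. It assumes delete_these_videos.json is a JSON list of video IDs; check the actual file structure before relying on this.

```python
# Illustrative sketch: drop records whose video_id appears in the deletion list.
import json

from datasets import load_dataset

with open("delete_these_videos.json") as f:
    deleted_ids = set(json.load(f))  # assumed to be a flat list of video IDs

shard = load_dataset("ontocord/VALID", split="shard_0")  # example split
shard = shard.filter(lambda example: example["video_id"] not in deleted_ids)
```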


Related Materials:

Acknowledgement and Thanks

This dataset was built by Ontocord.AI in cooperation with Grass and LAION.AI. It was created as part of the EuroHPC grant EUHPC_E03_068, using resources of the Leonardo supercomputer, in order to build safe multimodal models that comply with the EU AI Act. The dataset was built from a subset of the Grass Video Repository, a massive collection of Creative Commons videos. We deeply thank EuroHPC and Cineca, as well as Hugging Face and the open-source community, for their support.

About the Contributors:

  • Grass is committed to making the public web accessible again. Through its network of millions of globally distributed nodes, it is capable of collecting petabyte-scale datasets for a variety of use cases, including training AI models. The network is run exclusively by users who have downloaded an application to their devices, allowing them to contribute their unused internet bandwidth to the network. On X: @getgrass_io
  • LAION is a non-profit organization that provides datasets, tools, and models to liberate machine learning research. By doing so, we encourage open public education and a more environmentally friendly use of resources by reusing existing datasets and models.
  • Ontocord is a technology company focused on legally compliant AI. Our mission is to make our AGI future lawful and accessible to everyone.
  • Alignment Lab AI: Our mission is to build a future leveraging AI as a force for good and as a tool that enhances human lives. We believe everyone deserves to harness the power of personal intelligence.
  • And many others ...

Citation

@misc{Huu2024VALID,
  title  = {VALID (Video-Audio Large Interleaved Dataset)},
  author = {Huu Nguyen and Ken Tsui and Andrej Radonjic and Christoph Schuhmann},
  year   = {2024},
  url    = {https://huggingface.co/datasets/ontocord/VALID},
}