# Base Dataset: wikimedia/wikipedia (https://huggingface.co/datasets/wikimedia/wikipedia)

# Extracts only the Korean (ko) subset of the base dataset.

---
license: cc-by-sa-3.0
language:
- ko
dataset_info:
  features:
  - name: title
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1213180436.2364573
    num_examples: 583107
  - name: test
    num_bytes: 134798519.76354265
    num_examples: 64790
  download_size: 799654121
  dataset_size: 1347978956.0
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: test
    path: data/test-*
---
|
|