upload
- README.md +2 -0
- prepare_data.py +17 -0
- test.jsonl +0 -0
- train.jsonl +0 -0
README.md
ADDED
@@ -0,0 +1,2 @@
+# hate_speech_offensive
+This dataset is a version of [hate_speech_offensive](https://huggingface.co/datasets/hate_speech_offensive), split into a train and a test set.
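
The two JSONL splits in this repository can be loaded with the `datasets` JSON loader. A minimal sketch, assuming train.jsonl and test.jsonl are available in the working directory (field names follow prepare_data.py below):

from datasets import load_dataset

# Load the train/test splits written by prepare_data.py
splits = load_dataset(
    "json",
    data_files={"train": "train.jsonl", "test": "test.jsonl"},
)
print(splits["train"][0])  # fields: text, label, label_text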
prepare_data.py
ADDED
@@ -0,0 +1,17 @@
+from datasets import load_dataset
+import json
+import random
+
+dataset = load_dataset("hate_speech_offensive")
+id2label = dataset['train'].features['class'].names
+
+rows = [{'text': row['tweet'], 'label': row['class'], 'label_text': id2label[row['class']]} for row in dataset['train']]
+
+random.shuffle(rows)
+num_test = 2000
+data_splits = {'test': rows[0:num_test], 'train': rows[num_test:]}
+
+for split in data_splits.keys():
+    with open(f'{split}.jsonl', 'w') as fOut:
+        for row in data_splits[split]:
+            fOut.write(json.dumps(row)+"\n")
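
Each line written by the script above is a standalone JSON object with the fields text, label and label_text. A small sanity-check sketch for inspecting the first record, assuming test.jsonl has been generated in the working directory:

import json

# Read back the first record written by prepare_data.py
with open('test.jsonl') as fIn:
    record = json.loads(fIn.readline())
print(record['label'], record['label_text'])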
test.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
train.jsonl
ADDED
The diff for this file is too large to render. See raw diff.