parquet-converter committed
Commit 15f0e9b · 1 Parent(s): 581a78c

Update parquet files
.DS_Store DELETED
Binary file (6.15 kB)
 
Medication/wl-medication-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d46f28a21b66a56e814597d3cc80f393942dabca7a0d75974cfd75da2af97f2
+ size 138614
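
The three added files are Git LFS pointer stubs rather than the parquet payloads themselves: `version` names the pointer spec, `oid` is the SHA-256 digest of the actual file, and `size` is its byte count. A downloaded copy can be checked against the pointer as a quick sanity test; the local path below is a hypothetical example.

```python
import hashlib

# Hypothetical local copy of the test split; adjust the path as needed.
path = "Medication/wl-medication-test.parquet"

# Digest copied from the LFS pointer above.
expected = "8d46f28a21b66a56e814597d3cc80f393942dabca7a0d75974cfd75da2af97f2"

# Hash the file in 1 MiB chunks to avoid loading it all into memory.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == expected, "file does not match the LFS pointer digest"
```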
Medication/wl-medication-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4615f1960a23447c3b800c0282f4d6a2eda086ae1a61f64a0004cd4a4c37e85
+ size 1111486
Medication/wl-medication-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13917e0b5a5b6bf6e95493a2b7a238897884c1c0f85a3355df6cfce354eab2af
+ size 128109
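
With all three splits converted to parquet, the data can be loaded without the custom loading script that this commit deletes. A minimal sketch, assuming a local checkout of this repository; loading by hub id (`load_dataset("plncmm/wl-medication")`, the repo named in the deleted script's `_URL`) should resolve to the same files once this commit is in place.

```python
from datasets import load_dataset

# Point the generic parquet builder at the converted split files
# added in this commit (paths assume a local checkout of the repo).
data_files = {
    "train": "Medication/wl-medication-train.parquet",
    "validation": "Medication/wl-medication-validation.parquet",
    "test": "Medication/wl-medication-test.parquet",
}
ds = load_dataset("parquet", data_files=data_files)

print(ds)              # DatasetDict with train/validation/test splits
print(ds["train"][0])  # {'id': ..., 'tokens': [...], 'ner_tags': [...]}
```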
README.md DELETED
@@ -1,3 +0,0 @@
- ---
- license: cc-by-nc-4.0
- ---
dev.conll DELETED
The diff for this file is too large to render. See raw diff
 
test.conll DELETED
The diff for this file is too large to render. See raw diff
 
train.conll DELETED
The diff for this file is too large to render. See raw diff
 
wl-medication.py DELETED
@@ -1,96 +0,0 @@
- 
- import datasets
- 
- 
- logger = datasets.logging.get_logger(__name__)
- 
- 
- _LICENSE = "Creative Commons Attribution 4.0 International"
- 
- _VERSION = "1.1.0"
- 
- _URL = "https://huggingface.co/datasets/plncmm/wl-medication/resolve/main/"
- _TRAINING_FILE = "train.conll"
- _DEV_FILE = "dev.conll"
- _TEST_FILE = "test.conll"
- 
- class MedicationConfig(datasets.BuilderConfig):
-     """BuilderConfig for the Medication dataset."""
- 
-     def __init__(self, **kwargs):
-         super(MedicationConfig, self).__init__(**kwargs)
- 
- 
- class Medication(datasets.GeneratorBasedBuilder):
-     """Medication dataset."""
- 
-     BUILDER_CONFIGS = [
-         MedicationConfig(
-             name="Medication",
-             version=datasets.Version(_VERSION),
-             description="Medication dataset"),
-     ]
- 
-     def _info(self):
-         return datasets.DatasetInfo(
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "ner_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=[
-                                 "O",
-                                 "B-Medication",
-                                 "I-Medication",
-                             ]
-                         )
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-         )
- 
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         urls_to_download = {
-             "train": f"{_URL}{_TRAINING_FILE}",
-             "dev": f"{_URL}{_DEV_FILE}",
-             "test": f"{_URL}{_TEST_FILE}",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
- 
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
- 
-     def _generate_examples(self, filepath):
-         logger.info("⏳ Generating examples from = %s", filepath)
-         with open(filepath, encoding="utf-8") as f:
-             guid = 0
-             tokens = []
-             pos_tags = []
-             ner_tags = []
-             for line in f:
-                 if line == "\n":
-                     if tokens:
-                         yield guid, {
-                             "id": str(guid),
-                             "tokens": tokens,
-                             "ner_tags": ner_tags,
-                         }
-                         guid += 1
-                         tokens = []
-                         ner_tags = []
-                 else:
-                     splits = line.split(" ")
-                     tokens.append(splits[0])
-                     ner_tags.append(splits[-1].rstrip())
-             # last example
-             yield guid, {
-                 "id": str(guid),
-                 "tokens": tokens,
-                 "ner_tags": ner_tags,
-             }
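
For reference, the deleted `_generate_examples` consumed one token per line with the NER tag as the last space-separated field, and a blank line closing each sentence. The standalone sketch below replays that loop on an in-memory sample; the sentence text is invented for illustration, and only the tag names (O, B-Medication, I-Medication) come from the script's label set.

```python
# Hypothetical CoNLL snippet in the layout the deleted script expected:
# token first, NER tag last, blank line between sentences.
sample = """\
El O
paciente O
recibe O
paracetamol B-Medication

Control O
en O
un O
mes O
"""

def parse_conll(text):
    """Group token/tag lines into sentences, splitting on blank lines."""
    tokens, ner_tags = [], []
    for line in text.splitlines():
        if not line.strip():
            if tokens:
                yield {"tokens": tokens, "ner_tags": ner_tags}
                tokens, ner_tags = [], []
        else:
            fields = line.split(" ")
            tokens.append(fields[0])     # token is the first field
            ner_tags.append(fields[-1])  # NER tag is the last field
    if tokens:  # flush the final sentence (file may lack a trailing blank line)
        yield {"tokens": tokens, "ner_tags": ner_tags}

for example in parse_conll(sample):
    print(example)
```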