Tasks: Token Classification
Sub-tasks: named-entity-recognition
Modalities: Text
Formats: parquet
Languages: English
Size: 1K - 10K
removed huggingface req
scoring-scripts/compute_seqeval.py CHANGED
@@ -1,28 +1,49 @@
-from 
-from 
-
-# check that tokens match
-assert(
-
-# ensure IOB2?
-
-# compute scores
-seqeval_results = seqeval.compute(predictions = predictions_dataset[pred_col],
-                                  references = references_dataset[ref_col],
-                                  scheme = 'IOB2',
-                                  suffix = False,
-                                  )
-
-# change all values to regular (not numpy) floats (otherwise cannot be serialized to json)
-seqeval_results = literal_eval(str(seqeval_results))
+from seqeval.metrics import classification_report, f1_score, precision_score, recall_score, accuracy_score
+from seqeval.scheme import IOB2
+import numpy as np
+def compute_seqeval_jsonl(references_jsonl, predictions_jsonl, ref_col='ner_tags', pred_col='pred_ner_tags'):
+    '''
+    Computes the seqeval scores between two datasets loaded from jsonl (lists of dicts with the same keys).
+    Sorts the datasets by 'unique_id' and verifies that the tokens match.
+    '''
+    # turn each list of records into a dict of columns
+    ref_dict = {k: [e[k] for e in references_jsonl] for k in references_jsonl[0].keys()}
+    pred_dict = {k: [e[k] for e in predictions_jsonl] for k in predictions_jsonl[0].keys()}
+
+    # sort by unique_id
+    ref_idx = np.argsort(ref_dict['unique_id'])
+    pred_idx = np.argsort(pred_dict['unique_id'])
+    ref_ner_tags = np.array(ref_dict[ref_col], dtype=object)[ref_idx]
+    pred_ner_tags = np.array(pred_dict[pred_col], dtype=object)[pred_idx]
+    ref_tokens = np.array(ref_dict['tokens'], dtype=object)[ref_idx]
+    pred_tokens = np.array(pred_dict['tokens'], dtype=object)[pred_idx]
+
+    # check that tokens match
+    assert (ref_tokens == pred_tokens).all()
+
+    # compute the per-type report as a dict
+    report = classification_report(y_true=ref_ner_tags, y_pred=pred_ner_tags,
+                                   scheme=IOB2, output_dict=True,
+                                   )
+
+    # extract the values we care about
+    report.pop("macro avg")
+    report.pop("weighted avg")
+    overall_score = report.pop("micro avg")
+
+    seqeval_results = {
+        type_name: {
+            "precision": score["precision"],
+            "recall": score["recall"],
+            "f1": score["f1-score"],
+            "support": score["support"],
+        }
+        for type_name, score in report.items()
+    }
+    seqeval_results["overall_precision"] = overall_score["precision"]
+    seqeval_results["overall_recall"] = overall_score["recall"]
+    seqeval_results["overall_f1"] = overall_score["f1-score"]
+    seqeval_results["overall_accuracy"] = accuracy_score(y_true=ref_ner_tags, y_pred=pred_ner_tags)
+
+    return seqeval_results
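For reference, a minimal usage sketch (not part of the commit; the module and file names are assumptions): it loads two jsonl files whose records share 'unique_id' and 'tokens' and carry IOB2 tags under 'ner_tags' / 'pred_ner_tags', then passes the record lists to compute_seqeval_jsonl.

import json
from compute_seqeval import compute_seqeval_jsonl  # assumes compute_seqeval.py is importable from the working directory

def read_jsonl(path):
    # one JSON object per line, e.g.
    # {"unique_id": 0, "tokens": ["Acme", "Corp"], "ner_tags": ["B-ORG", "I-ORG"]}
    with open(path, encoding='utf-8') as f:
        return [json.loads(line) for line in f if line.strip()]

references = read_jsonl('references.jsonl')    # hypothetical file names
predictions = read_jsonl('predictions.jsonl')
results = compute_seqeval_jsonl(references, predictions)
print(results['overall_precision'], results['overall_recall'], results['overall_f1'])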