|
"""The Anno CTR Dataset""" |
|
|
|
|
|
import datasets |
|
import json |
|
import requests |
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
_DESCRIPTION = """\ |
|
AnnoCTR consists of 400 cyber threat reports that have been obtained from commercial CTI vendors. The reports describe threat-related information such as tactics, techniques, actors, tools, and targeted industries. The reports have been annotated by a domain expert with named entities, temporal expressions, and cybersecurity-specific concepts. The annotations include mentions of organizations, locations, industry sectors, time expressions, code snippets, hacker groups, malware, tools, tactics, and techniques. |
|
|
|
The dataset is split into three parts: train, dev, and test, with 60%, 15%, and 25% of the documents, respectively. The train set is used for model training, the dev set is used for model selection, and the test set is used for evaluation. |
|
|
|
For further information on the annotation scheme, please refer to our paper and the annotation guidelines for the general concepts and cybersecurity-specific concepts. |
|
""" |
|
|
|
|
|
|
|
_HOMEPAGE = "https://github.com/boschresearch/anno-ctr-lrec-coling-2024" |
|
|
|
|
|
_LICENSE = "The AnnoCTR corpus located in the folder AnnoCTR is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License (CC-BY-SA 4.0)." |
|
|
|
_CITATION = """\ |
|
|
|
Lukas Lange, Marc Müller, Ghazaleh Haratinezhad Torbati, Dragan Milchevski, Patrick Grau, Subhash Pujari, Annemarie Friedrich. AnnoCTR: A Dataset for Detecting and Linking Entities, Tactics, and Techniques in Cyber Threat Reports. LREC-COLING 2024. |
|
|
|
""" |
|
|
|
|
|
# Raw-file base URL, pinned to a specific commit so the downloaded data is immutable.
_URL = "https://raw.githubusercontent.com/boschresearch/anno-ctr-lrec-coling-2024/d510b6949e1938d47c93a43eedd562dc538439dc/AnnoCTR/ner_json/"

# JSON-lines file names for the three splits, resolved relative to _URL.
_TRAINING_FILE="train.json"

_TEST_FILE = "test.json"

_DEV_FILE = "dev.json"
|
|
|
class AnnoCTRConfig(datasets.BuilderConfig):
    """BuilderConfig for the AnnoCTR dataset.

    A thin pass-through config: all behavior lives in the keyword arguments
    (name, version, description, ...) forwarded to `datasets.BuilderConfig`.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments to the base BuilderConfig."""
        super().__init__(**kwargs)
|
|
|
|
|
class AnnoCTRDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for AnnoCTR: annotated cyber threat reports (LREC-COLING 2024)."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        AnnoCTRConfig(name="all_tags", version=VERSION, description="Include all tags"),
        AnnoCTRConfig(name="everything", version=VERSION, description="Everything"),
    ]

    DEFAULT_CONFIG_NAME = "all_tags"

    def _get_tags(self, url, col="all_tags"):
        """Collect the tag vocabulary found in column ``col`` of a JSON-lines file.

        Args:
            url: URL of a JSON-lines file (one JSON object per line).
            col: Name of the list-valued field to harvest tag names from.

        Returns:
            Sorted list of unique tag names. Sorting is essential: set iteration
            order varies across Python processes (hash randomization), and these
            names define the ClassLabel id mapping, which must be deterministic.
        """
        names = set()
        r = requests.get(url)
        # Fail loudly on a bad download rather than JSON-parsing an error page.
        r.raise_for_status()
        for line in r.text.splitlines():
            data = json.loads(line)
            names.update(data[col])
        return sorted(names)

    def _info(self):
        """Build the DatasetInfo for the active config.

        For "all_tags", the label vocabulary is read from the training split so
        tags are encoded as integer ClassLabel ids; every other config exposes
        all tag columns as plain string sequences.
        """
        if self.config.name == "all_tags":
            the_url = f"{_URL}{_TRAINING_FILE}"
            logger.info("Loading the %s", the_url)
            all_tags = self._get_tags(the_url, col="all_tags")
            logger.info("Found %d tags", len(all_tags))
            features = datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "text": datasets.Value("string"),
                    "all_tags": datasets.Sequence(datasets.features.ClassLabel(names=all_tags)),
                }
            )
        else:
            features = datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "text": datasets.Value("string"),
                    "ne_tags": datasets.Sequence(datasets.Value("string")),
                    "nc_tags": datasets.Sequence(datasets.Value("string")),
                    "te_tags": datasets.Sequence(datasets.Value("string")),
                    "ce_tags": datasets.Sequence(datasets.Value("string")),
                    "ci_tags": datasets.Sequence(datasets.Value("string")),
                    "all_tags": datasets.Sequence(datasets.Value("string")),
                }
            )

        dinfo = datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
        logger.info(dinfo)
        return dinfo

    def _split_generators(self, dl_manager):
        """Download the three split files and return one SplitGenerator each."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a JSON-lines split file.

        "all_tags" keeps only the columns declared in ``_info``; "everything"
        yields the full record minus its "id" field.
        """
        logger.info("⏳ Generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)

                if self.config.name == "all_tags":
                    yield key, {
                        "tokens": data["tokens"],
                        "text": data['text'],
                        "all_tags": data['all_tags'],
                    }
                elif self.config.name == "everything":
                    # Drop the document id (not declared as a feature);
                    # tolerate records that lack one instead of raising KeyError.
                    data_new = data.copy()
                    data_new.pop("id", None)
                    yield key, data_new
|
|
|
|