"""TODO: Add a description here.""" |
|
|
|
import ast
import csv
import json
import os
import sys
from glob import glob
from subprocess import check_output

import datasets
import function_parser
import pandas as pd
from function_parser.language_data import LANGUAGE_METADATA
from function_parser.parsers.java_parser import JavaParser
from function_parser.process import DataProcessor
from git import Git, Repo
from tree_sitter import Language

# Cells in the commits CSV (notably the changes column) can exceed csv's
# default field size limit, so raise it before reading.
csv.field_size_limit(sys.maxsize)

LANG = "java"
JAVA_LANG = Language(
    os.path.join(function_parser.__path__[0], "tree-sitter-languages.so"), LANG
)
DataProcessor.PARSER.set_language(JAVA_LANG)
FUNC_PROCESSOR = DataProcessor(
    language=LANG, language_parser=LANGUAGE_METADATA[LANG]["language_parser"]
)
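
# Note: FUNC_PROCESSOR is reused in _generate_examples below. Based only on how
# it is called in this script, a call looks roughly like this (sketch;
# "Foo.java" is a hypothetical checked-out source file):
#
#   nwo, path, functions = FUNC_PROCESSOR.get_function_definitions("Foo.java")
#   rows = [
#       FUNC_PROCESSOR.extract_function_data(func, nwo, path, None)
#       for func in functions
#       if len(func["function_tokens"]) > 1
#   ]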
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
Commits mined from a collection of Java repositories, together with the Java
methods (file path, code, code tokens, and docstring) extracted from each
repository checked out at the corresponding commit.
"""

_HOMEPAGE = ""

_LICENSE = ""

_URL = "https://huggingface.co/datasets/ncoop57/athena_data/resolve/main/repos-commits.zip"


class NewDataset(datasets.GeneratorBasedBuilder):
    """Commits from Java repositories and the Java methods extracted at each commit."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="meta_data",
            version=VERSION,
            description="Commit-level metadata only: repository, parent commit, commit, and the associated changes.",
        ),
        datasets.BuilderConfig(
            name="repos_commits",
            version=VERSION,
            description="Commit metadata joined with every Java method extracted from the repository checked out at that commit.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "meta_data"
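
    # Example usage (sketch; assumes this loading script is published as
    # ncoop57/athena_data on the Hugging Face Hub, per _URL above):
    #
    #   from datasets import load_dataset
    #   meta = load_dataset("ncoop57/athena_data", "meta_data", split="train")
    #   methods = load_dataset("ncoop57/athena_data", "repos_commits", split="train")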
    def _info(self):
        if self.config.name == "meta_data":
            features = datasets.Features(
                {
                    "repo": datasets.Value("string"),
                    "parent_commit": datasets.Value("string"),
                    "commit": datasets.Value("string"),
                    "changes": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                }
            )
        elif self.config.name == "repos_commits":
            features = datasets.Features(
                {
                    "repo": datasets.Value("string"),
                    "parent_commit": datasets.Value("string"),
                    "commit": datasets.Value("string"),
                    "changes": datasets.Sequence(datasets.Value("string")),
                    "file_path": datasets.Value("string"),
                    "code": datasets.Value("string"),
                    "code_tokens": datasets.Sequence(datasets.Value("string")),
                    "docstring": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(data_dir, "repos-commits")
        if self.config.name == "repos_commits" and not os.path.exists(os.path.join(data_dir, "repos")):
            output = check_output(
                [
                    "bash",
                    "clone.sh",
                    "repos.txt",
                ],
                cwd=data_dir,
            )
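            # Assumption: clone.sh reads repos.txt and clones each listed
            # repository into <data_dir>/repos, the layout that
            # _generate_examples expects below.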
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": data_dir,
                    "file_path": os.path.join(data_dir, "processed_impact_methods.csv"),
                },
            ),
        ]
    def _generate_examples(self, data_dir, file_path):
        """Yields examples as (key, example) tuples."""
        with open(file_path, encoding="utf-8") as f:
            csv_reader = csv.reader(
                f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            next(csv_reader, None)  # skip the header row
            row_id = -1
            for row in csv_reader:
                row_id += 1
                repo, parent_commit, commit, changes = row
                changes = ast.literal_eval(changes)
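                # The changes column arrives as the repr of a Python list
                # (presumably the files touched by the commit), so literal_eval
                # above restores the structure declared in _info.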
                if self.config.name == "meta_data":
                    yield row_id, {
                        "repo": repo,
                        "parent_commit": parent_commit,
                        "commit": commit,
                        "changes": changes,
                    }
                elif self.config.name == "repos_commits":
                    repo_path = os.path.join(data_dir, "repos", repo)
                    try:
                        # Reset the working tree and check out the commit of interest.
                        g = Git(repo_path)
                        g.clean(force=True, d=True)
                        g.checkout(commit)
                    except Exception as e:
                        print(e)
                        continue

                    indexes = []
                    files = glob(f"{repo_path}/**/*.{LANGUAGE_METADATA[LANG]['ext']}", recursive=True)
                    sha = None
                    for java_file in files:
                        definitions = FUNC_PROCESSOR.get_function_definitions(java_file)
                        if definitions is None:
                            continue

                        nwo, path, functions = definitions
                        indexes.extend(
                            (
                                FUNC_PROCESSOR.extract_function_data(func, nwo, path, sha)
                                for func in functions
                                if len(func["function_tokens"]) > 1
                            )
                        )

                    # Skip commits with no parseable Java methods; a DataFrame built
                    # from an empty list has none of the columns selected below.
                    if not indexes:
                        continue

                    df = pd.DataFrame(indexes)[
                        ["path", "function", "function_tokens", "docstring"]
                    ].rename(
                        columns={
                            "path": "file_path",
                            "function": "code",
                            "function_tokens": "code_tokens",
                        }
                    )
                    for _, method_row in df.iterrows():
                        row_id += 1
                        yield row_id, {
                            "repo": repo,
                            "parent_commit": parent_commit,
                            "commit": commit,
                            "changes": changes,
                            "file_path": method_row["file_path"],
                            "code": method_row["code"],
                            "code_tokens": method_row["code_tokens"],
                            "docstring": method_row["docstring"],
                        }