---
dataset_info:
  - config_name: issue_comments
    features:
      - name: user
        dtype: string
      - name: created_at
        dtype: timestamp[us]
      - name: body
        dtype: string
      - name: issue_number
        dtype: int64
    splits:
      - name: train
        num_bytes: 4153842
        num_examples: 8476
    download_size: 1656492
    dataset_size: 4153842
  - config_name: issues
    features:
      - name: number
        dtype: int64
      - name: title
        dtype: string
      - name: user
        dtype: string
      - name: state
        dtype: string
      - name: created_at
        dtype: timestamp[us]
      - name: closed_at
        dtype: timestamp[us]
      - name: comments_count
        dtype: int64
    splits:
      - name: train
        num_bytes: 268570
        num_examples: 2521
    download_size: 157626
    dataset_size: 268570
  - config_name: models
    features:
      - name: id
        dtype: string
      - name: created_at
        dtype: timestamp[us, tz=UTC]
      - name: likes
        dtype: int64
      - name: downloads
        dtype: int64
      - name: tags
        sequence: string
    splits:
      - name: train
        num_bytes: 13718429
        num_examples: 42961
    download_size: 2023895
    dataset_size: 13718429
  - config_name: models_likes
    features:
      - name: user
        dtype: string
      - name: model_id
        dtype: string
      - name: liked_at
        dtype: timestamp[s, tz=UTC]
    splits:
      - name: train
        num_bytes: 312436
        num_examples: 5077
    download_size: 141415
    dataset_size: 312436
  - config_name: pypi_downloads
    features:
      - name: day
        dtype: date32
      - name: num_downloads
        dtype: int64
    splits:
      - name: train
        num_bytes: 19428
        num_examples: 1619
    download_size: 14949
    dataset_size: 19428
  - config_name: stargazers
    features:
      - name: starred_at
        dtype: timestamp[s, tz=UTC]
      - name: user
        dtype: string
    splits:
      - name: train
        num_bytes: 224610
        num_examples: 10508
    download_size: 217947
    dataset_size: 224610
configs:
  - config_name: issue_comments
    data_files:
      - split: train
        path: issue_comments/train-*
  - config_name: issues
    data_files:
      - split: train
        path: issues/train-*
  - config_name: models
    data_files:
      - split: train
        path: models/train-*
  - config_name: models_likes
    data_files:
      - split: train
        path: models_likes/train-*
  - config_name: pypi_downloads
    data_files:
      - split: train
        path: pypi_downloads/train-*
  - config_name: stargazers
    data_files:
      - split: train
        path: stargazers/train-*
---

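Usage and community metrics for the [TRL](https://github.com/huggingface/trl) library: GitHub stargazers, issues and comments, Hub models tagged `trl` and their likes, and PyPI downloads. Each config can be loaded by name with `datasets.load_dataset`, for example:

```python
from datasets import load_dataset

# Any config name works the same way: "stargazers", "issues", "issue_comments",
# "models", "models_likes", "pypi_downloads"
stargazers = load_dataset("qgallouedec/trl-metrics", "stargazers", split="train")
```

The scripts used to collect the data are shown below.
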
## Stars

```python
import requests
from datetime import datetime
from datasets import Dataset
import pyarrow as pa
import os

def get_stargazers(owner, repo, token):
    # Initialize the count and the page number
    page = 1
    stargazers = []
    while True:
        # Construct the URL for the stargazers with pagination
        stargazers_url = f"https://api.github.com/repos/{owner}/{repo}/stargazers?page={page}&per_page=100"

        # Send the request to GitHub API with appropriate headers
        headers = {"Accept": "application/vnd.github.v3.star+json", "Authorization": "token " + token}
        response = requests.get(stargazers_url, headers=headers)

        if response.status_code != 200:
            raise Exception(f"Failed to fetch stargazers with status code {response.status_code}: {response.text}")

        stargazers_page = response.json()

        if not stargazers_page:  # Exit the loop if there are no more stargazers to process
            break

        stargazers.extend(stargazers_page)
        page += 1  # Move to the next page

    return stargazers

token = os.environ.get("GITHUB_PAT")
stargazers = get_stargazers("huggingface", "trl", token)
# Transpose the list of records into a column-oriented dict
stargazers = {key: [stargazer[key] for stargazer in stargazers] for key in stargazers[0].keys()}
dataset = Dataset.from_dict(stargazers)

def clean(example):
    # Parse the raw ISO timestamp and cast it to timestamp[s, tz=UTC]
    starred_at = datetime.strptime(example["starred_at"], "%Y-%m-%dT%H:%M:%SZ")
    starred_at = pa.scalar(starred_at, type=pa.timestamp("s", tz="UTC"))
    return {"starred_at": starred_at, "user": example["user"]["login"]}

dataset = dataset.map(clean, remove_columns=dataset.column_names)
dataset.push_to_hub("qgallouedec/trl-metrics", config_name="stargazers")
```
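
As a usage sketch (not part of the collection pipeline), the resulting config can be turned into a cumulative star count over time with pandas:

```python
from datasets import load_dataset

# Minimal sketch: cumulative star count per day from the "stargazers" config
df = load_dataset("qgallouedec/trl-metrics", "stargazers", split="train").to_pandas()
stars_per_day = df.set_index("starred_at").sort_index().resample("D").size()
print(stars_per_day.cumsum().tail())
```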

## PyPI downloads

```python
from datasets import Dataset
from google.cloud import bigquery
import os

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "propane-tree-432413-4c3e2b5e6b3c.json"

# Initialize a BigQuery client
client = bigquery.Client()

# Define your query
query = """
#standardSQL
WITH daily_downloads AS (
  SELECT
    DATE(timestamp) AS day,
    COUNT(*) AS num_downloads
  FROM
    `bigquery-public-data.pypi.file_downloads`
  WHERE
    file.project = 'trl'
    -- Filter for the last 54 months
    AND DATE(timestamp) BETWEEN DATE_SUB(CURRENT_DATE(), INTERVAL 54 MONTH) AND CURRENT_DATE()
  GROUP BY
    day
)
SELECT
  day,
  num_downloads
FROM
  daily_downloads
ORDER BY
  day DESC
"""

# Execute the query
query_job = client.query(query)

# Fetch the results
results = query_job.result()

# Convert the results to a pandas DataFrame and then to a Dataset
df = results.to_dataframe()
dataset = Dataset.from_pandas(df)

dataset.push_to_hub("qgallouedec/trl-metrics", config_name="pypi_downloads")
```
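
As a usage sketch, the daily counts can be aggregated into weekly totals (the `day` column is an Arrow `date32`, which may arrive in pandas as plain `date` objects, hence the explicit conversion):

```python
import pandas as pd
from datasets import load_dataset

# Minimal sketch: weekly download totals from the "pypi_downloads" config
df = load_dataset("qgallouedec/trl-metrics", "pypi_downloads", split="train").to_pandas()
df["day"] = pd.to_datetime(df["day"])  # ensure a proper datetime index
print(df.set_index("day").sort_index()["num_downloads"].resample("W").sum().tail())
```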

## Models tagged

```python
from huggingface_hub import HfApi
from datasets import Dataset

api = HfApi()
# List every Hub model tagged "trl" and keep the fields stored in the dataset
models = api.list_models(tags="trl")
dataset_list = [
    {"id": model.id, "created_at": model.created_at, "likes": model.likes, "downloads": model.downloads, "tags": model.tags}
    for model in models
]
# Transpose the list of rows into a column-oriented dict
dataset_dict = {key: [d[key] for d in dataset_list] for key in dataset_list[0].keys()}
dataset = Dataset.from_dict(dataset_dict)
dataset.push_to_hub("qgallouedec/trl-metrics", config_name="models")
```
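
As a usage sketch, the config makes it easy to rank the tagged models, for instance by likes:

```python
from datasets import load_dataset

# Minimal sketch: the ten most-liked Hub models tagged "trl"
models = load_dataset("qgallouedec/trl-metrics", "models", split="train").to_pandas()
print(models.sort_values("likes", ascending=False).head(10)[["id", "likes", "downloads"]])
```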

## Issues and comments

```python
import requests
from datetime import datetime
import os
from datasets import Dataset
from tqdm import tqdm

token = os.environ.get("GITHUB_PAT")

def get_full_response(url, headers, params=None):
    page = 1
    output = []
    params = params or {}
    while True:
        params = {**params, "page": page, "per_page": 100}
        response = requests.get(url, headers=headers, params=params)

        if response.status_code != 200:
            raise Exception(f"Failed to fetch {url}: {response.text}")

        batch = response.json()
        if len(batch) == 0:
            break
        output.extend(batch)
        page += 1
    return output

# GitHub API URL for issues, open and closed (note: the GitHub REST API also
# returns pull requests from this endpoint, since it treats every PR as an issue)
issues_url = "https://api.github.com/repos/huggingface/trl/issues"

# Set up headers for authentication
headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}

# Make the request
issues = get_full_response(issues_url, headers, params={"state": "all"})

issues_dataset_dict = {
    "number": [],
    "title": [],
    "user": [],
    "state": [],
    "created_at": [],
    "closed_at": [],
    "comments_count": [],
}
comments_dataset_dict = {
    "user": [],
    "created_at": [],
    "body": [],
    "issue_number": [],
}
for issue in tqdm(issues):
    # Extract relevant information
    issue_number = issue["number"]
    title = issue["title"]
    created_at = datetime.strptime(issue["created_at"], "%Y-%m-%dT%H:%M:%SZ")
    comments_count = issue["comments"]
    comments_url = issue["comments_url"]

    comments = get_full_response(comments_url, headers=headers)
    for comment in comments:
        comments_dataset_dict["user"].append(comment["user"]["login"])
        comments_dataset_dict["created_at"].append(datetime.strptime(comment["created_at"], "%Y-%m-%dT%H:%M:%SZ"))
        comments_dataset_dict["body"].append(comment["body"])
        comments_dataset_dict["issue_number"].append(issue_number)

    issues_dataset_dict["number"].append(issue_number)
    issues_dataset_dict["title"].append(title)
    issues_dataset_dict["user"].append(issue["user"]["login"])
    issues_dataset_dict["state"].append(issue["state"])
    issues_dataset_dict["created_at"].append(created_at)
    issues_dataset_dict["closed_at"].append(datetime.strptime(issue["closed_at"], "%Y-%m-%dT%H:%M:%SZ") if issue["closed_at"] else None)
    issues_dataset_dict["comments_count"].append(comments_count)

issues_dataset = Dataset.from_dict(issues_dataset_dict)
comments_dataset = Dataset.from_dict(comments_dataset_dict)

issues_dataset.push_to_hub("qgallouedec/trl-metrics", config_name="issues")
comments_dataset.push_to_hub("qgallouedec/trl-metrics", config_name="issue_comments")
```
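
As a usage sketch, the `issues` config can answer questions such as the median time-to-close:

```python
from datasets import load_dataset

# Minimal sketch: median time-to-close over closed issues
issues = load_dataset("qgallouedec/trl-metrics", "issues", split="train").to_pandas()
closed = issues[issues["state"] == "closed"]
print((closed["closed_at"] - closed["created_at"]).median())
```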