import ast
import os
import re
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from functools import partial

import gradio as gr
import numpy as np
import pandas as pd
from datatrove.io import DataFolder
FALLBACK_TOKEN_NAME = "HF_TOKEN"
def is_array_like(x):
    return isinstance(x, (list, tuple, np.ndarray))
def get_task_type(df):
    """Infer the task type from the predictions of the first row."""
    first_preds = df['predictions'].iloc[0]
    if all(isinstance(pred, str) for pred in first_preds):
        return "generative"
    if all(is_array_like(pred) and all(isinstance(item, float) for item in pred) for pred in first_preds):
        return "multiple_choice"
    return "mixed"
def fix_df(df):
    # For some reason some metrics and predictions are stored as strings
    for col in ["predictions", "metrics", "choices", "gold", "gold_index"]:
        if col in df.columns:
            df[col] = [ast.literal_eval(x) if isinstance(x, str) else x for x in df[col].values]
    return df
def get_run_name_seed(run_name):
    if "-seed-" not in run_name:
        return run_name, 5
    run_name, seed = run_name.split("-seed-")
    return run_name, int(seed)
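# Example (hypothetical run name): "ind_minhash-2023-14-seed-1" -> ("ind_minhash-2023-14", 1);
# names without a "-seed-" suffix fall back to seed 5.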
def fetch_repo_structure(results_uri, oauth_token: gr.OAuthToken | None = None):
    token = os.environ.get(FALLBACK_TOKEN_NAME)
    if oauth_token:
        token = oauth_token.token

    data_folder = DataFolder(results_uri, token=token)
    runs = [f.removeprefix("details/") for f in data_folder.list_files("details", recursive=False, include_directories=True) if f != "details"]
    if not runs:
        return {}, gr.update(choices=[], value=None)

    def process_run(run):
        run_files = [f.removeprefix(f"details/{run}/") for f in data_folder.list_files(f"details/{run}", recursive=False, include_directories=True) if f != f"details/{run}"]
        return run, run_files

    with ThreadPoolExecutor() as executor:
        results = list(executor.map(process_run, runs))

    checkpoints_dict = dict(results)
    return checkpoints_dict, gr.update(choices=list(checkpoints_dict), value=None)
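# Assumed results layout, implied by the listing calls above and below:
#   details/<run_name>/<checkpoint>/<task_name>_<timestamp>.parquet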
def update_checkpoints(selected_runs, checkpoints):
    if not selected_runs:
        return gr.update(choices=[], value=None)

    common_checkpoints = set(checkpoints[selected_runs[0]])
    for run in selected_runs[1:]:
        common_checkpoints.intersection_update(set(checkpoints[run]))

    common_checkpoints = sorted(common_checkpoints)
    return gr.update(choices=common_checkpoints, value=common_checkpoints[0] if common_checkpoints else None)
def select_runs_by_regex(runs, current_selected, regex_to_select):
    comp_re = re.compile(regex_to_select)
    return sorted(set((current_selected or []) + [run for run in runs if comp_re.fullmatch(run)]))
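# Example (hypothetical run names):
#   select_runs_by_regex(["run-fr-1", "run-en-1"], ["run-en-1"], ".*-fr-.*")
#   -> ["run-en-1", "run-fr-1"]  (existing selection is kept; the union is sorted)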
def select_runs_by_language(runs, current_selected, language):
    if language:
        return select_runs_by_regex(runs, current_selected, f".*-{language}-.*")
    return current_selected
def fetch_available_tasks(results_uri, runs_to_fetch, checkpoint) -> dict[str, dict[str, str]]:
    token = os.environ.get(FALLBACK_TOKEN_NAME)
    data_folder = DataFolder(results_uri, token=token)

    all_tasks = defaultdict(lambda: defaultdict(dict))
    for run in runs_to_fetch:
        try:
            files = data_folder.list_files(f"details/{run}/{checkpoint}", recursive=False)
            parquet_files = [f.split("/")[-1] for f in files if f.endswith('.parquet')]
            for full_filename in parquet_files:
                task_name, date_str = full_filename.replace('.parquet', '').rsplit('_', 1)
                date = datetime.strptime(date_str, '%Y-%m-%dT%H-%M-%S.%f')
                # Keep only the most recent file per (task, run)
                if run not in all_tasks[task_name] or date > all_tasks[task_name][run]['date']:
                    all_tasks[task_name][run] = {'filename': full_filename, 'date': date}
        except FileNotFoundError:
            print(f"Checkpoint not found for run: {run}")

    # Only surface tasks that every selected run has results for
    available_tasks = {
        task: {run: info['filename'] for run, info in runs.items()}
        for task, runs in all_tasks.items()
        if set(runs.keys()) == set(runs_to_fetch)
    }
    return available_tasks
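# The rsplit above assumes filenames of the form "<task_name>_<timestamp>.parquet",
# e.g. the hypothetical "mmlu_fr_2024-03-01T12-34-56.123456.parquet" -> task "mmlu_fr";
# task names may themselves contain underscores, hence splitting from the right.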
def fetch_run_results(results_uri, runs_to_fetch, checkpoint,
                      oauth_token: gr.OAuthToken | None = None, progress=gr.Progress()):
    task_runs_dict = fetch_available_tasks(results_uri, runs_to_fetch, checkpoint)
    task_names = list(task_runs_dict.keys())
    return gr.update(choices=task_names, value=task_names[0] if task_names else None), task_runs_dict
def render_table(df, selected_runs, metric_names):
    if df is None or not selected_runs or not metric_names:
        return None, "0"

    kept_metrics = [f"metric_{metric_name}_{run_name}" for run_name in selected_runs for metric_name in metric_names]
    other_metrics = [col for col in df.columns if col.startswith("metric_") and col not in kept_metrics]
    df = df.drop(columns=other_metrics)
    # widths = get_column_widths(df)
    df = shorten_column_names(df, selected_runs, metric_names)

    # Show at most 100 rows
    n_samples = len(df)
    df = df.sample(n=min(100, len(df)), random_state=42)
    return df, n_samples
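# Sampling with a fixed random_state keeps the displayed rows stable when the
# user toggles metrics for the same task.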
def get_column_widths(df):
    # Currently unused (see the commented-out call in render_table)
    column_widths = []
    for col in df.columns:
        if col == "full_prompt":
            column_widths.append("300px")
        elif col in ["choices", "gold"]:
            column_widths.append("250px")
        elif col.startswith("metric_"):
            column_widths.append("50px")
        else:
            column_widths.append("200px")  # Default width for other columns
    return column_widths
def shorten_column_names(df, run_names: list[str], metric_names: list[str]):
    """
    Turns metric columns (metric_{metric}_{run_name}) into {metric}_i
    and generation_{run_name} into generation_i, where i is the run's index.
    """
    # Aggregate all columns to rename, then rename in a single operation
    columns_to_rename = {}
    for idx, run_name in enumerate(run_names):
        for metric_name in metric_names:
            original_metric_column = f"metric_{metric_name}_{run_name}"
            if original_metric_column in df.columns:
                columns_to_rename[original_metric_column] = f"{metric_name}_{idx}"
        original_generation_column = f"generation_{run_name}"
        if original_generation_column in df.columns:
            columns_to_rename[original_generation_column] = f"generation_{idx}"
    df = df.rename(columns=columns_to_rename)
    return df
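# Example: with run_names=["run-a", "run-b"] (hypothetical) and metric_names=["acc"]:
#   "metric_acc_run-b" -> "acc_1", "generation_run-a" -> "generation_0"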
def load_task_data(results_uri, runs_to_fetch, checkpoint, task_name, tasks_files, progress=gr.Progress()):
    token = os.environ.get(FALLBACK_TOKEN_NAME)
    if not runs_to_fetch or not task_name:
        return None, gr.update(choices=[], value=None)

    # Cache downloaded parquet files locally to avoid re-fetching on every selection
    data_folder = DataFolder(f"filecache::{results_uri}", token=token, cache_storage="./results-cache")

    def fetch_run_file(run_to_fetch):
        file_path = f"details/{run_to_fetch}/{checkpoint}/{tasks_files[task_name][run_to_fetch]}"
        try:
            with data_folder.open(file_path, "rb") as f:
                df = pd.read_parquet(f)
            return df, run_to_fetch
        except FileNotFoundError:
            print(f"File not found: {tasks_files[task_name][run_to_fetch]}")
            return None, run_to_fetch

    with ThreadPoolExecutor() as pool:
        results = list(progress.tqdm(pool.map(fetch_run_file, runs_to_fetch), total=len(runs_to_fetch),
                                     desc="Fetching run data..."))

    dfs = [fix_df(df) for df, _ in results if df is not None]
    run_names = [run for df, run in results if df is not None]
    if not dfs:
        return None, gr.update(choices=[], value=None)

    task_type = get_task_type(dfs[0])

    def prepare_df(df, run_name, task_type):
        def get_choice_predictions(row, task_type):
            # Predictions are strings for some evals and lists of scores for others
            predictions = row['predictions']
            if task_type == "generative":
                return predictions
            if task_type == "multiple_choice":
                n_choices = len(row['choices'])
                return [pred[0] for pred in predictions[:n_choices]]
            if task_type == "mixed":
                return predictions[0]
            return predictions

        generative_columns = {
            f"generation_{run_name}": df.apply(partial(get_choice_predictions, task_type=task_type), axis=1)
        } if task_type in ("generative", "mixed") else {}

        prepared_df = pd.DataFrame({
            'full_prompt': df['full_prompt'],
            **generative_columns,
        })

        # Assume all rows share the same metric keys
        metrics = df['metrics']
        for metric_key in metrics.iloc[0].keys():
            prepared_df[f'metric_{metric_key}_{run_name}'] = [metric[metric_key] for metric in metrics]
        return prepared_df.set_index('full_prompt')

    def get_gold_label(row, task_type):
        if task_type == "generative":
            return row['gold']
        return row['gold_index']

    # Seed the combined DataFrame with the prompts, plus choices and gold where available
    combined_df = dfs[0][['full_prompt']].set_index('full_prompt')
    if task_type in ["multiple_choice", "mixed"]:
        combined_df["choices"] = dfs[0]["choices"].values
    combined_df['gold'] = dfs[0].apply(lambda row: get_gold_label(row, task_type), axis=1).values

    # Join every run's prepared DataFrame on the prompt
    for df, run_name in zip(dfs, run_names):
        prepared_df = prepare_df(df, run_name, task_type)
        combined_df = combined_df.join(prepared_df, how='outer')

    # Recover metric names from "metric_{metric}_{run_name}" columns, stripping the run
    # suffix explicitly since both metric and run names may contain underscores
    available_metrics = list(set(
        col.removeprefix("metric_").removesuffix(f"_{run_name}")
        for col in combined_df.columns
        for run_name in run_names
        if col.startswith("metric_") and col.endswith(f"_{run_name}")
    ))
    combined_df = combined_df.reset_index()

    chosen_metrics = available_metrics[:1]
    return combined_df, gr.update(choices=available_metrics, value=chosen_metrics)
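# The (DataFrame, dropdown update) pair feeds (results_df_full, metric_names) in the
# UI wiring below; render_table then narrows the full frame to the selected metrics.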
with gr.Blocks() as demo:
    runs_checkpoints = gr.State({})
    results_df_full = gr.State(None)
    tasks_files = gr.State({})
    login_button = gr.LoginButton(visible=False)
    results_uri = gr.Textbox(label="Results URI", value="s3://fineweb-multilingual-v1/evals/test/", visible=True)

    with gr.Column():
        gr.Markdown("# FineWeb experiments results explorer")
        with gr.Row():
            with gr.Column():
                select_by_regex_text = gr.Textbox(label="Regex to select runs",
                                                  value="ind_minhash(-CC-MAIN-|_)\\d{4}-\\d{2}-seed.*")
                select_by_regex_button = gr.Button("Select matching runs")
            with gr.Column():
                select_by_language = gr.Dropdown(choices=["ar", "fr", "ru", "hi", "th", "tr", "zh", "sw", "te"],
                                                 interactive=True, label="Select by language",
                                                 info="Choose a language to prefill the regex")
        selected_runs = gr.Dropdown(choices=[], interactive=True, multiselect=True, label="Selected runs")
        checkpoint = gr.Dropdown(choices=[], interactive=True, label="Checkpoint")
        fetch_res = gr.Button("Fetch results")
        task_name = gr.Dropdown(choices=[], interactive=True, label="Task name")
        metric_names = gr.Dropdown(choices=[], interactive=True, multiselect=True, label="Metric")
        results_df = gr.Dataframe(interactive=False, wrap=True)
        with gr.Row():
            with gr.Column():
                num_samples = gr.Text(interactive=False, label="# Samples")
    # Run selection
    gr.on(
        triggers=[results_uri.change],
        fn=fetch_repo_structure, inputs=[results_uri], outputs=[runs_checkpoints, selected_runs],
    )
    gr.on(
        triggers=[select_by_regex_button.click],
        fn=select_runs_by_regex,
        inputs=[runs_checkpoints, selected_runs, select_by_regex_text], outputs=[selected_runs]
    )
    gr.on(
        triggers=[select_by_language.change],
        fn=select_runs_by_language,
        inputs=[runs_checkpoints, selected_runs, select_by_language], outputs=[selected_runs]
    )

    # Update checkpoints based on selected runs
    gr.on(
        triggers=[selected_runs.change],
        fn=update_checkpoints,
        inputs=[selected_runs, runs_checkpoints],
        outputs=[checkpoint]
    )

    # Fetch available tasks, then load and render the first one
    gr.on(
        triggers=[fetch_res.click],
        fn=fetch_run_results,
        inputs=[results_uri, selected_runs, checkpoint],
        outputs=[task_name, tasks_files]
    ).then(
        fn=load_task_data,
        inputs=[results_uri, selected_runs, checkpoint, task_name, tasks_files],
        outputs=[results_df_full, metric_names]
    ).then(
        fn=render_table,
        inputs=[results_df_full, selected_runs, metric_names],
        outputs=[results_df, num_samples]
    )
    # Update results when the task name or metric selection changes
    gr.on(
        triggers=[task_name.input],
        fn=load_task_data,
        inputs=[results_uri, selected_runs, checkpoint, task_name, tasks_files],
        outputs=[results_df_full, metric_names]
    ).then(
        fn=render_table,
        inputs=[results_df_full, selected_runs, metric_names],
        outputs=[results_df, num_samples]
    )
    gr.on(
        triggers=[metric_names.input],
        fn=render_table,
        inputs=[results_df_full, selected_runs, metric_names],
        outputs=[results_df, num_samples]
    )

    demo.load(fn=fetch_repo_structure, inputs=[results_uri], outputs=[runs_checkpoints, selected_runs])

demo.launch()