import re
import itertools
import json
import os
from concurrent.futures import ThreadPoolExecutor
from typing import get_args, Literal, List
import pandas as pd
import time
from collections import defaultdict, Counter
from datetime import datetime
import gradio as gr
from huggingface_hub import cached_assets_path
from viewer.agg_score_metrics import agg_score_metrics_dict_prob, custom_task_aggregate_groups, agg_score_metrics
from viewer.literals import BASLINE_RUN_NAME, CHECK_MISSING_DATAPOINTS_BUTTON_CLOSE_LABEL, \
CHECK_MISSING_DATAPOINTS_BUTTON_LABEL, \
FALLBACK_TOKEN_NAME, REFERENCE_RUNS
from viewer.utils import BASELINE_GROUPING_MODE, create_df_from_run_data, get_run_name_seed, RunInfo, TaskInfo, get_groupped_score, RunData, is_aggregate_column, is_baseline_run, is_reference_run, is_task_column, rescale_scores, select_runs, z_score_normalize
from datatrove.io import DataFolder
from viewer.task_type_mapping import get_task_type, TASK_TYPE
import tqdm as progress
NormalizationMode = Literal["No adjustment", "Rescale", "Z-norm"]
def fetch_run_results(results_uri, runs_to_fetch, steps_to_fetch,
oauth_token: gr.OAuthToken | None = None):
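"""
Fetch the evaluation result files for the selected runs and steps and parse them
into RunInfo objects. Returns a list of RunInfo objects (and None as the second
return value).
"""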
token = os.environ.get(FALLBACK_TOKEN_NAME)
if oauth_token:
token = oauth_token.token
if not runs_to_fetch:
return None, None
steps_to_fetch_list = steps_to_fetch.split(",")
data_folder = DataFolder(results_uri, token=token)
def fetch_run_files(run_to_fetch):
def filename_to_steps_timestamp(fn):
step, ts = fn.split("/results_")
dt = datetime.strptime(ts.split(".")[0], "%Y-%m-%dT%H-%M-%S")
return int(step), dt
run_path = f"results/{run_to_fetch}"
try:
eval_files = list(data_folder.list_files(run_path, recursive=True))
except FileNotFoundError:
return []
# Group files by step
step_files = defaultdict(list)
for fn in eval_files:
steps, ts = filename_to_steps_timestamp(os.path.relpath(fn, run_path))
step_files[steps].append((ts, fn))
# Sort files within each step by timestamp (newest first)
for step in step_files:
step_files[step].sort(reverse=True) # tuples are sorted element by element by default
# (run, steps, file_paths_in_repo)
results = []
for step, files in step_files.items():
if any(step_element_match(step, step_el) for step_el in steps_to_fetch_list):
results.append((run_to_fetch, step, files))
return results
def get_file_with_retry(data_folder: DataFolder, filename: str):
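"""
Read a file from the data folder, caching it locally under cached_assets_path and
retrying failed downloads with a backoff between attempts.
"""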
save_path = os.path.join(cached_assets_path(library_name="results-viewer",
namespace=data_folder.path), filename)
if os.path.exists(save_path):
with open(save_path, "rb") as f:
return f.read()
wait = 1.5
max_retries = 20
for attempt in range(max_retries):
try:
with data_folder.open(filename, "rb") as f:
data = f.read()
os.makedirs(os.path.dirname(save_path), exist_ok=True)
with open(save_path, "wb") as f:
f.write(data)
return data
except Exception as e:
print(f"Error downloading (attempt {attempt + 1}/{max_retries}): {e}")
if attempt == max_retries - 1:
raise e
time.sleep(min(wait ** attempt, 40))  # exponential backoff, capped at 40s
return None
def hot_fix_task_name(task_name: str):
"""
This is a hot fix as Hynek incorrectly named the average columns
"""
if task_name.endswith(":_average"):
return task_name.replace(":_average", ":_average|0")
return task_name
def load_run_file(run_info: tuple[str, str, list[tuple[datetime, str]]]):
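"""
Load all result files for a single (run, step) pair and merge them, keeping for
each task the metrics from the most recently written file that contains it.
"""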
run_to_fetch, step, file_data = run_info
aggregated_data = {}
latest_timestamps = {}
for timestamp, result_file in file_data:
# Use a separate name so we don't shadow the `file_data` list we are iterating over
file_bytes = get_file_with_retry(data_folder, result_file)
if not file_bytes:
raise Exception(f"File {result_file} not found")
json_data = json.loads(file_bytes)
for task, res in json_data["results"].items():
if task not in latest_timestamps or timestamp > latest_timestamps[task]:
latest_timestamps[task] = timestamp
# The aggregated tasks don't contain hashes, so we use dummy values to avoid conflicts
hashes = json_data["summary_tasks"].get(task, {}).get("hashes") or {
"hash_examples": "",
"hash_full_prompts": "",
"hash_input_tokens": "",
"hash_cont_tokens": ""
}
aggregated_data[task] = {
"metrics": res,
"hashes": hashes,
"filename": result_file
}
runname, seed = get_run_name_seed(run_to_fetch)
return RunInfo(runname, seed, int(step),
[TaskInfo(res["filename"], hot_fix_task_name(task), res["metrics"], res["hashes"]) for task, res in
aggregated_data.items()])
with ThreadPoolExecutor() as pool:
run_files = list(itertools.chain.from_iterable(
progress.tqdm(pool.map(fetch_run_files, runs_to_fetch), total=len(runs_to_fetch),
desc="Fetching datafiles...")))
run_data = list(
progress.tqdm(pool.map(load_run_file, run_files), total=len(run_files), desc="Loading evals data..."))
return run_data, None
def filter_run_list_for_language(all_runs, language):
if not language:
return []
return [
x for x in all_runs if f"-{language}-" in x
]
def fetch_run_list(results_uri, oauth_token: gr.OAuthToken | None = None, language=None):
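"""
List the run directories under "results/" and update the run dropdown with the
runs matching the selected language.
"""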
token = os.environ.get(FALLBACK_TOKEN_NAME)
if oauth_token:
token = oauth_token.token
data_folder = DataFolder(results_uri, token=token)
# Ignore the root directory
list_of_runs = [f.removeprefix("results/") for f in
data_folder.list_files(subdirectory="results", recursive=False, include_directories=True)
if f != "results"]
return list_of_runs, gr.update(choices=filter_run_list_for_language(list_of_runs, language), value=None)
def select_runs_by_regex(runs, current_selected, regex_to_select, lang=None):
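"""
Add to the current selection every run whose name fully matches the regex (with
"{lang}" replaced by `lang` when given) and return the sorted, de-duplicated list.
"""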
if lang and "{lang}" in regex_to_select:
regex_to_select = regex_to_select.replace("{lang}", lang)
comp_re = re.compile(regex_to_select)
return list(sorted(set((current_selected if current_selected else []) +
[run for run in runs if comp_re.fullmatch(run)])))
def add_baseline_rows(df: pd.DataFrame, baseline_runs: list[str], grouping_mode: BASELINE_GROUPING_MODE, baseline_name: str = BASLINE_RUN_NAME) -> pd.DataFrame:
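"""
Append rows for a synthetic baseline run (aggregated from `baseline_runs` according
to `grouping_mode`), interpolating values for steps where the baseline was not evaluated.
"""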
if len(baseline_runs) == 0:
return df
baseline_df = select_runs(df, runs_to_include=baseline_runs)
baseline_values = get_groupped_score(baseline_df, baseline_runs, grouping_mode)
# If the baseline values don't cover all available steps, interpolate the missing ones
unique_steps = df["steps"].unique()
baseline_values = baseline_values.set_index("steps").reindex(index=unique_steps).interpolate().reset_index()
runname, seed = get_run_name_seed(baseline_name)
baseline_values['runname'] = runname
baseline_values['seed'] = seed
# Add the baseline values to the dataframe
df = pd.concat([df, baseline_values], ignore_index=True)
return df
def normalize_scores(df: pd.DataFrame, normalization_runs: list[str], clip_scores: bool, normalization_mode: NormalizationMode, variability_window: int = 1):
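"""
Normalize all task score columns (excluding stderr and _average columns) against
the given normalization runs, using either Z-score normalization or rescaling,
and optionally clip the resulting scores at 0.
"""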
if not normalization_runs:
return df
cols_to_process = [col for col in df.columns if is_task_column(col) and not col.endswith('_stderr') and ":_average|" not in col]
if normalization_mode == "Z-norm":
df = z_score_normalize(df, normalization_runs, cols_to_process, variability_window)
elif normalization_mode == "Rescale":
df = rescale_scores(df, normalization_runs, cols_to_process)
if clip_scores:
df[cols_to_process] = df[cols_to_process].clip(lower=0)
return df
def recompute_averages(df: pd.DataFrame) -> pd.DataFrame:
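"""
Recompute every "task:_average|metric" column as the mean of the corresponding
per-subset columns (restricted to custom_task_aggregate_groups when defined).
"""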
average_columns = [col for col in df.columns if ":_average|" in col]
tasks_with_averages = set(x.split(":_average|")[0] for x in average_columns)
values_to_average = defaultdict(list)
for col in df.columns:
if (task := col.split(":")[0]) in tasks_with_averages and (task_subset := col.split(":")[1].split("|")[0]) and task_subset != "_average":
task_group = custom_task_aggregate_groups.get(task)
# Only include the subset in the average if it exists in the task group
if not task_group or task_subset in task_group:
values_to_average[(task, col.split("|")[-1])].append(col) # task name and metric
for (task, metric), cols in values_to_average.items():
df[f"{task}:_average|{metric}"] = df[cols].mean(axis=1)
return df
def select_runs_by_language(runs, current_selected, language, selected_cols, mcq_type):
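"""
When a language is selected, preselect its aggregate-score metric columns (when
available) and the matching gemma reference runs, and restrict the run choices
to that language.
"""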
new_runs = current_selected
if language:
if language in agg_score_metrics[mcq_type]:
selected_cols = agg_score_metrics[mcq_type][language]
new_runs = select_runs_by_regex(runs, current_selected, ".*gemma.*-(" + "|".join(REFERENCE_RUNS) + ")-{lang}-.*", language)
return gr.update(value=new_runs, choices=filter_run_list_for_language(runs, language)), gr.update(value=selected_cols if selected_cols else [], choices=selected_cols if selected_cols else [])
def step_element_match(step_to_check, step_element):
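"""
Check whether `step_to_check` matches one element of the steps filter:
"1000" matches that exact step, "1000-2000" matches the inclusive range,
"1000-2000%500" additionally requires (step - 1000) to be a multiple of 500,
and "%500" matches any step divisible by 500.
"""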
step_element = step_element.strip().replace(" ", "")
if "-" in step_element:
a, b = step_element.split("-")
c = None
if "%" in b:
b, c = b.split("%")
return (int(a) <= step_to_check <= int(b) and
(c is None or (step_to_check - int(a)) % int(c) == 0))
elif "%" in step_element:
return step_to_check % int(step_element[1:]) == 0
else:
return step_to_check == int(step_element)
def init_input_component_values(run_data: RunData, normalization_mode: NormalizationMode, language: str | None = None):
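"""
Initialize the metric, normalization-run and baseline-run selectors from the loaded
run data, defaulting the metric to the aggregate-score metrics when they are
available for the selected language.
"""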
task_metrics = set(metric for run in run_data for task in run.tasks for metric in task.metrics.keys())
initial_value = "agg_score_metrics" if language and language in agg_score_metrics_dict_prob else \
("acc_norm" if "acc_norm" in task_metrics else next(iter(task_metrics), None))
runs = set(run.full_name for run in run_data)
baseline_runs = [run for run in runs if is_baseline_run(run)]
return (gr.update(choices=["agg_score_metrics"] + sorted(task_metrics, key=lambda m: (m.endswith("_stderr"), m)), value=[initial_value]),
init_input_normalization_runs(run_data, normalization_mode),
gr.update(value=[] if not baseline_runs else [baseline_runs[0]], choices=sorted(runs)))
def init_input_normalization_runs(runs: RunData, normalization_mode: NormalizationMode):
run_names = set([run.full_name for run in runs])
if normalization_mode == "Z-norm":
reference_runs = [run for run in run_names if is_reference_run(run)]
return gr.update(value=reference_runs, choices=sorted(run_names))
elif normalization_mode == "Rescale":
baseline_runs = [run for run in run_names if is_baseline_run(run)]
return gr.update(value=baseline_runs, choices=sorted(run_names))
else:
return gr.update(value=[], choices=[])
def init_std_dev_runs(runs, current_val):
# Defaults to the run (name without the seed suffix) with the highest seed count, requiring at least 2 seeds
value = current_val or "-"
seed_counter = Counter()
for run in runs or []:
seed_counter[run.split("-seed-")[0]] += 1
if seed_counter[value] <= 1: # can only select runs with at least 2 seeds
top_val, top_count = seed_counter.most_common(n=1)[0] if seed_counter else (None, 0)
value = top_val if top_count > 1 else "-"
return gr.update(value=value, choices=["-"] + sorted([val for val, count in seed_counter.items() if count > 1]))
def update_dropdown_choices(selected_choices, possible_choices):
selected_choices = [choice for choice in selected_choices if choice in possible_choices]
return gr.update(choices=possible_choices, value=selected_choices)
def render_results_table(df: pd.DataFrame, metrics, task_avg, normalization_runs: list[str], baseline_runs: list[str], baseline_mode: BASELINE_GROUPING_MODE, clip_scores: bool,
normalization_mode: NormalizationMode, aggregate_score_cols: list[str], language: str, variability_window: int = 1, mcq_type = "prob"):
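"""
Build the results table shown in the UI: add baseline rows, normalize scores,
recompute averages, drop the columns hidden by the metric/averages selection,
recompute the aggregate scores and return the dataframe together with the updated
column selectors.
"""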
# if not run_data:
# return None, gr.update(), gr.update()
# df = create_df_from_run_data(run_data)
# Create baseline rows
df = add_baseline_rows(df, baseline_runs, baseline_mode)
# Normalize scores first so that the _average columns are recomputed from the normalized values
df = normalize_scores(df, normalization_runs=normalization_runs, clip_scores=clip_scores, normalization_mode=normalization_mode, variability_window=variability_window)
df = recompute_averages(df)
# Remove baseline runs from the main DataFrame
df = select_runs(df, runs_to_exclude=baseline_runs)
to_drop = []
for col in df.columns:
if is_task_column(col):
# part of the agg score metrics
if "agg_score_metrics" in metrics and language in agg_score_metrics[mcq_type] and col in agg_score_metrics[mcq_type][language]:
continue
task, metric = col.split("/")
# If no metrics are selected, show all metrics
if ((metric not in metrics and len(metrics) > 0) or
(":_average|" in task and "show averages" not in task_avg) or
("|" in task and ":_average|" not in task and ":" in task.split("|")[1] and "show expanded" not in task_avg)):
to_drop.append(col)
if to_drop:
df = df.drop(columns=to_drop)
df.sort_values(by=["runname", "seed", "steps"], inplace=True)
df = update_agg_score(df, aggregate_score_cols)
aggregate_columns = [col for col in df.columns if is_aggregate_column(col)]
# Task metric columns are of the form task/metric
task_columns = [col for col in df.columns if is_task_column(col)]
return df, update_dropdown_choices(aggregate_score_cols, task_columns), gr.update(value=aggregate_columns[0], choices=aggregate_columns + task_columns)
def get_type_tasks_dict(tasks: list[str]) -> dict[TASK_TYPE, list[str]]:
"""
Creates a dictionary mapping task types to lists of task names.
Args:
tasks (list[str]): List of task names.
Returns:
dict[TASK_TYPE, list[str]]: Dictionary with task types as keys and lists of task names as values.
"""
task_type_dict: dict[TASK_TYPE, list[str]] = defaultdict(list)
for task in tasks:
task_type = get_task_type(task)
if not task_type:
raise ValueError(f"Task {task} has no task type")
task_type_dict[task_type].append(task)
return task_type_dict
def update_agg_score(df: pd.DataFrame, agg_score_columns: list[str]) -> pd.DataFrame:
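"""
Recompute the aggregate score columns: one "agg_score_<task_type>" per task type,
"agg_score_macro" as the mean of the per-type aggregates, and "agg_score_micro"
as the mean over all selected task columns.
"""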
if not agg_score_columns or df is None or "steps" not in df:
return df
new_df = df.copy()
cols_to_avg = [col for col in agg_score_columns if col in new_df.columns]
if cols_to_avg:
# Calculate task type aggregates
task_type_dict = get_type_tasks_dict(cols_to_avg)
# Average the tasks within each task type
for task_type, tasks in task_type_dict.items():
new_df[f'agg_score_{task_type}'] = new_df[tasks].mean(axis=1)
# Calculate agg_score_macro as the mean of the per-task-type aggregates
new_df['agg_score_macro'] = new_df[[f'agg_score_{task_type}' for task_type in task_type_dict.keys()]].mean(axis=1)
# Calculate agg_score_micro as the mean over all selected task columns
new_df['agg_score_micro'] = new_df[cols_to_avg].mean(axis=1)
return new_df
def export_results_csv(df):
df.to_csv("output.csv", index=False)
return gr.update(value="output.csv", visible=True)
def check_missing_datapoints(runs, steps_to_check, run_data: RunData, check_missing_checkpoints):
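"""
Expand the steps filter into the concrete set of expected steps and report, per
run and seed, the steps for which no evaluation results were loaded.
"""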
if not runs or check_missing_checkpoints == CHECK_MISSING_DATAPOINTS_BUTTON_CLOSE_LABEL or not run_data or not steps_to_check:
return gr.Json(value={}, visible=False), gr.Button(value=CHECK_MISSING_DATAPOINTS_BUTTON_LABEL)
max_step = max(run.step for run in run_data)
steps_set = set()
for step_elem in steps_to_check.split(","):
step_element = step_elem.strip().replace(" ", "")
if "-" in step_element:
a, b = step_element.split("-")
c = None
if "%" in b:
b, c = b.split("%")
steps_set.update(range(int(a), int(b) + 1, int(c) if c else 1))
elif "%" in step_element:
steps_set.update(range(0, max_step + 1, int(step_element[1:])))
else:
steps_set.add(int(step_element))
existing_evals = {(run.name, run.seed, run.step) for run in run_data}
missing_evals = defaultdict(dict)
for run in runs:
runname, seed = get_run_name_seed(run)
missing_steps = [
step for step in sorted(steps_set) if (runname, seed, step) not in existing_evals
]
if missing_steps:
missing_evals[runname][str(seed)] = missing_steps
return gr.Json(value=missing_evals, visible=True), gr.Button(value=CHECK_MISSING_DATAPOINTS_BUTTON_CLOSE_LABEL)