from collections import defaultdict

import gradio as gr
import numpy as np
import pandas as pd
from scipy import stats
from tqdm import tqdm

from viewer.literals import BASLINE_RUN_NAME
from viewer.results import NormalizationMode, add_baseline_rows
from viewer.utils import (
    BASELINE_GROUPING_MODE,
    RunData,
    create_df_from_run_data,
    get_groupped_score,
    get_run_name_seed,
    is_aggregate_column,
    is_task_column,
    select_runs,
)


def fast_kendall_tau_a(x, y):
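    """Compute Kendall's tau-a between two rankings using vectorized pairwise
    comparisons.

    Tied pairs are counted as discordant, so ties lower the score.
    E.g. fast_kendall_tau_a([1, 2, 3], [1, 3, 2]) == (2 - 1) / 3 ≈ 0.333.
    """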
    x = np.array(x)
    y = np.array(y)
    n = len(x)
    if n <= 1:
        return 0.0
    # Matrices of pairwise differences
    x_diff = x[:, np.newaxis] - x
    y_diff = y[:, np.newaxis] - y
    # Consider each unordered pair exactly once (strict upper triangle)
    upper = np.triu(np.ones((n, n), dtype=bool), k=1)
    # Concordant pairs move in the same direction; ties count as discordant
    concordant = np.sum((x_diff * y_diff > 0) & upper)
    discordant = np.sum((x_diff * y_diff <= 0) & upper)
    # Tau-a normalizes by the total number of pairs
    tau_a = (concordant - discordant) / (n * (n - 1) / 2)
    return tau_a


def calculate_statistics(df: pd.DataFrame, std_dev_run_name: str, column_name: str,
                         score_s: int, score_window: int,
                         baseline_run: str = BASLINE_RUN_NAME) -> dict[str, float]:
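    """Compute per-task training statistics for `column_name`: monotonicity
    (Spearman correlation of score vs. steps), ordering consistency (Kendall
    tau-a between consecutive steps), variability (std dev across seeds of
    `std_dev_run_name` within the scoring window), and the distance of the
    final score to the random baseline in std devs. Statistics that cannot
    be computed are returned as NaN.
    """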
    if (len(df) == 0 or not (is_task_column(column_name) or is_aggregate_column(column_name))
            or column_name not in df.columns):
        return defaultdict(lambda: np.nan)
    # Split baseline rows from the runs under evaluation, keep only relevant columns
    baseline_df = select_runs(df, runs_to_include=[baseline_run])
    df = select_runs(df, runs_to_exclude=[baseline_run])
    df = df[['runname', 'seed', 'steps', column_name]]
    # Average over seeds, then pivot to a steps x runs matrix
    mean_over_seeds = df.groupby(['runname', 'steps'], as_index=False)[column_name].mean()
    pivot_df = mean_over_seeds.pivot(index='steps', columns='runname', values=column_name).interpolate(method='linear')
    # 1. Monotonicity: Spearman correlation between steps and scores, per run
    spearman_corrs = [
        stats.spearmanr(pivot_df[col].index, pivot_df[col], nan_policy="omit")[0]
        for col in pivot_df.columns if len(np.unique(pivot_df[col])) > 1
    ]
    avg_spearman = (np.mean([c for c in spearman_corrs if not np.isnan(c)])
                    if not all(map(np.isnan, spearman_corrs)) else np.nan)
    # 2. Ordering consistency: average Kendall tau-a between consecutive steps,
    # taken over the second half of training only
    second_half_start = len(pivot_df.index) // 2
    step_pairs = list(zip(pivot_df.index[:-1], pivot_df.index[1:]))[second_half_start:]
    kendall_tau_a_values = [fast_kendall_tau_a(pivot_df.loc[s1], pivot_df.loc[s2]) for s1, s2 in step_pairs]
    avg_kendall_tau_a = np.mean(kendall_tau_a_values) if kendall_tau_a_values else np.nan
    # 3. Variability: std dev across seeds of the chosen run
    mean_std, min_std, min_std_step, max_std, max_std_step, snr, max_n_std = (
        np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)
    if std_dev_run_name and std_dev_run_name != "-":
        grouped_std_runs = df[(df['runname'] == std_dev_run_name) & (df['steps'] != 0)] \
            .groupby('steps')[column_name]
        means = grouped_std_runs.mean()
        stds = grouped_std_runs.std()
        window_steps = means.index[means.index <= score_s][-score_window:]
        pivot_df_window = pivot_df.loc[window_steps]
        stds_window = stds[window_steps]
        if not stds_window.empty:
            max_std, max_std_step = stds_window.max(), stds_window.index[stds_window.argmax()]
            min_std, min_std_step = stds_window.min(), stds_window.index[stds_window.argmin()]
            mean_std = stds_window.mean()
            score = pivot_df_window.loc[score_s]
            full_mean_std = stds.mean()
            if full_mean_std != 0.0 and not np.isnan(full_mean_std):
                snr = score.mean() / full_mean_std
            if not baseline_df.empty and not np.isnan(mean_std) and mean_std != 0:
                # 4. Randomness: distance of the final score to the random baseline
                random_baseline_scores = baseline_df.set_index("steps")[column_name].reindex(
                    np.sort(pd.concat([baseline_df["steps"], pivot_df_window.index.to_series()]).unique())
                ).interpolate(method='linear')
                baseline_score = random_baseline_scores.loc[score_s]
                max_n_std = (score - baseline_score).max() / mean_std
    # # Standard Error, Mean, and Max (currently unused)
    # summary_stats = [(df[col].std() / np.sqrt(df[col].count()),
    #                   df[col].mean(),
    #                   df[col].max()) for col in df.columns if df[col].count() > 1]
    # avg_stderr, avg_mean, max_max = np.nan, np.nan, np.nan
    # if summary_stats:
    #     avg_stderr = np.mean([s for s, _, _ in summary_stats])
    #     avg_mean = np.mean([m for _, m, _ in summary_stats])
    #     max_max = np.max([mx for _, _, mx in summary_stats])
    return {
        "avg_spearman": float(avg_spearman),
        "avg_kendall_tau_a": float(avg_kendall_tau_a),
        "max_std": float(max_std),
        "max_std_step": float(max_std_step),
        "min_std": float(min_std),
        "min_std_step": float(min_std_step),
        "mean_std": float(mean_std),
        "avg_snr": float(snr),
        "max_n_std": float(max_n_std)
    }


def format_statistics(stats: dict[str, float]) -> tuple[str, str, str, str]:
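    """Format the computed statistics into Markdown strings for the
    monotonicity, variability, randomness, and ordering displays.
    """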
    if not stats:
        stats = defaultdict(lambda: np.nan)
    monotonicity_md_text = f"Average=**{stats['avg_spearman']:.3f}**"
    variability_md_text = f"""SNR=**{stats['avg_snr']:.2f}**; Mean std_dev=**{stats['mean_std']:.5f}**;
Min std_dev=**{stats['min_std']:.3f} (step {stats['min_std_step']})**;
Max std_dev=**{stats['max_std']:.3f} (step {stats['max_std_step']})**"""
    randomness_md_text = (f"Maximum distance of final checkpoint to random baseline="
                          f"**{stats['max_n_std']:.2f}** std_devs")
    ordering_md_text = (f"Average Kendall tau-a between second half of consecutive steps="
                        f"**{stats['avg_kendall_tau_a']:.3f}**")
    return monotonicity_md_text, variability_md_text, randomness_md_text, ordering_md_text


def smooth_tasks(df: pd.DataFrame, rolling_window: int) -> pd.DataFrame:
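    """Smooth every task/aggregate column with a rolling mean of size
    `rolling_window`, computed per run and seed; a window of 0 disables
    smoothing.
    """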
    if df.empty or "steps" not in df.columns:
        return df
    task_or_agg_columns = [c for c in df.columns if is_aggregate_column(c) or is_task_column(c)]
    if rolling_window > 0:
        smoothed_df = df.sort_values(by=["runname", "seed", "steps"])
        smoothed_df = (smoothed_df.groupby(['runname', 'seed'])[task_or_agg_columns]
                       .rolling(rolling_window, min_periods=1).mean()
                       .reset_index(level=[0, 1]))
        # The original row index survives the rolling mean, so this assignment
        # aligns the steps column back to the right rows
        smoothed_df["steps"] = df["steps"]
        df = smoothed_df
    return df


def generate_and_export_stats(run_data: RunData, std_dev_run_name: str, baseline_runs: list[str],
                              baseline_mode: BASELINE_GROUPING_MODE, score_s: int,
                              baseline_window: int) -> gr.File:
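    """Compute statistics for every task/metric column across all runs and
    export them to `statistics.csv`, returning a Gradio file component
    pointing at the CSV.
    """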
    if not run_data:
        return gr.File(value=None, visible=False)
    stats_data: list[dict] = []
    task_metrics = {f"{task_info.name}/{metric}"
                    for run in run_data for task_info in run.tasks
                    for metric in task_info.metrics}
    df = create_df_from_run_data(run_data)
    df = add_baseline_rows(df, baseline_runs, baseline_mode)
    df = select_runs(df, runs_to_exclude=baseline_runs)
    df = smooth_tasks(df, 3)
    # Iterate lazily so the progress bar advances with the actual computation
    for column in tqdm(task_metrics):
        if not is_task_column(column):
            continue
        # Calculate statistics for this task/metric column
        task_stats = calculate_statistics(df, std_dev_run_name, column, score_s, baseline_window)
        task, metric = column.split("/")
        stats_data.append({
            "task": task,
            "metric": metric.removesuffix("|0/"),
            **task_stats
        })
    # Create DataFrame and export to CSV
    stats_df = pd.DataFrame(stats_data)
    stats_df.to_csv("statistics.csv", index=False)
    return gr.File(value="statistics.csv", visible=True)