from collections import defaultdict

import gradio as gr
import numpy as np
import pandas as pd
from scipy import stats
from tqdm import tqdm

from viewer.literals import BASLINE_RUN_NAME
from viewer.results import add_baseline_rows
from viewer.utils import (BASELINE_GROUPING_MODE, RunData, create_df_from_run_data,
                          is_aggregate_column, is_task_column, select_runs)


def fast_kendall_tau_a(x, y):
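    """Kendall's tau-a between two rankings, computed with vectorized
    pairwise comparisons instead of an explicit double loop.

    Tau-a normalizes by the total pair count n*(n-1)/2 and applies no tie
    correction; tied pairs count toward neither concordant nor discordant.
    Returns 0.0 for inputs with fewer than two elements.
    """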
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    n = len(x)
    if n <= 1:
        return 0.0

    # Matrices of pairwise differences; entry (i, j) holds x[i] - x[j]
    x_diff = x[:, np.newaxis] - x
    y_diff = y[:, np.newaxis] - y

    # Inspect each unordered pair exactly once via the strict upper triangle
    upper = np.triu(np.ones((n, n), dtype=bool), k=1)

    # Concordant pairs move in the same direction, discordant in opposite
    # directions; tied pairs (product == 0) count toward neither
    products = x_diff * y_diff
    concordant = np.sum((products > 0) & upper)
    discordant = np.sum((products < 0) & upper)

    # Tau-a: normalize by the total number of pairs, with no tie correction
    return (concordant - discordant) / (n * (n - 1) / 2)


def calculate_statistics(df: pd.DataFrame, std_dev_run_name: str, column_name: str,
                         score_s: int, score_window: int, baseline_run: str = BASLINE_RUN_NAME) -> dict[str, float]:
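    """Compute stability statistics for a single task or aggregate column.

    Returns a dict with: the average Spearman correlation of score vs. steps
    per run (monotonicity), the average Kendall tau-a between consecutive
    steps in the second half of training (ordering consistency), std-dev
    summaries across seeds of ``std_dev_run_name`` over the last
    ``score_window`` steps up to ``score_s`` (variability), a signal-to-noise
    ratio, and the maximum distance to the random baseline in std devs
    (randomness). Values that cannot be computed are NaN.
    """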
    if len(df) == 0 or not (is_task_column(column_name) or is_aggregate_column(column_name)) or column_name not in df.columns:
        return defaultdict(lambda: np.nan)
    
    # Separate out the baseline run; it is used later for the randomness check
    baseline_df = select_runs(df, runs_to_include=[baseline_run])

    df = select_runs(df, runs_to_exclude=[baseline_run])
    df = df[['runname', 'seed', 'steps', column_name]]

    # Average over seeds, then pivot to a (steps x runname) score matrix,
    # interpolating missing checkpoints linearly
    mean_over_seeds = df.groupby(['runname', 'steps'], as_index=False)[column_name].mean()
    pivot_df = mean_over_seeds.pivot(index='steps', columns='runname', values=column_name).interpolate(method='linear')

    # 1. Monotonicity: Spearman correlation of score against steps, per run
    spearman_corrs = [stats.spearmanr(pivot_df[col].index, pivot_df[col], nan_policy="omit")[0] for col in
                      pivot_df.columns if len(np.unique(pivot_df[col])) > 1]
    avg_spearman = np.mean([c for c in spearman_corrs if not np.isnan(c)]) if not all(
        map(np.isnan, spearman_corrs)) else np.nan

    # 2. Ordering consistency: average Kendall tau-a between the run rankings
    # at consecutive steps, restricted to the second half of training
    last_half = len(pivot_df.index) // 2
    step_pairs = list(zip(pivot_df.index[:-1], pivot_df.index[1:]))[last_half:]
    kendall_tau_a_values = [fast_kendall_tau_a(pivot_df.loc[s1], pivot_df.loc[s2]) for s1, s2 in step_pairs]
    avg_kendall_tau_a = np.mean(kendall_tau_a_values) if kendall_tau_a_values else np.nan

    # 3. Variability: std dev across seeds of the designated run
    mean_std, min_std, min_std_step, max_std, max_std_step, snr, max_n_std = (
        np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)
    if std_dev_run_name and std_dev_run_name != "-":
        grouped_std_runs = df[(df['runname'] == std_dev_run_name) & (df['steps'] != 0)] \
            .groupby('steps')[column_name]

        means = grouped_std_runs.mean()
        stds = grouped_std_runs.std()

        # Restrict to the last `score_window` steps at or before `score_s`
        window_steps = means.index[means.index <= score_s][-score_window:]
        pivot_df_window = pivot_df.loc[window_steps]

        stds_window = stds[window_steps]

        if not stds_window.empty:
            max_std, max_std_step = stds_window.max(), stds_window.idxmax()
            min_std, min_std_step = stds_window.min(), stds_window.idxmin()
            mean_std = stds_window.mean()

        # Per-run scores at the evaluation step (assumes `score_s` is an actual step)
        score = pivot_df_window.loc[score_s]

        full_mean_std = stds.mean()
        if full_mean_std != 0.0 and not np.isnan(full_mean_std):
            snr = score.mean() / full_mean_std

        if not baseline_df.empty and not np.isnan(mean_std) and mean_std != 0:
            # 4. Randomness: distance of the best run from the random baseline,
            # in units of std devs. Align the baseline to the window steps via
            # linear interpolation; note `.unique()` returns an ndarray, so it
            # must be sorted with np.sort (ndarray.sort() sorts in place and
            # returns None)
            all_steps = np.sort(
                pd.concat([baseline_df["steps"], pivot_df_window.index.to_series()]).unique())
            random_baseline_scores = baseline_df.set_index("steps")[column_name].reindex(
                all_steps).interpolate(method='linear')

            baseline_score = random_baseline_scores.loc[score_s]
            max_n_std = (score - baseline_score).max() / mean_std

    return {
        "avg_spearman": float(avg_spearman),
        "avg_kendall_tau_a": float(avg_kendall_tau_a),
        "max_std": float(max_std),
        "max_std_step": float(max_std_step),
        "min_std": float(min_std),
        "min_std_step": float(min_std_step),
        "mean_std": float(mean_std),
        "avg_snr": float(snr),
        "max_n_std": float(max_n_std)
    }


def format_statistics(stat_values: dict[str, float]) -> tuple[str, str, str, str]:
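    """Format the computed statistics as four Markdown snippets for the UI:
    monotonicity, variability, randomness and ordering. Missing values
    render as NaN."""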
    if not stat_values:
        stat_values = defaultdict(lambda: np.nan)
    monotonicity_md_text = f"Average=**{stat_values['avg_spearman']:.3f}**"
    variability_md_text = f"""SNR=**{stat_values['avg_snr']:.2f}**; Mean std_dev=**{stat_values['mean_std']:.5f}**;
    Min std_dev=**{stat_values['min_std']:.3f} (step {stat_values['min_std_step']:.0f})**;
    Max std_dev=**{stat_values['max_std']:.3f} (step {stat_values['max_std_step']:.0f})**"""
    randomness_md_text = (f"Maximum distance of final checkpoint to random baseline="
                          f"**{stat_values['max_n_std']:.2f}** std_devs")
    ordering_md_text = (f"Average Kendall tau-a between second half of consecutive steps="
                        f"**{stat_values['avg_kendall_tau_a']:.3f}**")

    return monotonicity_md_text, variability_md_text, randomness_md_text, ordering_md_text


def smooth_tasks(df: pd.DataFrame, rolling_window: int) -> pd.DataFrame:
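    """Smooth every task and aggregate column with a rolling mean of size
    ``rolling_window``, computed per (runname, seed) group. Returns the
    frame unchanged if it is empty, lacks a ``steps`` column, or the
    window is not positive."""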
    if df.empty or "steps" not in df.columns:
        return df

    task_or_agg_columns = [c for c in df.columns if is_aggregate_column(c) or is_task_column(c)]
    if rolling_window > 0:
        smoothed_df = df.sort_values(by=["runname", "seed", "steps"])
        smoothed_df = smoothed_df.groupby(['runname', 'seed'])[task_or_agg_columns].rolling(rolling_window, min_periods=1).mean().reset_index(level=[0, 1])
        # Column assignment aligns on the original row index, so the steps
        # stay matched to their (now smoothed) rows despite the sort
        smoothed_df["steps"] = df["steps"]
        df = smoothed_df
    return df

def generate_and_export_stats(run_data: RunData, std_dev_run_name: str, baseline_runs: list[str], baseline_mode: BASELINE_GROUPING_MODE, score_s: int, baseline_window: int) -> gr.File:
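    """Compute statistics for every task/metric pair in ``run_data`` and
    export them as ``statistics.csv``, returning a visible gr.File that
    points at it (or a hidden one when there is no run data)."""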
    if not run_data:
        return gr.File(value=None, visible=False)

    stats_data: list[dict] = []

    task_metrics = set(f"{task_info.name}/{metric}" for run in run_data for task_info in run.tasks
                       for metric in task_info.metrics)

    df = create_df_from_run_data(run_data)

    df = add_baseline_rows(df, baseline_runs, baseline_mode)

    df = select_runs(df, runs_to_exclude=baseline_runs)

    # Smooth the task curves with a fixed rolling window of 3 before scoring
    df = smooth_tasks(df, 3)

    for column in tqdm(task_metrics):
        if not is_task_column(column):
            continue

        task_stats = calculate_statistics(df, std_dev_run_name, column, score_s, baseline_window)

        task, metric = column.split("/", maxsplit=1)

        # Add to stats_data
        stats_data.append({
            "task": task,
            "metric": metric.removesuffix("|0/"),
            **task_stats
        })

    # Create DataFrame and export to CSV
    stats_df = pd.DataFrame(stats_data)
    stats_df.to_csv("statistics.csv", index=False)

    return gr.File(value="statistics.csv", visible=True)
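

if __name__ == "__main__":
    # Minimal sanity check for fast_kendall_tau_a, not part of the viewer app.
    # On tie-free data tau-a coincides with the tau-b that scipy.stats.kendalltau
    # computes, so the two implementations should agree.
    rng = np.random.default_rng(0)
    x = rng.permutation(50).astype(float)
    y = rng.permutation(50).astype(float)
    ours = fast_kendall_tau_a(x, y)
    reference = stats.kendalltau(x, y)[0]
    print(f"fast_kendall_tau_a={ours:.6f}, scipy tau-b={reference:.6f}")
    assert abs(ours - reference) < 1e-9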