# populate.py
import json
import os
import pandas as pd
from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> tuple[list, pd.DataFrame]:
    """Build the leaderboard DataFrame from the raw evaluation results on disk.

    Returns both the raw evaluation results and the filtered, sorted DataFrame.
    """
    print("before get_raw_eval_results")  # blz
    raw_data = get_raw_eval_results(results_path, requests_path)
    print(f"get_raw_eval_results {results_path} --- {requests_path}")  # blz
    # print(f"after get_raw_eval_results {raw_data}")  # blz
    all_data_json = [v.to_dict() for v in raw_data]
    # print(f"all_data_json {all_data_json}")  # blz
    df = pd.DataFrame.from_records(all_data_json)
    print(f"df {df}")  # blz

    # Print the name of the average field from AutoEvalColumn
    print("Name of the average field in AutoEvalColumn:", AutoEvalColumn.average.name)
    # Print DataFrame column names
    print("DataFrame column names:", df.columns)

    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[cols].round(decimals=2)
    print("after df things")  # blz

    # filter out if any of the benchmarks have not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    return raw_data, df
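
# Illustrative call (assumption: the paths and column lists below are
# placeholders, not this Space's real configuration, which normally lives in
# src.envs and src.display.utils):
#
#     raw_data, leaderboard_df = get_leaderboard_df(
#         results_path="./eval-results",   # hypothetical local path
#         requests_path="./eval-queue",    # hypothetical local path
#         cols=COLS,                       # assumed column list from src.display.utils
#         benchmark_cols=BENCHMARK_COLS,   # assumed benchmark column list
#     )
#     # leaderboard_df is sorted by the average score and rounded to 2 decimals.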


def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Read every request file under `save_path` and split the queue into finished, running and pending DataFrames."""
    print(f"get_evaluation_queue_df: Reading evaluation queue from {save_path}")
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)
            # print(f"get_evaluation_queue_df: Processing file {entry}")

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder
            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)
                # print(f"get_evaluation_queue_df: Processing file {sub_entry} in folder {entry}")

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]

    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    print("get_evaluation_queue_df: Evaluation dataframes created.")
    return df_finished[cols], df_running[cols], df_pending[cols]
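

# Minimal smoke-test sketch (assumption: the directory and column names below
# are stand-ins for demonstration; in the actual Space the queue path and
# column lists come from src.envs and src.display.utils).
if __name__ == "__main__":
    # Hypothetical local queue directory containing request JSON files.
    queue_path = "./eval-queue"  # assumption
    # Any subset of queue columns works; raw JSON keys are used as a stand-in.
    queue_cols = ["model", "revision", "status"]  # assumption

    finished_df, running_df, pending_df = get_evaluation_queue_df(queue_path, queue_cols)
    print(f"finished={len(finished_df)} running={len(running_df)} pending={len(pending_df)}")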