Aaron Mueller committed
Commit 59a9012 · Parent(s): 891a1ea

remove track

Files changed (3):
  1. src/envs.py +1 -1
  2. src/leaderboard/read_evals.py +1 -1
  3. src/populate.py +2 -2
src/envs.py CHANGED
@@ -22,4 +22,4 @@ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
 EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
 EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
 
-API = HfApi(token=TOKEN)
+API = HfApi(token=TOKEN)

(The removed and re-added lines are textually identical, so this hunk is presumably a whitespace-only change, e.g. adding a trailing newline at end of file.)
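For context, a minimal sketch of what src/envs.py around this hunk presumably looks like — where TOKEN comes from is an assumption, since the diff only shows that the HfApi client line itself is unchanged:

# Hypothetical reconstruction of the relevant part of src/envs.py.
import os
from huggingface_hub import HfApi

TOKEN = os.environ.get("HF_TOKEN")        # assumption: token read from the environment
CACHE_PATH = os.getenv("HF_HOME", ".")    # assumption: cache root

EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

# Authenticated Hub client shared by the rest of the app
API = HfApi(token=TOKEN)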
src/leaderboard/read_evals.py CHANGED
@@ -134,7 +134,7 @@ def get_request_file_for_model(requests_path, model_name, precision):
     return request_file
 
 
-def get_raw_eval_results(results_path: str, requests_path: str, track: str) -> list[EvalResult]:
+def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
     """From the path of the results folder root, extract all needed info for results"""
     model_result_filepaths = []
 
src/populate.py CHANGED
@@ -10,7 +10,7 @@ from src.leaderboard.read_evals import get_raw_eval_results
 
 
 def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
     """Creates a dataframe from all the individual experiment results"""
-    raw_data = get_raw_eval_results(results_path, requests_path, track)
+    raw_data = get_raw_eval_results(results_path, requests_path)
     all_data_json = [v.to_dict() for v in raw_data]
 
     df = pd.DataFrame.from_records(all_data_json)
@@ -54,4 +54,4 @@ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
     df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
     df_running = pd.DataFrame.from_records(running_list, columns=cols)
     df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-    return df_finished[cols], df_running[cols], df_pending[cols]
+    return df_finished[cols], df_running[cols], df_pending[cols]

(In the second hunk the removed and re-added return lines are textually identical, so it is presumably a whitespace-only change, e.g. a trailing newline at end of file.)
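The first hunk is the substance of the commit: track was never a parameter of get_leaderboard_df, so unless it happened to be defined at module scope, the old forwarding call would raise NameError at runtime; dropping the argument fixes that. A hedged sketch of the fixed flow — EVAL_RESULTS_PATH comes from src/envs.py above, while EVAL_REQUESTS_PATH, COLS, and BENCHMARK_COLS are assumed names used only for this illustration:

from src.envs import EVAL_RESULTS_PATH
from src.populate import get_leaderboard_df

EVAL_REQUESTS_PATH = "eval-queue"      # assumption: defined in src/envs.py like its -bk sibling
COLS = ["model", "average"]            # assumption: display columns
BENCHMARK_COLS = ["average"]           # assumption: benchmark score columns

# Builds one DataFrame from every per-model result file under EVAL_RESULTS_PATH.
df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
print(df.head())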