Omartificial-Intelligence-Space committed: update populate

src/populate.py CHANGED (+4, -57)
@@ -3,10 +3,8 @@
 import os
 import pandas as pd
 import json
-import random
 
-from src.display.utils import COLUMNS, EVAL_COLS
-from src.envs import EVAL_RESULTS_PATH, FIXED_QUESTIONS_FILE  # Ensure FIXED_QUESTIONS_FILE is defined in envs.py
+from src.display.utils import COLUMNS, EVAL_COLS
 
 def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
     # Initialize an empty DataFrame
@@ -14,16 +12,12 @@ def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
 
     # Load evaluation results from JSON files
     if os.path.exists(eval_results_path):
-        result_files = [
-            os.path.join(eval_results_path, f)
-            for f in os.listdir(eval_results_path)
-            if f.endswith('.json')
-        ]
+        result_files = [os.path.join(eval_results_path, f) for f in os.listdir(eval_results_path) if f.endswith('.json')]
         data_list = []
         for file in result_files:
             with open(file, 'r') as f:
                 data = json.load(f)
-                # Flatten the JSON structure
+                # Flatten the JSON structure if needed
                 flattened_data = {}
                 flattened_data.update(data.get('config', {}))
                 flattened_data.update(data.get('results', {}))
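For context, the flattening step merges each result file's top-level 'config' and 'results' objects into one flat record. A minimal standalone sketch, assuming a result file shaped the way this loop expects (the file name and field values are hypothetical):

import json

# Hypothetical result file content:
# {"config": {"model": "my-model"}, "results": {"accuracy": 0.82}}
with open("example_result.json", "r") as f:
    data = json.load(f)

flattened_data = {}
flattened_data.update(data.get("config", {}))   # e.g. {"model": "my-model"}
flattened_data.update(data.get("results", {}))  # adds {"accuracy": 0.82}
# flattened_data == {"model": "my-model", "accuracy": 0.82}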
@@ -31,10 +25,6 @@ def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
     if data_list:
         df = pd.DataFrame(data_list)
 
-        # Rename 'model_name' to 'model' if 'model' is missing
-        if 'model' not in df.columns and 'model_name' in df.columns:
-            df.rename(columns={'model_name': 'model'}, inplace=True)
-
         # Ensure DataFrame has all columns
         for col in cols:
             if col not in df.columns:
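The surviving "ensure DataFrame has all columns" loop is cut off by the diff context. A minimal sketch of the usual pattern; the column names and the None fill value are assumptions, since the loop body is not shown:

import pandas as pd

df = pd.DataFrame([{"model": "my-model", "accuracy": 0.82}])
cols = ["model", "accuracy", "license"]  # hypothetical expected columns

for col in cols:
    if col not in df.columns:
        df[col] = None  # assumed fill value; the diff truncates before the body
# df now also has a 'license' column of None values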
@@ -54,11 +44,7 @@ def get_evaluation_queue_df(eval_requests_path, eval_cols):
 
     # Load evaluation requests from JSON files
     if os.path.exists(eval_requests_path):
-        request_files = [
-            os.path.join(eval_requests_path, f)
-            for f in os.listdir(eval_requests_path)
-            if f.endswith('.json')
-        ]
+        request_files = [os.path.join(eval_requests_path, f) for f in os.listdir(eval_requests_path) if f.endswith('.json')]
         data_list = []
         for file in request_files:
             with open(file, 'r') as f:
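This hunk and the earlier result_files hunk collapse the same multi-line list comprehension into one line; behavior is unchanged. A runnable check of the pattern against a scratch directory (paths here are hypothetical):

import os

eval_requests_path = "scratch-eval-queue"
os.makedirs(eval_requests_path, exist_ok=True)
open(os.path.join(eval_requests_path, "model-a.json"), "w").close()
open(os.path.join(eval_requests_path, "notes.txt"), "w").close()

# Keeps only the .json files, joined back onto the directory path
request_files = [os.path.join(eval_requests_path, f) for f in os.listdir(eval_requests_path) if f.endswith('.json')]
print(request_files)  # ['scratch-eval-queue/model-a.json']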
@@ -72,42 +58,3 @@ def get_evaluation_queue_df(eval_requests_path, eval_cols):
     pending_df = df[df['status'] == 'pending']
 
     return finished_df, running_df, pending_df
-
-def preselect_fixed_questions(dataset_path, num_questions_per_subject=30, fixed_questions_file='fixed_questions.json'):
-    """
-    Preselects a fixed number of questions per subject and saves them to a JSON file.
-    """
-    # Load the dataset
-    # Assuming the dataset is in CSV format with a 'Subject' column
-    if not os.path.exists(dataset_path):
-        raise FileNotFoundError(f"Dataset file not found at {dataset_path}")
-
-    dataset = pd.read_csv(dataset_path)
-
-    fixed_questions = {}
-
-    for task in Tasks:
-        subject = task.value.benchmark
-        subject_questions = dataset[dataset['Subject'] == subject]
-
-        if len(subject_questions) < num_questions_per_subject:
-            raise ValueError(f"Not enough questions for subject '{subject}'. Required: {num_questions_per_subject}, Available: {len(subject_questions)}")
-
-        # Randomly select fixed number of questions
-        selected_questions = subject_questions.sample(n=num_questions_per_subject, random_state=42)  # random_state for reproducibility
-        fixed_questions[subject] = selected_questions.to_dict(orient='records')
-
-    # Save fixed questions to a JSON file
-    with open(os.path.join(EVAL_RESULTS_PATH, fixed_questions_file), 'w') as f:
-        json.dump(fixed_questions, f, indent=4)
-
-    print(f"Fixed questions preselected and saved to {fixed_questions_file}")
-
-if __name__ == "__main__":
-    # Example usage:
-    # Define the path to your dataset
-    DATASET_PATH = os.path.join(EVAL_RESULTS_PATH, "your_dataset.csv")  # Update with your actual dataset file
-    FIXED_QUESTIONS_FILE = "fixed_questions.json"  # Define the name for fixed questions file
-
-    # Preselect fixed questions
-    preselect_fixed_questions(DATASET_PATH, num_questions_per_subject=30, fixed_questions_file=FIXED_QUESTIONS_FILE)