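"""Gradio app that compares prediction engines against a shared ground-truth JSON.

Each prediction file is evaluated, the resulting per-engine field and file
metrics are combined, and accuracy and average-similarity bar charts are
rendered for comparison across engines.
"""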
import json
import os
import shutil

import gradio as gr
import matplotlib.pyplot as plt
import pandas as pd

from Evaluator import evaluate_predictions_for_list
def gradio_interface(ground_truth_json, prediction_jsons):
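    """Evaluate each predictions file against the ground truth and build comparison plots.

    Args:
        ground_truth_json: ground-truth JSON file as provided by gr.File.
        prediction_jsons: list of per-engine prediction JSON files as provided by gr.File.

    Returns:
        The accuracy figure, the similarity figure, and the mean accuracy and
        average-similarity scores per engine (shown as text in the UI).
    """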
    # Run the evaluator on every prediction file against the shared ground truth
    engine_eval_results = evaluate_predictions_for_list(prediction_jsons, ground_truth_json, save_metrics_in_folder=True)
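    # evaluate_predictions_for_list is assumed to write one "<engine>_eval_results"
    # folder per prediction file into the current working directory; the loop below
    # relies on that layout.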
    # --------------------------------------------------------------------------------------------------------------- #
    # Path to the parent folder containing the engine subfolders (the current working directory)
    engine_folder = os.getcwd()

    # Data structures to hold the combined data
    field_metrics_combined = []
    file_metrics_combined = []
    combined_json_data = {}
    # Loop through each subdirectory in the parent folder
    for subdir in os.listdir(engine_folder):
        # A folder ending with "_eval_results" holds one engine's evaluation output
        if subdir.endswith("_eval_results"):
            engine_name = subdir.replace("_eval_results", "")
            # Construct the expected file paths for the field and file metrics CSVs and the YnY_hat JSON file
            field_metrics_path = os.path.join(engine_folder, subdir, f"{engine_name}_field_metrics.csv")
            file_metrics_path = os.path.join(engine_folder, subdir, f"{engine_name}_file_metrics.csv")
            json_path = os.path.join(engine_folder, subdir, f"{engine_name}_YnY_hat.json")
            # Only include engines for which all three files exist
            if os.path.exists(field_metrics_path) and os.path.exists(file_metrics_path) and os.path.exists(json_path):
                # Load the CSV files into pandas DataFrames
                field_metrics_df = pd.read_csv(field_metrics_path)
                file_metrics_df = pd.read_csv(file_metrics_path)
                # Add a column to indicate the engine name for comparison
                field_metrics_df["engine"] = engine_name
                file_metrics_df["engine"] = engine_name
                # Append to the combined lists
                field_metrics_combined.append(field_metrics_df)
                file_metrics_combined.append(file_metrics_df)
                # Load the JSON data and merge it into the combined_json_data dictionary
                with open(json_path, 'r') as f:
                    json_data = json.load(f)
                combined_json_data[engine_name] = json_data
            else:
                print(f"Missing metrics or JSON files for engine: {engine_name}")
    # Concatenate all the data into a single DataFrame for each metric type
    field_metrics_combined = pd.concat(field_metrics_combined, ignore_index=True)
    file_metrics_combined = pd.concat(file_metrics_combined, ignore_index=True)
    # Pivot the data so that attributes/filenames are rows and the per-engine metrics are columns
    field_metrics_combined = field_metrics_combined.pivot_table(index='attribute', columns='engine', values=['accuracy', 'avg_similarity'])
    file_metrics_combined = file_metrics_combined.pivot_table(
        index='filename', columns='engine',
        values=[col for col in file_metrics_combined.columns if col.endswith('_similarity')])
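    # pivot_table with a list of `values` produces a (metric, engine) MultiIndex on the columns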
    # Flatten the multi-level columns for easier understanding
    field_metrics_combined.columns = [f'{metric}_{engine}' for metric, engine in field_metrics_combined.columns]
    file_metrics_combined.columns = [f'{metric}_{engine}' for metric, engine in file_metrics_combined.columns]
    # Save the combined metrics CSVs and the combined JSON data
    field_metrics_combined.to_csv(os.path.join(engine_folder, 'field_metrics_combined.csv'))
    file_metrics_combined.to_csv(os.path.join(engine_folder, 'file_metrics_combined.csv'))
    with open(os.path.join(engine_folder, 'combined_YnY_hat.json'), 'w') as outfile:
        json.dump(combined_json_data, outfile, indent=4)
    print(f"Combined field metrics, file metrics, and JSON data have been saved successfully in {engine_folder}")
    # --------------------------------------------------------------------------------------------------------------- #
    # Plot accuracy and similarity in two separate figures
    # Mean accuracy and average similarity per engine, averaged over all attributes
    mean_accuracy_similarity_scores = field_metrics_combined.mean()
    # Re-load the combined field metrics so that 'attribute' is a regular column again
    field_metrics_combined_path = os.path.join(engine_folder, 'field_metrics_combined.csv')
    field_metrics_combined = pd.read_csv(field_metrics_combined_path)
    # Separate the columns for accuracy and average similarity
    accuracy_columns = [col for col in field_metrics_combined.columns if col.startswith('accuracy')]
    similarity_columns = [col for col in field_metrics_combined.columns if col.startswith('avg_similarity')]
    # Accuracy plot
    fig1, ax1 = plt.subplots(figsize=(10, 6))
    field_metrics_combined.set_index('attribute')[accuracy_columns].plot(kind='bar', ax=ax1, width=0.8)
    ax1.set_title('Accuracy Comparison Across Engines')
    ax1.set_xlabel('Attributes')
    ax1.set_ylabel('Accuracy (%)')
    ax1.tick_params(axis='x', rotation=45)
    ax1.legend(loc='lower right')
    fig1.tight_layout()
    # Similarity plot
    fig2, ax2 = plt.subplots(figsize=(10, 6))
    field_metrics_combined.set_index('attribute')[similarity_columns].plot(kind='bar', ax=ax2, width=0.8)
    ax2.set_title('Average Similarity Comparison Across Engines')
    ax2.set_xlabel('Attributes')
    ax2.set_ylabel('Average Similarity (%)')
    ax2.tick_params(axis='x', rotation=45)
    ax2.legend(loc='lower right')
    fig2.tight_layout()
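    # The figures are returned (not shown) so that Gradio can render them in the gr.Plot outputs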
    # Cleanup: delete the per-engine "*_eval_results" folders and the combined files
    remove_eval_results_folder_after_plots = True
    if remove_eval_results_folder_after_plots:
        for subdir in os.listdir(engine_folder):
            if subdir.endswith("_eval_results"):
                shutil.rmtree(os.path.join(engine_folder, subdir))
        os.remove(os.path.join(engine_folder, 'combined_YnY_hat.json'))
        os.remove(os.path.join(engine_folder, 'field_metrics_combined.csv'))
        os.remove(os.path.join(engine_folder, 'file_metrics_combined.csv'))
    return fig1, fig2, mean_accuracy_similarity_scores
gradio_app = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.File(label='Ground Truth', file_types=['json']),
        gr.File(label='Json Files with Predictions', file_count='multiple', file_types=['json']),
    ],
    outputs=[gr.Plot(), gr.Plot(), gr.Text(label='Mean Accuracy and Similarity per Engine')],
    examples=[[
        'data/ground_truth/sroie_ground_truth.json',
        [
            'data/Predictions/amazon.json',
            'data/Predictions/google_expense.json',
            'data/Predictions/microsoft.json',
            'data/Predictions/llr.json',
            'data/Predictions/qwen2_vl2b.json',
            'data/Predictions/qwen2_vl7b.json',
        ],
    ]],
)
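# debug=True keeps the call blocking and prints errors to the console;
# share=True asks Gradio to create a temporary public link in addition to the local URL.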
gradio_app.launch(debug=True, share=True)