Xuehai committed
Commit 9e4b8f5 · 1 Parent(s): 5e51e36

update appearance

Files changed (1)
  1. app.py +10 -18
app.py CHANGED
@@ -22,19 +22,21 @@ MODEL_INFO = [
  ]

  # Column Names for DataFrame
- COLUMN_NAMES = MODEL_INFO + DISCIPLINES + ["Average"]
+ COLUMN_NAMES = MODEL_INFO + DISCIPLINES

  # Data Types for DataFrame
- DATA_TITILE_TYPE = ['markdown'] + ['number'] * len(DISCIPLINES) + ['number']
+ DATA_TITILE_TYPE = ['markdown'] + ['number'] * len(DISCIPLINES)

  # Leaderboard Introduction
  LEADERBOARD_INTRODUCTION = """# MMWorld Leaderboard

  *"Towards Multi-discipline Multi-faceted World Model Evaluation in Videos"*
  🏆 Welcome to the leaderboard of the **MMWorld**! 🎦 *A new benchmark for multi-discipline, multi-faceted multimodal video understanding*
- [![GitHub](https://img.shields.io/badge/Code-GitHub-black?logo=github)](https://github.com/eric-ai-lab/MMWorld)

  <div style="display: flex; flex-wrap: wrap; align-items: center; gap: 10px;">
+     <a href='https://github.com/eric-ai-lab/MMWorld'>
+         <img src='https://img.shields.io/badge/Code-GitHub-black?logo=github'>
+     </a>
      <a href='https://arxiv.org/abs/2406.08407'>
          <img src='https://img.shields.io/badge/cs.CV-Paper-b31b1b?logo=arxiv&logoColor=red'>
      </a>
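These two constants drop their trailing "Average" entry in lockstep because they describe the same table: one header per column and one dtype per column. A minimal sketch of how they are presumably consumed by the leaderboard table elsewhere in app.py (the component construction and keyword values below are assumptions, not part of this diff):

```python
import gradio as gr

# Sketch only: the real Dataframe construction is not shown in this diff.
# headers and datatype must have the same length, which is why both
# COLUMN_NAMES and DATA_TITILE_TYPE lose their "Average" entry together.
leaderboard_table = gr.components.Dataframe(
    value=get_leaderboard_df(DISCIPLINES),  # assumed default: show all disciplines
    headers=COLUMN_NAMES,                   # model-info columns + one per discipline
    datatype=DATA_TITILE_TYPE,              # 'markdown' for model links, 'number' for scores
    interactive=False,
)
```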
@@ -47,8 +49,7 @@ LEADERBOARD_INTRODUCTION = """# MMWorld Leaderboard

  SUBMIT_INTRODUCTION = """# Submit on MMWorld Benchmark Introduction

- ## 🎈
- Please obtain the evaluation file `*.json` by running MMWorld in Github and upload the json file below.
+ ## 🎈 Please obtain the evaluation file `*.json` by running MMWorld in Github and upload the json file below.

  ⚠️ The contact information you filled in will not be made public.
  """
@@ -106,24 +107,15 @@ data = {
  # Create DataFrame
  df_full = pd.DataFrame(data)

- # Function to calculate average score
- def calculate_average(df, disciplines):
-     df['Average'] = df[disciplines].mean(axis=1)
-     return df
-
  # Function to get leaderboard DataFrame based on selected disciplines
  def get_leaderboard_df(selected_disciplines):
      if not selected_disciplines:
          selected_disciplines = DISCIPLINES  # If none selected, default to all
      # Copy the full DataFrame
      df = df_full.copy()
-     # Calculate the average based on selected disciplines
-     df['Average'] = df[selected_disciplines].mean(axis=1)
      # Select columns to display
-     columns_to_display = MODEL_INFO + selected_disciplines + ['Average']
+     columns_to_display = MODEL_INFO + selected_disciplines
      df = df[columns_to_display]
-     # Sort by Average descending
-     df = df.sort_values(by='Average', ascending=False)
      return df

  # Function to convert scores to two decimal places
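With calculate_average, the recomputed 'Average' column, and the descending sort removed, the filter simply projects the model-info columns plus the selected disciplines. A self-contained sketch of the post-commit behaviour on toy data (column names and values below are placeholders, not MMWorld results):

```python
import pandas as pd

# Toy stand-ins for the real constants and data defined in app.py.
MODEL_INFO = ["Model"]
DISCIPLINES = ["Art", "Science", "Sports"]
df_full = pd.DataFrame({
    "Model": ["model-a", "model-b"],
    "Art": [10.0, 20.0],
    "Science": [30.0, 40.0],
    "Sports": [50.0, 60.0],
})

def get_leaderboard_df(selected_disciplines):
    if not selected_disciplines:
        selected_disciplines = DISCIPLINES  # default to all disciplines
    df = df_full.copy()
    columns_to_display = MODEL_INFO + selected_disciplines
    return df[columns_to_display]

# Only the requested columns come back; no 'Average' column, no re-sorting.
print(get_leaderboard_df(["Science"]))
```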
@@ -229,7 +221,7 @@ with block:
      with gr.Row():
          with gr.Column():
              model_name_textbox = gr.Textbox(
-                 label="**Model name**", placeholder="Required field"
+                 label="Model name", placeholder="Required field"
              )
              revision_name_textbox = gr.Textbox(
                  label="Revision Model Name (Optional)", placeholder="GPT4V"
@@ -237,13 +229,13 @@ with block:

          with gr.Column():
              model_link = gr.Textbox(
-                 label="**Project Page/Paper Link**", placeholder="Required field"
+                 label="Project Page/Paper Link", placeholder="Required field"
              )
              team_name = gr.Textbox(
                  label="Your Team Name (If left blank, it will be user upload)", placeholder="User Upload"
              )
              contact_email = gr.Textbox(
-                 label="E-Mail (**Will not be displayed**)", placeholder="Required field"
+                 label="E-Mail (Will not be displayed)", placeholder="Required field"
              )

          with gr.Column():