Update app.py
app.py CHANGED

@@ -178,7 +178,7 @@ def build_leaderboard_tab(leaderboard_table_file_en, leaderboard_table_file_cn,
     gr.Markdown(md_head, elem_id="leaderboard_markdown")
     with gr.Tabs() as tabs:
         # arena table
-        with gr.Tab("OCRBench v2 English
+        with gr.Tab("OCRBench v2 English subsets", id=0):
             arena_table_vals = get_arena_table(model_table_df_en)
             md = "OCRBench v2 is a large-scale bilingual text-centric benchmark with currently the most comprehensive set of tasks (4× more tasks than the previous multi-scene benchmark OCRBench), the widest coverage of scenarios (31 diverse scenarios including street scene, receipt, formula, diagram, and so on), and thorough evaluation metrics, with a total of 10, 000 human-verified question-answering pairs and a high proportion of difficult samples."
             gr.Markdown(md, elem_id="leaderboard_markdown")
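For context, the changed line sets both the label and an explicit `id` on a `gr.Tab` nested inside `gr.Tabs`. The sketch below is a minimal, self-contained Gradio example, not the Space's actual code: the second tab, the placeholder models, and the scores are assumptions for illustration only, but the `gr.Tabs` / `gr.Tab(..., id=...)` pattern matches what the diff touches.

```python
# Minimal sketch of a tabbed leaderboard layout (assumed structure, not the
# Space's real app.py). Tab labels, ids, and table contents are placeholders.
import gradio as gr

def build_demo():
    with gr.Blocks() as demo:
        gr.Markdown("## OCRBench v2 Leaderboard", elem_id="leaderboard_markdown")
        with gr.Tabs() as tabs:
            # Giving each tab an explicit id (as in the diff: id=0) lets other
            # code refer to it later, e.g. via gr.Tabs(selected=0).
            with gr.Tab("OCRBench v2 English subsets", id=0):
                gr.Dataframe(
                    value=[["model-a", 61.2], ["model-b", 57.8]],  # placeholder rows
                    headers=["Model", "Score"],
                )
            with gr.Tab("OCRBench v2 Chinese subsets", id=1):
                gr.Dataframe(
                    value=[["model-a", 55.4]],  # placeholder row
                    headers=["Model", "Score"],
                )
    return demo

if __name__ == "__main__":
    build_demo().launch()
```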