Upload 6 files

- Inaccessible_model.csv +3 -0
- OCRBench.csv +29 -0
- README.md +8 -5
- TextRecognition.csv +26 -0
- app.py +310 -0
- gitattributes +35 -0

Inaccessible_model.csv ADDED
@@ -0,0 +1,3 @@
Model,Language Model,Open Source,Text Recognition,Scene Text-Centric VQA,Doc-Oriented VQA,KIE,HMER,Final Score,Link
Nanbeige-VL,-,No,260,173,117,137,40,727,https://github.com/jmiemirza/MMFM-Challenge/blob/master/2024-winnners-reports
BlueLM-VL,-,No,209,173,132,147,6,667,https://github.com/vivo-ai-lab/BlueLM

OCRBench.csv ADDED
@@ -0,0 +1,29 @@
Model,Language Model,Open Source,Text Recognition,Scene Text-Centric VQA,Doc-Oriented VQA,KIE,HMER,Final Score,Link
Minicpm-V 2.6,Qwen2-7B,Yes,261,186,176,181,48,852,https://github.com/OpenBMB/MiniCPM-V
MiniMonkey,internlm2-chat-1_8b,Yes,251,174,141,169,71,806,https://arxiv.org/abs/2408.02034
H2OVL-Mississippi-2B,H2O-Danube2-1.8B,Yes,252,171,140,166,53,782,https://huggingface.co/h2oai/h2ovl-mississippi-2b
InternVL2-1B,Qwen2-0.5B-Instruct,Yes,255,166,130,156,72,779,https://huggingface.co/OpenGVLab/InternVL2-1B
InternVL2-4B,Phi-3-mini-128k-instruct,Yes,235,170,138,164,69,776,https://huggingface.co/OpenGVLab/InternVL2-4B
InternVL2-2B,internlm2-chat-1_8b,Yes,245,172,122,167,62,768,https://huggingface.co/OpenGVLab/InternVL2-2B
H2OVL-Mississippi-0.8B,H2O-Danube3-0.5B,Yes,274,162,112,152,51,751,https://huggingface.co/h2oai/h2ovl-mississippi-800m
Qwen-VL-Max,-,No,254,166,148,143,12,723,https://github.com/QwenLM/Qwen-VL
Qwen-VL-Plus,-,No,248,155,141,141,9,694,https://github.com/QwenLM/Qwen-VL
Gemini,-,No,215,174,128,134,8,659,https://deepmind.google/technologies/gemini/
GPT4V,-,No,167,163,146,160,9,645,https://openai.com/
MiniCPM-V-2,MiniCPM-2.4B,Yes,245,171,103,86,0,605,https://github.com/OpenBMB/MiniCPM-V
mPLUG-DocOwl1.5,LLaMA-2 7B,Yes,182,157,126,134,0,599,https://arxiv.org/abs/2403.12895
TextMonkey,Qwen-7B,Yes,169,164,115,113,0,561,https://export.arxiv.org/abs/2403.04473
InternVL-Chat-Chinese,LLaMA2-13B,Yes,228,153,72,64,0,517,https://arxiv.org/abs/2312.14238
Monkey,Qwen-7B,Yes,174,161,91,88,0,514,https://arxiv.org/abs/2311.06607
InternLM-XComposer2,InternLM2-7B,Yes,160,160,103,87,1,511,https://arxiv.org/abs/2401.16420
QwenVL,Qwen-7B,Yes,179,157,95,75,0,506,https://arxiv.org/abs/2308.12966
mPLUG-Owl2,LLaMA2-7B,Yes,153,153,41,19,0,366,https://arxiv.org/abs/2311.04257
LLaVAR,LLaMA-13B,Yes,186,122,25,13,0,346,https://arxiv.org/abs/2306.17107
LLaVA1.5-13B,Vicuna-v1.5-13B,Yes,176,129,19,7,0,331,https://arxiv.org/abs/2310.03744
InternLM-XComposer,InternLM-7B,Yes,192,91,14,6,0,303,https://arxiv.org/abs/2309.15112
LLaVA1.5-7B,Vicuna-v1.5-7B,Yes,160,117,15,5,0,297,https://arxiv.org/abs/2310.03744
mPLUG-Owl,LLaMA-2 7B,Yes,172,104,18,3,0,297,https://arxiv.org/abs/2304.14178
BLIVA,Vicuna-7B,Yes,165,103,22,1,0,291,https://arxiv.org/abs/2308.09936
InstructBLIP,Vicuna-7b,Yes,168,93,14,1,0,276,https://arxiv.org/abs/2305.06500
BLIP2-6.7B,OPT-6.7B,Yes,154,71,10,0,0,235,https://arxiv.org/abs/2301.12597
MiniGPT4V2,LLaMA2-13B,Yes,124,29,4,0,0,157,https://arxiv.org/abs/2310.09478
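
Every Final Score above is the sum of the five component scores (e.g. Minicpm-V 2.6: 261 + 186 + 176 + 181 + 48 = 852), and the same holds for Inaccessible_model.csv. A minimal pandas sketch of that consistency check, assuming OCRBench.csv sits in the working directory:

import pandas as pd

df = pd.read_csv("OCRBench.csv")
components = ["Text Recognition", "Scene Text-Centric VQA",
              "Doc-Oriented VQA", "KIE", "HMER"]
# Holds for every row: Final Score is a plain sum of the five components.
assert (df[components].sum(axis=1) == df["Final Score"]).all()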

README.md CHANGED
@@ -1,10 +1,13 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk:
+title: Ocrbench Leaderboard
+emoji: 🏆
+colorFrom: gray
+colorTo: pink
+sdk: gradio
+sdk_version: 4.15.0
+app_file: app.py
 pinned: false
+license: mit
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

TextRecognition.csv ADDED
@@ -0,0 +1,26 @@
Model,Language Model,Open Source,Regular Text,Irregular Text,Artistic Text,Handwriting,Digit string,Non-semantic Text,ALL,Link
InternVL2-1B,Qwen2-0.5B-Instruct,Yes,47,45,44,26,49,44,255,https://huggingface.co/OpenGVLab/InternVL2-1B
InternVL2-4B,Phi-3-mini-128k-instruct,Yes,46,44,44,17,43,41,235,https://huggingface.co/OpenGVLab/InternVL2-4B
InternVL2-2B,internlm2-chat-1_8b,Yes,46,45,45,26,42,41,245,https://huggingface.co/OpenGVLab/InternVL2-2B
Nanbeige-VL,-,No,47,46,45,38,39,45,260,https://github.com/jmiemirza/MMFM-Challenge/blob/master/2024-winnners-reports
Qwen-VL-Max,-,No,49,50,49,27,36,43,254,https://github.com/QwenLM/Qwen-VL
Qwen-VL-Plus,-,No,49,49,48,36,23,43,248,https://github.com/QwenLM/Qwen-VL
BlueLM-VL,-,No,46,39,42,20,27,35,209,https://github.com/vivo-ai-lab/BlueLM
Gemini,-,No,47,35,45,31,25,32,215,https://deepmind.google/technologies/gemini/
GPT4V,-,No,39,37,41,11,1,38,167,https://openai.com/
mPLUG-DocOwl1.5,LLaMA-2 7B,Yes,45,39,39,22,15,22,182,https://arxiv.org/abs/2403.12895
TextMonkey,Qwen-7B,Yes,45,35,39,15,9,26,169,https://export.arxiv.org/abs/2403.04473
InternVL-Chat-Chinese,LLaMA2-13B,Yes,49,46,46,28,27,32,228,https://arxiv.org/abs/2312.14238
Monkey,Qwen-7B,Yes,44,37,40,14,11,28,174,https://arxiv.org/abs/2311.06607
InternLM-XComposer2,InternLM2-7B,Yes,45,37,37,12,7,22,160,https://arxiv.org/abs/2401.16420
QwenVL,Qwen-7B,Yes,46,39,42,14,10,28,179,https://arxiv.org/abs/2308.12966
mPLUG-Owl2,LLaMA2-7B,Yes,43,37,40,12,4,17,153,https://arxiv.org/abs/2311.04257
LLaVAR,LLaMA-13B,Yes,48,42,43,28,12,13,186,https://arxiv.org/abs/2306.17107
LLaVA1.5-13B,Vicuna-v1.5-13B,Yes,48,44,43,30,7,4,176,https://arxiv.org/abs/2310.03744
InternLM-XComposer,InternLM-7B,Yes,49,44,46,23,13,17,192,https://arxiv.org/abs/2309.15112
LLaVA1.5-7B,Vicuna-v1.5-7B,Yes,43,40,41,26,5,5,160,https://arxiv.org/abs/2310.03744
mPLUG-Owl,LLaMA-2 7B,Yes,44,42,44,13,9,20,172,https://arxiv.org/abs/2304.14178
BLIVA,Vicuna-7B,Yes,48,42,42,24,5,4,165,https://arxiv.org/abs/2308.09936
InstructBLIP,Vicuna-7b,Yes,46,43,44,19,8,8,168,https://arxiv.org/abs/2305.06500
BLIP2-6.7B,OPT-6.7B,Yes,47,41,44,15,1,6,154,https://arxiv.org/abs/2301.12597
MiniGPT4V2,LLaMA2-13B,Yes,35,37,36,13,1,2,124,https://arxiv.org/abs/2310.09478
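
Likewise, the ALL column is the sum of the six recognition subtask scores (e.g. InternVL2-1B: 47 + 45 + 44 + 26 + 49 + 44 = 255). The same style of check works here, assuming TextRecognition.csv is in the working directory:

import pandas as pd

df = pd.read_csv("TextRecognition.csv")
subtasks = ["Regular Text", "Irregular Text", "Artistic Text",
            "Handwriting", "Digit string", "Non-semantic Text"]
# ALL is a plain sum of the six subtask scores for every row.
assert (df[subtasks].sum(axis=1) == df["ALL"]).all()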

app.py ADDED
@@ -0,0 +1,310 @@
import argparse

import gradio as gr
import pandas as pd

block_css = """
#notice_markdown {
    font-size: 104%;
}
#notice_markdown th {
    display: none;
}
#notice_markdown td {
    padding-top: 6px;
    padding-bottom: 6px;
}
#leaderboard_markdown {
    font-size: 104%;
}
#leaderboard_markdown td {
    padding-top: 6px;
    padding-bottom: 6px;
}
#leaderboard_dataframe td {
    line-height: 0.1em;
}
footer {
    display: none !important;
}
.image-container {
    display: flex;
    align-items: center;
    padding: 1px;
}
.image-container img {
    margin: 0 30px;
    height: 20px;
    max-height: 100%;
    width: auto;
    max-width: 20%;
}
"""
def model_hyperlink(model_name, link):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline; text-decoration-style: dotted;">{model_name}</a>'


def load_leaderboard_table_csv(filename, add_hyperlink=True):
    # Parse the CSV by hand: score columns become ints, descriptive columns stay strings.
    with open(filename) as f:
        lines = f.readlines()
    heads = [v.strip() for v in lines[0].split(",")]
    rows = []
    for line in lines[1:]:
        if not line.strip():
            continue  # skip blank trailing lines
        row = [v.strip() for v in line.split(",")]
        item = {}
        for h, v in zip(heads, row):
            if h in ("Model", "Link", "Language Model", "Open Source"):
                item[h] = v
            else:
                item[h] = int(v)
        if add_hyperlink:
            item["Model"] = model_hyperlink(item["Model"], item["Link"])
        rows.append(item)
    return rows

67 |
+
# sort by rating
|
68 |
+
model_table_df = model_table_df.sort_values(by=["Final Score"], ascending=False)
|
69 |
+
values = []
|
70 |
+
for i in range(len(model_table_df)):
|
71 |
+
row = []
|
72 |
+
model_key = model_table_df.index[i]
|
73 |
+
model_name = model_table_df["Model"].values[model_key]
|
74 |
+
# rank
|
75 |
+
row.append(i + 1)
|
76 |
+
# model display name
|
77 |
+
row.append(model_name)
|
78 |
+
|
79 |
+
row.append(
|
80 |
+
model_table_df["Language Model"].values[model_key]
|
81 |
+
)
|
82 |
+
row.append(
|
83 |
+
model_table_df["Open Source"].values[model_key]
|
84 |
+
)
|
85 |
+
row.append(
|
86 |
+
model_table_df["Text Recognition"].values[model_key]
|
87 |
+
)
|
88 |
+
|
89 |
+
row.append(
|
90 |
+
model_table_df["Scene Text-Centric VQA"].values[model_key]
|
91 |
+
)
|
92 |
+
|
93 |
+
row.append(
|
94 |
+
model_table_df["Doc-Oriented VQA"].values[model_key]
|
95 |
+
)
|
96 |
+
|
97 |
+
row.append(
|
98 |
+
model_table_df["KIE"].values[model_key]
|
99 |
+
)
|
100 |
+
|
101 |
+
row.append(
|
102 |
+
model_table_df["HMER"].values[model_key]
|
103 |
+
)
|
104 |
+
|
105 |
+
row.append(
|
106 |
+
model_table_df["Final Score"].values[model_key]
|
107 |
+
)
|
108 |
+
values.append(row)
|
109 |
+
return values
|
110 |
+
|
111 |
+
def get_recog_table(model_table_df):
|
112 |
+
# sort by rating
|
113 |
+
values = []
|
114 |
+
for i in range(len(model_table_df)):
|
115 |
+
row = []
|
116 |
+
model_key = model_table_df.index[i]
|
117 |
+
model_name = model_table_df["Model"].values[model_key]
|
118 |
+
# rank
|
119 |
+
row.append(i + 1)
|
120 |
+
# model display name
|
121 |
+
row.append(model_name)
|
122 |
+
|
123 |
+
row.append(
|
124 |
+
model_table_df["Language Model"].values[model_key]
|
125 |
+
)
|
126 |
+
row.append(
|
127 |
+
model_table_df["Open Source"].values[model_key]
|
128 |
+
)
|
129 |
+
row.append(
|
130 |
+
model_table_df["Regular Text"].values[model_key]
|
131 |
+
)
|
132 |
+
|
133 |
+
row.append(
|
134 |
+
model_table_df["Irregular Text"].values[model_key]
|
135 |
+
)
|
136 |
+
|
137 |
+
row.append(
|
138 |
+
model_table_df["Artistic Text"].values[model_key]
|
139 |
+
)
|
140 |
+
|
141 |
+
row.append(
|
142 |
+
model_table_df["Handwriting"].values[model_key]
|
143 |
+
)
|
144 |
+
|
145 |
+
row.append(
|
146 |
+
model_table_df["Digit string"].values[model_key]
|
147 |
+
)
|
148 |
+
|
149 |
+
row.append(
|
150 |
+
model_table_df["Non-semantic Text"].values[model_key]
|
151 |
+
)
|
152 |
+
row.append(
|
153 |
+
model_table_df["ALL"].values[model_key]
|
154 |
+
)
|
155 |
+
values.append(row)
|
156 |
+
return values
|
157 |
+
|
def build_leaderboard_tab(leaderboard_table_file, text_recog_file, Inaccessible_model_file, show_plot=False):
    if leaderboard_table_file:
        data = load_leaderboard_table_csv(leaderboard_table_file)
        data_recog = load_leaderboard_table_csv(text_recog_file)
        data_Inaccessible = load_leaderboard_table_csv(Inaccessible_model_file)
        model_table_df = pd.DataFrame(data)
        recog_table_df = pd.DataFrame(data_recog)
        model_table_df_Inaccessible = pd.DataFrame(data_Inaccessible)
        md_head = """
# 🏆 OCRBench Leaderboard
| [GitHub](https://github.com/Yuliang-Liu/MultimodalOCR) | [Paper](https://arxiv.org/abs/2305.07895) |
"""
        gr.Markdown(md_head, elem_id="leaderboard_markdown")
        # Shared benchmark description, shown on the first two tabs.
        ocrbench_md = "OCRBench is a comprehensive evaluation benchmark designed to assess the OCR capabilities of Large Multimodal Models. It comprises five components: Text Recognition, Scene Text-Centric VQA, Document-Oriented VQA, Key Information Extraction, and Handwritten Mathematical Expression Recognition. The benchmark includes 1000 question-answer pairs, and all answers undergo manual verification and correction to ensure a more precise evaluation."
        with gr.Tabs():
            with gr.Tab("OCRBench", id=0):
                arena_table_vals = get_arena_table(model_table_df)
                gr.Markdown(ocrbench_md, elem_id="leaderboard_markdown")
                gr.Dataframe(
                    headers=[
                        "Rank",
                        "Name",
                        "Language Model",
                        "Open Source",
                        "Text Recognition",
                        "Scene Text-Centric VQA",
                        "Doc-Oriented VQA",
                        "KIE",
                        "HMER",
                        "Final Score",
                    ],
                    datatype=["str", "markdown", "str", "str"] + ["number"] * 6,
                    value=arena_table_vals,
                    elem_id="arena_leaderboard_dataframe",
                    height=700,
                    column_widths=[60, 120, 150, 100, 150, 200, 180, 80, 80, 160],
                    wrap=True,
                )
            with gr.Tab("Text Recognition", id=1):
                recog_table_vals = get_recog_table(recog_table_df)
                gr.Markdown(ocrbench_md, elem_id="leaderboard_markdown")
                gr.Dataframe(
                    headers=[
                        "Rank",
                        "Name",
                        "Language Model",
                        "Open Source",
                        "Regular Text",
                        "Irregular Text",
                        "Artistic Text",
                        "Handwriting",
                        "Digit string",
                        "Non-semantic Text",
                        "ALL",
                    ],
                    datatype=["str", "markdown", "str", "str"] + ["number"] * 7,
                    value=recog_table_vals,
                    elem_id="arena_leaderboard_dataframe",
                    height=700,
                    column_widths=[60, 120, 150, 100, 100, 100, 100, 100, 100, 100, 80],
                    wrap=True,
                )
            with gr.Tab("Inaccessible Model", id=2):
                inaccessible_table_vals = get_arena_table(model_table_df_Inaccessible)
                gr.Markdown(
                    "The models on this list are neither open source nor reachable through an API.",
                    elem_id="leaderboard_markdown",
                )
                gr.Dataframe(
                    headers=[
                        "Rank",
                        "Name",
                        "Language Model",
                        "Open Source",
                        "Text Recognition",
                        "Scene Text-Centric VQA",
                        "Doc-Oriented VQA",
                        "KIE",
                        "HMER",
                        "Final Score",
                    ],
                    datatype=["str", "markdown", "str", "str"] + ["number"] * 6,
                    value=inaccessible_table_vals,
                    elem_id="arena_leaderboard_dataframe",
                    height=700,
                    column_widths=[60, 120, 150, 100, 150, 200, 180, 80, 80, 160],
                    wrap=True,
                )
    md_tail = """
# Notice
API calls to closed-source models sometimes fail. In such cases, we retry the failed samples until no successful response can be obtained. Note that, due to OpenAI's rigorous security reviews, GPT4V refuses to provide results for 84 of the samples in OCRBench.
If you would like your model included in the OCRBench leaderboard, please follow the evaluation instructions on [GitHub](https://github.com/Yuliang-Liu/MultimodalOCR), [VLMEvalKit](https://github.com/open-compass/VLMEvalKit), or [lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval), and feel free to contact us by email at [email protected]. We will update the leaderboard promptly."""
    gr.Markdown(md_tail, elem_id="leaderboard_markdown")

def build_demo(leaderboard_table_file, recog_table_file, Inaccessible_model_file):
    text_size = gr.themes.sizes.text_lg

    with gr.Blocks(
        title="OCRBench Leaderboard",
        theme=gr.themes.Base(text_size=text_size),
        css=block_css,
    ) as demo:
        build_leaderboard_tab(
            leaderboard_table_file, recog_table_file, Inaccessible_model_file, show_plot=True
        )
    return demo


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench.csv")
    parser.add_argument("--TextRecognition_file", type=str, default="./TextRecognition.csv")
    parser.add_argument("--Inaccessible_model_file", type=str, default="./Inaccessible_model.csv")
    args = parser.parse_args()

    demo = build_demo(args.OCRBench_file, args.TextRecognition_file, args.Inaccessible_model_file)
    # Honor --share (the flag was previously parsed but never used).
    demo.launch(share=args.share)
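
A quick way to sanity-check the table logic without launching the UI is to reuse these helpers directly. A minimal sketch, assuming app.py and the CSV files above sit in the current directory (importing app is side-effect free here, since the demo only launches under the __main__ guard):

import pandas as pd
from app import load_leaderboard_table_csv, get_arena_table

rows = load_leaderboard_table_csv("./OCRBench.csv")  # list of dicts; "Model" becomes an HTML link
df = pd.DataFrame(rows)
print(get_arena_table(df)[0])  # rank-1 row, i.e. the highest Final Score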

gitattributes ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text