Upload folder using huggingface_hub
- app/draw_diagram.py +1 -41
- app/pages.py +1 -1
app/draw_diagram.py
CHANGED
@@ -6,46 +6,7 @@ from streamlit_echarts import st_echarts
 from streamlit_javascript import st_javascript
 # from PIL import Image
 
-links_dic = {
-    "meta_llama_3_8b": "https://huggingface.co/meta-llama/Meta-Llama-3-8B",
-    "mistral_7b_instruct_v0_2": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
-    "sailor_0_5b": "https://huggingface.co/sail/Sailor-0.5B",
-    "sailor_1_8b": "https://huggingface.co/sail/Sailor-1.8B",
-    "sailor_4b": "https://huggingface.co/sail/Sailor-4B",
-    "sailor_7b": "https://huggingface.co/sail/Sailor-7B",
-    "sailor_0_5b_chat": "https://huggingface.co/sail/Sailor-0.5B-Chat",
-    "sailor_1_8b_chat": "https://huggingface.co/sail/Sailor-1.8B-Chat",
-    "sailor_4b_chat": "https://huggingface.co/sail/Sailor-4B-Chat",
-    "sailor_7b_chat": "https://huggingface.co/sail/Sailor-7B-Chat",
-    "sea_mistral_highest_acc_inst_7b": "https://seaeval.github.io/",
-    "meta_llama_3_8b_instruct": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct",
-    "flan_t5_base": "https://huggingface.co/google/flan-t5-base",
-    "flan_t5_large": "https://huggingface.co/google/flan-t5-large",
-    "flan_t5_xl": "https://huggingface.co/google/flan-t5-xl",
-    "flan_t5_xxl": "https://huggingface.co/google/flan-t5-xxl",
-    "flan_ul2": "https://huggingface.co/google/flan-t5-ul2",
-    "flan_t5_small": "https://huggingface.co/google/flan-t5-small",
-    "mt0_xxl": "https://huggingface.co/bigscience/mt0-xxl",
-    "seallm_7b_v2": "https://huggingface.co/SeaLLMs/SeaLLM-7B-v2",
-    "gpt_35_turbo_1106": "https://openai.com/blog/chatgpt",
-    "meta_llama_3_70b": "https://huggingface.co/meta-llama/Meta-Llama-3-70B",
-    "meta_llama_3_70b_instruct": "https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct",
-    "sea_lion_3b": "https://huggingface.co/aisingapore/sea-lion-3b",
-    "sea_lion_7b": "https://huggingface.co/aisingapore/sea-lion-7b",
-    "qwen1_5_110b": "https://huggingface.co/Qwen/Qwen1.5-110B",
-    "qwen1_5_110b_chat": "https://huggingface.co/Qwen/Qwen1.5-110B-Chat",
-    "llama_2_7b_chat": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf",
-    "gpt4_1106_preview": "https://openai.com/blog/chatgpt",
-    "gemma_2b": "https://huggingface.co/google/gemma-2b",
-    "gemma_7b": "https://huggingface.co/google/gemma-7b",
-    "gemma_2b_it": "https://huggingface.co/google/gemma-2b-it",
-    "gemma_7b_it": "https://huggingface.co/google/gemma-7b-it",
-    "qwen_1_5_7b": "https://huggingface.co/Qwen/Qwen1.5-7B",
-    "qwen_1_5_7b_chat": "https://huggingface.co/Qwen/Qwen1.5-7B-Chat",
-    "sea_lion_7b_instruct": "https://huggingface.co/aisingapore/sea-lion-7b-instruct",
-    "sea_lion_7b_instruct_research": "https://huggingface.co/aisingapore/sea-lion-7b-instruct-research",
-    "LLaMA_3_Merlion_8B": "https://seaeval.github.io/",
-    "LLaMA_3_Merlion_8B_v1_1": "https://seaeval.github.io/"}
+links_dic = {}
 
 links_dic = {k.lower().replace('_', '-') : v for k, v in links_dic.items()}
 
@@ -75,7 +36,6 @@ def draw(folder_name, category_name, dataset_name, metrics):
     if len(chart_data) == 0:
         return
 
-
     # if sorted == 'Ascending':
     #     ascend = True
     # else:
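For reference, the key-normalization comprehension that survives the change only lowercases keys and swaps underscores for hyphens, so starting from an empty links_dic it now yields an empty mapping. A minimal standalone sketch, reusing two entries from the removed dictionary purely as example data:

# Minimal sketch of the key-normalization step in app/draw_diagram.py.
# The two entries below are copied from the removed dictionary as examples;
# the committed file now starts from links_dic = {}, so the comprehension
# simply produces another empty dict.
links_dic = {
    "meta_llama_3_8b": "https://huggingface.co/meta-llama/Meta-Llama-3-8B",
    "gemma_7b_it": "https://huggingface.co/google/gemma-7b-it",
}

# Lowercase the keys and replace underscores with hyphens, as in the app code.
links_dic = {k.lower().replace('_', '-'): v for k, v in links_dic.items()}

print(links_dic.get("meta-llama-3-8b"))  # -> https://huggingface.co/meta-llama/Meta-Llama-3-8B
print(links_dic.get("unknown-model"))    # -> None; .get() keeps lookups safe once the dict is empty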
app/pages.py
CHANGED
@@ -264,7 +264,7 @@ def gr():
     else:
         draw('vu', 'GR', 'VoxCeleb1-Gender-Test', 'llama3_70b_judge_binary')
 
-def
+def spt():
     st.title("Speech Translation")
 
     filters_levelone = ['Covost2-EN-ID-test',
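For reference, a rough sketch of how a page function like the new spt() can plug a dataset filter into draw(). Only the function name, the title, the first filter entry, and the draw() signature come from the diff; the import path, the selectbox, and the folder/category/metric arguments below are assumptions, not the repository's actual code.

import streamlit as st

from app.draw_diagram import draw  # import path assumed, not shown in the diff


def spt():
    st.title("Speech Translation")

    # Only the first entry is visible in the diff; the rest of the list is unknown.
    filters_levelone = ['Covost2-EN-ID-test']

    # Assumed UI wiring: let the user pick a dataset, then chart it.
    dataset = st.selectbox('Dataset', filters_levelone)

    # draw(folder_name, category_name, dataset_name, metrics) is defined in
    # app/draw_diagram.py; the folder, category and metric values here are
    # placeholders for illustration only.
    draw('su', 'ST', dataset, 'llama3_70b_judge')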