munish0838 commited on
Commit
0f66054
·
verified ·
1 Parent(s): 939ce98

Upload README.md with huggingface_hub

Browse files
Files changed (1) hide show
  1. README.md +92 -82
README.md CHANGED
@@ -6,7 +6,7 @@ license_name: katanemo-research
6
  license_link: >-
7
  https://huggingface.co/katanemolabs/Arch-Function-3B/blob/main/LICENSE
8
  base_model:
9
- - Qwen/Qwen2.5-3B-Instruct
10
  language:
11
  - en
12
  pipeline_tag: text-generation
@@ -66,7 +66,7 @@ Katanemo Arch-Function collection is built on top of the [Qwen 2.5](https://hugg
66
 
67
 
68
  ## Performance Benchmarks
69
- We evaluate Katanemo Arch-Function series on the [Berkeley Function-Calling Leaderboard (BFCL)](https://gorilla.cs.berkeley.edu/leaderboard.html#leaderboard). For each model family, we select the one with the highest rank. The results are shown below:
70
 
71
  <table>
72
  <tr style="text-align: center; vertical-align: middle; font-weight: bold;">
@@ -87,103 +87,113 @@ We evaluate Katanemo Arch-Function series on the [Berkeley Function-Calling Lead
87
  </tr>
88
  <tr style="text-align: center; vertical-align: middle;">
89
  <td>1</td>
90
- <td>GPT-4-turbo-2024-04-09</td>
91
- <td>59.49%</td>
92
- <td>82.65%</td>
93
- <td>83.80%</td>
94
- <td>73.39%</td>
95
- <td>21.62%</td>
96
- <td>70.73%</td>
97
- <td>79.79%</td>
98
- </tr>
99
- <tr style="text-align: center; vertical-align: middle;">
100
- <td>3</td>
101
- <td>xLAM-8x22b-r</td>
102
- <td>59.13%</td>
103
- <td>89.75%</td>
104
- <td>89.32%</td>
105
- <td>72.81%</td>
106
- <td>15.62%</td>
107
- <td>97.56%</td>
108
- <td>75.23%</td>
109
  </tr>
110
  <tr style="text-align: center; vertical-align: middle; font-weight: bold;">
111
  <td> </td>
112
  <td>Arch-Function-7B</td>
113
- <td>57.48%</td>
114
- <td>87.50%</td>
115
- <td>86.80%</td>
116
- <td>72.19%</td>
117
- <td>13.75%</td>
118
- <td>82.93%</td>
119
- <td>79.54%</td>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
  </tr>
121
  <tr style="text-align: center; vertical-align: middle; font-weight: bold;">
122
  <td> </td>
123
  <td>Arch-Function-3B</td>
124
- <td>56.23%</td>
125
- <td>85.10%</td>
126
- <td>89.16%</td>
127
- <td>70.72%</td>
128
- <td>12.28%</td>
129
  <td>90.24%</td>
130
- <td>73.98%</td>
131
  </tr>
132
- <tr style="text-align: center; vertical-align: middle;">
133
- <td>7</td>
134
- <td>mistral-large-2407</td>
135
- <td>55.82%</td>
136
- <td>84.12%</td>
137
- <td>83.09%</td>
138
- <td>67.17%</td>
139
- <td>20.50%</td>
140
- <td>78.05%</td>
141
- <td>48.93%</td>
142
- </tr>
143
- <tr style="text-align: center; vertical-align: middle;">
144
- <td>9</td>
145
- <td>Claude-3.5-Sonnet-20240620</td>
146
- <td>54.83%</td>
147
- <td>70.35%</td>
148
- <td>66.34%</td>
149
- <td>71.39%</td>
150
- <td>23.5%</td>
151
- <td>63.41%</td>
152
- <td>75.91%</td>
153
  </tr>
 
 
 
 
 
 
 
 
 
 
154
  </tr>
155
  <tr style="text-align: center; vertical-align: middle; font-weight: bold;">
156
  <td> </td>
157
- <td>Arch-Function-3B</td>
158
- <td>53.61%</td>
159
- <td>82.60%</td>
160
- <td>87.36%</td>
161
- <td>68.19%</td>
162
- <td>8.62%</td>
163
  <td>87.80%</td>
164
- <td>75.90%</td>
165
  </tr>
166
- <tr style="text-align: center; vertical-align: middle;">
167
- <td>11</td>
168
- <td>o1-mini-2024-09-12</td>
169
- <td>53.43%</td>
170
- <td>75.48%</td>
171
- <td>76.86%</td>
172
- <td>71.17%</td>
173
- <td>11.00%</td>
174
- <td>46.34%</td>
175
- <td>88.07%</td>
176
  </tr>
177
- <tr style="text-align: center; vertical-align: middle;">
178
- <td>12</td>
179
- <td>Gemini-1.5-Flash-Preview-0514</td>
180
- <td>53.01%</td>
181
- <td>77.10%</td>
182
- <td>71.23%</td>
183
- <td>71.17%</td>
184
- <td>13.12%</td>
185
- <td>60.98%</td>
186
- <td>76.15%</td>
187
  </tr>
188
  </table>
189
 
 
6
  license_link: >-
7
  https://huggingface.co/katanemolabs/Arch-Function-3B/blob/main/LICENSE
8
  base_model:
9
+ - Qwen/Qwen2.5-Coder-3B-Instruct
10
  language:
11
  - en
12
  pipeline_tag: text-generation
 
66
 
67
 
68
  ## Performance Benchmarks
69
+ We evaluate Katanemo Arch-Function series on the [Berkeley Function-Calling Leaderboard (BFCL)](https://gorilla.cs.berkeley.edu/leaderboard.html#leaderboard). We compare with commonly-used models and the results (as of Oct 21st, 2024) are shown below. For each model family, we select the one with the highest rank.
70
 
71
  <table>
72
  <tr style="text-align: center; vertical-align: middle; font-weight: bold;">
 
87
  </tr>
88
  <tr style="text-align: center; vertical-align: middle;">
89
  <td>1</td>
90
+ <td>GPT-4o-2024-08-06 (FC)</td>
91
+ <td>62.19%</td>
92
+ <td>85.90%</td>
93
+ <td>85.64%</td>
94
+ <td>75.43%</td>
95
+ <td>25.00%</td>
96
+ <td>63.41%</td>
97
+ <td>82.93%</td>
 
 
 
 
 
 
 
 
 
 
 
98
  </tr>
99
  <tr style="text-align: center; vertical-align: middle; font-weight: bold;">
100
  <td> </td>
101
  <td>Arch-Function-7B</td>
102
+ <td>59.62%</td>
103
+ <td>86.83%</td>
104
+ <td>88.07%</td>
105
+ <td>71.57%</td>
106
+ <td>21.00%</td>
107
+ <td>95.12%</td>
108
+ <td>73.63%</td>
109
+ </tr>
110
+ <tr style="text-align: center; vertical-align: middle;">
111
+ <td>6</td>
112
+ <td>o1-preview-2024-09-12 (Prompt)</td>
113
+ <td>59.27%</td>
114
+ <td>86.42%</td>
115
+ <td>88.88%</td>
116
+ <td>73.08%</td>
117
+ <td>17.62%</td>
118
+ <td>73.17%</td>
119
+ <td>74.60%</td>
120
+ </tr>
121
+ <tr style="text-align: center; vertical-align: middle; ">
122
+ <td>9</td>
123
+ <td>Gemini-1.5-Flash-002 (Prompt)</td>
124
+ <td>57.92%</td>
125
+ <td>86.58%</td>
126
+ <td>89.48%</td>
127
+ <td>76.28%</td>
128
+ <td>9.88%</td>
129
+ <td>85.37%</td>
130
+ <td>78.54%</td>
131
  </tr>
132
  <tr style="text-align: center; vertical-align: middle; font-weight: bold;">
133
  <td> </td>
134
  <td>Arch-Function-3B</td>
135
+ <td>57.69%</td>
136
+ <td>85.19%</td>
137
+ <td>86.18%</td>
138
+ <td>71.21%</td>
139
+ <td>17.50%</td>
140
  <td>90.24%</td>
141
+ <td>72.88%</td>
142
  </tr>
143
+ <tr style="text-align: center; vertical-align: middle; ">
144
+ <td>12</td>
145
+ <td>Claude-3.5-Sonnet-20240620 (FC)</td>
146
+ <td>57.42%</td>
147
+ <td>70.04%</td>
148
+ <td>66.27%</td>
149
+ <td>74.68%</td>
150
+ <td>28.38%</td>
151
+ <td>68.29%</td>
152
+ <td>74.58%</td>
 
 
 
 
 
 
 
 
 
 
 
153
  </tr>
154
+ <tr style="text-align: center; vertical-align: middle; ">
155
+ <td>13</td>
156
+ <td>mistral-large-2407 (FC)</td>
157
+ <td>56.80%</td>
158
+ <td>86.62%</td>
159
+ <td>84.57%</td>
160
+ <td>68.37%</td>
161
+ <td>20.62%</td>
162
+ <td>75.61%</td>
163
+ <td>49.44%</td>
164
  </tr>
165
  <tr style="text-align: center; vertical-align: middle; font-weight: bold;">
166
  <td> </td>
167
+ <td>Arch-Function-1.5B</td>
168
+ <td>56.20%</td>
169
+ <td>84.40%</td>
170
+ <td>83.96%</td>
171
+ <td>69.36%</td>
172
+ <td>15.88%</td>
173
  <td>87.80%</td>
174
+ <td>74.39%</td>
175
  </tr>
176
+ <tr style="text-align: center; vertical-align: middle; ">
177
+ <td>21</td>
178
+ <td>Llama-3.1-70B-Instruct (Prompt)</td>
179
+ <td>53.67%</td>
180
+ <td>88.90%</td>
181
+ <td>89.34%</td>
182
+ <td>61.13%</td>
183
+ <td>12.38%</td>
184
+ <td>92.68%</td>
185
+ <td>58.38%</td>
186
  </tr>
187
+ <tr style="text-align: center; vertical-align: middle; ">
188
+ <td>22</td>
189
+ <td>Gemma-2-27b-it (Prompt)</td>
190
+ <td>53.66%</td>
191
+ <td>88.52%</td>
192
+ <td>87.89%</td>
193
+ <td>69.48%</td>
194
+ <td>4.12%</td>
195
+ <td>87.80%</td>
196
+ <td>68.76%</td>
197
  </tr>
198
  </table>
199