Adam Jirkovsky committed
Commit · e3fc811
1 Parent(s): 38909e1

Update captcha logic

Files changed:
- app.py (+1, -1)
- src/submission/submit.py (+10, -10)
app.py
CHANGED
@@ -375,7 +375,7 @@ with demo:
                 contact_email,
                 captcha_correct,
             ],
-            outputs = [submission_result],
+            outputs = [submission_result, captcha_correct],
         )
 
         with gr.Row():
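In app.py, the captcha checkbox becomes a second output of the submit handler, so the handler's new second return value can overwrite the checkbox state after each attempt. A minimal self-contained sketch of that wiring pattern follows; only contact_email, captcha_correct, submission_result, and add_new_eval appear in the diff, so the button name and the stub handler body are assumptions:

# Minimal sketch of the wiring pattern in app.py; submit_button and
# the stub handler body are assumptions, not the repo's actual code.
import gradio as gr

def add_new_eval(contact_email, captcha_ok):
    # Stub standing in for src/submission/submit.py: every branch
    # now returns (message, new_captcha_state), not just the message.
    if not captcha_ok:
        return "Please prove you are a human!", captcha_ok
    return "Submitted.", False  # success resets the captcha

with gr.Blocks() as demo:
    contact_email = gr.Textbox(label="Contact email")
    captcha_correct = gr.Checkbox(label="Captcha solved", value=False)
    submission_result = gr.Markdown()
    submit_button = gr.Button("Submit")  # assumed name

    submit_button.click(
        add_new_eval,
        inputs=[contact_email, captcha_correct],
        # captcha_correct is now also an output: the handler's second
        # return value becomes the checkbox's new state.
        outputs=[submission_result, captcha_correct],
    )

demo.launch()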
src/submission/submit.py
CHANGED
@@ -33,19 +33,19 @@ def add_new_eval(
 ):
     try:
         if not eval_name:
-            return styled_error("Please provide a model name.")
+            return styled_error("Please provide a model name."), captcha_ok
 
         if not precision:
-            return styled_error("Please select precision.")
+            return styled_error("Please select precision."), captcha_ok
 
         if not contact_email:
-            return styled_error("Please provide your contact email.")
+            return styled_error("Please provide your contact email."), captcha_ok
 
         if not upload:
-            return styled_error("Please upload a results file.")
+            return styled_error("Please upload a results file."), captcha_ok
 
         if not captcha_ok:
-            return styled_error("Please prove you are a human!")
+            return styled_error("Please prove you are a human!"), captcha_ok
 
         with open(upload, mode="r") as f:
             data = json.load(f)
@@ -71,11 +71,11 @@ def add_new_eval(
                 continue
             if k not in BENCHMARK_COL_IDS:
                 print(f"Missing: {k}")
-                return styled_error(f'Missing: {k}')
+                return styled_error(f'Missing: {k}'), captcha_ok
 
         if len(BENCHMARK_COL_IDS) != len(ret) - 4:
             print(f"Missing columns")
-            return styled_error(f'Missing result entries')
+            return styled_error(f'Missing result entries'), captcha_ok
 
         # TODO add complex validation
         #print(results.keys())
@@ -99,7 +99,7 @@ def add_new_eval(
 
         if ret['eval_name'] in existing_eval_names:
             print(f"Model name {ret['eval_name']} is used!")
-            return styled_error(f"Model name {ret['eval_name']} is used!")
+            return styled_error(f"Model name {ret['eval_name']} is used!"), captcha_ok
 
         out_path = f"{OUT_DIR}/{eval_name}_eval_request.json"
 
@@ -218,7 +218,7 @@ def add_new_eval(
         """
         return styled_message(
             "Your results have been successfully submitted. They will be added to the leaderboard upon verification."
-        )
+        ), False
 
     except Exception as e:
-        return styled_error(f"An error occurred: {e}")
+        return styled_error(f"An error occurred: {e}"), captcha_ok
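Taken together, the submit.py hunks change the contract of add_new_eval: every exit path now returns a (message, captcha_state) pair. Validation errors and exceptions pass the incoming captcha_ok through unchanged, so a solved captcha survives a failed attempt, while the success path returns False, forcing the captcha to be solved again before the next submission. A condensed sketch of that contract, with the repo's styled_error/styled_message helpers stubbed out as assumptions:

# Condensed sketch of the new return contract; validation and
# persistence are elided, and the helper bodies are assumptions.
def styled_error(msg: str) -> str:
    return f"<p style='color: red;'>{msg}</p>"

def styled_message(msg: str) -> str:
    return f"<p style='color: green;'>{msg}</p>"

def add_new_eval(eval_name, precision, contact_email, upload, captcha_ok):
    try:
        if not captcha_ok:
            # Error paths echo captcha_ok back, so a solved captcha
            # is not thrown away by a failed validation.
            return styled_error("Please prove you are a human!"), captcha_ok
        # ... file parsing, validation, persistence elided ...
        return styled_message(
            "Your results have been successfully submitted. "
            "They will be added to the leaderboard upon verification."
        ), False  # success resets the captcha for the next submission
    except Exception as e:
        return styled_error(f"An error occurred: {e}"), captcha_ok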