run_id | timestamp_utc | timestamp_day_hour_utc | model_name_or_path | unitxt_card | unitxt_recipe | quantization_type | quantization_bit_count | inference_runtime_s | generation_args | model_args | inference_engine | packages_versions | scores | num_gpu | device |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
421a1a0295097e57c8d7726c6256cc357354e6c12e477b1c5b1de15e27741b7d | 1,736,456,164,091 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_capitals_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.328578 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.8, 'accuracy_ci_low': 0.72, 'accuracy_ci_high': 0.87, 'score_name': 'accuracy', 'score': 0.8, 'score_ci_high': 0.87, 'score_ci_low': 0.72, 'num_of_instances': 100} | 1 | a100_80gb |
4ad189e3c2c13787380a5b740c69c90d57590e3721d4b8469d1eb71abe0125b3 | 1,736,456,176,505 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.chemistry | card=cards.mmlu_pro.chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.909006 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.16, 'accuracy_ci_low': 0.1, 'accuracy_ci_high': 0.24, 'score_name': 'accuracy', 'score': 0.16, 'score_ci_high': 0.24, 'score_ci_low': 0.1, 'num_of_instances': 100} | 1 | a100_80gb |
69f55a3f75093d991c3ff7593dccb9cfd1f042cdaef60b588b6fd4318f375b1a | 1,736,456,182,221 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.381021 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.67, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
9d95af2f540a043a028099ac1866972a0cd8855c00295bbf336d4d70c66413a3 | 1,736,456,113,548 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_chemistry | card=cards.mmlu.college_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.287465 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.24, 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.32, 'score_name': 'accuracy', 'score': 0.24, 'score_ci_high': 0.32, 'score_ci_low': 0.16, 'num_of_instances': 100} | 1 | a100_80gb |
dcef5377a6238e922386e3f18b33bab74b64100dfc59afb55d0c5c72b223ad05 | 1,736,456,122,159 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.logical_fallacies | card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.062434 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.66, 'num_of_instances': 100} | 1 | a100_80gb |
a542c1ae8a567dafc3162dbd71092b9ce234ecd3f3c5b1927eed2b79681f56ee | 1,736,456,127,412 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.astronomy | card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.35884 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.51, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.6, 'score_name': 'accuracy', 'score': 0.51, 'score_ci_high': 0.6, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
5325a0c7bae6174b69fca45f7b86d254a7798cc9ea7b0600b26b87a26de09e68 | 1,736,456,134,953 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.marketing | card=cards.mmlu.marketing,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.951166 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.87, 'accuracy_ci_low': 0.78, 'accuracy_ci_high': 0.92, 'score_name': 'accuracy', 'score': 0.87, 'score_ci_high': 0.92, 'score_ci_low': 0.78, 'num_of_instances': 100} | 1 | a100_80gb |
18cb7a7e71be94d7d69c4d5ed5d5bccb298c8d4f8521127207fc697791e8f740 | 1,736,456,139,630 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.elementary_mathematics | card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.93327 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.35, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.45, 'score_name': 'accuracy', 'score': 0.35, 'score_ci_high': 0.45, 'score_ci_low': 0.26, 'num_of_instances': 100} | 1 | a100_80gb |
06653883b65f48d9bef873bbef83bb4cbf151f96e557df6f9523f8484d01208a | 1,736,456,145,345 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.philosophy | card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_numbers_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.190135 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.37, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
425d198abe021c3527d2ef1824ca2b52036f4835e5c507377c2ff4d2b8e5553f | 1,736,456,151,206 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_capitals_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.128911 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.7, 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.7, 'score_ci_high': 0.79, 'score_ci_low': 0.61, 'num_of_instances': 100} | 1 | a100_80gb |
20ef4227253e16f0bf3fec0426a83176734690cfb95ec2bf1b2424281de3ca4e | 1,736,456,157,370 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.biology | card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.036416 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.55, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.55, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
b49ace8fae6a72266574a1b0496881d2f2ddd52410f6e4dc6ffad0f160e0dafd | 1,736,456,167,143 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.94132 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.74, 'accuracy_ci_low': 0.65, 'accuracy_ci_high': 0.82, 'score_name': 'accuracy', 'score': 0.74, 'score_ci_high': 0.82, 'score_ci_low': 0.65, 'num_of_instances': 100} | 1 | a100_80gb |
5196f79c653c4cf9f7f0651094fcf6cfa32007e9075b6722d184a7caf11f38ca | 1,736,456,179,407 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.770642 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.57, 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.66, 'score_name': 'accuracy', 'score': 0.57, 'score_ci_high': 0.66, 'score_ci_low': 0.47, 'num_of_instances': 100} | 1 | a100_80gb |
6c014c86ada5820b0eecab957ac317d4f087c6c4912e442501db240d8461a9b6 | 1,736,456,093,586 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_numbers_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.253075 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.47, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.47, 'score_ci_high': 0.56, 'score_ci_low': 0.37, 'num_of_instances': 100} | 1 | a100_80gb |
28cc4e780b9133117a5e386adab51eb9dec0c66eafff6edc5b7d9c58a392f004 | 1,736,456,103,233 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_medicine | card=cards.mmlu.college_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.747221 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.752634219778273, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.752634219778273, 'score_ci_low': 0.56, 'num_of_instances': 100} | 1 | a100_80gb |
5c072e5896bfd598ec295f2a418a141966865924e44fb59011628d7d2fdcdac3 | 1,736,456,167,642 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_medicine | card=cards.mmlu.college_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.806668 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.64, 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.64, 'score_ci_high': 0.74, 'score_ci_low': 0.54, 'num_of_instances': 100} | 1 | a100_80gb |
be4f1382e25047755269cdae8bca717ec7f0f0b77da8bd6927a46985e9e1b481 | 1,736,456,124,851 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 20.477413 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.73, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.81, 'score_name': 'accuracy', 'score': 0.73, 'score_ci_high': 0.81, 'score_ci_low': 0.64, 'num_of_instances': 100} | 1 | a100_80gb |
7bdd1c2fd2aecbb15be681f9091193b3ea5cfbb0232b0e1a29fc3157ea5b0aaa | 1,736,456,130,635 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_greek_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.032057 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.2, 'accuracy_ci_low': 0.13, 'accuracy_ci_high': 0.29, 'score_name': 'accuracy', 'score': 0.2, 'score_ci_high': 0.29, 'score_ci_low': 0.13, 'num_of_instances': 100} | 1 | a100_80gb |
c65269bd7b7d032d602598f2a35e13473d33287c1500e30b27369acc814b1789 | 1,736,456,142,618 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.engineering | card=cards.mmlu_pro.engineering,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_greek_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.438957 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.23, 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.32, 'score_name': 'accuracy', 'score': 0.23, 'score_ci_high': 0.32, 'score_ci_low': 0.16, 'num_of_instances': 100} | 1 | a100_80gb |
baf9c13f27da662d80c83b61402d6c6cce3b8dbce7e5962a4ea244211c8aa0a5 | 1,736,456,147,307 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.623824 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
1aed17a8cfefcf95d65b8dde27f8451bb09c3fb6c18855913124b935662edaab | 1,736,456,152,855 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.economics | card=cards.mmlu_pro.economics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.005547 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.5670980726228083, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.5670980726228083, 'score_ci_low': 0.37, 'num_of_instances': 100} | 1 | a100_80gb |
7defa16765f2941dd347170317fa229fc1bad695c5cc1cf2b286212244771350 | 1,736,456,158,188 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.477125 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.25, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.34, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
089d4f3b4e4656119eb7359f9ce0793b7e7b49cc3e72a5c9a790c261794539e8 | 1,736,456,175,772 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.world_religions | card=cards.mmlu.world_religions,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.2276 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.84, 'score_ci_low': 0.67, 'num_of_instances': 100} | 1 | a100_80gb |
5cc45b76707ca48785ac53240c5183fa76585fe2d3dc87a462fdb3ffe8327319 | 1,736,456,106,327 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.electrical_engineering | card=cards.mmlu.electrical_engineering,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.943037 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.63, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
a7b771741b7992bb03ebbfeb0e8f59cfc39a39b1fcf79efe567e6597f4fae379 | 1,736,456,113,565 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.709056 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.77, 'accuracy_ci_low': 0.68, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.77, 'score_ci_high': 0.84, 'score_ci_low': 0.68, 'num_of_instances': 100} | 1 | a100_80gb |
df1331a716070f830f76673fa05e4e71f92664d34905eb13bf3a2d9ecdef798c | 1,736,456,120,222 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.927625 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.51, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.61, 'score_name': 'accuracy', 'score': 0.51, 'score_ci_high': 0.61, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
de202436a1fe038bdbf0168b20772b0bf35196dca143aae47d1a93fc1d69e6a1 | 1,736,456,124,369 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_macroeconomics | card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.504515 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.64, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
62489245c9cefa9f55e8df48ec49cbe024d53ad55c0a8d450bcdc534df9f120b | 1,736,456,131,686 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.public_relations | card=cards.mmlu.public_relations,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.762018 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.81, 'accuracy_ci_low': 0.72, 'accuracy_ci_high': 0.87, 'score_name': 'accuracy', 'score': 0.81, 'score_ci_high': 0.87, 'score_ci_low': 0.72, 'num_of_instances': 100} | 1 | a100_80gb |
842b4db99292377c3d56b74386db18f42e768d4bd92d38163a017430230cbed8 | 1,736,456,137,976 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.520446 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.78, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.85, 'score_name': 'accuracy', 'score': 0.78, 'score_ci_high': 0.85, 'score_ci_low': 0.7, 'num_of_instances': 100} | 1 | a100_80gb |
06cdf679eaa884ae52b70754ae282f30c59dcab2e16253a423747dc0f5360cac | 1,736,456,143,067 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 2.427266 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
13942d5b1486a355cf72ff011e1a91bbf4a717f56154ce48856b8b5ca52f679f | 1,736,456,158,077 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.63014 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.66, 'num_of_instances': 100} | 1 | a100_80gb |
1c853674f180831e8e007d2293c215e7f66b2cea05e4b3ea2a24263a5b37bc5f | 1,736,456,146,720 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.conceptual_physics | card=cards.mmlu.conceptual_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.173303 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.33, 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.42, 'score_name': 'accuracy', 'score': 0.33, 'score_ci_high': 0.42, 'score_ci_low': 0.24, 'num_of_instances': 100} | 1 | a100_80gb |
9b9a28086737d3e54c16aa00572cb60a9fb8bd76cd5850913864061ef1a56244 | 1,736,456,151,835 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_medicine | card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.601044 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.52, 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.52, 'score_ci_high': 0.62, 'score_ci_low': 0.42, 'num_of_instances': 100} | 1 | a100_80gb |
806756ecbf7bcb2fbbec26980b52346266fad1f872b4a1e2c210989abfae9d89 | 1,736,456,084,320 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.physics | card=cards.mmlu_pro.physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.028456 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.15, 'accuracy_ci_low': 0.09, 'accuracy_ci_high': 0.23, 'score_name': 'accuracy', 'score': 0.15, 'score_ci_high': 0.23, 'score_ci_low': 0.09, 'num_of_instances': 100} | 1 | a100_80gb |
492b071573b3a6bbc0b9b4a3d8560d3ed47ac8963e98348cbc5d6e106b6c48d9 | 1,736,456,092,995 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.econometrics | card=cards.mmlu.econometrics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.022907 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.33, 'accuracy_ci_high': 0.52, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.52, 'score_ci_low': 0.33, 'num_of_instances': 100} | 1 | a100_80gb |
fd2f2d1f5cbfedaad04c0a2b399359a1d3a9a7b4f9c311c19cd051030721bab7 | 1,736,456,097,293 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.3996 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.36, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
1298a337426a049af8d91ad995f68bc557a49700b408a64d8a36cb0dd3415196 | 1,736,456,100,444 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 2.611089 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.37, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
81a9a90b37d7aaa247c19fa3753177d8cad93623a09440c317cde083767c41bf | 1,736,456,107,347 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_world_history | card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.383146 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.6676473737438486, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.6676473737438486, 'num_of_instances': 100} | 1 | a100_80gb |
872829733ed1748af9db8541ed0a1dd04c5add0d3ae7576431495edeaaaf3472 | 1,736,456,122,298 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.chemistry | card=cards.mmlu_pro.chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSACould.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 14.119248 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.2, 'accuracy_ci_low': 0.14, 'accuracy_ci_high': 0.29, 'score_name': 'accuracy', 'score': 0.2, 'score_ci_high': 0.29, 'score_ci_low': 0.14, 'num_of_instances': 100} | 1 | a100_80gb |
4b03583745b260e6aa9d67733fc3147f32a9894780ac549f1011e3d67f5be73d | 1,736,456,126,953 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.miscellaneous | card=cards.mmlu.miscellaneous,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_greek_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.507177 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.75, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
2f232c1bffa719e5c79369fe1b6306ad4403d8328adddfc244b015d0b05a6f3c | 1,736,456,133,179 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.history | card=cards.mmlu_pro.history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.731011 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.25, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.34, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
5191b157df084f59c55badc0acc5d1dd8af2855df15cc8d6ee08f311fe822b10 | 1,736,456,141,763 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_chemistry | card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_roman_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.728659 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.43, 'accuracy_ci_low': 0.34, 'accuracy_ci_high': 0.53, 'score_name': 'accuracy', 'score': 0.43, 'score_ci_high': 0.53, 'score_ci_low': 0.34, 'num_of_instances': 100} | 1 | a100_80gb |
8d97ea76b86e5b1f3a9c935fadf20d06fe569bcae949db19ed8c9748c587bea7 | 1,736,456,149,451 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.math | card=cards.mmlu_pro.math,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.870559 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.64, 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.64, 'score_ci_high': 0.73, 'score_ci_low': 0.54, 'num_of_instances': 100} | 1 | a100_80gb |
190753c457fb048c8db59341312032193fd9ce1a4ffc11c50465dcc7b76d0c13 | 1,736,456,087,168 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.nutrition | card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.372392 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.8372929013367616, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.8372929013367616, 'score_ci_low': 0.67, 'num_of_instances': 100} | 1 | a100_80gb |
18562e73e1e06f6642dfba45e72680fcd0fc91f0d89469bc2ec6bfafafaa9175 | 1,736,456,094,433 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.960022 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.49, 'accuracy_ci_low': 0.38973380258259815, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.58, 'score_ci_low': 0.38973380258259815, 'num_of_instances': 100} | 1 | a100_80gb |
baa2595c43d8c4d6bcedeeabe0d3151d0b107ea9fe52d2c7704b16fd6bc9b309 | 1,736,456,099,168 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_physics | card=cards.mmlu.college_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_greek_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.519618 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.32, 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.41, 'score_ci_low': 0.23, 'num_of_instances': 100} | 1 | a100_80gb |
ee0dc9fbee461bee76f49840ab6c45a2f5604927834a61eedb9d9f4f08272d10 | 1,736,456,103,428 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_physics | card=cards.mmlu.high_school_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.706579 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.37, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
b5f6085871488fe70b9e459b449598c832ea850e68ee0b191d84db81c654d9b7 | 1,736,456,110,296 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.physics | card=cards.mmlu_pro.physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.268895 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.07, 'accuracy_ci_low': 0.03, 'accuracy_ci_high': 0.13, 'score_name': 'accuracy', 'score': 0.07, 'score_ci_high': 0.13, 'score_ci_low': 0.03, 'num_of_instances': 100} | 1 | a100_80gb |
be81b3f9c9ab22c15bf1b8b6377c28f9efca53001765f4ff2d362361f0b834c8 | 1,736,456,114,666 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.689085 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.72, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.8, 'score_ci_low': 0.64, 'num_of_instances': 100} | 1 | a100_80gb |
084e89034a50bb78607c9fdeb1b28dd23aa200ef4d2b63b0905d7cb08d56530b | 1,736,456,122,413 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.ai2_arc.arc_easy | card=cards.ai2_arc.arc_easy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.173781 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.87, 'accuracy_ci_low': 0.79, 'accuracy_ci_high': 0.93, 'score_name': 'accuracy', 'score': 0.87, 'score_ci_high': 0.93, 'score_ci_low': 0.79, 'num_of_instances': 100} | 1 | a100_80gb |
8fe14b85c375b35a4e4ac005456de8758267350fb24579e4c20f1eb62b6300e5 | 1,736,456,130,623 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.elementary_mathematics | card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.503324 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.62, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
e45c22b3775cb5822e7f7bdf4bc6535cbee30188dc9c7423f18904962d6e30d7 | 1,736,456,135,883 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_biology | card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_greek_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.886775 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.83, 'accuracy_ci_low': 0.75, 'accuracy_ci_high': 0.89, 'score_name': 'accuracy', 'score': 0.83, 'score_ci_high': 0.89, 'score_ci_low': 0.75, 'num_of_instances': 100} | 1 | a100_80gb |
36f8b2ecb5d7a7e993ac8d31ff00da146342dc2ad8d6057325696164a0336b91 | 1,736,456,140,996 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.496143 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.74, 'accuracy_ci_low': 0.65, 'accuracy_ci_high': 0.82, 'score_name': 'accuracy', 'score': 0.74, 'score_ci_high': 0.82, 'score_ci_low': 0.65, 'num_of_instances': 100} | 1 | a100_80gb |
c40fd4e696ce562b769dc619f7d6ddc4ff451a117b93f8e5631e05888c660d78 | 1,736,456,011,123 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.5485 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.69, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
648cb937deddfc5ebb056e70eb7cdd2fa779f482f2143261277603754db8269c | 1,736,456,026,219 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.law | card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 14.432779 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.38, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
ced053aee626f02a1c567eba71b967e912b72a5746b129c6fc8f2ac4b8ceec4c | 1,736,456,033,123 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_medicine | card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_greek_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.818842 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.63, 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.72, 'score_name': 'accuracy', 'score': 0.63, 'score_ci_high': 0.72, 'score_ci_low': 0.53, 'num_of_instances': 100} | 1 | a100_80gb |
da72496f88eb95c93cd893090106951467b7eb7ea70b9ca975a83a32389a410f | 1,736,456,037,876 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_physics | card=cards.mmlu.college_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.62149 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.21, 'accuracy_ci_low': 0.14, 'accuracy_ci_high': 0.3, 'score_name': 'accuracy', 'score': 0.21, 'score_ci_high': 0.3, 'score_ci_low': 0.14, 'num_of_instances': 100} | 1 | a100_80gb |
a67ea458ba260e1b1fefcdc308db57ffd4914f2c606eb65c0404dc285d79fa3f | 1,736,456,049,762 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.engineering | card=cards.mmlu_pro.engineering,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.338367 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.4, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
dde6625c189b68a2877e990d726bf8c0abd2d5b443c1e8efcef4ecc4221fc539 | 1,736,456,061,314 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.business | card=cards.mmlu_pro.business,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.047015 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.3, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.3, 'score_ci_high': 0.4, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
8ac50f71ca8371173e986472906f50761a85a6fb73d9abfd8231d56b1f2d0074 | 1,736,456,074,287 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.348437 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.49, 'accuracy_ci_low': 0.3940452774063561, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.58, 'score_ci_low': 0.3940452774063561, 'num_of_instances': 100} | 1 | a100_80gb |
3b9bd6aa76f4caa51de6457bdba498d06cf894de80850ae0870cfb1e9e8a9389 | 1,736,456,083,526 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.561738 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.8, 'accuracy_ci_low': 0.71, 'accuracy_ci_high': 0.87, 'score_name': 'accuracy', 'score': 0.8, 'score_ci_high': 0.87, 'score_ci_low': 0.71, 'num_of_instances': 100} | 1 | a100_80gb |
3448752a95e10c21de539d15c82fad6d100d5948cacca5ae077818c2a3f46b33 | 1,736,456,101,558 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 17.177184 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.69, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
764250fe96356ae83c64970f852b0257b162f125e40092e5b1f572bc364c8d94 | 1,736,456,107,949 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_computer_science | card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.953957 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.54, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
e8cd2f7990305e02ff6dbab25b5a527df08fd19b4ed30237ed592cb99ecbede0 | 1,736,456,017,764 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.other | card=cards.mmlu_pro.other,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_lowercase_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.155484 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.36, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
5f685cf0ac2404568dd7c5bfa43507a90a029a3c8d2207baf4f6b1cf4264818a | 1,736,456,025,620 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_macroeconomics | card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.067219 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.47, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.47, 'score_ci_high': 0.56, 'score_ci_low': 0.37, 'num_of_instances': 100} | 1 | a100_80gb |
9d679edf6223afde09a6c094467836b7aff0a0846966d22f11bbb76d7c1df33b | 1,736,456,033,592 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.marketing | card=cards.mmlu.marketing,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.10528 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.92, 'accuracy_ci_low': 0.85, 'accuracy_ci_high': 0.97, 'score_name': 'accuracy', 'score': 0.92, 'score_ci_high': 0.97, 'score_ci_low': 0.85, 'num_of_instances': 100} | 1 | a100_80gb |
15194b734f7b1ed9a3a03570b2a596cab84adbc9dea3ba8d3c8c4effb6a89786 | 1,736,456,045,298 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.801249 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.68, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.77, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
e67e0420b0fcd4024aee4ca6f3848d2b8a3a1fa4d41ac1d8a86e7c11a0860648 | 1,736,456,052,502 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.22573 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.31, 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_name': 'accuracy', 'score': 0.31, 'score_ci_high': 0.41, 'score_ci_low': 0.23, 'num_of_instances': 100} | 1 | a100_80gb |
b34de24fcd96b75bbe785a50cdf938b7821ecd2cda4d8819bf3bfc9447f923d0 | 1,736,456,064,335 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.42031 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.36, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
c34d22e96ce8922933f1d404a508728e19e6c0d6d6620a45d0f8335bd563641c | 1,736,456,060,205 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.016584 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.83, 'score_ci_low': 0.67, 'num_of_instances': 100} | 1 | a100_80gb |
27c77b813775d87d27aa41366e89b50463d3c695cb4c34994463d7876351c5f9 | 1,736,456,074,523 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.clinical_knowledge | card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.665115 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.6, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.6, 'score_ci_high': 0.7, 'score_ci_low': 0.51, 'num_of_instances': 100} | 1 | a100_80gb |
0589e52dde6443b954cf7695af85bf18dfcb330ff057a7fef70a12c06d6ffe69 | 1,736,456,087,770 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.chemistry | card=cards.mmlu_pro.chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_roman_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 12.448085 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.19, 'accuracy_ci_low': 0.12, 'accuracy_ci_high': 0.28, 'score_name': 'accuracy', 'score': 0.19, 'score_ci_high': 0.28, 'score_ci_low': 0.12, 'num_of_instances': 100} | 1 | a100_80gb |
8fa13792e9723b70228c57ce50f2e71ace7914cb512ee33f4b8f0669f83ccaa1 | 1,736,456,099,675 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.business | card=cards.mmlu_pro.business,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.809836 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.38, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
4a89ac4059509c768e9508585f7eacfed47c4ec206f090b15bf3fe231f026740 | 1,736,456,009,087 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.clinical_knowledge | card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.173644 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.69, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
471ca33ba62cb76c3e55764e0432112f3b1d2e67aa5dca955e24a91ae91f8c75 | 1,736,456,017,548 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_chemistry | card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_numbers_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.608854 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.43, 'accuracy_ci_low': 0.33, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.43, 'score_ci_high': 0.54, 'score_ci_low': 0.33, 'num_of_instances': 100} | 1 | a100_80gb |
113620ad71aa86663279284dddf58f0babeff6e819b4706a791567445311b7ef | 1,736,456,024,111 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.633331 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.72, 'accuracy_ci_low': 0.62, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.79, 'score_ci_low': 0.62, 'num_of_instances': 100} | 1 | a100_80gb |
861dae09aef8c0efb55539f47ae315f4d2d30ae96f1bf73ae78c23c1002dcb44 | 1,736,456,035,043 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.health | card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.151788 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.55, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.55, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
7422dd6f931662acfe0d8bd44ba286e3137309d17d4eea699b6fbdc656b139b2 | 1,736,456,043,502 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.421089 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.65, 'accuracy_ci_high': 0.8280222281295914, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.8280222281295914, 'score_ci_low': 0.65, 'num_of_instances': 100} | 1 | a100_80gb |
91e930bbadf95636d023d57f978d72a2011e45eff666b71d0f2962582bf54270 | 1,736,456,053,758 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.law | card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.39465 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.22, 'accuracy_ci_low': 0.15, 'accuracy_ci_high': 0.3, 'score_name': 'accuracy', 'score': 0.22, 'score_ci_high': 0.3, 'score_ci_low': 0.15, 'num_of_instances': 100} | 1 | a100_80gb |
cb16ac4b71db7d66537a21d67d346bb9ae013eb25d0c79c1536c0f256a92ef48 | 1,736,456,058,972 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.formal_logic | card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.059153 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.39, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.39, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
3a2416924236483b2e72914a7a7dc9f54462cd3f95f71d5e611beeaaee3655ab | 1,736,456,069,510 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.international_law | card=cards.mmlu.international_law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.923741 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.66, 'num_of_instances': 100} | 1 | a100_80gb |
d563463144533130da7a10c5751dace022fbd6823320c7b5ffedb39ec10537cc | 1,736,456,077,625 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.electrical_engineering | card=cards.mmlu.electrical_engineering,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_roman_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.035816 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.68, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.77, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
0c1ffecd4d7587eaa43770934b7c733fd1f2b82e7181aef45846c66e7faa956c | 1,736,456,086,396 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_biology | card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.013835 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.77, 'accuracy_ci_low': 0.69, 'accuracy_ci_high': 0.85, 'score_name': 'accuracy', 'score': 0.77, 'score_ci_high': 0.85, 'score_ci_low': 0.69, 'num_of_instances': 100} | 1 | a100_80gb |
a4055cf61023212ba2292ed5852131ca1b40b6417f3425f9a11c2feed580347d | 1,736,456,020,285 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_greek_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.056086 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.62, 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.71, 'score_ci_low': 0.52, 'num_of_instances': 100} | 1 | a100_80gb |
1c71a8380d71334ea209076c25c9315be5c2ef510d9957fc40b593211337333d | 1,736,456,026,825 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_chemistry | card=cards.mmlu.college_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.081747 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.37, 'accuracy_ci_low': 0.28, 'accuracy_ci_high': 0.47, 'score_name': 'accuracy', 'score': 0.37, 'score_ci_high': 0.47, 'score_ci_low': 0.28, 'num_of_instances': 100} | 1 | a100_80gb |
a080c2899d031dbb80205cdcd7f64906b4477984dd4bc9a41c886d45ad0d88c7 | 1,736,456,031,078 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_physics | card=cards.mmlu.college_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.62833 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.43, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.43, 'score_ci_low': 0.26, 'num_of_instances': 100} | 1 | a100_80gb |
323358f3de5537a8b5366ef7e477ba21db5779b3c2aa19e9d5e364593e36d9e9 | 1,736,456,038,389 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.marketing | card=cards.mmlu.marketing,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.692959 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.83, 'accuracy_ci_low': 0.74, 'accuracy_ci_high': 0.89, 'score_name': 'accuracy', 'score': 0.83, 'score_ci_high': 0.89, 'score_ci_low': 0.74, 'num_of_instances': 100} | 1 | a100_80gb |
272188ee50d632a10708863a4ebbe29e500c63a91ae5eb5b4c2b5f8a20e28d08 | 1,736,456,042,350 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.197391 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.54, 'num_of_instances': 100} | 1 | a100_80gb |
e35d721556a563a920613df984bb810a314a5ddd11d33528a58bec32db7c65f9 | 1,736,456,049,007 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.151748 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.37, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.47, 'score_name': 'accuracy', 'score': 0.37, 'score_ci_high': 0.47, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
3a6fdaf429bb37c7118f4e4c08bcb3d9317914083e91c6d42221bb6ff6c4458b | 1,736,456,054,827 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.nutrition | card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_greek_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.64672 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.5, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.59, 'score_name': 'accuracy', 'score': 0.5, 'score_ci_high': 0.59, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
ad2e013b97106823767f60c4f4932927be57d0e90ed86cda3401e47ee19f6d25 | 1,736,456,066,691 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.235104 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.48, 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.58, 'score_ci_low': 0.38, 'num_of_instances': 100} | 1 | a100_80gb |
f0586950cf1c8ca85874785a7370dbc07f024a917d939fa71c5e8886baf97cc6 | 1,736,456,071,127 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.282847 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.66, 'num_of_instances': 100} | 1 | a100_80gb |
c025d9fcc1ea765fcadd15a51b0144aa8823a53325d7c7085ec2c63409ced32b | 1,736,456,076,709 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.ai2_arc.arc_challenge | card=cards.ai2_arc.arc_challenge,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_pipe_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.063009 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.62, 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.71, 'score_ci_low': 0.53, 'num_of_instances': 100} | 1 | a100_80gb |
3ea522af6669ad8b7d84e44c35937823a54841b8392a5f17890e8af37131c9cd | 1,736,455,965,391 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.815254 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.39, 'accuracy_ci_low': 0.30638474581218117, 'accuracy_ci_high': 0.49, 'score_name': 'accuracy', 'score': 0.39, 'score_ci_high': 0.49, 'score_ci_low': 0.30638474581218117, 'num_of_instances': 100} | 1 | a100_80gb |
7c5ec72ba35e9d6761a791a3cbd077e7ca6d89e0914362a00b59bb1fc0010383 | 1,736,455,975,844 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.astronomy | card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_greek_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.379534 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.73, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.81, 'score_name': 'accuracy', 'score': 0.73, 'score_ci_high': 0.81, 'score_ci_low': 0.64, 'num_of_instances': 100} | 1 | a100_80gb |
53efecb41f5dbde60da6bde2d66ac76df1d01984ae6090482edff50b5e8865ee | 1,736,455,984,538 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_chemistry | card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.349664 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.5, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.6075815615474596, 'score_name': 'accuracy', 'score': 0.5, 'score_ci_high': 0.6075815615474596, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
e6c160ebab5bdce99a8665986ccde704c45dc9ea0e4b25c184f3139985f7bd04 | 1,736,455,990,990 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.health | card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.192467 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.54, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
e4f6afb34be491fab5dcd2adb1f01e25e4ca2c79d0299d0f11c7f8af0eeb8f09 | 1,736,455,995,277 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_capitals_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.499481 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.84, 'score_ci_low': 0.67, 'num_of_instances': 100} | 1 | a100_80gb |
179724b3df9cf8afef38c673e81d035c90fe21c39fbe4229cec1daa8709767d9 | 1,736,456,000,395 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.537529 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37370984545172975, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.37370984545172975, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
7bbbb6dff9defd0910abceb650c7e7e711dad49315bdae4ed427bb2f5b5e6599 | 1,736,456,008,779 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.77266 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.78, 'accuracy_ci_low': 0.69, 'accuracy_ci_high': 0.85, 'score_name': 'accuracy', 'score': 0.78, 'score_ci_high': 0.85, 'score_ci_low': 0.69, 'num_of_instances': 100} | 1 | a100_80gb |