| Column | Type | Summary |
| --- | --- | --- |
| run_id | large_string | length: min 64, max 64 |
| timestamp_utc | int64 | min 1,736B, max 1,737B |
| timestamp_day_hour_utc | int64 | min 1,736B, max 1,737B |
| model_name_or_path | large_string | 5 distinct values |
| unitxt_card | large_string | 76 distinct values |
| unitxt_recipe | large_string | length: min 330, max 400 |
| quantization_type | large_string | 1 distinct value |
| quantization_bit_count | large_string | 1 distinct value |
| inference_runtime_s | float64 | min 1.18, max 295 |
| generation_args | large_string | 1 distinct value |
| model_args | large_string | 5 distinct values |
| inference_engine | large_string | 1 distinct value |
| packages_versions | large_string | 1 distinct value |
| scores | large_string | length: min 174, max 242 |
| num_gpu | int64 | min 1, max 1 |
| device | large_string | 1 distinct value |
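The args, versions, and scores columns above are stored as strings rather than nested structures. Below is a minimal sketch of loading such a split and decoding those fields; the dataset path `org/llm-eval-runs` is a placeholder, and the decoding assumes the formatting visible in the records that follow (JSON with double quotes for the args/versions fields, a Python dict repr with single quotes for scores).

```python
import ast
import json

from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset path.
ds = load_dataset("org/llm-eval-runs", split="train")

def decode_record(row: dict) -> dict:
    """Decode the string-encoded columns of one evaluation record."""
    out = dict(row)
    # Serialized as JSON (double quotes, null/false literals).
    for key in ("generation_args", "model_args", "packages_versions"):
        out[key] = json.loads(row[key])
    # `scores` looks like a Python dict repr (single quotes), so use literal_eval.
    out["scores"] = ast.literal_eval(row["scores"])
    return out

first = decode_record(ds[0])
print(first["scores"]["accuracy"], first["model_args"]["model"])
```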
43e6f6fb631e5c47cc109a996068baa9c1cdb8759c86ebf20718cdeab6e0c8a6
1,736,456,359,755
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.machine_learning
card=cards.mmlu.machine_learning,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.72382
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.34, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.43561760629112994, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.43561760629112994, 'score_ci_low': 0.25, 'num_of_instances': 100}
1
a100_80gb
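Each unitxt_recipe value, like the one in the record above, is a comma-separated list of key=value settings (card, num_demos, template, and so on). The sketch below splits such a string into a dictionary; it is plain string handling for inspection purposes, not a unitxt API call.

```python
def parse_recipe(recipe: str) -> dict:
    """Split a unitxt recipe string into its key=value settings."""
    settings = {}
    for part in recipe.split(","):
        key, _, value = part.partition("=")
        settings[key] = value
    return settings

# Recipe string from the record above.
recipe = (
    "card=cards.mmlu.machine_learning,demos_pool_size=100,num_demos=0,"
    "format=formats.chat_api,template=templates.huji_workshop.MMLU."
    "MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed."
    "enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_lengthSort,"
    "system_prompt=system_prompts.empty,demos_taken_from=train,"
    "demos_removed_from_data=True,max_test_instances=100"
)
parsed = parse_recipe(recipe)
print(parsed["num_demos"], parsed["template"])
```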
f0d9a653d30a428c073436e4ae9e5dc6d6d3a335cd2fe5def5d7c6f29394569c
1,736,456,369,625
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.professional_accounting
card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.705451
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.4, 'accuracy_ci_low': 0.31, 'accuracy_ci_high': 0.51, 'score_name': 'accuracy', 'score': 0.4, 'score_ci_high': 0.51, 'score_ci_low': 0.31, 'num_of_instances': 100}
1
a100_80gb
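The timestamp_utc and timestamp_day_hour_utc values appear to be Unix epoch times in milliseconds, with the latter floored to the start of the hour. Assuming that interpretation, a short conversion sketch:

```python
from datetime import datetime, timezone

def to_utc(ms: int) -> datetime:
    """Interpret an integer as Unix epoch milliseconds and return a UTC datetime."""
    return datetime.fromtimestamp(ms / 1000, tz=timezone.utc)

print(to_utc(1_736_456_369_625))  # timestamp_utc of the record above
print(to_utc(1_736_452_800_000))  # the hour bucket it falls into
```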
ba23aef7e59d0f01e9ecef2573d64a7d280b52b578d54e48f8ffa9ddd49d0398
1,736,456,376,066
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.health
card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.496366
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.24, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.33, 'score_name': 'accuracy', 'score': 0.24, 'score_ci_high': 0.33, 'score_ci_low': 0.17, 'num_of_instances': 100}
1
a100_80gb
b5186cb82e9e50a665b214575dda234b7e82e85bc55efae5a5d6f64d6e160564
1,736,456,283,134
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_computer_science
card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_roman_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
12.277388
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.69, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100}
1
a100_80gb
d0ffe08ee7ce2bd6bd9c4a49dbafc7fcd1ec6c7837eab4c1f10d6cad54b0e853
1,736,456,363,676
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_computer_science
card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
12.046299
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.67, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.77, 'score_ci_low': 0.58, 'num_of_instances': 100}
1
a100_80gb
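The same unitxt_card appears in several records with different prompt templates; the two cards.mmlu.high_school_computer_science runs directly above score 0.69 and 0.67 under different templates. The sketch below groups decoded records by card to compare accuracy across templates, reusing `ds`, `decode_record`, and `parse_recipe` from the earlier sketches and assuming pandas is installed.

```python
import pandas as pd

rows = [decode_record(r) for r in ds]
df = pd.DataFrame(
    [
        {
            "card": r["unitxt_card"],
            "template": parse_recipe(r["unitxt_recipe"])["template"],
            "accuracy": r["scores"]["accuracy"],
        }
        for r in rows
    ]
)

# Spread of accuracy across prompt templates for each card; the per-template
# rows remain available in `df` for closer inspection.
summary = df.groupby("card")["accuracy"].agg(["mean", "min", "max", "count"])
print(summary.sort_values("mean", ascending=False).head())
```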
7180950f5f189377c8e4d592c61a15e8393e8bf4d28da94310369d22ff9d8a94
1,736,456,288,990
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_geography
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_roman_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.31812
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.56, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.65, 'score_ci_low': 0.45, 'num_of_instances': 100}
1
a100_80gb
594e7bb00b3724c8bab4a55b5d60af4a7865e654c65a7bf0f63fe0f67daafdc5
1,736,456,293,467
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_government_and_politics
card=cards.mmlu.high_school_government_and_politics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_greek_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.94416
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.79, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.86, 'score_name': 'accuracy', 'score': 0.79, 'score_ci_high': 0.86, 'score_ci_low': 0.7, 'num_of_instances': 100}
1
a100_80gb
6230005784f93b3a1198f21813daced7aa124099a3be531e6c8473f79d265738
1,736,456,299,363
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.physics
card=cards.mmlu_pro.physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.258107
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.09, 'accuracy_ci_low': 0.05, 'accuracy_ci_high': 0.17202120525548595, 'score_name': 'accuracy', 'score': 0.09, 'score_ci_high': 0.17202120525548595, 'score_ci_low': 0.05, 'num_of_instances': 100}
1
a100_80gb
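The model_args and generation_args fields pin down the inference setup behind these records. The following is a hedged sketch of recreating a comparable vLLM engine and sampling configuration from those values; the null temperature and top_p entries are left at vLLM's defaults here, so this approximates the recorded setup rather than reproducing the original harness.

```python
from vllm import LLM, SamplingParams

# Engine settings taken from the model_args field shown above.
llm = LLM(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    seed=0,
    max_num_batched_tokens=4096,
    gpu_memory_utilization=0.7,
    max_model_len=4096,
    tensor_parallel_size=1,
    dtype="half",  # half precision, matching these records
)

# Sampling settings taken from the generation_args field.
sampling = SamplingParams(
    n=1,
    max_tokens=64,
    seed=42,
    top_k=-1,
    logprobs=5,
    prompt_logprobs=1,
    skip_special_tokens=False,
)

outputs = llm.generate(["Answer with a single letter: ..."], sampling)
print(outputs[0].outputs[0].text)
```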
aba9d5c18a4e0ef46e3a0f1536aaf376cb91802e5a9113f547a2df9b59a92e51
1,736,456,309,035
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.professional_accounting
card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.999926
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.58, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.68, 'score_ci_low': 0.49, 'num_of_instances': 100}
1
a100_80gb
6438ab693cde1dd161920803e3fc008477bb8d95c67b49e28fa221a91bcc63c3
1,736,456,342,322
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.prehistory
card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
32.314101
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.83, 'score_ci_low': 0.67, 'num_of_instances': 100}
1
a100_80gb
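The packages_versions field records the exact library versions behind every run. A small sketch of turning that JSON string into pip-style pins for rebuilding a comparable environment:

```python
import json

# packages_versions value from the record above.
packages = json.loads(
    '{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", '
    '"datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}'
)
pins = [f"{name}=={version}" for name, version in packages.items()]
print("\n".join(pins))  # e.g. torch==2.5.1, suitable for a requirements.txt
```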
3414ed9c89bdba6a1ef5a18dd16e39b6639aa1c41b9f4cd66b45c64affaa5fb3
1,736,456,350,776
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.us_foreign_policy
card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.553101
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.84, 'accuracy_ci_low': 0.76, 'accuracy_ci_high': 0.9, 'score_name': 'accuracy', 'score': 0.84, 'score_ci_high': 0.9, 'score_ci_low': 0.76, 'num_of_instances': 100}
1
a100_80gb
06c256081967d74c45063607cc1e34dbbcbf3155ac2093c89a4e571a3e02204c
1,736,456,373,429
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.marketing
card=cards.mmlu.marketing,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_greek_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.02422
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.8, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.87, 'score_name': 'accuracy', 'score': 0.8, 'score_ci_high': 0.87, 'score_ci_low': 0.7, 'num_of_instances': 100}
1
a100_80gb
955d0863e6818dac1a521fd1f10c0f49477c24103ced84de7f05e6368b2e5a7c
1,736,456,381,268
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.college_biology
card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.075741
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.76, 'accuracy_ci_low': 0.68, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.84, 'score_ci_low': 0.68, 'num_of_instances': 100}
1
a100_80gb
4f04733caca1397c29f0ce615565523d5f00a65ef5e7f40a36bdf02ee641e27b
1,736,456,266,240
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_chemistry
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_space_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.131114
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.32, 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.41, 'score_ci_low': 0.23, 'num_of_instances': 100}
1
a100_80gb
ae056cca325a36432a582c542919a1e9d7852364c60c12f7b42e0eebb260248c
1,736,456,276,333
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.international_law
card=cards.mmlu.international_law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_greek_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.527507
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.86, 'accuracy_ci_low': 0.78, 'accuracy_ci_high': 0.92, 'score_name': 'accuracy', 'score': 0.86, 'score_ci_high': 0.92, 'score_ci_low': 0.78, 'num_of_instances': 100}
1
a100_80gb
61b68cf6144035ad18f8f5f30b15909a9b9e47b1b22391fe2f53cfe0691825c0
1,736,456,286,412
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.other
card=cards.mmlu_pro.other,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.953254
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.36, 'score_ci_low': 0.19, 'num_of_instances': 100}
1
a100_80gb
0f294566380ff0c634b6f2711877a4c6da66f9c8e8acba0d4c72c22f8a321e25
1,736,456,348,854
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.other
card=cards.mmlu_pro.other,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
10.236511
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.31, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.31, 'score_ci_high': 0.4, 'score_ci_low': 0.21, 'num_of_instances': 100}
1
a100_80gb
569be690806a58d468a45a7e3cd2eb24aabbc58593e44ab4b87fb526d3a90655
1,736,456,296,205
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.formal_logic
card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.756039
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.43, 'accuracy_ci_low': 0.34, 'accuracy_ci_high': 0.53, 'score_name': 'accuracy', 'score': 0.43, 'score_ci_high': 0.53, 'score_ci_low': 0.34, 'num_of_instances': 100}
1
a100_80gb
55d4ad5bbcd9873239927c01962d77865831fd850e78f4165d5ad0b3237fa1d9
1,736,456,306,963
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.chemistry
card=cards.mmlu_pro.chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.812266
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.1, 'accuracy_ci_low': 0.05, 'accuracy_ci_high': 0.17, 'score_name': 'accuracy', 'score': 0.1, 'score_ci_high': 0.17, 'score_ci_low': 0.05, 'num_of_instances': 100}
1
a100_80gb
b79901975dcddd2b271c9de77471beabbb91cf5e01eb738ffe7d5ef854b18f8b
1,736,456,338,066
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.college_mathematics
card=cards.mmlu.college_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
30.356357
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.38, 'score_ci_low': 0.2, 'num_of_instances': 100}
1
a100_80gb
571e093cade360843e06e529622508d6dba01650a34b241af9162ffba284c17c
1,736,456,354,264
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.openbook_qa
card=cards.openbook_qa,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesStructuredWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.308349
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.66, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.74, 'score_ci_low': 0.56, 'num_of_instances': 100}
1
a100_80gb
e06e3137158ead81535fd4eb6c9f4a529ef57b584e9801224ba59d0ae3adfece
1,736,456,362,946
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_mathematics
card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_roman_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.117687
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.34, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.4226363972358115, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.4226363972358115, 'score_ci_low': 0.26, 'num_of_instances': 100}
1
a100_80gb
a93dd64f3980d7b41f42d31f4bf83ba0ed4fec9dfba0c2e0118685bd489a88b6
1,736,456,368,286
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.college_computer_science
card=cards.mmlu.college_computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.262445
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.3, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.39, 'score_name': 'accuracy', 'score': 0.3, 'score_ci_high': 0.39, 'score_ci_low': 0.22, 'num_of_instances': 100}
1
a100_80gb
d2ddea3ca252511707e67c1f14824022e8eeab09ae2e3064ff054cd50c2ae419
1,736,456,233,561
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_physics
card=cards.mmlu.high_school_physics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
10.935892
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.39, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.39, 'score_ci_low': 0.21, 'num_of_instances': 100}
1
a100_80gb
8f38804c414bb9129e0e6529ec3712005e5f1716d497988c676c29831c80b66c
1,736,456,241,346
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.public_relations
card=cards.mmlu.public_relations,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_numbers_choicesSeparator_space_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.889752
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.68, 'accuracy_ci_low': 0.5777266103823754, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.76, 'score_ci_low': 0.5777266103823754, 'num_of_instances': 100}
1
a100_80gb
27fc068e648fbb8f2b3d65d575f96398e553180a5bf222adbae8cf1b9b3c4ecd
1,736,456,249,545
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_biology
card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.449292
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.67, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.57, 'num_of_instances': 100}
1
a100_80gb
3322037f4aff953bbcf7ae92aeacce5ab9e1c2e3a2e15f67a1ff3f870ead8cdb
1,736,456,255,392
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.physics
card=cards.mmlu_pro.physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_roman_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.010545
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.1, 'accuracy_ci_low': 0.05, 'accuracy_ci_high': 0.18, 'score_name': 'accuracy', 'score': 0.1, 'score_ci_high': 0.18, 'score_ci_low': 0.05, 'num_of_instances': 100}
1
a100_80gb
456ce04c9bfc5deabf629008d2aeaf10893ed37eae076b925e0448268c863884
1,736,456,260,525
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.other
card=cards.mmlu_pro.other,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.485805
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.22, 'accuracy_ci_low': 0.15, 'accuracy_ci_high': 0.31, 'score_name': 'accuracy', 'score': 0.22, 'score_ci_high': 0.31, 'score_ci_low': 0.15, 'num_of_instances': 100}
1
a100_80gb
2fbaebdd8603c29283cfdea0a1a876ac3b359c49c4285b1aa392d92275e363c1
1,736,456,272,787
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_computer_science
card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
11.544485
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.66, 'accuracy_ci_low': 0.5742804386466187, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.5742804386466187, 'num_of_instances': 100}
1
a100_80gb
2721811eb5fea313de2297d2a29126a7871a7aeb95944acef39956879e26f142
1,736,456,347,559
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_computer_science
card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
32.367373
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.71, 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.79, 'score_ci_low': 0.61, 'num_of_instances': 100}
1
a100_80gb
0ec89ca5b43b60394920848a32d14cc271674f68ba79e6c145f6e728c96b3689
1,736,456,280,016
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.abstract_algebra
card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_greek_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.452695
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.24, 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.32, 'score_name': 'accuracy', 'score': 0.24, 'score_ci_high': 0.32, 'score_ci_low': 0.16, 'num_of_instances': 100}
1
a100_80gb
ca3d0d97d54a9872e5f25b4f02685de43a198a1af57266739eeaa7263f65da9c
1,736,456,309,865
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_european_history
card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
29.317161
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.72, 'accuracy_ci_low': 0.6202780739683089, 'accuracy_ci_high': 0.81, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.81, 'score_ci_low': 0.6202780739683089, 'num_of_instances': 100}
1
a100_80gb
b1cb08e32a9e2872f571578f883772f9e9171c14e94ce5d68f123770c9e27bc0
1,736,456,357,720
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.philosophy
card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.048779
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.3, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.3, 'score_ci_high': 0.4, 'score_ci_low': 0.22, 'num_of_instances': 100}
1
a100_80gb
f2c2ce301b538d1fb8fb8405c96df84dd8f4796f9accf901735fe0d518097a43
1,736,456,263,475
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_chemistry
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.195882
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.37, 'accuracy_ci_low': 0.28, 'accuracy_ci_high': 0.47, 'score_name': 'accuracy', 'score': 0.37, 'score_ci_high': 0.47, 'score_ci_low': 0.28, 'num_of_instances': 100}
1
a100_80gb
73bfdea290ae3fac3dbb05d34437210d517d7ff40681addba975a09b6012a2b9
1,736,456,267,619
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.elementary_mathematics
card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.550938
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.24, 'accuracy_ci_low': 0.16450043884336127, 'accuracy_ci_high': 0.33, 'score_name': 'accuracy', 'score': 0.24, 'score_ci_high': 0.33, 'score_ci_low': 0.16450043884336127, 'num_of_instances': 100}
1
a100_80gb
7012e9433ab25ef8879d0a9929660fd6ea0887e7e116417813df5f46ed00ec81
1,736,456,271,311
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.management
card=cards.mmlu.management,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.171109
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.85, 'accuracy_ci_low': 0.77, 'accuracy_ci_high': 0.91, 'score_name': 'accuracy', 'score': 0.85, 'score_ci_high': 0.91, 'score_ci_low': 0.77, 'num_of_instances': 100}
1
a100_80gb
f2212b9f7a0322e98189a6c70db018636d9dcf38e77f0ea3f901fd8a8a96a6b1
1,736,456,276,460
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.health
card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateHere.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.640496
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.31, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.31, 'score_ci_high': 0.4, 'score_ci_low': 0.22, 'num_of_instances': 100}
1
a100_80gb
1c1a7e602ca95542dbb34ee9112fc80685e024ecc3b67bcf8ce4a3c7e27a73a9
1,736,456,297,556
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.professional_law
card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
20.399922
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.62, 'score_ci_low': 0.43, 'num_of_instances': 100}
1
a100_80gb
510a552f310bc8e4e9d3377c6313aea5bf38d3435fed423f43f9c4b32be6199d
1,736,456,325,276
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_us_history
card=cards.mmlu.high_school_us_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_greek_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
24.536512
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.9, 'accuracy_ci_low': 0.83, 'accuracy_ci_high': 0.95, 'score_name': 'accuracy', 'score': 0.9, 'score_ci_high': 0.95, 'score_ci_low': 0.83, 'num_of_instances': 100}
1
a100_80gb
75e9f95aeeafcd6b12b67e4af3c807d7d22960743c6e35f93c3a805aa5e14954
1,736,456,340,086
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.prehistory
card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
11.241342
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.52, 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.52, 'score_ci_high': 0.62, 'score_ci_low': 0.42, 'num_of_instances': 100}
1
a100_80gb
3ce625a91bfc83022269e324844fdaaa0d9afa7442d463e9d9711522fb0a53e5
1,736,456,344,465
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.machine_learning
card=cards.mmlu.machine_learning,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_greek_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.806029
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.35, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.44, 'score_name': 'accuracy', 'score': 0.35, 'score_ci_high': 0.44, 'score_ci_low': 0.25, 'num_of_instances': 100}
1
a100_80gb
ff837cc8c4ac5e005bd3560764add2dc6f00cfa89b665dc9c6729b6606e1735b
1,736,456,351,353
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.medical_genetics
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.340724
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.72, 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.8, 'score_ci_low': 0.63, 'num_of_instances': 100}
1
a100_80gb
ad0fc7e1ecb6959f23774665917ea3d2002152c4cbb4acc58181c4c099ef56b6
1,736,456,356,891
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.economics
card=cards.mmlu_pro.economics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSACould.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.837383
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.34, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.4432207288887548, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.4432207288887548, 'score_ci_low': 0.26, 'num_of_instances': 100}
1
a100_80gb
2226fd73c61ddb151126537fbad0ef368432ea52e1cb2554630ab6a3dc0516af
1,736,456,244,348
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_macroeconomics
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.757604
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.62, 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.72, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.72, 'score_ci_low': 0.52, 'num_of_instances': 100}
1
a100_80gb
cac9d21d5265c06a3aace50ac5e893572c7af146e4007e00d36792b99165c3b5
1,736,456,281,153
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_macroeconomics
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.899845
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.59, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.6812610162924546, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.6812610162924546, 'score_ci_low': 0.49, 'num_of_instances': 100}
1
a100_80gb
ff898dc3c3d6db8365612322102874bcbf1ea69ccda6d8a010f8b5da25b76e76
1,736,456,248,910
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_physics
card=cards.mmlu.high_school_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.724066
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.25, 'accuracy_ci_low': 0.17575267789416785, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.35, 'score_ci_low': 0.17575267789416785, 'num_of_instances': 100}
1
a100_80gb
c74b604ebb0ce8466fc777e405c7f9c2393b65c052f6c3be758f790c6e68a17c
1,736,456,253,143
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.astronomy
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_greek_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.611609
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.65, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.55, 'num_of_instances': 100}
1
a100_80gb
20ecc84e7ff06661edccc28c4e8737006768b9fbda58431a8ee9e4f94c0e2e99
1,736,456,258,300
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.security_studies
card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.556168
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.6, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.6985253010054066, 'score_name': 'accuracy', 'score': 0.6, 'score_ci_high': 0.6985253010054066, 'score_ci_low': 0.51, 'num_of_instances': 100}
1
a100_80gb
c3ebce5723d30cb52a316158e4a02940fc2eb9e9b862c5d2be2d9a2887db971a
1,736,456,348,614
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.security_studies
card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
38.588675
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.77, 'accuracy_ci_low': 0.68, 'accuracy_ci_high': 0.85, 'score_name': 'accuracy', 'score': 0.77, 'score_ci_high': 0.85, 'score_ci_low': 0.68, 'num_of_instances': 100}
1
a100_80gb
0f8093828a3b676f9797f4d3d847b688a4b8fa8adb06b8700cecdc1d375d7d87
1,736,456,273,109
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_computer_science
card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_greek_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
14.004528
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.84, 'score_ci_low': 0.67, 'num_of_instances': 100}
1
a100_80gb
02a28503778b7006cc3c1c5d14b558235873345703bf4b2b758fff643494f092
1,736,456,287,872
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_us_history
card=cards.mmlu.high_school_us_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.962347
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.68, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.77, 'score_ci_low': 0.58, 'num_of_instances': 100}
1
a100_80gb
ec2651e528c6c2ed8ddc67e53d06ea6cc5fefc14e7ebaf9dacfa66179d2dc67c
1,736,456,295,819
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.sociology
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.153618
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.71, 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.79, 'score_ci_low': 0.61, 'num_of_instances': 100}
1
a100_80gb
8178b758ccd65a79aaac1245500924738434c14b6e56f70282eb481e52965481
1,736,456,306,058
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.philosophy
card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.373963
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.37, 'score_ci_low': 0.19, 'num_of_instances': 100}
1
a100_80gb
c7bb5d12c796071b7079cb4dccdeb83a5d5166b429dceb8c32bba8909c908242
1,736,456,274,179
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.business_ethics
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.140677
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.68, 'accuracy_ci_low': 0.59, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.77, 'score_ci_low': 0.59, 'num_of_instances': 100}
1
a100_80gb
8b09f0317759ff397f205c5bf2af497e774d4e0388cd55ec682edbaddcaf92ca
1,736,456,278,653
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_microeconomics
card=cards.mmlu.high_school_microeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.596622
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.54, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.63, 'score_ci_low': 0.43, 'num_of_instances': 100}
1
a100_80gb
74dcec18c9365b6fc3c481d8d57f3cdb778eb70c443cd170c582a38738092895
1,736,456,282,947
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.jurisprudence
card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.716969
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.66, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.56, 'num_of_instances': 100}
1
a100_80gb
e693a6cf79b5116bed66b89f9b86c54657d80b85d9078a767c7660b6b7cd3dfa
1,736,456,287,402
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.college_biology
card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.85494
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.81, 'accuracy_ci_low': 0.73, 'accuracy_ci_high': 0.88, 'score_name': 'accuracy', 'score': 0.81, 'score_ci_high': 0.88, 'score_ci_low': 0.73, 'num_of_instances': 100}
1
a100_80gb
64b9036f986711e8b21bf6b2eb90d13bc67e022bbfe33aebdca10f04ceeb4c66
1,736,456,291,581
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.nutrition
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.601859
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.55, 'accuracy_ci_low': 0.4554741621440572, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.65, 'score_ci_low': 0.4554741621440572, 'num_of_instances': 100}
1
a100_80gb
cf57a6d19518bee4e5996f4bfe5fe65c6f9cc7e1d44e0257d9f9bc49622907c1
1,736,456,299,373
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.physics
card=cards.mmlu_pro.physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.413819
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.14, 'accuracy_ci_low': 0.08, 'accuracy_ci_high': 0.22, 'score_name': 'accuracy', 'score': 0.14, 'score_ci_high': 0.22, 'score_ci_low': 0.08, 'num_of_instances': 100}
1
a100_80gb
b02460b9e5f4b28c676ff4b5353d95bd16db7bbb97d4a766f7d52edb81e40268
1,736,456,303,413
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.human_aging
card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.367444
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.49, 'accuracy_ci_low': 0.39412998186786197, 'accuracy_ci_high': 0.59, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.59, 'score_ci_low': 0.39412998186786197, 'num_of_instances': 100}
1
a100_80gb
1d1eee383776c76a0e1521d15b88b8e62ab8666a0fe23764a316d2ee29dd08b5
1,736,456,323,997
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_statistics
card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
20.052728
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.36, 'score_ci_low': 0.18, 'num_of_instances': 100}
1
a100_80gb
f82f0aa8985515b5ab0f0e5d1ae3ce09e4d57dd427c7d4798a6583db04ad58ed
1,736,456,341,454
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.college_computer_science
card=cards.mmlu.college_computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
16.815514
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.32, 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.43, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.43, 'score_ci_low': 0.23, 'num_of_instances': 100}
1
a100_80gb
08f525465462bf8742ccac60181d78e79c60b21b4a984db4b5bbe5e36e764573
1,736,456,346,533
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.moral_scenarios
card=cards.mmlu.moral_scenarios,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.494803
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.26, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.36, 'score_ci_low': 0.19, 'num_of_instances': 100}
1
a100_80gb
3d74db6bf286df1a981041e7ba83dabb5df87ad7e78c3ef64ee1cdbc4978ff54
1,736,456,250,499
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.ai2_arc.arc_easy
card=cards.ai2_arc.arc_easy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.575458
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.92, 'accuracy_ci_low': 0.85, 'accuracy_ci_high': 0.96, 'score_name': 'accuracy', 'score': 0.92, 'score_ci_high': 0.96, 'score_ci_low': 0.85, 'num_of_instances': 100}
1
a100_80gb
fd4395ae38849320158e1b246df39e5b7174d32748f831bad9805772534a2e2f
1,736,456,254,590
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_psychology
card=cards.mmlu.high_school_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_capitals_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.401183
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.88, 'accuracy_ci_low': 0.8, 'accuracy_ci_high': 0.93, 'score_name': 'accuracy', 'score': 0.88, 'score_ci_high': 0.93, 'score_ci_low': 0.8, 'num_of_instances': 100}
1
a100_80gb
e10fb8949c9593fb55e705a0e44074d3c676eab65bcadc88bc9d114ea642bbd4
1,736,456,263,148
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_physics
card=cards.mmlu.high_school_physics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.02574
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.32, 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.41, 'score_ci_low': 0.23, 'num_of_instances': 100}
1
a100_80gb
480b0850bda858fdcda42b94120e98bc33b1a7bf52309621051c91d6080f5728
1,736,456,286,654
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_physics
card=cards.mmlu.high_school_physics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.026688
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.36, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.45, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.45, 'score_ci_low': 0.26, 'num_of_instances': 100}
1
a100_80gb
89b899e78a84e6e0505bb897cf07a8acf4b27e8c20e9fdb18035356560ae42b9
1,736,456,269,869
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.world_religions
card=cards.mmlu.world_religions,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.098086
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.81, 'accuracy_ci_low': 0.72, 'accuracy_ci_high': 0.88, 'score_name': 'accuracy', 'score': 0.81, 'score_ci_high': 0.88, 'score_ci_low': 0.72, 'num_of_instances': 100}
1
a100_80gb
347feda6b549c34ba5ef1b987daafec3ced7fe877ea1dd0a7ee18279d8afc9e2
1,736,456,276,962
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.us_foreign_policy
card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.481476
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.91, 'accuracy_ci_low': 0.84, 'accuracy_ci_high': 0.96, 'score_name': 'accuracy', 'score': 0.91, 'score_ci_high': 0.96, 'score_ci_low': 0.84, 'num_of_instances': 100}
1
a100_80gb
67b83779fb51ac1d7fb97e164c5e6147234e702fc16a40aca1497ddad655a8f1
1,736,456,291,350
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.social_iqa
card=cards.social_iqa,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.Social_IQa.MultipleChoiceTemplatesInstructionsStateHere.enumerator_roman_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.81605
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.35, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.44, 'score_name': 'accuracy', 'score': 0.35, 'score_ci_high': 0.44, 'score_ci_low': 0.25, 'num_of_instances': 100}
1
a100_80gb
668f2c8c7b622c20b2e7f7438700d228beefed206cb8373fdc85a07414bdc897
1,736,456,298,631
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.philosophy
card=cards.mmlu.philosophy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_numbers_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.784947
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.69, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.77, 'score_ci_low': 0.6, 'num_of_instances': 100}
1
a100_80gb
735f1a57deeb16525c003daf24759ff67750fefa423da5a061190889687343af
1,736,456,305,427
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.law
card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.062462
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.18, 'accuracy_ci_low': 0.11, 'accuracy_ci_high': 0.26, 'score_name': 'accuracy', 'score': 0.18, 'score_ci_high': 0.26, 'score_ci_low': 0.11, 'num_of_instances': 100}
1
a100_80gb
5f186970a5f2aabce8fa77298aafa41a68dc63db0239173398d9e9746c992826
1,736,456,342,504
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.moral_scenarios
card=cards.mmlu.moral_scenarios,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_roman_choicesSeparator_space_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
36.0106
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.4, 'accuracy_ci_low': 0.31, 'accuracy_ci_high': 0.5, 'score_name': 'accuracy', 'score': 0.4, 'score_ci_high': 0.5, 'score_ci_low': 0.31, 'num_of_instances': 100}
1
a100_80gb
9359ea3810e7f5cbd238b2de94dacfc3a13022d5030316c57400f90d1de957c5
1,736,456,255,917
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.formal_logic
card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
10.53771
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.64, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.64, 'score_ci_high': 0.73, 'score_ci_low': 0.55, 'num_of_instances': 100}
1
a100_80gb
6ed1503acc8b975e54b04eba3fea9f330fc106f8a506786df5beb4c8b0de03a8
1,736,456,265,440
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.college_chemistry
card=cards.mmlu.college_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.563471
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.48, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.57, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.57, 'score_ci_low': 0.39, 'num_of_instances': 100}
1
a100_80gb
69dd8c5fd5a98b11ef55811206a62285af7cc1fbd8e6263a16ca5ce5705a0eac
1,736,456,275,069
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_biology
card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.955966
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.72, 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.8, 'score_ci_low': 0.63, 'num_of_instances': 100}
1
a100_80gb
dfb3db026c971bac90b542a0d90125d2c7d68515480bdd4f5930ec86f3e1dbba
1,736,456,279,206
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.public_relations
card=cards.mmlu.public_relations,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.29608
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.64, 'accuracy_ci_low': 0.5339271447720959, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.64, 'score_ci_high': 0.73, 'score_ci_low': 0.5339271447720959, 'num_of_instances': 100}
1
a100_80gb
1ec7da7f1e53a32699672dceb686600de7c8b43c2290dd670f12ffaddb65eb42
1,736,456,283,963
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.clinical_knowledge
card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.484637
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.65, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.56, 'num_of_instances': 100}
1
a100_80gb
7ad68cdffde6f74e82bd614d3fb861af4f60900ce633e7665fd513da0e9c8150
1,736,456,288,126
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.college_mathematics
card=cards.mmlu.college_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_roman_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.587294
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.2, 'accuracy_ci_low': 0.13, 'accuracy_ci_high': 0.29, 'score_name': 'accuracy', 'score': 0.2, 'score_ci_high': 0.29, 'score_ci_low': 0.13, 'num_of_instances': 100}
1
a100_80gb
639bdbe6ebaeccc3dbb22be151f96e3026dabc608d90ffe70f6dbef3d1f82192
1,736,456,292,159
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.elementary_mathematics
card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.496073
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.39437495841517944, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.39437495841517944, 'score_ci_low': 0.21, 'num_of_instances': 100}
1
a100_80gb
6dd738942c9679b778f2237c5dee84185b58330932c12d53b99fd805dc280760
1,736,456,295,967
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.virology
card=cards.mmlu.virology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.288778
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.63, 'score_ci_low': 0.43, 'num_of_instances': 100}
1
a100_80gb
18c5702e5fb19087ef9f44251b8b2b37afc5696ce8d710ed7869aa5d7cd912e1
1,736,456,304,739
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.high_school_chemistry
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.215433
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.44, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.55, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.55, 'score_ci_low': 0.35, 'num_of_instances': 100}
1
a100_80gb
c1d6b2b1c38b72695baa9e3dcabf5f4d30d1ad077a5ea9713bef5199f3799f90
1,736,456,343,550
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.other
card=cards.mmlu_pro.other,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
38.011904
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.45, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.45, 'score_ci_low': 0.27, 'num_of_instances': 100}
1
a100_80gb
e22d9e8b19e26166e54e5607330d03693eab20cc56e3a71c7d9581e4574cf92b
1,736,456,246,450
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.sociology
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.451158
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.84, 'score_ci_low': 0.67, 'num_of_instances': 100}
1
a100_80gb
15de370563638b1fc40d99420e85ca378d4387d9deba1d640c71a9d8845e1db7
1,736,456,250,863
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.nutrition
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.542414
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.41, 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.51, 'score_name': 'accuracy', 'score': 0.41, 'score_ci_high': 0.51, 'score_ci_low': 0.32, 'num_of_instances': 100}
1
a100_80gb
834798dfc33b5a21a299a4701f4fab3b4ce1b75c4de275677b02d24ef32f4c4d
1,736,456,255,198
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.social_iqa
card=cards.social_iqa,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.Social_IQa.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.741491
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.46, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.56, 'score_ci_low': 0.37, 'num_of_instances': 100}
1
a100_80gb
89a7d9e00e9958f4048787dcab8717ce532d5027c8ac189926d6a1b6d9a023a5
1,736,456,265,235
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.college_mathematics
card=cards.mmlu.college_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_capitals_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.549338
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.4, 'accuracy_ci_low': 0.3, 'accuracy_ci_high': 0.5, 'score_name': 'accuracy', 'score': 0.4, 'score_ci_high': 0.5, 'score_ci_low': 0.3, 'num_of_instances': 100}
1
a100_80gb
123aadd07204ffb6f3c9ab047d7a137ef43b57ab3fe6ed4475014a1b98cff941
1,736,456,272,574
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.jurisprudence
card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.499379
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.73, 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.81, 'score_name': 'accuracy', 'score': 0.73, 'score_ci_high': 0.81, 'score_ci_low': 0.63, 'num_of_instances': 100}
1
a100_80gb
b07d797e7b5425e8b4ff601253bf963ffb6bb611808fe556f0834221c4497224
1,736,456,276,983
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.philosophy
card=cards.mmlu.philosophy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.617859
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.73, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.82, 'score_name': 'accuracy', 'score': 0.73, 'score_ci_high': 0.82, 'score_ci_low': 0.64, 'num_of_instances': 100}
1
a100_80gb
21c16cc8ce9ec8b4d26d440f96d28d57dcf96fd693f6d5e2ef246fe42583f0cb
1,736,456,286,245
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.econometrics
card=cards.mmlu.econometrics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.667788
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.55, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.64, 'score_ci_low': 0.45, 'num_of_instances': 100}
1
a100_80gb
df0244cd6280db061ae75e0e9e99263d5f0909904921642e07b206b09258151d
1,736,456,294,175
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu_pro.business
card=cards.mmlu_pro.business,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.5438
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.16, 'accuracy_ci_low': 0.1, 'accuracy_ci_high': 0.24, 'score_name': 'accuracy', 'score': 0.16, 'score_ci_high': 0.24, 'score_ci_low': 0.1, 'num_of_instances': 100}
1
a100_80gb
069002a7984b5b03531e2d7d4095032d1e83b6ca33220b5f2d9365f83988c712
1,736,456,299,028
1,736,452,800,000
meta-llama_Meta-Llama-3-8B-Instruct
cards.mmlu.professional_psychology
card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.179784
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
{'accuracy': 0.7, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.7, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100}
1
a100_80gb

run_id: e67f3d67f16f114920eab13a4d925033fd65d01ba5e91e950b0cf927548a8f44
timestamp_utc: 1736456307627
timestamp_day_hour_utc: 1736452800000
model_name_or_path: meta-llama_Meta-Llama-3-8B-Instruct
unitxt_card: cards.mmlu.college_biology
unitxt_recipe: card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
quantization_type: None
quantization_bit_count: half
inference_runtime_s: 7.950442
generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
model_args: {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
inference_engine: VLLM
packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
scores: {'accuracy': 0.74, 'accuracy_ci_low': 0.65, 'accuracy_ci_high': 0.82, 'score_name': 'accuracy', 'score': 0.74, 'score_ci_high': 0.82, 'score_ci_low': 0.65, 'num_of_instances': 100}
num_gpu: 1
device: a100_80gb
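
The recipe strings are also what unitxt itself consumes. Assuming unitxt 1.16.x accepts a recipe string directly in load_dataset, as its quick-start examples suggest, regenerating the evaluation data for the college_biology run above would look roughly like the sketch below; split names and the exact shape of each instance may differ between unitxt versions.

```python
from unitxt import load_dataset

# Recipe copied from the record above.
recipe = (
    "card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=5,"
    "format=formats.chat_api,"
    "template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic."
    "enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,"
    "system_prompt=system_prompts.empty,demos_taken_from=train,"
    "demos_removed_from_data=True,max_test_instances=100"
)

dataset = load_dataset(recipe)
# "source" holds the rendered prompt for each instance in unitxt's output.
print(dataset["test"][0]["source"])
```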

run_id: 731b4afed99b8e97e5a79e52c84efbcc1f68b9b52282cd763762dcbc282bbd27
timestamp_utc: 1736456185620
timestamp_day_hour_utc: 1736452800000
model_name_or_path: meta-llama_Meta-Llama-3-8B-Instruct
unitxt_card: cards.mmlu.clinical_knowledge
unitxt_recipe: card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_roman_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
quantization_type: None
quantization_bit_count: half
inference_runtime_s: 4.504224
generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
model_args: {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
inference_engine: VLLM
packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
scores: {'accuracy': 0.67, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.57, 'num_of_instances': 100}
num_gpu: 1
device: a100_80gb

run_id: 8141feca57c3d65a728db06c10a49cd04b0beba46d7579e82446f99cf6d655df
timestamp_utc: 1736456190167
timestamp_day_hour_utc: 1736452800000
model_name_or_path: meta-llama_Meta-Llama-3-8B-Instruct
unitxt_card: cards.mmlu.high_school_computer_science
unitxt_recipe: card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
quantization_type: None
quantization_bit_count: half
inference_runtime_s: 3.968652
generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
model_args: {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
inference_engine: VLLM
packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
scores: {'accuracy': 0.62, 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.71, 'score_ci_low': 0.53, 'num_of_instances': 100}
num_gpu: 1
device: a100_80gb

run_id: 1aced06cfc68da8fe2c34d4401152c2002e70e4d54c52f80bf4d3b1f2726cd1c
timestamp_utc: 1736456194935
timestamp_day_hour_utc: 1736452800000
model_name_or_path: meta-llama_Meta-Llama-3-8B-Instruct
unitxt_card: cards.mmlu.prehistory
unitxt_recipe: card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_keyboard_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
quantization_type: None
quantization_bit_count: half
inference_runtime_s: 3.428159
generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
model_args: {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
inference_engine: VLLM
packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
scores: {'accuracy': 0.44, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.55, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.55, 'score_ci_low': 0.35, 'num_of_instances': 100}
num_gpu: 1
device: a100_80gb
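
Side-by-side comparisons are easier once the records are collected into a small table. The sketch below assumes the records have already been parsed into plain dicts (only the fields needed for the comparison are kept); the accuracies and demo counts are copied from records in this section.

```python
# Hypothetical pre-parsed records; values copied from this section.
records = [
    {"unitxt_card": "cards.mmlu_pro.business", "num_demos": 0, "accuracy": 0.16},
    {"unitxt_card": "cards.mmlu.professional_psychology", "num_demos": 0, "accuracy": 0.70},
    {"unitxt_card": "cards.mmlu.college_biology", "num_demos": 5, "accuracy": 0.74},
    {"unitxt_card": "cards.mmlu.clinical_knowledge", "num_demos": 0, "accuracy": 0.67},
    {"unitxt_card": "cards.mmlu.prehistory", "num_demos": 0, "accuracy": 0.44},
]

# Mean accuracy grouped by the number of in-context demos.
by_demos = {}
for rec in records:
    by_demos.setdefault(rec["num_demos"], []).append(rec["accuracy"])
for demos, accs in sorted(by_demos.items()):
    print(f"num_demos={demos}: mean accuracy {sum(accs) / len(accs):.2f}")
```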

run_id: 4ee00e44741996984173ab21030dd2e969778b37006c96c17012b4276b223a38
timestamp_utc: 1736456206964
timestamp_day_hour_utc: 1736452800000
model_name_or_path: meta-llama_Meta-Llama-3-8B-Instruct
unitxt_card: cards.mmlu_pro.biology
unitxt_recipe: card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithTopic.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
quantization_type: None
quantization_bit_count: half
inference_runtime_s: 11.45643
generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
model_args: {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
inference_engine: VLLM
packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
scores: {'accuracy': 0.58, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.68, 'score_ci_low': 0.49, 'num_of_instances': 100}
num_gpu: 1
device: a100_80gb

run_id: 0e8b8fa6c6a84d061db445e85e18044ef8766fb04b4cce8eebac012750fb426e
timestamp_utc: 1736456216883
timestamp_day_hour_utc: 1736452800000
model_name_or_path: meta-llama_Meta-Llama-3-8B-Instruct
unitxt_card: cards.mmlu.moral_disputes
unitxt_recipe: card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
quantization_type: None
quantization_bit_count: half
inference_runtime_s: 7.771329
generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
model_args: {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
inference_engine: VLLM
packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
scores: {'accuracy': 0.61, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.7, 'score_ci_low': 0.51, 'num_of_instances': 100}
num_gpu: 1
device: a100_80gb

run_id: 69bd516361a0ff7b839676c69ad4d0cabb1163ab93c7cfb5edce91afcf6582d2
timestamp_utc: 1736456230608
timestamp_day_hour_utc: 1736452800000
model_name_or_path: meta-llama_Meta-Llama-3-8B-Instruct
unitxt_card: cards.mmlu_pro.computer_science
unitxt_recipe: card=cards.mmlu_pro.computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
quantization_type: None
quantization_bit_count: half
inference_runtime_s: 12.818354
generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
model_args: {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
inference_engine: VLLM
packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
scores: {'accuracy': 0.33, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.43, 'score_name': 'accuracy', 'score': 0.33, 'score_ci_high': 0.43, 'score_ci_low': 0.25, 'num_of_instances': 100}
num_gpu: 1
device: a100_80gb

run_id: d2a76f215388cd5ed211e16c8e5a83481ef312024ebc72f4511b3bef1d19bff0
timestamp_utc: 1736456235796
timestamp_day_hour_utc: 1736452800000
model_name_or_path: meta-llama_Meta-Llama-3-8B-Instruct
unitxt_card: cards.mmlu.econometrics
unitxt_recipe: card=cards.mmlu.econometrics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
quantization_type: None
quantization_bit_count: half
inference_runtime_s: 4.024727
generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
model_args: {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
inference_engine: VLLM
packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"}
scores: {'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.35, 'score_ci_low': 0.19, 'num_of_instances': 100}
num_gpu: 1
device: a100_80gb