run_id | timestamp_utc | timestamp_day_hour_utc | model_name_or_path | unitxt_card | unitxt_recipe | quantization_type | quantization_bit_count | inference_runtime_s | generation_args | model_args | inference_engine | packages_versions | scores | num_gpu | device |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9bd4ea33b09c8efb90538a2dfc25130b3789adc10eb0a28f6b7e47e0de82225 | 1,736,455,673,444 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_accounting | card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.0416 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.52, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.52, 'score_ci_low': 0.32, 'num_of_instances': 100} | 1 | a100_80gb |
54c9978485a5af7ad582afc91fbe1ff750fd5f16ce564d6938646c6e9a8aaeb2 | 1,736,455,684,103 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.formal_logic | card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.635508 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.4, 'accuracy_ci_low': 0.31, 'accuracy_ci_high': 0.4995902645439276, 'score_name': 'accuracy', 'score': 0.4, 'score_ci_high': 0.4995902645439276, 'score_ci_low': 0.31, 'num_of_instances': 100} | 1 | a100_80gb |
05a16fefbed1567e4908b5cfb7d9a9e8e76e093a1ce84eb320fe53b0e18227d0 | 1,736,455,688,530 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.376575 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.25, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.34, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
432ca0477e3687bd9d38497526902454cbf932ad779d6f410b8c5a7464e64ec7 | 1,736,455,598,609 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.461815 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.59, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.68, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
836c14c95800385da4bfededd1474b0318200f49d7b8717993bbd173b596fb7f | 1,736,455,602,926 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.miscellaneous | card=cards.mmlu.miscellaneous,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_roman_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.20835 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.74, 'accuracy_ci_low': 0.65, 'accuracy_ci_high': 0.8181097801361409, 'score_name': 'accuracy', 'score': 0.74, 'score_ci_high': 0.8181097801361409, 'score_ci_low': 0.65, 'num_of_instances': 100} | 1 | a100_80gb |
2ee692972fc2852bdb706298a1f02752f03321c0c4021d395471efc0e0a03278 | 1,736,455,608,392 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_biology | card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.691727 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.7, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.7, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
46053142a96c58c5668300597606bb2f91e0d1ce3bdb4213a34f693ce592a241 | 1,736,455,637,900 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_european_history | card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 28.919364 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.71, 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.8, 'score_ci_low': 0.61, 'num_of_instances': 100} | 1 | a100_80gb |
1baf22781ac505a447f3f1a24f82455e34253332e9023d54eb667aa6918afe2b | 1,736,455,648,420 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.463915 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.5155190296772321, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.5155190296772321, 'score_ci_low': 0.32, 'num_of_instances': 100} | 1 | a100_80gb |
4f9be5739864197b2c3a8227a45e9cdd2d6f9c428d8ff26e375d3c5be8d231b2 | 1,736,455,669,601 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.548184 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.47, 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.57, 'score_name': 'accuracy', 'score': 0.47, 'score_ci_high': 0.57, 'score_ci_low': 0.38, 'num_of_instances': 100} | 1 | a100_80gb |
8b6bd2fe278cb1466dce7860048f553d06ea3f6e50f95787136ca6cc41f1bb8d | 1,736,455,657,814 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_macroeconomics | card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.690324 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.64, 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.64, 'score_ci_high': 0.73, 'score_ci_low': 0.54, 'num_of_instances': 100} | 1 | a100_80gb |
db02fdce2a8ba7518e42fc90bc2dde0035ae466f77ba186f0d4255583db3b72e | 1,736,455,662,473 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_computer_science | card=cards.mmlu.college_computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_greek_choicesSeparator_newline_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.867475 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.46, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.46, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
2f5c7597f5081ca816fbdbcb718866219270bbc6429fb83ac295a196a04a9c80 | 1,736,455,674,768 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.ai2_arc.arc_easy | card=cards.ai2_arc.arc_easy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.404537 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.9, 'accuracy_ci_low': 0.82, 'accuracy_ci_high': 0.95, 'score_name': 'accuracy', 'score': 0.9, 'score_ci_high': 0.95, 'score_ci_low': 0.82, 'num_of_instances': 100} | 1 | a100_80gb |
4aac43684ad7389ed4fab7d501b25290c47ae0f97ba05d63fb6bb4df20c9172c | 1,736,455,681,579 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_world_history | card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.257076 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.78, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.85, 'score_name': 'accuracy', 'score': 0.78, 'score_ci_high': 0.85, 'score_ci_low': 0.7, 'num_of_instances': 100} | 1 | a100_80gb |
f13e05bd7d75990698945a11f3cdb78371ea821e6d9990d8ae7ed83e2d6d2a68 | 1,736,455,597,293 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_accounting | card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.383487 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.52, 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.61, 'score_name': 'accuracy', 'score': 0.52, 'score_ci_high': 0.61, 'score_ci_low': 0.42, 'num_of_instances': 100} | 1 | a100_80gb |
538ee6c81fd5c6380b9afb588b3732d4ed462b5ac4dc0827f1d22087f46645d8 | 1,736,455,604,776 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.ai2_arc.arc_easy | card=cards.ai2_arc.arc_easy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.380233 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.87, 'accuracy_ci_low': 0.79, 'accuracy_ci_high': 0.93, 'score_name': 'accuracy', 'score': 0.87, 'score_ci_high': 0.93, 'score_ci_low': 0.79, 'num_of_instances': 100} | 1 | a100_80gb |
82df2b51d134889049550e6a7a89f0834ec0acc16f8093f664be73b87bdb6d81 | 1,736,455,616,288 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.ai2_arc.arc_easy | card=cards.ai2_arc.arc_easy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.867629 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.74, 'accuracy_ci_low': 0.65, 'accuracy_ci_high': 0.82, 'score_name': 'accuracy', 'score': 0.74, 'score_ci_high': 0.82, 'score_ci_low': 0.65, 'num_of_instances': 100} | 1 | a100_80gb |
389712292b6025dcea126d74a933e0ef17572b68dfa009facd6b3914493c8600 | 1,736,455,611,673 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.philosophy | card=cards.mmlu.philosophy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.160611 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.71, 'accuracy_ci_low': 0.62, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.79, 'score_ci_low': 0.62, 'num_of_instances': 100} | 1 | a100_80gb |
be293433fee9dca1d59775080051e4df32ed817756628c50aef1d02ee2fbade5 | 1,736,455,622,054 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.social_iqa | card=cards.social_iqa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.Social_IQa.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.220541 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.56, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
b380298a634c8eeb64b574019e4ac78c4d7b766b65c4c113e254f3510e99a87c | 1,736,455,630,309 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.63515 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.5728261893162818, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.5728261893162818, 'num_of_instances': 100} | 1 | a100_80gb |
b048e4c36c0f2e72d952d3a9f5966c03e11f837d9cab33476844f41f15bdda9f | 1,736,455,634,940 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.virology | card=cards.mmlu.virology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.724516 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.35, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.47, 'score_name': 'accuracy', 'score': 0.35, 'score_ci_high': 0.47, 'score_ci_low': 0.26, 'num_of_instances': 100} | 1 | a100_80gb |
62d193cabfcd4a333b92c7e3665ead84c84e5d8d77ef455d241b71eeda3d7d9f | 1,736,455,639,074 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_physics | card=cards.mmlu.college_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_roman_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.56988 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.45, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.45, 'score_ci_low': 0.25, 'num_of_instances': 100} | 1 | a100_80gb |
92d91b6ea740d5329137649267af03ecebd953f96f7f10a93efc78365b3f7cad | 1,736,455,644,500 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.871542 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.81, 'accuracy_ci_low': 0.72, 'accuracy_ci_high': 0.88, 'score_name': 'accuracy', 'score': 0.81, 'score_ci_high': 0.88, 'score_ci_low': 0.72, 'num_of_instances': 100} | 1 | a100_80gb |
83137679e897181ffb74172a42cb17d1bb54400695d5a913ddf5ceffb6d17075 | 1,736,455,658,245 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_numbers_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 13.103039 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.35, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
efdee1a28ed6416d126de68fb9e1aac2ecf3d996a4ccc79bd8b37353071bb12a | 1,736,455,558,284 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_roman_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.470785 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.64, 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.64, 'score_ci_high': 0.73, 'score_ci_low': 0.54, 'num_of_instances': 100} | 1 | a100_80gb |
23413bbbc692a7d06ac7f8b75005ed2b614b129ad831251e2317110e66386c53 | 1,736,455,567,785 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_world_history | card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.611932 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.77, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
151fa208cec3ae7db61581a1706ae3fbaf2f8107af16cec81ae72266442351da | 1,736,455,576,642 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_government_and_politics | card=cards.mmlu.high_school_government_and_politics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_roman_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.911205 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.82, 'accuracy_ci_low': 0.73, 'accuracy_ci_high': 0.89, 'score_name': 'accuracy', 'score': 0.82, 'score_ci_high': 0.89, 'score_ci_low': 0.73, 'num_of_instances': 100} | 1 | a100_80gb |
266cc4d9d384b6792510f94dcb62e0379ff7ce8976ee05218b894b80a0b44e71 | 1,736,455,585,222 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.public_relations | card=cards.mmlu.public_relations,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.689993 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.7, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.7, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
b84b72605e7dc0b0c18b38770739f4435ba65263ff8fb39c554b0cc3df5fac69 | 1,736,455,593,632 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.marketing | card=cards.mmlu.marketing,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.597322 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.91, 'accuracy_ci_low': 0.84, 'accuracy_ci_high': 0.95, 'score_name': 'accuracy', 'score': 0.91, 'score_ci_high': 0.95, 'score_ci_low': 0.84, 'num_of_instances': 100} | 1 | a100_80gb |
e624f70584b3d7d0303ea6a4fd1fd8ab95fcdc347b6bc9ec66eb7004229592e4 | 1,736,455,604,251 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.moral_disputes | card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.788133 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.64, 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.64, 'score_ci_high': 0.73, 'score_ci_low': 0.54, 'num_of_instances': 100} | 1 | a100_80gb |
ca609aef2495dd73b8ded3392f304a8a5b6b85ac9a8fce5d68205136a57ce9ea | 1,736,455,609,623 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.421466 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.74, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.82, 'score_name': 'accuracy', 'score': 0.74, 'score_ci_high': 0.82, 'score_ci_low': 0.64, 'num_of_instances': 100} | 1 | a100_80gb |
97254f1f1ea29aa1fc162bafd1012eed7beb2b1cc31d01d6ecf0c26e4fdc1727 | 1,736,455,615,148 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.biology | card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.888046 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.52, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.52, 'score_ci_high': 0.62, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
083003d3cb137075c36376dfe0b14f541f450684c154d17e9d70bce890be5d7b | 1,736,455,638,870 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.history | card=cards.mmlu_pro.history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.835451 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.41, 'accuracy_ci_low': 0.31, 'accuracy_ci_high': 0.51, 'score_name': 'accuracy', 'score': 0.41, 'score_ci_high': 0.51, 'score_ci_low': 0.31, 'num_of_instances': 100} | 1 | a100_80gb |
fe438bf4ad7dca357a15fda760a732fbe1eb4435d420c9489c26c9755cd01fc4 | 1,736,455,644,618 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.logical_fallacies | card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.760218 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.7, 'score_ci_low': 0.51, 'num_of_instances': 100} | 1 | a100_80gb |
f336d8f1b0e5c3b2af5112eff85b5ca2fb857b46bf38b58dc6bc2d9ec8da40f2 | 1,736,455,544,359 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.ai2_arc.arc_challenge | card=cards.ai2_arc.arc_challenge,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.249706 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.6794706162255193, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.6794706162255193, 'score_ci_low': 0.49, 'num_of_instances': 100} | 1 | a100_80gb |
91af4252c65641450c7e97c0ab49c518bb30123b1b9c9444093bbd32ce5b68b7 | 1,736,455,548,217 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.290034 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.69, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
79896a24b36f14f4526084772b359c2a9ca047a4d65f262cc3158c0f4a72ac4b | 1,736,455,617,632 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.013869 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.44, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.44, 'score_ci_low': 0.25, 'num_of_instances': 100} | 1 | a100_80gb |
ae1a9ed1e4734722fca139e6eec9030046dd9e826d1dd02b0cdd7e83a1d9ed60 | 1,736,455,551,911 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.167927 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.5, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.59, 'score_name': 'accuracy', 'score': 0.5, 'score_ci_high': 0.59, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
e5c7fc07e2fc3cbe03ce19f24d053e32e3a8c0e9273f572c6886c30ce4346e34 | 1,736,455,560,920 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_medicine | card=cards.mmlu.college_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_numbers_choicesSeparator_newline_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.440272 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.5184151990454869, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.71, 'score_ci_low': 0.5184151990454869, 'num_of_instances': 100} | 1 | a100_80gb |
ac4923d7e3a8d29221d8512a58d91efe3409e224c79e715ed904fcd132294eca | 1,736,455,568,814 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.anatomy | card=cards.mmlu.anatomy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.935233 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.5, 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.61, 'score_name': 'accuracy', 'score': 0.5, 'score_ci_high': 0.61, 'score_ci_low': 0.4, 'num_of_instances': 100} | 1 | a100_80gb |
b982ec442c21c642595ff90898459b3f974cc75f96a77e42d9a1b6ebdcf89faf | 1,736,455,580,901 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.computer_science | card=cards.mmlu_pro.computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.264175 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.37, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
8e256ee380b6345ae89cce7085e3f3070eed6c65263a592de099c4fd19d4d95a | 1,736,455,594,473 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.biology | card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithTopic.enumerator_greek_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 12.375914 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.33, 'accuracy_ci_high': 0.52, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.52, 'score_ci_low': 0.33, 'num_of_instances': 100} | 1 | a100_80gb |
71386e68ed6aba7c16ff58cc7c2b3f60f37c96120f983de9f2e8ee22c7acb4f4 | 1,736,455,605,379 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.health | card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.503964 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.34, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.54, 'score_ci_low': 0.34, 'num_of_instances': 100} | 1 | a100_80gb |
1b397c05143d0cad9ef5b20357a7c2993ca78d0c0dddc47724cd0fdecc3215ce | 1,736,455,613,751 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_psychology | card=cards.mmlu.high_school_psychology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.313923 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.85, 'accuracy_ci_low': 0.77, 'accuracy_ci_high': 0.91, 'score_name': 'accuracy', 'score': 0.85, 'score_ci_high': 0.91, 'score_ci_low': 0.77, 'num_of_instances': 100} | 1 | a100_80gb |
739f53f9c58be9d0220edc69e66f10f2ab22a611af2cc55c6175e516ae8db0db | 1,736,455,548,405 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.ai2_arc.arc_easy | card=cards.ai2_arc.arc_easy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.17238 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.68, 'accuracy_ci_low': 0.5826805167881407, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.76, 'score_ci_low': 0.5826805167881407, 'num_of_instances': 100} | 1 | a100_80gb |
fe5a2ee07ac1e90015ef0bd1965881f182f17f770e1229fa254437348aaaa2f2 | 1,736,455,557,224 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.astronomy | card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_roman_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.274144 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.55, 'num_of_instances': 100} | 1 | a100_80gb |
6a94cc1aa947977d5f02a2adba010b7feba0aea863751dea187b793e43211b43 | 1,736,455,561,358 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.153647 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.56, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
af9e45b2882a1e0250b00feab6b109aa411f0e2c32b4edfbfe6a450a3e177dfc | 1,736,455,565,288 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.397553 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.38, 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.48, 'score_name': 'accuracy', 'score': 0.38, 'score_ci_high': 0.48, 'score_ci_low': 0.29, 'num_of_instances': 100} | 1 | a100_80gb |
01971f30fdb0ae65fae6dbd4a3a892d44447af1ba5f168818cebe7b5c0cae368 | 1,736,455,573,676 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_biology | card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.851489 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
23af7fd1d3de1dc3b74a3e006ba279a38d3d8c756c489918470ab93800fbc52d | 1,736,455,586,096 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.511097 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.5, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.6, 'score_name': 'accuracy', 'score': 0.5, 'score_ci_high': 0.6, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
fca0ec2bad498e2526bec2ba4508a14adb83799e1898ac40bc51cc720d4f811b | 1,736,455,593,748 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_microeconomics | card=cards.mmlu.high_school_microeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.420928 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.7, 'score_ci_low': 0.52, 'num_of_instances': 100} | 1 | a100_80gb |
9f5f4a298c79ed575a094205ddabe78b8322546a13bc20d85ec012d77eba5fb5 | 1,736,455,602,142 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.public_relations | card=cards.mmlu.public_relations,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_roman_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.22403 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.68, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.76, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
c52b3ba95417eab29fa56647a60e8107e28ac7ca4b389435bd462f00ca150c8c | 1,736,455,605,747 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.095572 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.51, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.51, 'score_ci_low': 0.32, 'num_of_instances': 100} | 1 | a100_80gb |
ea7f265e38cfa24de45a7c56adcfa49045bb7b0d1c3de676aadc7e70f920a2db | 1,736,455,609,930 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_computer_science | card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.642173 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.56, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
49051e8eaa6337ca57cd4f64aaeae2b54d7c4e430b2383524bea7b8eeea1823a | 1,736,455,518,537 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesStructuredWithTopic.enumerator_numbers_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.030797 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.74, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.8164359907838434, 'score_name': 'accuracy', 'score': 0.74, 'score_ci_high': 0.8164359907838434, 'score_ci_low': 0.64, 'num_of_instances': 100} | 1 | a100_80gb |
779cd83e8376165317af1a36957496cc41adc2eb96afe6c092742923546d49df | 1,736,455,522,667 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_microeconomics | card=cards.mmlu.high_school_microeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.43073 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.66, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.66, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
3cbb2ee7244d76aa3cac6280ef5bc3c6df7d8725ae0f4698d8966dcffd277f39 | 1,736,455,528,354 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.biology | card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.093358 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.49, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.59, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.59, 'score_ci_low': 0.39, 'num_of_instances': 100} | 1 | a100_80gb |
7f2acf4bba85b4897458492dfb66d2370bdffa1ffc1d54174cbae2e176b9e4c1 | 1,736,455,533,819 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.electrical_engineering | card=cards.mmlu.electrical_engineering,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.628607 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.38, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
a6fc727c81704c208ddee2ec3444f8e6fe5d4ff9f1c775981630c1e29e321c5c | 1,736,455,537,882 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.hellaswag | card=cards.hellaswag,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.HellaSwag.MultipleChoiceTemplatesInstructionsState2.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.53305 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.38, 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.47, 'score_name': 'accuracy', 'score': 0.38, 'score_ci_high': 0.47, 'score_ci_low': 0.29, 'num_of_instances': 100} | 1 | a100_80gb |
6dfe596e7f9274d6d3b8b9e5077907301503063a03c4e659d19a0ef6f3d9b4ca | 1,736,455,551,441 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.engineering | card=cards.mmlu_pro.engineering,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_greek_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 12.918607 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.17, 'accuracy_ci_low': 0.11, 'accuracy_ci_high': 0.25, 'score_name': 'accuracy', 'score': 0.17, 'score_ci_high': 0.25, 'score_ci_low': 0.11, 'num_of_instances': 100} | 1 | a100_80gb |
62a0b4d504a8bee31fcbde86a808da1d082b0c5f257ff02633e28259f0b1933e | 1,736,455,562,202 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.492502 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.7, 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.7, 'score_ci_high': 0.78, 'score_ci_low': 0.61, 'num_of_instances': 100} | 1 | a100_80gb |
ae34c9a94ee49e9a3d0d3f8a7b829d2cc433ae8142ea374c8d80fddf3653349e | 1,736,455,568,096 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.physics | card=cards.mmlu_pro.physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.993202 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.14, 'accuracy_ci_low': 0.08, 'accuracy_ci_high': 0.22, 'score_name': 'accuracy', 'score': 0.14, 'score_ci_high': 0.22, 'score_ci_low': 0.08, 'num_of_instances': 100} | 1 | a100_80gb |
866a0e880249ef11fd2b97d810c04810c1a390c12835826389b194adf10c612a | 1,736,455,583,979 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_world_history | card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 15.21263 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.7, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.7, 'score_ci_high': 0.79, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
315881bd0cd1da919fc74576cc4aee8c79a9a24deac9bb572f1acbd724f8a3ab | 1,736,455,598,192 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.law | card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 12.600234 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.22, 'accuracy_ci_low': 0.15, 'accuracy_ci_high': 0.31, 'score_name': 'accuracy', 'score': 0.22, 'score_ci_high': 0.31, 'score_ci_low': 0.15, 'num_of_instances': 100} | 1 | a100_80gb |
d96a71413b768f584d18422ab8e7a9d7e1114ea5c0d77a0b5494db249fa0647a | 1,736,455,499,715 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.psychology | card=cards.mmlu_pro.psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.215488 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.38, 'accuracy_ci_low': 0.28, 'accuracy_ci_high': 0.48, 'score_name': 'accuracy', 'score': 0.38, 'score_ci_high': 0.48, 'score_ci_low': 0.28, 'num_of_instances': 100} | 1 | a100_80gb |
eb7d6a61d497894099742395d38d67f1fb9cc2a3f2d41f13dd02aeac8c43c987 | 1,736,455,547,070 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.psychology | card=cards.mmlu_pro.psychology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.251428 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.47, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.47, 'score_ci_high': 0.56, 'score_ci_low': 0.37, 'num_of_instances': 100} | 1 | a100_80gb |
77e7a43d180671b1ebf5bdeed920be87052c3135e31a598dc97cd4e8a0855b2c | 1,736,455,504,651 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.clinical_knowledge | card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.119683 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.64, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.64, 'score_ci_high': 0.73, 'score_ci_low': 0.55, 'num_of_instances': 100} | 1 | a100_80gb |
83bc859a760aed4a72b7a595d3d2cc25dc323f5b0c968ffad1cfefc3792429d8 | 1,736,455,511,568 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.computer_science | card=cards.mmlu_pro.computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.335037 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.17, 'accuracy_ci_low': 0.11, 'accuracy_ci_high': 0.25, 'score_name': 'accuracy', 'score': 0.17, 'score_ci_high': 0.25, 'score_ci_low': 0.11, 'num_of_instances': 100} | 1 | a100_80gb |
d529a9281586d084ce0a12dfa019f54527fc0b435a2f5365cb714abd1be88e69 | 1,736,455,586,437 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.computer_science | card=cards.mmlu_pro.computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.24085 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.14, 'accuracy_ci_low': 0.08, 'accuracy_ci_high': 0.22, 'score_name': 'accuracy', 'score': 0.14, 'score_ci_high': 0.22, 'score_ci_low': 0.08, 'num_of_instances': 100} | 1 | a100_80gb |
b57ca2739a59227bea367b6b5b425fd594fcb844dd051385315de2cb9c581760 | 1,736,455,528,582 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_world_history | card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 16.305811 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.79, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.86, 'score_name': 'accuracy', 'score': 0.79, 'score_ci_high': 0.86, 'score_ci_low': 0.7, 'num_of_instances': 100} | 1 | a100_80gb |
f699846c157d020c0f977a7d206e9adb01fd757454e4de3d18a871a04635eeb3 | 1,736,455,536,151 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.physics | card=cards.mmlu_pro.physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_numbers_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.067104 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.17, 'accuracy_ci_low': 0.11, 'accuracy_ci_high': 0.25, 'score_name': 'accuracy', 'score': 0.17, 'score_ci_high': 0.25, 'score_ci_low': 0.11, 'num_of_instances': 100} | 1 | a100_80gb |
39d1006d1ff8878467e30901fcca6b1f03d47f8da707f01da75a766bad9fc900 | 1,736,455,557,469 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.formal_logic | card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.239236 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.8, 'accuracy_ci_low': 0.72, 'accuracy_ci_high': 0.87, 'score_name': 'accuracy', 'score': 0.8, 'score_ci_high': 0.87, 'score_ci_low': 0.72, 'num_of_instances': 100} | 1 | a100_80gb |
2f7f1e26ad846fd62f4572ea1c09ea569bce60b4204b53080ee78e58f089c531 | 1,736,455,570,409 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_computer_science | card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.000664 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.94, 'accuracy_ci_low': 0.88, 'accuracy_ci_high': 0.98, 'score_name': 'accuracy', 'score': 0.94, 'score_ci_high': 0.98, 'score_ci_low': 0.88, 'num_of_instances': 100} | 1 | a100_80gb |
6538ad51e46ef0e060e52f9be7167fef57ead771a070439f8dca2adba88bd448 | 1,736,455,579,242 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.69617 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.79, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.86, 'score_name': 'accuracy', 'score': 0.79, 'score_ci_high': 0.86, 'score_ci_low': 0.7, 'num_of_instances': 100} | 1 | a100_80gb |
7a561bfd7477b9acb9ecaba63eb9be3f061320b8f879e02a757872cddfcd2ef5 | 1,736,455,515,382 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.business_ethics | card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_roman_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.836857 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.62, 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.7, 'score_ci_low': 0.52, 'num_of_instances': 100} | 1 | a100_80gb |
31eeaf66965769e6629dc802b562d428fe619ec60408b5b690da497d39d3278b | 1,736,455,519,344 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.social_iqa | card=cards.social_iqa,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.Social_IQa.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_roman_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.033226 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.33, 'accuracy_ci_high': 0.52, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.52, 'score_ci_low': 0.33, 'num_of_instances': 100} | 1 | a100_80gb |
789e23cce88c520679695a3f2a0e67fc7b0c8004bc1748dc24ef30cd88958fa6 | 1,736,455,523,110 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.269889 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.68, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
d492b24d3ca45b72f63d156f4f5ab08bd8bb542dc7658099d2d9f501037241f9 | 1,736,455,535,441 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.math | card=cards.mmlu_pro.math,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_greek_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.774674 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.2, 'accuracy_ci_low': 0.13, 'accuracy_ci_high': 0.29, 'score_name': 'accuracy', 'score': 0.2, 'score_ci_high': 0.29, 'score_ci_low': 0.13, 'num_of_instances': 100} | 1 | a100_80gb |
326ff005b8457725bb1f9a8457054402f892bed82233175ca2748085f3f73a4a | 1,736,455,547,263 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.631547 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.7, 'score_ci_low': 0.51, 'num_of_instances': 100} | 1 | a100_80gb |
f4fdce80316c6a7268457a1ec2faee50a893986a46828a1d6b7f4b54a17b0b19 | 1,736,455,553,792 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.chemistry | card=cards.mmlu_pro.chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_greek_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.357965 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.14, 'accuracy_ci_low': 0.08, 'accuracy_ci_high': 0.21, 'score_name': 'accuracy', 'score': 0.14, 'score_ci_high': 0.21, 'score_ci_low': 0.08, 'num_of_instances': 100} | 1 | a100_80gb |
ba354a296a9d09ef7b3b6726d359ea41378961da6f742b94813829677f0e4737 | 1,736,455,567,787 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.elementary_mathematics | card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 13.29355 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.16, 'accuracy_ci_low': 0.09, 'accuracy_ci_high': 0.24, 'score_name': 'accuracy', 'score': 0.16, 'score_ci_high': 0.24, 'score_ci_low': 0.09, 'num_of_instances': 100} | 1 | a100_80gb |
f3b813da98792ddcdd85fb53faf4e9e466278b72e2980eb99449e9e561a60d1f | 1,736,455,574,167 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.philosophy | card=cards.mmlu.philosophy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.498306 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.83, 'accuracy_ci_low': 0.7498953730116564, 'accuracy_ci_high': 0.89, 'score_name': 'accuracy', 'score': 0.83, 'score_ci_high': 0.89, 'score_ci_low': 0.7498953730116564, 'num_of_instances': 100} | 1 | a100_80gb |
e3a40c022304416f6c67171380f29b939654b1868557404f2cb1c1cfa3ad8ba1 | 1,736,455,578,445 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_chemistry | card=cards.mmlu.college_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.560262 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.39, 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.49, 'score_name': 'accuracy', 'score': 0.39, 'score_ci_high': 0.49, 'score_ci_low': 0.29, 'num_of_instances': 100} | 1 | a100_80gb |
e5f47d6dc9b70e91bf317546332fb6df9e5134559a09771f1085b6e18b5d6445 | 1,736,455,586,215 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.191748 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.35, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
13eb4b9972ae6dda9b6c34a2939020783f7a483901b3886f8b52b16b410f9779 | 1,736,455,483,844 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_computer_science | card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 13.461906 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.84, 'score_ci_low': 0.67, 'num_of_instances': 100} | 1 | a100_80gb |
5b21ac4f5129cada7448321a0181cc5610da4128644917d1c1f24bbc5d3f61c1 | 1,736,455,493,206 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_medicine | card=cards.mmlu.college_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.703835 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.59, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.69, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.69, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
3936d54df06bb0d64905cda867e7dc79b0e34ee8a6a646f586a7235145c2c027 | 1,736,455,525,411 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_medicine | card=cards.mmlu.college_medicine,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.187927 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.65, 'score_ci_low': 0.47, 'num_of_instances': 100} | 1 | a100_80gb |
bac4cd1c29fb2daf483cb24e5b0fce6725c424f7e99afce85d4f3a9f9c87837c | 1,736,455,502,662 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.127972 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.32, 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.42, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.42, 'score_ci_low': 0.24, 'num_of_instances': 100} | 1 | a100_80gb |
853c8ab8014ac4678258e4c681be9324765b35646100a883316810895de49400 | 1,736,455,507,707 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_biology | card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.176781 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.72, 'accuracy_ci_low': 0.62, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.8, 'score_ci_low': 0.62, 'num_of_instances': 100} | 1 | a100_80gb |
4cb016d767f869f84bb8b74f4d9d7dd87c2ffb11b81ea7c86560edeb227f02ab | 1,736,455,519,618 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.business | card=cards.mmlu_pro.business,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.292687 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.37, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
b8c6fd58c7977fa421b99246332e14ffb8c5341e61d198e393eee4e2290fe9ea | 1,736,455,529,784 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_biology | card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.715492 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
989ed0194346ef8c0fdc8890747e4a2e5b11c1673d8361091c391fba645b6bea | 1,736,455,534,458 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.077402 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.56, 'num_of_instances': 100} | 1 | a100_80gb |
ece06753881c38045adbdb3b694af1da5fdaabdf54555d67ebeb2640171683b7 | 1,736,455,545,232 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.machine_learning | card=cards.mmlu.machine_learning,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.17161 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.64, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
fae947616c36d6e13ea776021c47fc9628bae65c07c036a4a887122a06fbc57c | 1,736,455,553,051 | 1,736,452,800,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.212883 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.36, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
40ea3f80cce6f91fb491b986c34f557ffbc557413451f2a8a2b75e66040e95bd | 1,736,455,464,770 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.social_iqa | card=cards.social_iqa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.Social_IQa.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_greek_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.805375 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.56, 'score_ci_low': 0.37, 'num_of_instances': 100} | 1 | a100_80gb |
b4ed5e4ab04db65c877986ee72eea009b3b4e01893679be6105cdaf436802cd0 | 1,736,455,476,425 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.moral_disputes | card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.9967 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.4665194662143706, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.65, 'score_ci_low': 0.4665194662143706, 'num_of_instances': 100} | 1 | a100_80gb |
08e0825cbe82d967eec23fd46f2f741c9b9d122c6a0b0ca541b38c549b439b81 | 1,736,455,480,628 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_roman_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.30342 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.49, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.59, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.59, 'score_ci_low': 0.39, 'num_of_instances': 100} | 1 | a100_80gb |
87c3023a0723a8dff0743759f075f1a472ff61a378b31fc292e0d9728e13a06e | 1,736,455,486,965 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.conceptual_physics | card=cards.mmlu.conceptual_physics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.804695 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.51, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.61, 'score_name': 'accuracy', 'score': 0.51, 'score_ci_high': 0.61, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
69bac4915a863d4f7259677bb55e5ca8e871ac977841a3f8db99b23b511a6fe7 | 1,736,455,500,938 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.law | card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_greek_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 13.270504 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.22, 'accuracy_ci_low': 0.15, 'accuracy_ci_high': 0.31, 'score_name': 'accuracy', 'score': 0.22, 'score_ci_high': 0.31, 'score_ci_low': 0.15, 'num_of_instances': 100} | 1 | a100_80gb |
0ebfeeebb3c27e7a45e32a95a05e98f5596b264922ba49b73ba25c4de868233c | 1,736,455,508,210 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_biology | card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.526475 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.44010712284621717, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.64, 'score_ci_low': 0.44010712284621717, 'num_of_instances': 100} | 1 | a100_80gb |
de1e6201fc21328c7d8ef54ee677103babb751a95cea2024151ed954cc795ada | 1,736,455,515,733 | 1,736,452,800,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_government_and_politics | card=cards.mmlu.high_school_government_and_politics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.936486 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.84, 'accuracy_ci_low': 0.75, 'accuracy_ci_high': 0.9, 'score_name': 'accuracy', 'score': 0.84, 'score_ci_high': 0.9, 'score_ci_low': 0.75, 'num_of_instances': 100} | 1 | a100_80gb |
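
A note on consuming these rows: the `generation_args`, `model_args`, and `packages_versions` columns are serialized as JSON (double-quoted keys), while the `scores` column is serialized as a Python-literal dict (single-quoted keys), so `json.loads` will fail on it. Below is a minimal sketch of how one might extract per-row scores; the row dict shown is copied from the last row above, but the parsing helper and its name are illustrative assumptions, not part of the dataset.

```python
# Minimal sketch (not shipped with the dataset) for reading one result row.
# Assumes rows arrive as dicts of strings, e.g. via datasets.load_dataset(...).
import ast
import json


def parse_scores(scores_str: str) -> dict:
    # The `scores` column uses Python repr (single quotes), so we use
    # ast.literal_eval rather than json.loads.
    return ast.literal_eval(scores_str)


# Values taken verbatim from the final row above.
row = {
    "model_name_or_path": "mistralai_Mistral-7B-Instruct-v0.3",
    "unitxt_card": "cards.mmlu.high_school_government_and_politics",
    "model_args": '{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, '
                  '"device": "auto", "max_num_batched_tokens": 4096, '
                  '"gpu_memory_utilization": 0.7, "max_model_len": 4096, '
                  '"tensor_parallel_size": 1}',
    "scores": "{'accuracy': 0.84, 'accuracy_ci_low': 0.75, "
              "'accuracy_ci_high': 0.9, 'score_name': 'accuracy', "
              "'score': 0.84, 'score_ci_high': 0.9, 'score_ci_low': 0.75, "
              "'num_of_instances': 100}",
}

model_args = json.loads(row["model_args"])   # JSON columns parse normally
scores = parse_scores(row["scores"])          # scores need literal_eval

print(row["unitxt_card"], scores["score"],
      (scores["score_ci_low"], scores["score_ci_high"]),
      model_args["model"])
```

The same pattern applies to every row in the table: parse the JSON-encoded columns with `json.loads` and the `scores` column with `ast.literal_eval`, then aggregate `score` and its confidence bounds per `model_name_or_path` and `unitxt_card` as needed.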