Muennighoff
committed on
Scheduled Commit
data/clustering_individual-8d38bb89-1e1a-471a-8b9e-35c1f784690e.jsonl
CHANGED
@@ -4,3 +4,9 @@
{"tstamp": 1722361050.8948, "task_type": "clustering", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1722361050.6801, "finish": 1722361050.8948, "ip": "", "conv_id": "ae38a38353664ad292bdd931655fee3c", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": ["historical fiction", "fantasy", "science fiction", "horror", "thriller", "romance", "mystery", "linen", "silk", "wool", "leather", "denim", "incisor", "premolar", "molar", "canine", "wisdom tooth", "tornado", "hailstorm", "blizzard", "fog", "drought"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722361056.8438, "task_type": "clustering", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1722361056.7634, "finish": 1722361056.8438, "ip": "", "conv_id": "2353080a415d4d11aab302c53f701ff1", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": ["penguin", "tiger", "dolphin", "giraffe", "lion", "koala", "airplane", "boat", "bicycle", "car", "motorcycle", "train", "Cancer", "Leo", "concave", "convex", "parabolic", "mackerel", "halibut", "cod", "salmon", "bass", "trout", "tuna"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722361056.8438, "task_type": "clustering", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1722361056.7634, "finish": 1722361056.8438, "ip": "", "conv_id": "d8ca698edd9246e4bd9219064067b85c", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": ["penguin", "tiger", "dolphin", "giraffe", "lion", "koala", "airplane", "boat", "bicycle", "car", "motorcycle", "train", "Cancer", "Leo", "concave", "convex", "parabolic", "mackerel", "halibut", "cod", "salmon", "bass", "trout", "tuna"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+
{"tstamp": 1722363261.7965, "task_type": "clustering", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1722363261.7141, "finish": 1722363261.7965, "ip": "", "conv_id": "b76eafd60a354f2f9f8b9bfe3fa6de9e", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": ["If someone online buys something off of my Amazon wish list, do they get my full name and address?", "Package \"In Transit\" over a week. No scheduled delivery date, no locations. What's up?", "Can Amazon gift cards replace a debit card?", "Homesick GWS star Cameron McCarthy on road to recovery", "Accidently ordered 2 of an item, how do I only return 1? For free?", "Need help ASAP, someone ordering in my account", "So who's everyone tipping for Round 1?"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+
{"tstamp": 1722363261.7965, "task_type": "clustering", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722363261.7141, "finish": 1722363261.7965, "ip": "", "conv_id": "fb2cf7f9ac6f41eba82993886cfe0176", "model_name": "BAAI/bge-large-en-v1.5", "prompt": ["If someone online buys something off of my Amazon wish list, do they get my full name and address?", "Package \"In Transit\" over a week. No scheduled delivery date, no locations. What's up?", "Can Amazon gift cards replace a debit card?", "Homesick GWS star Cameron McCarthy on road to recovery", "Accidently ordered 2 of an item, how do I only return 1? For free?", "Need help ASAP, someone ordering in my account", "So who's everyone tipping for Round 1?"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+
{"tstamp": 1722363294.4266, "task_type": "clustering", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1722363294.3361, "finish": 1722363294.4266, "ip": "", "conv_id": "b76eafd60a354f2f9f8b9bfe3fa6de9e", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": ["If someone online buys something off of my Amazon wish list, do they get my full name and address?", "Package \"In Transit\" over a week. No scheduled delivery date, no locations. What's up?", "Can Amazon gift cards replace a debit card?", "Homesick GWS star Cameron McCarthy on road to recovery", "Accidently ordered 2 of an item, how do I only return 1? For free?", "Need help ASAP, someone ordering in my account", "So who's everyone tipping for Round 1?", "octagon", "rectangle", "Temple of Artemis", "Colossus of Rhodes", "Statue of Zeus", "Lighthouse of Alexandria", "Hanging Gardens of Babylon", "Pyramids of Giza", "brunette", "black", "blonde", "redhead", "gray", "auburn", "white", "soccer", "basketball", "tennis", "baseball", "cricket", "ruby", "topaz", "diamond"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+
{"tstamp": 1722363294.4266, "task_type": "clustering", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722363294.3361, "finish": 1722363294.4266, "ip": "", "conv_id": "fb2cf7f9ac6f41eba82993886cfe0176", "model_name": "BAAI/bge-large-en-v1.5", "prompt": ["If someone online buys something off of my Amazon wish list, do they get my full name and address?", "Package \"In Transit\" over a week. No scheduled delivery date, no locations. What's up?", "Can Amazon gift cards replace a debit card?", "Homesick GWS star Cameron McCarthy on road to recovery", "Accidently ordered 2 of an item, how do I only return 1? For free?", "Need help ASAP, someone ordering in my account", "So who's everyone tipping for Round 1?", "octagon", "rectangle", "Temple of Artemis", "Colossus of Rhodes", "Statue of Zeus", "Lighthouse of Alexandria", "Hanging Gardens of Babylon", "Pyramids of Giza", "brunette", "black", "blonde", "redhead", "gray", "auburn", "white", "soccer", "basketball", "tennis", "baseball", "cricket", "ruby", "topaz", "diamond"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+
{"tstamp": 1722363303.4905, "task_type": "clustering", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1722363303.3937, "finish": 1722363303.4905, "ip": "", "conv_id": "b76eafd60a354f2f9f8b9bfe3fa6de9e", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": ["If someone online buys something off of my Amazon wish list, do they get my full name and address?", "Package \"In Transit\" over a week. No scheduled delivery date, no locations. What's up?", "Can Amazon gift cards replace a debit card?", "Homesick GWS star Cameron McCarthy on road to recovery", "Accidently ordered 2 of an item, how do I only return 1? For free?", "Need help ASAP, someone ordering in my account", "So who's everyone tipping for Round 1?", "octagon", "rectangle", "Temple of Artemis", "Colossus of Rhodes", "Statue of Zeus", "Lighthouse of Alexandria", "Hanging Gardens of Babylon", "Pyramids of Giza", "brunette", "black", "blonde", "redhead", "gray", "auburn", "white", "soccer", "basketball", "tennis", "baseball", "cricket", "ruby", "topaz", "diamond"], "ncluster": 5, "output": "", "ndim": "2D (press for 3D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+
{"tstamp": 1722363303.4905, "task_type": "clustering", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722363303.3937, "finish": 1722363303.4905, "ip": "", "conv_id": "fb2cf7f9ac6f41eba82993886cfe0176", "model_name": "BAAI/bge-large-en-v1.5", "prompt": ["If someone online buys something off of my Amazon wish list, do they get my full name and address?", "Package \"In Transit\" over a week. No scheduled delivery date, no locations. What's up?", "Can Amazon gift cards replace a debit card?", "Homesick GWS star Cameron McCarthy on road to recovery", "Accidently ordered 2 of an item, how do I only return 1? For free?", "Need help ASAP, someone ordering in my account", "So who's everyone tipping for Round 1?", "octagon", "rectangle", "Temple of Artemis", "Colossus of Rhodes", "Statue of Zeus", "Lighthouse of Alexandria", "Hanging Gardens of Babylon", "Pyramids of Giza", "brunette", "black", "blonde", "redhead", "gray", "auburn", "white", "soccer", "basketball", "tennis", "baseball", "cricket", "ruby", "topaz", "diamond"], "ncluster": 5, "output": "", "ndim": "2D (press for 3D)", "dim_method": "PCA", "clustering_method": "KMeans"}
data/retrieval_battle-8d38bb89-1e1a-471a-8b9e-35c1f784690e.jsonl
CHANGED
@@ -1,3 +1,7 @@
{"tstamp": 1722362021.8407, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "6c79be6c3a75449590dbdf0de2712f54", "0_model_name": "GritLM/GritLM-7B", "0_prompt": "are fresh or frozen vegetables healthier", "0_output": [["are fresh or frozen vegetables healthier", "Frozen meal\n\nFollowing this, the dinners are either covered with aluminum foil or paper, and the product is tightly packed with a partial vacuum created to ensure no evaporation takes place that can cause the food to dry out. Then the packaged dinners are placed in a refrigerated storage facility, transported by refrigerated truck, and stored in the grocer's freezer. TV dinners prepared with the aforementioned steps—that is, frozen and packaged properly—can remain in near-perfect condition for a long time, so long as they are stored at during shipping and storage.\nHealth concerns\nFrozen meals are often heavily processed with extra salt and fat to make foods last longer. In addition, stabilizing the product for a long period typically means that companies will use partially hydrogenated vegetable oils for some items (typically dessert). Partially hydrogenated vegetable oils are high in trans fats and are shown to adversely affect cardiovascular health. The dinners are almost always significantly less nutritious than fresh food and are formulated to remain edible after long periods of storage, thus often requiring preservatives such as butylated hydroxytoluene. There is, however, some variability between brands.\nIn recent years there has been a push by a number of independent manufacturers and retailers to make meals that are low in salt and fat and free of artificial additives. In the UK, most British supermarkets also produce their own \"healthy eating\" brands. Nearly all chilled or frozen ready meals sold in the UK are now clearly labeled with the salt, sugar and fat content and the recommended daily intake. Concern about obesity and government publicity initiatives such as those by the Food Standards Agency and the National Health Service have encouraged manufacturers to reduce the levels of salt and fat in ready prepared food."]], "0_corpus": "wikipedia", "1_conv_id": "7b8aff77791243c8ade0e1a6091a52d9", "1_model_name": "nomic-ai/nomic-embed-text-v1.5", "1_prompt": "are fresh or frozen vegetables healthier", "1_output": [["are fresh or frozen vegetables healthier", "Fresh Fruit for Rotting Vegetables\n\nFresh Fruit for Rotting Vegetables is the debut studio album by the American punk rock band Dead Kennedys. It was first released on September 2, 1980, through Cherry Red Records in the United Kingdom, and I.R.S. Records in the United States. It was later issued by Jello Biafra's own Alternative Tentacles label in the United States. It is the only Dead Kennedys studio album to feature drummer Bruce Slesinger and (on one track) guitarist Carlo Cadona.\nRecording and release\nThe photo on the front cover, showing several police cars on fire, was taken during the White Night riots of May 21, 1979, that resulted from the light sentence given to former San Francisco City Supervisor Dan White for the murder of Mayor George Moscone and Supervisor Harvey Milk. When Biafra ran for mayor, one of his policies had been for a statue to be erected to Dan White, and for eggs, tomatoes and stones to be available nearby for pelting it. In addition, the band's version of \"I Fought the Law\" has rewritten lyrics making reference to the incident."]], "1_corpus": "wikipedia"}
{"tstamp": 1722362060.3289, "task_type": "retrieval", "type": "bothbadvote", "models": ["", ""], "ip": "", "0_conv_id": "4e0b17f4fecb4b1685dcac82f33b7b27", "0_model_name": "GritLM/GritLM-7B", "0_prompt": "\thow does qualtrics fit in with the big data trend?", "0_output": [["\thow does qualtrics fit in with the big data trend?", "Qualtrics\n\nQualtrics is an American experience management company, with co-headquarters in Seattle, Washington, and Provo, Utah, in the United States. The company was founded in 2002 by Scott M. Smith, Ryan Smith, Jared Smith, and Stuart Orgill.\nQualtrics offers a cloud-based subscription software platform for experience management, which it launched in March 2017."]], "0_corpus": "wikipedia", "1_conv_id": "3a42eda27b5d4c9fb598e714d2144882", "1_model_name": "text-embedding-3-large", "1_prompt": "\thow does qualtrics fit in with the big data trend?", "1_output": [["\thow does qualtrics fit in with the big data trend?", "Qualtrics\n\nAwards and ranking In 2020, Qualtrics earned a ‘Leader’ designation in Gartner's Magic Quadrant for Voice of Customer, a ‘leader’ designation in Forrester's Employee Experience for Enterprise wave, and the top ranking in G2's Experience Management category. In 2016, Qualtrics was ranked #12 on the Forbes Cloud 100 list, moving to #6 in 2017. In March 2020, Qualtrics's CoreXM platform was named a 2020 gold winner by the Edison Awards in the Applied Technology category.\nAcquisitions\nIn May 2016, Qualtrics acquired statistical analysis startup Statwing for an undisclosed sum. Statwing was a San Francisco-based company that created point-and-click software for advanced statistical analysis.\nIn April 2018 the firm acquired Delighted for an undisclosed sum. Delighted had more than 1,500 customers at the time of acquisition.\nIn October 2021 the firm acquired Clarabridge in an all-stock deal for $1.125 billion. Clarabridge was a Virginia-based company that created software for omnichannel conversational analytics.\nAcquisition by SAP SE\nIn November 2018, SAP announced its intent to acquire Qualtrics. SAP acquired all outstanding shares of Qualtrics for US$8 billion in an all cash deal. SAP secured €7 billion in financing. At the time it was announced, the Qualtrics acquisition was SAP's second-biggest purchase ever, behind the $8.3 billion acquisition of travel and expense management firm Concur in 2014. The acquisition was formally closed January 23, 2019."]], "1_corpus": "wikipedia"}
{"tstamp": 1722362558.6289, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "e8c6f75538814d58ac1610d735ef218d", "0_model_name": "intfloat/multilingual-e5-large-instruct", "0_prompt": "how many numbers can computer count?", "0_output": [["how many numbers can computer count?", "Orders of magnitude (numbers)\n\nComputing – UTF-16/Unicode: There are 1,114,112 (220 + 216) distinct values encodable in UTF-16, and, thus (as Unicode is currently limited to the UTF-16 code space), 1,114,112 valid code points in Unicode (1,112,064 scalar values and 2,048 surrogates).\nLudology – Number of games: Approximately 1,181,019 video games have been created as of 2019.\nBiology – Species: The World Resources Institute claims that approximately 1.4 million species have been named, out of an unknown number of total species (estimates range between 2 and 100 million species). Some scientists give 8.8 million species as an exact figure.\nGenocide: Approximately 800,000–1,500,000 (1.5 million) Armenians were killed in the Armenian genocide.\nLinguistics: The number of possible conjugations for each verb in the Archi language is 1,502,839.\nInfo: The freedb database of CD track listings has around 1,750,000 entries .\nComputing – UTF-8: 2,164,864 (221 + 216 + 211 + 27) possible one- to four-byte UTF-8 sequences, if the restrictions on overlong sequences, surrogate code points, and code points beyond U+10FFFF are not adhered to. (Note that not all of these correspond to unique code points.)\nMathematics – Playing cards: There are 2,598,960 different 5-card poker hands that can be dealt from a standard 52-card deck.\nMathematics: There are 3,149,280 possible positions for the Skewb."]], "0_corpus": "wikipedia", "1_conv_id": "46323c41f9be4245865047b7788ba305", "1_model_name": "intfloat/e5-mistral-7b-instruct", "1_prompt": "how many numbers can computer count?", "1_output": [["how many numbers can computer count?", "Computer number format\n\nThe relation between numbers and bit patterns is chosen for convenience in computer manipulation; eight bytes stored in computer memory may represent a 64-bit real, two 32-bit reals, or four signed or unsigned integers, or some other kind of data that fits into eight bytes. The only difference is how the computer interprets them. If the computer stored four unsigned integers and then read them back from memory as a 64-bit real, it almost always would be a perfectly valid real number, though it would be junk data.\nOnly a finite range of real numbers can be represented with a given number of bits. Arithmetic operations can overflow or underflow, producing a value too large or too small to be represented.\nThe representation has a limited precision. For example, only 15 decimal digits can be represented with a 64-bit real. If a very small floating-point number is added to a large one, the result is just the large one. The small number was too small to even show up in 15 or 16 digits of resolution, and the computer effectively discards it. Analyzing the effect of limited precision is a well-studied problem. Estimates of the magnitude of round-off errors and methods to limit their effect on large calculations are part of any large computation project. The precision limit is different from the range limit, as it affects the significand, not the exponent."]], "1_corpus": "wikipedia"}
+
{"tstamp": 1722363249.6393, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "19611b79c0b8467cb1ffaee054c1e14e", "0_model_name": "BAAI/bge-large-en-v1.5", "0_prompt": "Training an adapter for a retrieval model", "0_output": [["Training an adapter for a retrieval model", "Title: Parameter-Efficient Sparse Retrievers and Rerankers using Adapters\n\nAbstract: Parameter-Efficient transfer learning with Adapters have been studied in Natural Language Processing (NLP) as an alternative to full fine-tuning. Adapters are memory-efficient and scale well with downstream tasks by training small bottle-neck layers added between transformer layers while keeping the large pretrained language model (PLMs) frozen. In spite of showing promising results in NLP, these methods are under-explored in Information Retrieval. While previous studies have only experimented with dense retriever or in a cross lingual retrieval scenario, in this paper we aim to complete the picture on the use of adapters in IR. First, we study adapters for SPLADE, a sparse retriever, for which adapters not only retain the efficiency and effectiveness otherwise achieved by finetuning, but are memory-efficient and orders of magnitude lighter to train. We observe that Adapters-SPLADE not only optimizes just 2\\% of training parameters, but outperforms fully fine-tuned counterpart and existing parameter-efficient dense IR models on IR benchmark datasets. Secondly, we address domain adaptation of neural retrieval thanks to adapters on cross-domain BEIR datasets and TripClick. Finally, we also consider knowledge sharing between rerankers and first stage rankers. Overall, our study complete the examination of adapters for neural IR"]], "0_corpus": "arxiv", "1_conv_id": "2306e0a4a4b24733a382d537e1889bd3", "1_model_name": "nomic-ai/nomic-embed-text-v1.5", "1_prompt": "Training an adapter for a retrieval model", "1_output": [["Training an adapter for a retrieval model", "Title: On Discrete Painleve Equations Associated with the Lattice KdV Systems and the Painleve VI Equation\n\nAbstract: A new integrable nonautonomous nonlinear ordinary difference equation is presented which can be considered to be a discrete analogue of the Painleve V equation. Its derivation is based on the similarity reduction on the two-dimensional lattice of integrable partial difference equations of KdV type. The new equation which is referred to as GDP (generalised discrete Painleve equation) contains various ``discrete Painleve equations'' as subcases for special values/limits of the parameters, some of which were already given before in the literature. The general solution of the GDP can be expressed in terms of Painleve VI (PVI) transcendents. In fact, continuous PVI emerges as the equation obeyed by the solutions of the discrete equation in terms of the lattice parameters rather than the lattice variables that label the lattice sites. We show that the bilinear form of PVI is embedded naturally in the lattice systems leading to the GDP. Further results include the establishment of Baecklund and Schlesinger transformations for the GDP, the corresponding isomonodromic deformation problem, and the self-duality of its bilinear scheme."]], "1_corpus": "arxiv"}
+
{"tstamp": 1722363327.3402, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "55efd4d6039e4ba194eb41c38a3c6ea7", "0_model_name": "sentence-transformers/all-MiniLM-L6-v2", "0_prompt": "prompt optimization using a reward model", "0_output": [["prompt optimization using a reward model", "Title: Invariants of moduli spaces of abelian surfaces\n\nAbstract: Compactifications of moduli spaces of (1,p)-polarized abelian surfaces with level structures of canonical type have been described in great detail by Hulek, Kahn and Weintraub. The aim of this paper is to determine some invariants of smooth models of these moduli spaces. In particular, a geometric description of their canonical divisors is given and their Chern numbers are computed."]], "0_corpus": "arxiv", "1_conv_id": "720dd7663692404dbe4e7978fb23acb5", "1_model_name": "voyage-multilingual-2", "1_prompt": "prompt optimization using a reward model", "1_output": [["prompt optimization using a reward model", "Title: Reward Collapse in Aligning Large Language Models\n\nAbstract: The extraordinary capabilities of large language models (LLMs) such as ChatGPT and GPT-4 are in part unleashed by aligning them with reward models that are trained on human preferences, which are often represented as rankings of responses to prompts. In this paper, we document the phenomenon of \\textit{reward collapse}, an empirical observation where the prevailing ranking-based approach results in an \\textit{identical} reward distribution \\textit{regardless} of the prompts during the terminal phase of training. This outcome is undesirable as open-ended prompts like ``write a short story about your best friend'' should yield a continuous range of rewards for their completions, while specific prompts like ``what is the capital of New Zealand'' should generate either high or low rewards. Our theoretical investigation reveals that reward collapse is primarily due to the insufficiency of the ranking-based objective function to incorporate prompt-related information during optimization. This insight allows us to derive closed-form expressions for the reward distribution associated with a set of utility functions in an asymptotic regime. To overcome reward collapse, we introduce a prompt-aware optimization scheme that provably admits a prompt-dependent reward distribution within the interpolating regime. Our experimental results suggest that our proposed prompt-aware utility functions significantly alleviate reward collapse during the training of reward models."]], "1_corpus": "arxiv"}
+
{"tstamp": 1722363435.7841, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "4cb5f6a122cb452f867ed2739011cd9b", "0_model_name": "BM25", "0_prompt": "distilling the reranking ability of large language models to cross encoders", "0_output": [["distilling the reranking ability of large language models to cross encoders", "Title: ReFIT: Relevance Feedback from a Reranker during Inference\n\nAbstract: Retrieve-and-rerank is a prevalent framework in neural information retrieval, wherein a bi-encoder network initially retrieves a pre-defined number of candidates (e.g., K=100), which are then reranked by a more powerful cross-encoder model. While the reranker often yields improved candidate scores compared to the retriever, its scope is confined to only the top K retrieved candidates. As a result, the reranker cannot improve retrieval performance in terms of Recall@K. In this work, we propose to leverage the reranker to improve recall by making it provide relevance feedback to the retriever at inference time. Specifically, given a test instance during inference, we distill the reranker's predictions for that instance into the retriever's query representation using a lightweight update mechanism. The aim of the distillation loss is to align the retriever's candidate scores more closely with those produced by the reranker. The algorithm then proceeds by executing a second retrieval step using the updated query vector. We empirically demonstrate that this method, applicable to various retrieve-and-rerank frameworks, substantially enhances retrieval recall across multiple domains, languages, and modalities."]], "0_corpus": "arxiv", "1_conv_id": "44d3ff6d268a4decb6abdb3d5eb97904", "1_model_name": "BAAI/bge-large-en-v1.5", "1_prompt": "distilling the reranking ability of large language models to cross encoders", "1_output": [["distilling the reranking ability of large language models to cross encoders", "Title: A Systematic Investigation of Distilling Large Language Models into Cross-Encoders for Passage Re-ranking\n\nAbstract: Cross-encoders distilled from large language models (LLMs) are often more effective re-rankers than cross-encoders fine-tuned on manually labeled data. However, the distilled models usually do not reach their teacher LLM's effectiveness. To investigate whether best practices for fine-tuning cross-encoders on manually labeled data (e.g., hard-negative sampling, deep sampling, and listwise loss functions) can help to improve LLM ranker distillation, we construct and release a new distillation dataset: Rank-DistiLLM. In our experiments, cross-encoders trained on Rank-DistiLLM reach the effectiveness of LLMs while being orders of magnitude more efficient. Our code and data is available at https://github.com/webis-de/msmarco-llm-distillation."]], "1_corpus": "arxiv"}
+
{"tstamp": 1722363538.7216, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "435649f6492e485c93e938c5af958974", "0_model_name": "Salesforce/SFR-Embedding-2_R", "0_prompt": "How can I use an LLM to train an embedding model?", "0_output": [["How can I use an LLM to train an embedding model?", "Title: Language Models are Universal Embedders\n\nAbstract: In the large language model (LLM) revolution, embedding is a key component of various systems. For example, it is used to retrieve knowledge or memories for LLMs, to build content moderation filters, etc. As such cases span from English to other natural or programming languages, from retrieval to classification and beyond, it is desirable to build a unified embedding model rather than dedicated ones for each scenario. In this work, we make an initial step towards this goal, demonstrating that multiple languages (both natural and programming) pre-trained transformer decoders can embed universally when finetuned on limited English data. We provide a comprehensive practice with thorough evaluations. On English MTEB, our models achieve competitive performance on different embedding tasks by minimal training data. On other benchmarks, such as multilingual classification and code search, our models (without any supervision) perform comparably to, or even surpass heavily supervised baselines and/or APIs. These results provide evidence of a promising path towards building powerful unified embedders that can be applied across tasks and languages."]], "0_corpus": "arxiv", "1_conv_id": "8eee464cbf244db6b3e8f8a8cc8561d8", "1_model_name": "intfloat/e5-mistral-7b-instruct", "1_prompt": "How can I use an LLM to train an embedding model?", "1_output": [["How can I use an LLM to train an embedding model?", "Title: Embedding-Aligned Language Models\n\nAbstract: We propose a novel approach for training large language models (LLMs) to adhere to objectives defined within a latent embedding space. Our method leverages reinforcement learning (RL), treating a pre-trained LLM as an environment. Our embedding-aligned guided language (EAGLE) agent is trained to iteratively steer the LLM's generation towards optimal regions of the latent embedding space, w.r.t. some predefined criterion. We demonstrate the effectiveness of the EAGLE agent using the MovieLens 25M dataset to surface content gaps that satisfy latent user demand. We also demonstrate the benefit of using an optimal design of a state-dependent action set to improve EAGLE's efficiency. Our work paves the way for controlled and grounded text generation using LLMs, ensuring consistency with domain-specific knowledge and data representations."]], "1_corpus": "arxiv"}
data/retrieval_individual-8d38bb89-1e1a-471a-8b9e-35c1f784690e.jsonl
CHANGED
@@ -34,3 +34,9 @@
{"tstamp": 1722363001.185, "task_type": "retrieval", "type": "chat", "model": "sentence-transformers/all-MiniLM-L6-v2", "gen_params": {}, "start": 1722363001.0044, "finish": 1722363001.185, "ip": "", "conv_id": "7c0329cf8d684b12ae62ec20f31b7307", "model_name": "sentence-transformers/all-MiniLM-L6-v2", "prompt": "when is the end of the football season", "output": [["when is the end of the football season", "NFL regular season\n\nHistory In its early years after , the NFL did not have a set schedule, and teams played as few as eight and as many as sixteen games, many against independent professional, college, or amateur teams. From through , they played from eleven to fourteen games per season, depending on the number of teams in the league. From through , each NFL team played 12 games per season. The American Football League began play in 1960 and introduced a balanced schedule of 14 games per team over a fifteen-week season, in which each of the eight teams played each of the other teams twice, with one bye week. Competition from the new league caused the NFL to expand and follow suit with a fourteen-game schedule in .\nAlso in 1961, the U.S. Congress passed the Sports Broadcasting Act of 1961 in response to a court decision which ruled that the NFL's method of negotiating television broadcasting rights violated antitrust laws. The law allows the league to sell the rights to all regular season and postseason games to the networks, but prohibits the league from directly competing with high school and college football games. Since high school and college teams typically play games on Friday and Saturday, respectively, the NFL cannot hold games on those days until those seasons generally end in mid-December."]], "corpus": "wikipedia"}
{"tstamp": 1722363167.9217, "task_type": "retrieval", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722363167.7503, "finish": 1722363167.9217, "ip": "", "conv_id": "19611b79c0b8467cb1ffaee054c1e14e", "model_name": "BAAI/bge-large-en-v1.5", "prompt": "Training an adapter for a retrieval model", "output": [["Training an adapter for a retrieval model", "Title: Parameter-Efficient Sparse Retrievers and Rerankers using Adapters\n\nAbstract: Parameter-Efficient transfer learning with Adapters have been studied in Natural Language Processing (NLP) as an alternative to full fine-tuning. Adapters are memory-efficient and scale well with downstream tasks by training small bottle-neck layers added between transformer layers while keeping the large pretrained language model (PLMs) frozen. In spite of showing promising results in NLP, these methods are under-explored in Information Retrieval. While previous studies have only experimented with dense retriever or in a cross lingual retrieval scenario, in this paper we aim to complete the picture on the use of adapters in IR. First, we study adapters for SPLADE, a sparse retriever, for which adapters not only retain the efficiency and effectiveness otherwise achieved by finetuning, but are memory-efficient and orders of magnitude lighter to train. We observe that Adapters-SPLADE not only optimizes just 2\\% of training parameters, but outperforms fully fine-tuned counterpart and existing parameter-efficient dense IR models on IR benchmark datasets. Secondly, we address domain adaptation of neural retrieval thanks to adapters on cross-domain BEIR datasets and TripClick. Finally, we also consider knowledge sharing between rerankers and first stage rankers. Overall, our study complete the examination of adapters for neural IR"]], "corpus": "arxiv"}
{"tstamp": 1722363167.9217, "task_type": "retrieval", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1722363167.7503, "finish": 1722363167.9217, "ip": "", "conv_id": "2306e0a4a4b24733a382d537e1889bd3", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": "Training an adapter for a retrieval model", "output": [["Training an adapter for a retrieval model", "Title: On Discrete Painleve Equations Associated with the Lattice KdV Systems and the Painleve VI Equation\n\nAbstract: A new integrable nonautonomous nonlinear ordinary difference equation is presented which can be considered to be a discrete analogue of the Painleve V equation. Its derivation is based on the similarity reduction on the two-dimensional lattice of integrable partial difference equations of KdV type. The new equation which is referred to as GDP (generalised discrete Painleve equation) contains various ``discrete Painleve equations'' as subcases for special values/limits of the parameters, some of which were already given before in the literature. The general solution of the GDP can be expressed in terms of Painleve VI (PVI) transcendents. In fact, continuous PVI emerges as the equation obeyed by the solutions of the discrete equation in terms of the lattice parameters rather than the lattice variables that label the lattice sites. We show that the bilinear form of PVI is embedded naturally in the lattice systems leading to the GDP. Further results include the establishment of Baecklund and Schlesinger transformations for the GDP, the corresponding isomonodromic deformation problem, and the self-duality of its bilinear scheme."]], "corpus": "arxiv"}
+
{"tstamp": 1722363281.9726, "task_type": "retrieval", "type": "chat", "model": "sentence-transformers/all-MiniLM-L6-v2", "gen_params": {}, "start": 1722363281.4084, "finish": 1722363281.9726, "ip": "", "conv_id": "55efd4d6039e4ba194eb41c38a3c6ea7", "model_name": "sentence-transformers/all-MiniLM-L6-v2", "prompt": "prompt optimization using a reward model", "output": [["prompt optimization using a reward model", "Title: Invariants of moduli spaces of abelian surfaces\n\nAbstract: Compactifications of moduli spaces of (1,p)-polarized abelian surfaces with level structures of canonical type have been described in great detail by Hulek, Kahn and Weintraub. The aim of this paper is to determine some invariants of smooth models of these moduli spaces. In particular, a geometric description of their canonical divisors is given and their Chern numbers are computed."]], "corpus": "arxiv"}
+
{"tstamp": 1722363281.9726, "task_type": "retrieval", "type": "chat", "model": "voyage-multilingual-2", "gen_params": {}, "start": 1722363281.4084, "finish": 1722363281.9726, "ip": "", "conv_id": "720dd7663692404dbe4e7978fb23acb5", "model_name": "voyage-multilingual-2", "prompt": "prompt optimization using a reward model", "output": [["prompt optimization using a reward model", "Title: Reward Collapse in Aligning Large Language Models\n\nAbstract: The extraordinary capabilities of large language models (LLMs) such as ChatGPT and GPT-4 are in part unleashed by aligning them with reward models that are trained on human preferences, which are often represented as rankings of responses to prompts. In this paper, we document the phenomenon of \\textit{reward collapse}, an empirical observation where the prevailing ranking-based approach results in an \\textit{identical} reward distribution \\textit{regardless} of the prompts during the terminal phase of training. This outcome is undesirable as open-ended prompts like ``write a short story about your best friend'' should yield a continuous range of rewards for their completions, while specific prompts like ``what is the capital of New Zealand'' should generate either high or low rewards. Our theoretical investigation reveals that reward collapse is primarily due to the insufficiency of the ranking-based objective function to incorporate prompt-related information during optimization. This insight allows us to derive closed-form expressions for the reward distribution associated with a set of utility functions in an asymptotic regime. To overcome reward collapse, we introduce a prompt-aware optimization scheme that provably admits a prompt-dependent reward distribution within the interpolating regime. Our experimental results suggest that our proposed prompt-aware utility functions significantly alleviate reward collapse during the training of reward models."]], "corpus": "arxiv"}
+
{"tstamp": 1722363396.8587, "task_type": "retrieval", "type": "chat", "model": "BM25", "gen_params": {}, "start": 1722363396.5781, "finish": 1722363396.8587, "ip": "", "conv_id": "4cb5f6a122cb452f867ed2739011cd9b", "model_name": "BM25", "prompt": "distilling the reranking ability of large language models to cross encoders", "output": [["distilling the reranking ability of large language models to cross encoders", "Title: ReFIT: Relevance Feedback from a Reranker during Inference\n\nAbstract: Retrieve-and-rerank is a prevalent framework in neural information retrieval, wherein a bi-encoder network initially retrieves a pre-defined number of candidates (e.g., K=100), which are then reranked by a more powerful cross-encoder model. While the reranker often yields improved candidate scores compared to the retriever, its scope is confined to only the top K retrieved candidates. As a result, the reranker cannot improve retrieval performance in terms of Recall@K. In this work, we propose to leverage the reranker to improve recall by making it provide relevance feedback to the retriever at inference time. Specifically, given a test instance during inference, we distill the reranker's predictions for that instance into the retriever's query representation using a lightweight update mechanism. The aim of the distillation loss is to align the retriever's candidate scores more closely with those produced by the reranker. The algorithm then proceeds by executing a second retrieval step using the updated query vector. We empirically demonstrate that this method, applicable to various retrieve-and-rerank frameworks, substantially enhances retrieval recall across multiple domains, languages, and modalities."]], "corpus": "arxiv"}
+
{"tstamp": 1722363396.8587, "task_type": "retrieval", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722363396.5781, "finish": 1722363396.8587, "ip": "", "conv_id": "44d3ff6d268a4decb6abdb3d5eb97904", "model_name": "BAAI/bge-large-en-v1.5", "prompt": "distilling the reranking ability of large language models to cross encoders", "output": [["distilling the reranking ability of large language models to cross encoders", "Title: A Systematic Investigation of Distilling Large Language Models into Cross-Encoders for Passage Re-ranking\n\nAbstract: Cross-encoders distilled from large language models (LLMs) are often more effective re-rankers than cross-encoders fine-tuned on manually labeled data. However, the distilled models usually do not reach their teacher LLM's effectiveness. To investigate whether best practices for fine-tuning cross-encoders on manually labeled data (e.g., hard-negative sampling, deep sampling, and listwise loss functions) can help to improve LLM ranker distillation, we construct and release a new distillation dataset: Rank-DistiLLM. In our experiments, cross-encoders trained on Rank-DistiLLM reach the effectiveness of LLMs while being orders of magnitude more efficient. Our code and data is available at https://github.com/webis-de/msmarco-llm-distillation."]], "corpus": "arxiv"}
{"tstamp": 1722363468.2366, "task_type": "retrieval", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1722363467.8896, "finish": 1722363468.2366, "ip": "", "conv_id": "435649f6492e485c93e938c5af958974", "model_name": "Salesforce/SFR-Embedding-2_R", "prompt": "How can I use an LLM to train an embedding model?", "output": [["How can I use an LLM to train an embedding model?", "Title: Language Models are Universal Embedders\n\nAbstract: In the large language model (LLM) revolution, embedding is a key component of various systems. For example, it is used to retrieve knowledge or memories for LLMs, to build content moderation filters, etc. As such cases span from English to other natural or programming languages, from retrieval to classification and beyond, it is desirable to build a unified embedding model rather than dedicated ones for each scenario. In this work, we make an initial step towards this goal, demonstrating that multiple languages (both natural and programming) pre-trained transformer decoders can embed universally when finetuned on limited English data. We provide a comprehensive practice with thorough evaluations. On English MTEB, our models achieve competitive performance on different embedding tasks by minimal training data. On other benchmarks, such as multilingual classification and code search, our models (without any supervision) perform comparably to, or even surpass heavily supervised baselines and/or APIs. These results provide evidence of a promising path towards building powerful unified embedders that can be applied across tasks and languages."]], "corpus": "arxiv"}
{"tstamp": 1722363468.2366, "task_type": "retrieval", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1722363467.8896, "finish": 1722363468.2366, "ip": "", "conv_id": "8eee464cbf244db6b3e8f8a8cc8561d8", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": "How can I use an LLM to train an embedding model?", "output": [["How can I use an LLM to train an embedding model?", "Title: Embedding-Aligned Language Models\n\nAbstract: We propose a novel approach for training large language models (LLMs) to adhere to objectives defined within a latent embedding space. Our method leverages reinforcement learning (RL), treating a pre-trained LLM as an environment. Our embedding-aligned guided language (EAGLE) agent is trained to iteratively steer the LLM's generation towards optimal regions of the latent embedding space, w.r.t. some predefined criterion. We demonstrate the effectiveness of the EAGLE agent using the MovieLens 25M dataset to surface content gaps that satisfy latent user demand. We also demonstrate the benefit of using an optimal design of a state-dependent action set to improve EAGLE's efficiency. Our work paves the way for controlled and grounded text generation using LLMs, ensuring consistency with domain-specific knowledge and data representations."]], "corpus": "arxiv"}
data/sts_individual-8d38bb89-1e1a-471a-8b9e-35c1f784690e.jsonl
CHANGED
@@ -1,2 +1,4 @@
{"tstamp": 1722361062.1103, "task_type": "sts", "type": "chat", "model": "text-embedding-3-large", "gen_params": {}, "start": 1722361061.4332, "finish": 1722361062.1103, "ip": "", "conv_id": "4e588cb43f8f4cc09b4fb9dbb387ae72", "model_name": "text-embedding-3-large", "txt0": "Forecasting has been easier recently due to the updated process we have today.", "txt1": "The estimating challenges retailers go up against have been intensified as of late by item multiplication in practically every classification.", "txt2": "The forecasting challenges retailers confront have been amplified in recent years by product proliferation in almost every category.", "output": ""}
{"tstamp": 1722361062.1103, "task_type": "sts", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1722361061.4332, "finish": 1722361062.1103, "ip": "", "conv_id": "9a17451afc00432d8d948ed9b4423baa", "model_name": "Salesforce/SFR-Embedding-2_R", "txt0": "Forecasting has been easier recently due to the updated process we have today.", "txt1": "The estimating challenges retailers go up against have been intensified as of late by item multiplication in practically every classification.", "txt2": "The forecasting challenges retailers confront have been amplified in recent years by product proliferation in almost every category.", "output": ""}
{"tstamp": 1722363533.0121, "task_type": "sts", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1722363532.8625, "finish": 1722363533.0121, "ip": "", "conv_id": "a618ea1a2ed940ceb55925d1dac49f78", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "txt0": "She trained a neural network to recognize faces.", "txt1": "She developed an AI to identify human features.", "txt2": "She trained a new recruit to recognize faces.", "output": ""}
{"tstamp": 1722363533.0121, "task_type": "sts", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1722363532.8625, "finish": 1722363533.0121, "ip": "", "conv_id": "996ff2d1e9ac48b29b9590a71fbe6a53", "model_name": "intfloat/e5-mistral-7b-instruct", "txt0": "She trained a neural network to recognize faces.", "txt1": "She developed an AI to identify human features.", "txt2": "She trained a new recruit to recognize faces.", "output": ""}