applied-ai-018 committed
Commit 290f1c1 · verified · 1 Parent(s): ce9ed42

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00010-of-00532.arrow +3 -0
  2. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00040-of-00532.arrow +3 -0
  3. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00059-of-00532.arrow +3 -0
  4. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00070-of-00532.arrow +3 -0
  5. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00072-of-00532.arrow +3 -0
  6. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00074-of-00532.arrow +3 -0
  7. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00089-of-00532.arrow +3 -0
  8. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00100-of-00532.arrow +3 -0
  9. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00105-of-00532.arrow +3 -0
  10. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00126-of-00532.arrow +3 -0
  11. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00135-of-00532.arrow +3 -0
  12. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00136-of-00532.arrow +3 -0
  13. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00140-of-00532.arrow +3 -0
  14. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00200-of-00532.arrow +3 -0
  15. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00202-of-00532.arrow +3 -0
  16. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00204-of-00532.arrow +3 -0
  17. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00252-of-00532.arrow +3 -0
  18. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00256-of-00532.arrow +3 -0
  19. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00295-of-00532.arrow +3 -0
  20. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00320-of-00532.arrow +3 -0
  21. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00324-of-00532.arrow +3 -0
  22. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00326-of-00532.arrow +3 -0
  23. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00328-of-00532.arrow +3 -0
  24. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00344-of-00532.arrow +3 -0
  25. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00360-of-00532.arrow +3 -0
  26. bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00495-of-00532.arrow +3 -0
  27. bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles/concat.py +103 -0
  28. bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11-1b3-ml-evalharness-results_lm-eval_global_step340500_2022-07-13-11-29-13.json +172 -0
  29. bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-12-22-45-57.json +0 -0
  30. bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-14-10-03-25.json +2169 -0
  31. bigscience/evaluation/results/tr11/conversion/json_to_markdown.py +307 -0
  32. bigscience/evaluation/results/tr11/opt/bslmeval.json +0 -0
  33. bigscience/evaluation/results/tr11/opt/humaneval_temp02.json +1 -0
  34. bigscience/evaluation/results/tr11/opt/humaneval_temp06.json +1 -0
  35. bigscience/evaluation/results/tr11/opt/humaneval_temp08.json +1 -0
  36. bigscience/evaluation/results/tr11/scripts/download_bsevalharness.py +21 -0
  37. bigscience/evaluation/results/tr11/scripts/multi_eurlex_tmp.slurm +63 -0
  38. bigscience/evaluation/results/tr11/scripts/report-to-csv.py +58 -0
  39. bigscience/evaluation/results/tr11/scripts/run_bsevalharness_generation_176b.slurm +128 -0
  40. bigscience/evaluation/results/tr11/scripts/run_bsevalharness_generation_350m.slurm +110 -0
  41. bigscience/evaluation/results/tr11/scripts/run_bsevalharness_generation_6b3.slurm +101 -0
  42. bigscience/evaluation/results/tr11/scripts/run_bsevalharness_generation_760m.slurm +110 -0
  43. bigscience/evaluation/results/tr11/scripts/run_bsevalharness_tr11-176b-ml.slurm +122 -0
  44. bigscience/evaluation/results/tr11/scripts/run_bsevalharness_tr11b-1b3-ml.slurm +122 -0
  45. bigscience/evaluation/results/tr11/scripts/run_bsevalharness_tr11d-750m-ml.slurm +120 -0
  46. bigscience/evaluation/results/tr11/scripts/run_bsevalharness_tr11f-6b3-ml.slurm +121 -0
  47. bigscience/evaluation/results/tr11/scripts/run_evalharness_deepspeed.md +158 -0
  48. bigscience/evaluation/results/tr11/scripts/run_evalharness_deepspeed.slurm +98 -0
  49. bigscience/evaluation/results/tr11/scripts/run_evalharness_tr11b-1b3-ml.slurm +120 -0
  50. bigscience/evaluation/results/tr11/scripts/run_evalharness_tr11c-2b5-ml.slurm +120 -0
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00010-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4807894ff54ad64db5a649d7e90cb4e3e9f8a26bfb685b7f432ea1eb4394fc8b
+ size 502704664
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00040-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d50de72f983c0cd96d1d7666f0f9954c469d528b36c125cf814cdda194dac659
+ size 501774384
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00059-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aac08a2949eb916e497ce22f237fb4369a0c8fd1556bceae0b2fa0b1f6b5d7bd
+ size 501785176
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00070-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6020c4344c228e8755cae6ec4b1500137cc4783dcf962746166e971ca5b409f8
+ size 503043536
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00072-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6e3dc92ae3b90d778e052af7ad3e04e3a7ba64590cce7cd9283833e3528e303
+ size 504777888
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00074-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d25f7ef19eb2f2b03c30a64093aec0051903936d654e2b84e57ae5f3229796c
+ size 503353848
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00089-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10685c132e62df801092ae15c334f2d41e6b6c4dc3a57c654aa21de0fc6d385d
+ size 504698888
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00100-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cdf89b59c0c71e2a41f148011c746392a17b0bda4cee09cc32bf2f59d6abcec
+ size 504413528
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00105-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac97ee67f09f4ce7d2c1d749b876d0cf12163d1e2b6259022f6992c3862c1804
+ size 503532080
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00126-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06bdfb9f767e4686fbe816c6f17f7f7b30255420ea5602e5e28a5b6ccd2b3a33
+ size 501036656
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00135-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72409ff2a64d68480801880f7e07f96be0fcdc6d284cc6a1cbcf603500c12ac5
+ size 500714592
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00136-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3b6689190dec993e818c370580e7693afcdd8ce0ec519327e1950c3e8d20657
+ size 508477840
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00140-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e13af03d22307d0acc99bbe230f09229d5a05cd02dcf4a22d85d2f6fdc347c9d
+ size 505473128
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00200-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9129643c9a3b941f028355a008ed2930709edddb8ec96f9ffa536bd051a701d
+ size 506929072
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00202-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44962dbf63ef79c3b59afd465b7c574e777582fa1e5a4762b72ce5e142123e1e
+ size 503715992
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00204-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5360abb8c1653afae61d587e9219f360d1cb82fda2c7cd40158de1532a72a78e
+ size 500887496
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00252-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51ae7970b358b5731a8f124797e981f02fd1be35c0dc76299ce936886e5da17d
+ size 501961432
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00256-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5547b8a852311739cfb48ae53381c2e87f2e9673eea842e35919b9169337a301
+ size 504684008
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00295-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07d7823b5bd634d72fc0814a4cd2cacdf8e12da3d3ef75e9acf8b82ee985ebbd
+ size 505840960
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00320-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efeea6d8a6dca9c3ca851360cc61624a02c616ce3e53777280d807946ef1a107
+ size 504561128
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00324-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1db0a503e57ccd733520c3fe775c261f06c241e68c3442a16f7d9a535fa7595e
+ size 506267152
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00326-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bb76369cc5054c121fe828e02299ee8dfa8c38758b98505468c4d9c5d6248da
+ size 501325880
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00328-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd4ea31c7e8c9c4ec734346406d6155af92b2b1cceab5c0579e5470b0b852aea
+ size 506335368
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00344-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db08a0f23e31ade1361180c9ec554700b6118ec4628c909f8b7812d06973c740
+ size 502926768
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00360-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a1a4413cb358c425279de1492be218dd8c9584b4c70d2365a515ef0b51d2647
+ size 502754056
bigscience/data/oscar/cache/oscar/unshuffled_deduplicated_zh/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2/oscar-train-00495-of-00532.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9ebc298a4c86d42ec40d85b5a5ede8739dbd2569d1dfe8cc8a5b48893780733
+ size 506053128
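Each .arrow addition above is not the Arrow data itself but a Git LFS pointer: a three-line text stub recording the pointer spec version, the SHA-256 digest of the real payload, and its size in bytes (roughly 500 MB per shard here). A minimal sketch of reading such a pointer, assuming a local copy of one of these stub files (the path is illustrative):

import pathlib

def parse_lfs_pointer(path: pathlib.Path) -> dict:
    # Each pointer line is "key value"; split on the first space.
    fields = {}
    for line in path.read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Hypothetical local path; every .arrow stub in this commit has the same layout.
ptr = parse_lfs_pointer(pathlib.Path("oscar-train-00010-of-00532.arrow"))
print(ptr["oid"])   # e.g. "sha256:4807894f..."
print(ptr["size"])  # e.g. "502704664"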
bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles/concat.py ADDED
@@ -0,0 +1,103 @@
+ import argparse
+ import json
+ import re
+ from pathlib import Path
+ from re import Pattern
+ from typing import List, Dict
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--results-dir", required=True, type=Path, help="Path to the list of results")
+     parser.add_argument("--concatenate-output-file", required=True, type=Path, help="Path to store the final output file")
+     return parser.parse_args()
+
+ MODEL = "tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500"
+ # MODEL = "global_step95000"
+ # Note: inside an f-string, regex quantifier braces must be doubled;
+ # a single-brace {4} would silently produce "\d4" instead of "\d{4}".
+ RESULTS_REGEX = re.compile(rf"(eai|bs)_results_lm-eval_{MODEL}_(\d{{4}}-\d{{2}}-\d{{2}}-\d{{2}}-\d{{2}}-\d{{2}})_backup\.json")
+ RESULTS_REGEX = re.compile(rf"{MODEL}_*.json")
+ # tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-14-10-03-25.json
+ def get_all_files_that_match_results_in_folder(root_folder: Path) -> List[Path]:
+     json_files = []
+     for folder in root_folder.iterdir():
+         if folder.is_dir():
+             json_files += get_all_files_that_match_results_in_folder(folder)
+         else:
+             # it's actually a file
+             file = folder
+
+             # match = RESULTS_REGEX.match(file.name)
+
+             if not str(file.name).endswith("json"):
+                 continue
+             else:
+                 json_files.append(file)
+     return json_files
+
+ def sort_dict(dictionary: Dict) -> Dict:
+     results = {}
+
+     for key, value in sorted(dictionary.items()):
+         new_value = value
+
+         if isinstance(value, dict):
+             new_value = sort_dict(new_value)
+         elif isinstance(value, list):
+             new_value = sorted(value)
+
+         results[key] = new_value
+
+     return results
+
+ def main():
+     args = get_args()
+
+     # Get all json files
+     json_files = get_all_files_that_match_results_in_folder(args.results_dir)
+     print("GOT", json_files)
+     # Merge all json files
+     final_result = {
+         "results": {},
+         "versions": {}
+     }
+     for file in json_files:
+         with open(file, "r") as fi:
+             task_result = json.load(fi)
+
+         # match = RESULTS_REGEX.match(file.name)
+         # assert match is not None
+         prefix = "bs" if "bs" in file.name else "eai"  # match.group(1)
+         datetime_string = file.name[file.name.index("global_step340500_") + len("global_step340500_"):].replace(".json", "")  # match.group(2)
+
+         if prefix == "eai":
+             results_key = "results"
+         elif prefix == "bs":
+             results_key = "table_results"
+         else:
+             raise ValueError(f"Unsupported key: {prefix}")
+
+         for key, value in task_result[results_key].items():
+             if key not in final_result["results"]:
+                 final_result["results"][key] = {
+                     datetime_string: value
+                 }
+             # else:
+             #     assert datetime_string not in final_result["results"][key]
+             #     final_result["results"][key][datetime_string] = value
+
+         for key, value in task_result["versions"].items():
+             final_result["versions"][key] = value
+
+     # We sort the dict; better for serialization
+     print(final_result)
+     final_result = sort_dict(final_result)
+
+     # Save result
+     with open(args.concatenate_output_file, "w") as fo:
+         json.dump(final_result, fo, indent=2)
+
+
+ if __name__ == "__main__":
+     main()
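As a usage note for concat.py: it recursively collects every *.json under --results-dir, keys each task's metrics by the timestamp embedded in the file name, and writes the sorted merge to --concatenate-output-file. An illustrative invocation (the paths are assumptions, not prescribed by the script):

python concat.py \
    --results-dir bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles \
    --concatenate-output-file bslmeval-concatenated.json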
bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11-1b3-ml-evalharness-results_lm-eval_global_step340500_2022-07-13-11-29-13.json ADDED
@@ -0,0 +1,172 @@
+ {
+   "results": {
+     "arc_challenge": {
+       "acc": 0.23464163822525597,
+       "acc_stderr": 0.012383873560768673,
+       "acc_norm": 0.26791808873720135,
+       "acc_norm_stderr": 0.012942030195136423
+     },
+     "arc_easy": {
+       "acc": 0.5631313131313131,
+       "acc_stderr": 0.010177672928157678,
+       "acc_norm": 0.4810606060606061,
+       "acc_norm_stderr": 0.010252420496894487
+     },
+     "boolq": {
+       "acc": 0.617737003058104,
+       "acc_stderr": 0.008499149690449272
+     },
+     "copa": {
+       "acc": 0.7,
+       "acc_stderr": 0.046056618647183814
+     },
+     "headqa": {
+       "acc": 0.25419401896425964,
+       "acc_stderr": 0.008316509290190668,
+       "acc_norm": 0.29576951130561635,
+       "acc_norm_stderr": 0.008717251898361422
+     },
+     "hellaswag": {
+       "acc": 0.37621987651862177,
+       "acc_stderr": 0.004834461997944872,
+       "acc_norm": 0.46564429396534557,
+       "acc_norm_stderr": 0.004977988452502641
+     },
+     "lambada": {
+       "ppl": 12.583447597222621,
+       "ppl_stderr": 0.4021518609838198,
+       "acc": 0.46322530564719583,
+       "acc_stderr": 0.006947110835634445
+     },
+     "logiqa": {
+       "acc": 0.21658986175115208,
+       "acc_stderr": 0.016156860583178303,
+       "acc_norm": 0.28110599078341014,
+       "acc_norm_stderr": 0.017632374626460005
+     },
+     "mathqa": {
+       "acc": 0.2489112227805695,
+       "acc_stderr": 0.007915319798861361,
+       "acc_norm": 0.2422110552763819,
+       "acc_norm_stderr": 0.007842810183504986
+     },
+     "mc_taco": {
+       "em": 0.12537537537537538,
+       "f1": 0.4747075325110886
+     },
+     "mrpc": {
+       "acc": 0.6813725490196079,
+       "acc_stderr": 0.023095996571841474,
+       "f1": 0.8104956268221574,
+       "f1_stderr": 0.016329211455484924
+     },
+     "multirc": {
+       "acc": 0.011542497376705142,
+       "acc_stderr": 0.003461867320927179
+     },
+     "openbookqa": {
+       "acc": 0.214,
+       "acc_stderr": 0.01835979750238702,
+       "acc_norm": 0.298,
+       "acc_norm_stderr": 0.020475118092988978
+     },
+     "piqa": {
+       "acc": 0.6871599564744287,
+       "acc_stderr": 0.010817714425701112,
+       "acc_norm": 0.7002176278563657,
+       "acc_norm_stderr": 0.010689686967138092
+     },
+     "prost": {
+       "acc": 0.23505550811272416,
+       "acc_stderr": 0.0030979423271461875,
+       "acc_norm": 0.2670260461144321,
+       "acc_norm_stderr": 0.0032321702981822874
+     },
+     "pubmedqa": {
+       "acc": 0.56,
+       "acc_stderr": 0.015704987954361798
+     },
+     "qnli": {
+       "acc": 0.4962474830679114,
+       "acc_stderr": 0.006765220016415222
+     },
+     "qqp": {
+       "acc": 0.3681424684640119,
+       "acc_stderr": 0.0023986729832071816,
+       "f1": 0.5381138352498734,
+       "f1_stderr": 0.002555831569895799
+     },
+     "race": {
+       "acc": 0.3320574162679426,
+       "acc_stderr": 0.014575582129545914
+     },
+     "rte": {
+       "acc": 0.5342960288808665,
+       "acc_stderr": 0.030025579819366426
+     },
+     "sciq": {
+       "acc": 0.853,
+       "acc_stderr": 0.011203415395160335,
+       "acc_norm": 0.771,
+       "acc_norm_stderr": 0.013294199326613609
+     },
+     "sst": {
+       "acc": 0.6823394495412844,
+       "acc_stderr": 0.015775124845202545
+     },
+     "triviaqa": {
+       "acc": 0.0313798285158667,
+       "acc_stderr": 0.0016392014864795154
+     },
+     "webqs": {
+       "acc": 0.012795275590551181,
+       "acc_stderr": 0.0024938680596856277
+     },
+     "wic": {
+       "acc": 0.5,
+       "acc_stderr": 0.01981072129375818
+     },
+     "winogrande": {
+       "acc": 0.5730071033938438,
+       "acc_stderr": 0.013901878072575058
+     },
+     "wnli": {
+       "acc": 0.43661971830985913,
+       "acc_stderr": 0.0592793555841297
+     },
+     "wsc": {
+       "acc": 0.36538461538461536,
+       "acc_stderr": 0.0474473339327792
+     }
+   },
+   "versions": {
+     "arc_challenge": 0,
+     "arc_easy": 0,
+     "boolq": 1,
+     "copa": 0,
+     "headqa": 0,
+     "hellaswag": 0,
+     "lambada": 0,
+     "logiqa": 0,
+     "mathqa": 0,
+     "mc_taco": 0,
+     "mrpc": 0,
+     "multirc": 1,
+     "openbookqa": 0,
+     "piqa": 0,
+     "prost": 0,
+     "pubmedqa": 0,
+     "qnli": 0,
+     "qqp": 0,
+     "race": 1,
+     "rte": 0,
+     "sciq": 0,
+     "sst": 0,
+     "triviaqa": 0,
+     "webqs": 0,
+     "wic": 0,
+     "winogrande": 0,
+     "wnli": 1,
+     "wsc": 0
+   }
+ }
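The file above follows the flat EleutherAI lm-eval layout: "results" maps each task to its metric values and "versions" records the task version evaluated. A minimal Python sketch for tabulating such a file (file name as committed above):

import json

# Load an eval-harness results file and print one row per task/metric pair.
with open("tr11-1b3-ml-evalharness-results_lm-eval_global_step340500_2022-07-13-11-29-13.json") as f:
    data = json.load(f)

for task, metrics in sorted(data["results"].items()):
    version = data["versions"].get(task)
    for metric, value in sorted(metrics.items()):
        if metric.endswith("_stderr"):
            continue  # stderr entries accompany their metric; skip them as rows
        print(f"{task} (v{version}) {metric}: {value}")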
bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-12-22-45-57.json ADDED
The diff for this file is too large to render. See raw diff
 
bigscience/evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-14-10-03-25.json ADDED
@@ -0,0 +1,2169 @@
+ {
+   "results": [
+     {
+       "task_name": "wic",
+       "prompt_name": "GPT-3-prompt",
+       "acc": 0.5,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "c3a0a5d8-cfe9-4a7f-8a3c-3c526e0ad0c6",
+       "prompt_jinja": "{{sentence1}}\n{{sentence2}}\nQuestion: Is the word '{{word}}' used in the same sense in the two sentences above?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.01981072129375818
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "GPT-3-prompt",
+       "acc_norm": 0.5,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "c3a0a5d8-cfe9-4a7f-8a3c-3c526e0ad0c6",
+       "prompt_jinja": "{{sentence1}}\n{{sentence2}}\nQuestion: Is the word '{{word}}' used in the same sense in the two sentences above?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.01981072129375818
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "GPT-3-prompt-with-label",
+       "acc": 0.49216300940438873,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "d9e1db2a-ab0b-4621-bb41-01d5788d3873",
+       "prompt_jinja": "{{sentence1}}\n{{sentence2}}\nQuestion: Is the word '{{word}}' used in the same sense in the two sentences above? Yes, No?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.019808287657813832
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "GPT-3-prompt-with-label",
+       "acc_norm": 0.5,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "d9e1db2a-ab0b-4621-bb41-01d5788d3873",
+       "prompt_jinja": "{{sentence1}}\n{{sentence2}}\nQuestion: Is the word '{{word}}' used in the same sense in the two sentences above? Yes, No?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.01981072129375818
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "affirmation_true_or_false",
+       "acc": 0.5,
+       "fixed_answer_choice_list": [
+         "False",
+         "True"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "725b5ed0-7728-4890-95a4-a74cb7ae1bb4",
+       "prompt_jinja": "Sentence A: {{sentence1}}\nSentence B: {{sentence2}}\n\n\"{{word}}\" has a similar meaning in sentences A and B. True or False?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.01981072129375818
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "affirmation_true_or_false",
+       "acc_norm": 0.5078369905956113,
+       "fixed_answer_choice_list": [
+         "False",
+         "True"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "725b5ed0-7728-4890-95a4-a74cb7ae1bb4",
+       "prompt_jinja": "Sentence A: {{sentence1}}\nSentence B: {{sentence2}}\n\n\"{{word}}\" has a similar meaning in sentences A and B. True or False?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.019808287657813832
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "grammar_homework",
+       "acc": 0.5094043887147336,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "611d13dc-d414-4b9b-9204-e4f325e859e7",
+       "prompt_jinja": "Homework\n\nDecide whether the word \"{{word}}\" is used with the same meaning in the two following sentences. Answer by yes or no.\n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.019807216763271497
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "grammar_homework",
+       "acc_norm": 0.49843260188087773,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "611d13dc-d414-4b9b-9204-e4f325e859e7",
+       "prompt_jinja": "Homework\n\nDecide whether the word \"{{word}}\" is used with the same meaning in the two following sentences. Answer by yes or no.\n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.019810623954060382
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "polysemous",
+       "acc": 0.512539184952978,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "dd2080cf-3117-49ba-9aff-c988a21fdb69",
+       "prompt_jinja": "The word \"{{word}}\" has multiple meanings. Does it have the same meaning in sentences 1 and 2? Yes or no?\n\nSentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.019804490588592596
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "polysemous",
+       "acc_norm": 0.49843260188087773,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "dd2080cf-3117-49ba-9aff-c988a21fdb69",
+       "prompt_jinja": "The word \"{{word}}\" has multiple meanings. Does it have the same meaning in sentences 1 and 2? Yes or no?\n\nSentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.019810623954060382
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "question-context",
+       "acc": 0.5266457680250783,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "cfbc1637-10b8-4f20-a31c-55292f3cebd0",
+       "prompt_jinja": "Determine if the word '{{word}}' is used in the same way in the two sentences below. \n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.019782570188812167
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "question-context",
+       "acc_norm": 0.5031347962382445,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "cfbc1637-10b8-4f20-a31c-55292f3cebd0",
+       "prompt_jinja": "Determine if the word '{{word}}' is used in the same way in the two sentences below. \n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.019810331932097542
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "question-context-meaning",
+       "acc": 0.5438871473354232,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "3503ead5-4fa5-4f77-95dc-f0c2ed3eecdc",
+       "prompt_jinja": "Does the word \"{{word}}\" have the same meaning in these two sentences?\n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.019734259601993404
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "question-context-meaning",
+       "acc_norm": 0.5015673981191222,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "3503ead5-4fa5-4f77-95dc-f0c2ed3eecdc",
+       "prompt_jinja": "Does the word \"{{word}}\" have the same meaning in these two sentences?\n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.019810623954060382
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "question-context-meaning-with-label",
+       "acc": 0.5156739811912225,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "14e73f39-a0d1-44c2-b9a4-4e48f9f1608e",
+       "prompt_jinja": "Does the word \"{{word}}\" have the same meaning in these two sentences? Yes, No?\n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.019800984955347847
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "question-context-meaning-with-label",
+       "acc_norm": 0.5015673981191222,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "14e73f39-a0d1-44c2-b9a4-4e48f9f1608e",
+       "prompt_jinja": "Does the word \"{{word}}\" have the same meaning in these two sentences? Yes, No?\n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.019810623954060382
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "same_sense",
+       "acc": 0.5047021943573667,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "ce8b5a93-1841-4897-84db-b100f1c84f4b",
+       "prompt_jinja": "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\n\nDetermine whether the word \"{{word}}\" is used in the same sense in both sentences. Yes or no?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.019809845219259763
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "same_sense",
+       "acc_norm": 0.5,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "ce8b5a93-1841-4897-84db-b100f1c84f4b",
+       "prompt_jinja": "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\n\nDetermine whether the word \"{{word}}\" is used in the same sense in both sentences. Yes or no?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.01981072129375818
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "similar-sense",
+       "acc": 0.542319749216301,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "f934a96d-fe4d-4075-aa47-5595b9a604c7",
+       "prompt_jinja": "{{sentence1}}\n{{sentence2}}\nSimilar sense of {{word}}?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.01973963328373276
+     },
+     {
+       "task_name": "wic",
+       "prompt_name": "similar-sense",
+       "acc_norm": 0.5,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wic",
+       "subset": null,
+       "prompt_id": "f934a96d-fe4d-4075-aa47-5595b9a604c7",
+       "prompt_jinja": "{{sentence1}}\n{{sentence2}}\nSimilar sense of {{word}}?\n||| {% if label != -1%}\n{{answer_choices[label]}}\n{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.01981072129375818
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "GPT-3 Style",
+       "acc": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "7d377293-d043-4b6c-8ec1-d61eaf14ec67",
+       "prompt_jinja": "Passage: {{ text }} \n\nQuestion: In the passage above, does the pronoun \"{{ span2_text }}\" refer to {{ span1_text }}?\n\nAnswer: ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "GPT-3 Style",
+       "acc_norm": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "7d377293-d043-4b6c-8ec1-d61eaf14ec67",
+       "prompt_jinja": "Passage: {{ text }} \n\nQuestion: In the passage above, does the pronoun \"{{ span2_text }}\" refer to {{ span1_text }}?\n\nAnswer: ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "I think they mean",
+       "acc": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "4b3e29cc-ccb8-4e4c-a845-4935ca29cf34",
+       "prompt_jinja": "{{ text }} I think they mean \"{{ text.split(\" \")[span2_index:] | join(\" \") | replace(span2_text, span1_text) }}\" Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "I think they mean",
+       "acc_norm": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "4b3e29cc-ccb8-4e4c-a845-4935ca29cf34",
+       "prompt_jinja": "{{ text }} I think they mean \"{{ text.split(\" \")[span2_index:] | join(\" \") | replace(span2_text, span1_text) }}\" Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "Who or what is/are",
+       "acc": 0.40384615384615385,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "d88f3e21-42dc-49a5-924d-69b764a14816",
+       "prompt_jinja": "{{ text }} \n{% if span2_text.lower() == \"they\" or span2_text.lower() == \"them\" %}\nQuestion: Who or what are \"{{ span2_text.lower() }}\"? {{ span1_text }}?\n{% else %}\nQuestion: Who or what is \"{{ span2_text.lower() }}\"? Is it {{ span1_text }}?\n{% endif %}\nAnswer: ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.048346889526540184
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "Who or what is/are",
+       "acc_norm": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "d88f3e21-42dc-49a5-924d-69b764a14816",
+       "prompt_jinja": "{{ text }} \n{% if span2_text.lower() == \"they\" or span2_text.lower() == \"them\" %}\nQuestion: Who or what are \"{{ span2_text.lower() }}\"? {{ span1_text }}?\n{% else %}\nQuestion: Who or what is \"{{ span2_text.lower() }}\"? Is it {{ span1_text }}?\n{% endif %}\nAnswer: ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "by p they mean",
+       "acc": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "23361c5d-b67f-4c2a-9da7-16301c55d0e1",
+       "prompt_jinja": "{{ text }} Here, by \"{{ span2_text }}\" they mean \"{{ span1_text }}\". Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "by p they mean",
+       "acc_norm": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "23361c5d-b67f-4c2a-9da7-16301c55d0e1",
+       "prompt_jinja": "{{ text }} Here, by \"{{ span2_text }}\" they mean \"{{ span1_text }}\". Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "does p stand for",
+       "acc": 0.375,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "7482d24f-cf45-4013-b82d-369489fc958b",
+       "prompt_jinja": "{{ text }} Here, does \"{{ span2_text.lower() }}\" stand for {{ span1_text }}? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.04770204856076104
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "does p stand for",
+       "acc_norm": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "7482d24f-cf45-4013-b82d-369489fc958b",
+       "prompt_jinja": "{{ text }} Here, does \"{{ span2_text.lower() }}\" stand for {{ span1_text }}? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "does the pronoun refer to",
+       "acc": 0.5480769230769231,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "212fb8b1-8436-4f64-8f37-a9094fe029f4",
+       "prompt_jinja": "{{ text }} In the previous sentence, does the pronoun \"{{ span2_text.lower() }}\" refer to {{ span1_text }}? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.049038186969314335
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "does the pronoun refer to",
+       "acc_norm": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "212fb8b1-8436-4f64-8f37-a9094fe029f4",
+       "prompt_jinja": "{{ text }} In the previous sentence, does the pronoun \"{{ span2_text.lower() }}\" refer to {{ span1_text }}? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "in other words",
+       "acc": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "False",
+         "True"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "2f17f18b-6daa-44ef-a2dd-dddaf04aec0e",
+       "prompt_jinja": "{{ text }} \n\nIn other words, {{ text.split(\" \")[span2_index:] | join(\" \") | replace(span2_text, span1_text) }} True or false? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "in other words",
+       "acc_norm": 0.5288461538461539,
+       "fixed_answer_choice_list": [
+         "False",
+         "True"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "2f17f18b-6daa-44ef-a2dd-dddaf04aec0e",
+       "prompt_jinja": "{{ text }} \n\nIn other words, {{ text.split(\" \")[span2_index:] | join(\" \") | replace(span2_text, span1_text) }} True or false? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.04918440626354964
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "p is/are r",
+       "acc": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "False",
+         "True"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "87f97aa0-1fa9-4f0b-b8e6-89d3c1f19bd6",
+       "prompt_jinja": "Context: {{ text }} \n\n{% if span2_text.lower() == \"they\" or span2_text.lower() == \"them\" %}\nQuestion: \"{{ span2_text }}\" are {{ span1_text }}. True or false?\n{% else %}\nQuestion: \"{{ span2_text }}\" is {{ span1_text }}. True or false?\n{% endif %}\n\nAnswer: ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "p is/are r",
+       "acc_norm": 0.34615384615384615,
+       "fixed_answer_choice_list": [
+         "False",
+         "True"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "87f97aa0-1fa9-4f0b-b8e6-89d3c1f19bd6",
+       "prompt_jinja": "Context: {{ text }} \n\n{% if span2_text.lower() == \"they\" or span2_text.lower() == \"them\" %}\nQuestion: \"{{ span2_text }}\" are {{ span1_text }}. True or false?\n{% else %}\nQuestion: \"{{ span2_text }}\" is {{ span1_text }}. True or false?\n{% endif %}\n\nAnswer: ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.04687634642174987
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "replaced with",
+       "acc": 0.6153846153846154,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "809eacd0-2f6c-4e3a-b52a-57c783879d36",
+       "prompt_jinja": "{{ text }} In the previous sentence, can the pronoun \"{{ span2_text }}\" be replaced with \"{{ span1_text }}\"? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.047936688680750406
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "replaced with",
+       "acc_norm": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "No",
+         "Yes"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "809eacd0-2f6c-4e3a-b52a-57c783879d36",
+       "prompt_jinja": "{{ text }} In the previous sentence, can the pronoun \"{{ span2_text }}\" be replaced with \"{{ span1_text }}\"? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "the pronoun refers to",
+       "acc": 0.36538461538461536,
+       "fixed_answer_choice_list": [
+         "False",
+         "True"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "aae24b54-c3a7-4f69-8b77-f6dc115988f8",
+       "prompt_jinja": "{{ text }} \nIn the passage above, the pronoun \"{{ span2_text }}\" refers to {{ span1_text }}. True or false? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.0474473339327792
+     },
+     {
+       "task_name": "wsc",
+       "prompt_name": "the pronoun refers to",
+       "acc_norm": 0.5865384615384616,
+       "fixed_answer_choice_list": [
+         "False",
+         "True"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "wsc.fixed",
+       "subset": null,
+       "prompt_id": "aae24b54-c3a7-4f69-8b77-f6dc115988f8",
+       "prompt_jinja": "{{ text }} \nIn the passage above, the pronoun \"{{ span2_text }}\" refers to {{ span1_text }}. True or false? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.04852294969729053
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "confident",
+       "acc": 0.43661971830985913,
+       "fixed_answer_choice_list": [
+         "not confident",
+         "very confident"
+       ],
+       "dataset_path": "glue",
+       "dataset_name": "wnli",
+       "subset": null,
+       "prompt_id": "10c354ee-6f4e-4b04-91e1-29e999a8f3e7",
+       "prompt_jinja": "If it's true that\n{{sentence1}}\nhow {{\"confident\"}} should I be that\n{{sentence2}}\n{{\"very confident or not confident?\"}}\n|||\n{{answer_choices[label]}}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.0592793555841297
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "confident",
+       "acc_norm": 0.43661971830985913,
+       "fixed_answer_choice_list": [
+         "not confident",
+         "very confident"
+       ],
+       "dataset_path": "glue",
+       "dataset_name": "wnli",
+       "subset": null,
+       "prompt_id": "10c354ee-6f4e-4b04-91e1-29e999a8f3e7",
+       "prompt_jinja": "If it's true that\n{{sentence1}}\nhow {{\"confident\"}} should I be that\n{{sentence2}}\n{{\"very confident or not confident?\"}}\n|||\n{{answer_choices[label]}}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0592793555841297
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "entailment explained",
+       "acc": 0.39436619718309857,
+       "fixed_answer_choice_list": [
+         "no",
+         "yes"
+       ],
+       "dataset_path": "glue",
+       "dataset_name": "wnli",
+       "subset": null,
+       "prompt_id": "3a0e46cb-0b96-4972-83f6-29a6c6a09ba9",
+       "prompt_jinja": "{{\"Entailment\"}} means that the second sentence follows from the first sentence. Are the following two sentences an example of entailment?\n{{sentence1}}\n{{sentence2}}\n|||\n{{answer_choices[label]}}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.058412510854444266
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "entailment explained",
+       "acc_norm": 0.43661971830985913,
+       "fixed_answer_choice_list": [
+         "no",
+         "yes"
+       ],
+       "dataset_path": "glue",
+       "dataset_name": "wnli",
+       "subset": null,
+       "prompt_id": "3a0e46cb-0b96-4972-83f6-29a6c6a09ba9",
+       "prompt_jinja": "{{\"Entailment\"}} means that the second sentence follows from the first sentence. Are the following two sentences an example of entailment?\n{{sentence1}}\n{{sentence2}}\n|||\n{{answer_choices[label]}}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0592793555841297
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "imply",
+       "acc": 0.4225352112676056,
+       "fixed_answer_choice_list": [
+         "no",
+         "yes"
+       ],
+       "dataset_path": "glue",
+       "dataset_name": "wnli",
+       "subset": null,
+       "prompt_id": "a2ce492b-dfd0-4f04-bc44-70c7867ba231",
+       "prompt_jinja": "{{sentence1}}\n{{sentence2}}\nDoes the first sentence imply the second sentence?\n|||\n{{answer_choices[label]}}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.05903984205682581
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "imply",
+       "acc_norm": 0.43661971830985913,
+       "fixed_answer_choice_list": [
+         "no",
+         "yes"
+       ],
+       "dataset_path": "glue",
+       "dataset_name": "wnli",
+       "subset": null,
+       "prompt_id": "a2ce492b-dfd0-4f04-bc44-70c7867ba231",
+       "prompt_jinja": "{{sentence1}}\n{{sentence2}}\nDoes the first sentence imply the second sentence?\n|||\n{{answer_choices[label]}}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0592793555841297
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "justified",
+       "acc": 0.43661971830985913,
+       "fixed_answer_choice_list": [
+         "no",
+         "yes"
+       ],
+       "dataset_path": "glue",
+       "dataset_name": "wnli",
+       "subset": null,
+       "prompt_id": "a244158a-a248-4e34-bef7-66e269dd0815",
+       "prompt_jinja": "Someone told me \"{{sentence1}}\" Now, I think that \"{{sentence2}}\" Am I justified in thinking this?\n|||\n{{answer_choices[label]}}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_stderr": 0.0592793555841297
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "justified",
+       "acc_norm": 0.43661971830985913,
+       "fixed_answer_choice_list": [
+         "no",
+         "yes"
+       ],
+       "dataset_path": "glue",
+       "dataset_name": "wnli",
+       "subset": null,
+       "prompt_id": "a244158a-a248-4e34-bef7-66e269dd0815",
+       "prompt_jinja": "Someone told me \"{{sentence1}}\" Now, I think that \"{{sentence2}}\" Am I justified in thinking this?\n|||\n{{answer_choices[label]}}",
+       "prompt_original_task": true,
+       "comment": "",
+       "acc_norm_stderr": 0.0592793555841297
+     },
+     {
+       "task_name": "wnli",
+       "prompt_name": "mean",
+       "acc": 0.6619718309859155,
+       "fixed_answer_choice_list": [
+         "no",
+         "yes"
+       ],
827
+ "dataset_path": "glue",
828
+ "dataset_name": "wnli",
829
+ "subset": null,
830
+ "prompt_id": "75f89b05-5a81-401b-8a04-8239211a9a95",
831
+ "prompt_jinja": "Assume that the following is true:\n{{sentence1}}\nDoes this mean that \"{{sentence2}}\"?\n|||\n{{answer_choices[label]}}",
832
+ "prompt_original_task": true,
833
+ "comment": "",
834
+ "acc_stderr": 0.05653887739133513
835
+ },
836
+ {
837
+ "task_name": "wnli",
838
+ "prompt_name": "mean",
839
+ "acc_norm": 0.43661971830985913,
840
+ "fixed_answer_choice_list": [
841
+ "no",
842
+ "yes"
843
+ ],
844
+ "dataset_path": "glue",
845
+ "dataset_name": "wnli",
846
+ "subset": null,
847
+ "prompt_id": "75f89b05-5a81-401b-8a04-8239211a9a95",
848
+ "prompt_jinja": "Assume that the following is true:\n{{sentence1}}\nDoes this mean that \"{{sentence2}}\"?\n|||\n{{answer_choices[label]}}",
849
+ "prompt_original_task": true,
850
+ "comment": "",
851
+ "acc_norm_stderr": 0.0592793555841297
852
+ },
853
+ {
854
+ "task_name": "gsarti/flores_101_afr",
855
+ "prompt_name": null,
856
+ "word_perplexity": 139324.0466654445
857
+ },
858
+ {
859
+ "task_name": "gsarti/flores_101_afr",
860
+ "prompt_name": null,
861
+ "byte_perplexity": 7.049422805555328
862
+ },
863
+ {
864
+ "task_name": "gsarti/flores_101_afr",
865
+ "prompt_name": null,
866
+ "bits_per_byte": 2.8175051369933213
867
+ },
868
+ {
869
+ "task_name": "gsarti/flores_101_amh",
870
+ "prompt_name": null,
871
+ "word_perplexity": 105036774.30501972
872
+ },
873
+ {
874
+ "task_name": "gsarti/flores_101_amh",
875
+ "prompt_name": null,
876
+ "byte_perplexity": 4.172368790188039
877
+ },
878
+ {
879
+ "task_name": "gsarti/flores_101_amh",
880
+ "prompt_name": null,
881
+ "bits_per_byte": 2.0608666814101815
882
+ },
883
+ {
884
+ "task_name": "gsarti/flores_101_ara",
885
+ "prompt_name": null,
886
+ "word_perplexity": 674.8640314665696
887
+ },
888
+ {
889
+ "task_name": "gsarti/flores_101_ara",
890
+ "prompt_name": null,
891
+ "byte_perplexity": 1.8400375612633983
892
+ },
893
+ {
894
+ "task_name": "gsarti/flores_101_ara",
895
+ "prompt_name": null,
896
+ "bits_per_byte": 0.8797352167688847
897
+ },
898
+ {
899
+ "task_name": "gsarti/flores_101_hye",
900
+ "prompt_name": null,
901
+ "word_perplexity": 99262887.01092263
902
+ },
903
+ {
904
+ "task_name": "gsarti/flores_101_hye",
905
+ "prompt_name": null,
906
+ "byte_perplexity": 3.7481249397064547
907
+ },
908
+ {
909
+ "task_name": "gsarti/flores_101_hye",
910
+ "prompt_name": null,
911
+ "bits_per_byte": 1.906169044483402
912
+ },
913
+ {
914
+ "task_name": "gsarti/flores_101_asm",
915
+ "prompt_name": null,
916
+ "word_perplexity": 6763188828222.085
917
+ },
918
+ {
919
+ "task_name": "gsarti/flores_101_asm",
920
+ "prompt_name": null,
921
+ "byte_perplexity": 5.497254736157445
922
+ },
923
+ {
924
+ "task_name": "gsarti/flores_101_asm",
925
+ "prompt_name": null,
926
+ "bits_per_byte": 2.458711333673663
927
+ },
928
+ {
929
+ "task_name": "gsarti/flores_101_ast",
930
+ "prompt_name": null,
931
+ "word_perplexity": 10657.272913539553
932
+ },
933
+ {
934
+ "task_name": "gsarti/flores_101_ast",
935
+ "prompt_name": null,
936
+ "byte_perplexity": 4.260251728273795
937
+ },
938
+ {
939
+ "task_name": "gsarti/flores_101_ast",
940
+ "prompt_name": null,
941
+ "bits_per_byte": 2.0909386784329675
942
+ },
943
+ {
944
+ "task_name": "gsarti/flores_101_azj",
945
+ "prompt_name": null,
946
+ "word_perplexity": 45923924.18878753
947
+ },
948
+ {
949
+ "task_name": "gsarti/flores_101_azj",
950
+ "prompt_name": null,
951
+ "byte_perplexity": 7.691396328945705
952
+ },
953
+ {
954
+ "task_name": "gsarti/flores_101_azj",
955
+ "prompt_name": null,
956
+ "bits_per_byte": 2.9432455349850195
957
+ },
958
+ {
959
+ "task_name": "gsarti/flores_101_bel",
960
+ "prompt_name": null,
961
+ "word_perplexity": 23935692.781315073
962
+ },
963
+ {
964
+ "task_name": "gsarti/flores_101_bel",
965
+ "prompt_name": null,
966
+ "byte_perplexity": 3.7706591215465943
967
+ },
968
+ {
969
+ "task_name": "gsarti/flores_101_bel",
970
+ "prompt_name": null,
971
+ "bits_per_byte": 1.914816732584341
972
+ },
973
+ {
974
+ "task_name": "gsarti/flores_101_ben",
975
+ "prompt_name": null,
976
+ "word_perplexity": 2480418685142.412
977
+ },
978
+ {
979
+ "task_name": "gsarti/flores_101_ben",
980
+ "prompt_name": null,
981
+ "byte_perplexity": 5.074281765515423
982
+ },
983
+ {
984
+ "task_name": "gsarti/flores_101_ben",
985
+ "prompt_name": null,
986
+ "bits_per_byte": 2.3432036318231058
987
+ },
988
+ {
989
+ "task_name": "gsarti/flores_101_bos",
990
+ "prompt_name": null,
991
+ "word_perplexity": 229622.13691086147
992
+ },
993
+ {
994
+ "task_name": "gsarti/flores_101_bos",
995
+ "prompt_name": null,
996
+ "byte_perplexity": 6.343363734045183
997
+ },
998
+ {
999
+ "task_name": "gsarti/flores_101_bos",
1000
+ "prompt_name": null,
1001
+ "bits_per_byte": 2.665248069942796
1002
+ },
1003
+ {
1004
+ "task_name": "gsarti/flores_101_bul",
1005
+ "prompt_name": null,
1006
+ "word_perplexity": 194851.13344620814
1007
+ },
1008
+ {
1009
+ "task_name": "gsarti/flores_101_bul",
1010
+ "prompt_name": null,
1011
+ "byte_perplexity": 2.8553687444403257
1012
+ },
1013
+ {
1014
+ "task_name": "gsarti/flores_101_bul",
1015
+ "prompt_name": null,
1016
+ "bits_per_byte": 1.5136770683283687
1017
+ },
1018
+ {
1019
+ "task_name": "gsarti/flores_101_mya",
1020
+ "prompt_name": null,
1021
+ "word_perplexity": 5.887577237013639e+18
1022
+ },
1023
+ {
1024
+ "task_name": "gsarti/flores_101_mya",
1025
+ "prompt_name": null,
1026
+ "byte_perplexity": 2.657561458464019
1027
+ },
1028
+ {
1029
+ "task_name": "gsarti/flores_101_mya",
1030
+ "prompt_name": null,
1031
+ "bits_per_byte": 1.4101030557435918
1032
+ },
1033
+ {
1034
+ "task_name": "gsarti/flores_101_cat",
1035
+ "prompt_name": null,
1036
+ "word_perplexity": 179.13123174533087
1037
+ },
1038
+ {
1039
+ "task_name": "gsarti/flores_101_cat",
1040
+ "prompt_name": null,
1041
+ "byte_perplexity": 2.358207169698056
1042
+ },
1043
+ {
1044
+ "task_name": "gsarti/flores_101_cat",
1045
+ "prompt_name": null,
1046
+ "bits_per_byte": 1.2376904653775254
1047
+ },
1048
+ {
1049
+ "task_name": "gsarti/flores_101_ceb",
1050
+ "prompt_name": null,
1051
+ "word_perplexity": 113330.67154113152
1052
+ },
1053
+ {
1054
+ "task_name": "gsarti/flores_101_ceb",
1055
+ "prompt_name": null,
1056
+ "byte_perplexity": 6.896481056329736
1057
+ },
1058
+ {
1059
+ "task_name": "gsarti/flores_101_ceb",
1060
+ "prompt_name": null,
1061
+ "bits_per_byte": 2.7858604115174295
1062
+ },
1063
+ {
1064
+ "task_name": "gsarti/flores_101_zho_simpl",
1065
+ "prompt_name": null,
1066
+ "word_perplexity": 1.0554528210220222e+21
1067
+ },
1068
+ {
1069
+ "task_name": "gsarti/flores_101_zho_simpl",
1070
+ "prompt_name": null,
1071
+ "byte_perplexity": 2.322457417595381
1072
+ },
1073
+ {
1074
+ "task_name": "gsarti/flores_101_zho_simpl",
1075
+ "prompt_name": null,
1076
+ "bits_per_byte": 1.2156521449449949
1077
+ },
1078
+ {
1079
+ "task_name": "gsarti/flores_101_zho_trad",
1080
+ "prompt_name": null,
1081
+ "word_perplexity": 4.787781515987923e+24
1082
+ },
1083
+ {
1084
+ "task_name": "gsarti/flores_101_zho_trad",
1085
+ "prompt_name": null,
1086
+ "byte_perplexity": 2.5709177552415134
1087
+ },
1088
+ {
1089
+ "task_name": "gsarti/flores_101_zho_trad",
1090
+ "prompt_name": null,
1091
+ "bits_per_byte": 1.3622834584784203
1092
+ },
1093
+ {
1094
+ "task_name": "gsarti/flores_101_hrv",
1095
+ "prompt_name": null,
1096
+ "word_perplexity": 307789.1462790266
1097
+ },
1098
+ {
1099
+ "task_name": "gsarti/flores_101_hrv",
1100
+ "prompt_name": null,
1101
+ "byte_perplexity": 6.50559790827845
1102
+ },
1103
+ {
1104
+ "task_name": "gsarti/flores_101_hrv",
1105
+ "prompt_name": null,
1106
+ "bits_per_byte": 2.7016816564307984
1107
+ },
1108
+ {
1109
+ "task_name": "gsarti/flores_101_ces",
1110
+ "prompt_name": null,
1111
+ "word_perplexity": 625101.1441414964
1112
+ },
1113
+ {
1114
+ "task_name": "gsarti/flores_101_ces",
1115
+ "prompt_name": null,
1116
+ "byte_perplexity": 6.126526835715164
1117
+ },
1118
+ {
1119
+ "task_name": "gsarti/flores_101_ces",
1120
+ "prompt_name": null,
1121
+ "bits_per_byte": 2.6150694333085327
1122
+ },
1123
+ {
1124
+ "task_name": "gsarti/flores_101_dan",
1125
+ "prompt_name": null,
1126
+ "word_perplexity": 71695.50336412797
1127
+ },
1128
+ {
1129
+ "task_name": "gsarti/flores_101_dan",
1130
+ "prompt_name": null,
1131
+ "byte_perplexity": 5.778786323448377
1132
+ },
1133
+ {
1134
+ "task_name": "gsarti/flores_101_dan",
1135
+ "prompt_name": null,
1136
+ "bits_per_byte": 2.5307665257708245
1137
+ },
1138
+ {
1139
+ "task_name": "gsarti/flores_101_nld",
1140
+ "prompt_name": null,
1141
+ "word_perplexity": 13951.877058430618
1142
+ },
1143
+ {
1144
+ "task_name": "gsarti/flores_101_nld",
1145
+ "prompt_name": null,
1146
+ "byte_perplexity": 4.535651709856251
1147
+ },
1148
+ {
1149
+ "task_name": "gsarti/flores_101_nld",
1150
+ "prompt_name": null,
1151
+ "bits_per_byte": 2.1813098607926804
1152
+ },
1153
+ {
1154
+ "task_name": "gsarti/flores_101_eng",
1155
+ "prompt_name": null,
1156
+ "word_perplexity": 75.56480997823662
1157
+ },
1158
+ {
1159
+ "task_name": "gsarti/flores_101_eng",
1160
+ "prompt_name": null,
1161
+ "byte_perplexity": 2.061283234268159
1162
+ },
1163
+ {
1164
+ "task_name": "gsarti/flores_101_eng",
1165
+ "prompt_name": null,
1166
+ "bits_per_byte": 1.0435427545613876
1167
+ },
1168
+ {
1169
+ "task_name": "gsarti/flores_101_est",
1170
+ "prompt_name": null,
1171
+ "word_perplexity": 92602633.82439691
1172
+ },
1173
+ {
1174
+ "task_name": "gsarti/flores_101_est",
1175
+ "prompt_name": null,
1176
+ "byte_perplexity": 10.131736127467489
1177
+ },
1178
+ {
1179
+ "task_name": "gsarti/flores_101_est",
1180
+ "prompt_name": null,
1181
+ "bits_per_byte": 3.340809503762674
1182
+ },
1183
+ {
1184
+ "task_name": "gsarti/flores_101_tgl",
1185
+ "prompt_name": null,
1186
+ "word_perplexity": 87554.31770184237
1187
+ },
1188
+ {
1189
+ "task_name": "gsarti/flores_101_tgl",
1190
+ "prompt_name": null,
1191
+ "byte_perplexity": 6.256957969905079
1192
+ },
1193
+ {
1194
+ "task_name": "gsarti/flores_101_tgl",
1195
+ "prompt_name": null,
1196
+ "bits_per_byte": 2.645461413001105
1197
+ },
1198
+ {
1199
+ "task_name": "gsarti/flores_101_fin",
1200
+ "prompt_name": null,
1201
+ "word_perplexity": 91621886.60145952
1202
+ },
1203
+ {
1204
+ "task_name": "gsarti/flores_101_fin",
1205
+ "prompt_name": null,
1206
+ "byte_perplexity": 7.5129644427067355
1207
+ },
1208
+ {
1209
+ "task_name": "gsarti/flores_101_fin",
1210
+ "prompt_name": null,
1211
+ "bits_per_byte": 2.9093822743068216
1212
+ },
1213
+ {
1214
+ "task_name": "gsarti/flores_101_fra",
1215
+ "prompt_name": null,
1216
+ "word_perplexity": 89.45884576931464
1217
+ },
1218
+ {
1219
+ "task_name": "gsarti/flores_101_fra",
1220
+ "prompt_name": null,
1221
+ "byte_perplexity": 2.0177390037335385
1222
+ },
1223
+ {
1224
+ "task_name": "gsarti/flores_101_fra",
1225
+ "prompt_name": null,
1226
+ "bits_per_byte": 1.0127395726746855
1227
+ },
1228
+ {
1229
+ "task_name": "gsarti/flores_101_ful",
1230
+ "prompt_name": null,
1231
+ "word_perplexity": 908715.1423017589
1232
+ },
1233
+ {
1234
+ "task_name": "gsarti/flores_101_ful",
1235
+ "prompt_name": null,
1236
+ "byte_perplexity": 11.810263420287875
1237
+ },
1238
+ {
1239
+ "task_name": "gsarti/flores_101_ful",
1240
+ "prompt_name": null,
1241
+ "bits_per_byte": 3.561969238361191
1242
+ },
1243
+ {
1244
+ "task_name": "gsarti/flores_101_glg",
1245
+ "prompt_name": null,
1246
+ "word_perplexity": 1537.3193913761668
1247
+ },
1248
+ {
1249
+ "task_name": "gsarti/flores_101_glg",
1250
+ "prompt_name": null,
1251
+ "byte_perplexity": 3.2214647330840154
1252
+ },
1253
+ {
1254
+ "task_name": "gsarti/flores_101_glg",
1255
+ "prompt_name": null,
1256
+ "bits_per_byte": 1.6877168009728167
1257
+ },
1258
+ {
1259
+ "task_name": "gsarti/flores_101_lug",
1260
+ "prompt_name": null,
1261
+ "word_perplexity": 32046806.791237485
1262
+ },
1263
+ {
1264
+ "task_name": "gsarti/flores_101_lug",
1265
+ "prompt_name": null,
1266
+ "byte_perplexity": 9.285708185212261
1267
+ },
1268
+ {
1269
+ "task_name": "gsarti/flores_101_lug",
1270
+ "prompt_name": null,
1271
+ "bits_per_byte": 3.2150119431528754
1272
+ },
1273
+ {
1274
+ "task_name": "gsarti/flores_101_kat",
1275
+ "prompt_name": null,
1276
+ "word_perplexity": 1133105340.614723
1277
+ },
1278
+ {
1279
+ "task_name": "gsarti/flores_101_kat",
1280
+ "prompt_name": null,
1281
+ "byte_perplexity": 2.5184571084900518
1282
+ },
1283
+ {
1284
+ "task_name": "gsarti/flores_101_kat",
1285
+ "prompt_name": null,
1286
+ "bits_per_byte": 1.3325401608568794
1287
+ },
1288
+ {
1289
+ "task_name": "gsarti/flores_101_deu",
1290
+ "prompt_name": null,
1291
+ "word_perplexity": 5647.282599404732
1292
+ },
1293
+ {
1294
+ "task_name": "gsarti/flores_101_deu",
1295
+ "prompt_name": null,
1296
+ "byte_perplexity": 3.361758059911202
1297
+ },
1298
+ {
1299
+ "task_name": "gsarti/flores_101_deu",
1300
+ "prompt_name": null,
1301
+ "bits_per_byte": 1.7492158999678582
1302
+ },
1303
+ {
1304
+ "task_name": "gsarti/flores_101_ell",
1305
+ "prompt_name": null,
1306
+ "word_perplexity": 102751.5248402687
1307
+ },
1308
+ {
1309
+ "task_name": "gsarti/flores_101_ell",
1310
+ "prompt_name": null,
1311
+ "byte_perplexity": 2.6139607239932805
1312
+ },
1313
+ {
1314
+ "task_name": "gsarti/flores_101_ell",
1315
+ "prompt_name": null,
1316
+ "bits_per_byte": 1.3862374641150543
1317
+ },
1318
+ {
1319
+ "task_name": "gsarti/flores_101_guj",
1320
+ "prompt_name": null,
1321
+ "word_perplexity": 133216198508.6925
1322
+ },
1323
+ {
1324
+ "task_name": "gsarti/flores_101_guj",
1325
+ "prompt_name": null,
1326
+ "byte_perplexity": 5.125904532570054
1327
+ },
1328
+ {
1329
+ "task_name": "gsarti/flores_101_guj",
1330
+ "prompt_name": null,
1331
+ "bits_per_byte": 2.357806609400009
1332
+ },
1333
+ {
1334
+ "task_name": "gsarti/flores_101_hau",
1335
+ "prompt_name": null,
1336
+ "word_perplexity": 730749.6449046461
1337
+ },
1338
+ {
1339
+ "task_name": "gsarti/flores_101_hau",
1340
+ "prompt_name": null,
1341
+ "byte_perplexity": 11.049458818357667
1342
+ },
1343
+ {
1344
+ "task_name": "gsarti/flores_101_hau",
1345
+ "prompt_name": null,
1346
+ "bits_per_byte": 3.4659038057537184
1347
+ },
1348
+ {
1349
+ "task_name": "gsarti/flores_101_heb",
1350
+ "prompt_name": null,
1351
+ "word_perplexity": 880255.4148832298
1352
+ },
1353
+ {
1354
+ "task_name": "gsarti/flores_101_heb",
1355
+ "prompt_name": null,
1356
+ "byte_perplexity": 3.7036842387723694
1357
+ },
1358
+ {
1359
+ "task_name": "gsarti/flores_101_heb",
1360
+ "prompt_name": null,
1361
+ "bits_per_byte": 1.8889611054621571
1362
+ },
1363
+ {
1364
+ "task_name": "gsarti/flores_101_hin",
1365
+ "prompt_name": null,
1366
+ "word_perplexity": 453226793.5348556
1367
+ },
1368
+ {
1369
+ "task_name": "gsarti/flores_101_hin",
1370
+ "prompt_name": null,
1371
+ "byte_perplexity": 4.581311639568996
1372
+ },
1373
+ {
1374
+ "task_name": "gsarti/flores_101_hin",
1375
+ "prompt_name": null,
1376
+ "bits_per_byte": 2.195760704215568
1377
+ },
1378
+ {
1379
+ "task_name": "gsarti/flores_101_hun",
1380
+ "prompt_name": null,
1381
+ "word_perplexity": 8545882.19823639
1382
+ },
1383
+ {
1384
+ "task_name": "gsarti/flores_101_hun",
1385
+ "prompt_name": null,
1386
+ "byte_perplexity": 7.19531655942431
1387
+ },
1388
+ {
1389
+ "task_name": "gsarti/flores_101_hun",
1390
+ "prompt_name": null,
1391
+ "bits_per_byte": 2.8470581600253615
1392
+ },
1393
+ {
1394
+ "task_name": "gsarti/flores_101_isl",
1395
+ "prompt_name": null,
1396
+ "word_perplexity": 3947458.536983725
1397
+ },
1398
+ {
1399
+ "task_name": "gsarti/flores_101_isl",
1400
+ "prompt_name": null,
1401
+ "byte_perplexity": 8.812045732299993
1402
+ },
1403
+ {
1404
+ "task_name": "gsarti/flores_101_isl",
1405
+ "prompt_name": null,
1406
+ "bits_per_byte": 3.1394769822824644
1407
+ },
1408
+ {
1409
+ "task_name": "gsarti/flores_101_ibo",
1410
+ "prompt_name": null,
1411
+ "word_perplexity": 99576.38125028457
1412
+ },
1413
+ {
1414
+ "task_name": "gsarti/flores_101_ibo",
1415
+ "prompt_name": null,
1416
+ "byte_perplexity": 6.06807351892086
1417
+ },
1418
+ {
1419
+ "task_name": "gsarti/flores_101_ibo",
1420
+ "prompt_name": null,
1421
+ "bits_per_byte": 2.6012385649422316
1422
+ },
1423
+ {
1424
+ "task_name": "gsarti/flores_101_ind",
1425
+ "prompt_name": null,
1426
+ "word_perplexity": 299.41864562936706
1427
+ },
1428
+ {
1429
+ "task_name": "gsarti/flores_101_ind",
1430
+ "prompt_name": null,
1431
+ "byte_perplexity": 2.2193428661828962
1432
+ },
1433
+ {
1434
+ "task_name": "gsarti/flores_101_ind",
1435
+ "prompt_name": null,
1436
+ "bits_per_byte": 1.1501325666473412
1437
+ },
1438
+ {
1439
+ "task_name": "gsarti/flores_101_gle",
1440
+ "prompt_name": null,
1441
+ "word_perplexity": 1548851.5929806433
1442
+ },
1443
+ {
1444
+ "task_name": "gsarti/flores_101_gle",
1445
+ "prompt_name": null,
1446
+ "byte_perplexity": 9.712259930753122
1447
+ },
1448
+ {
1449
+ "task_name": "gsarti/flores_101_gle",
1450
+ "prompt_name": null,
1451
+ "bits_per_byte": 3.2798070331865063
1452
+ },
1453
+ {
1454
+ "task_name": "gsarti/flores_101_ita",
1455
+ "prompt_name": null,
1456
+ "word_perplexity": 1951.0663459405935
1457
+ },
1458
+ {
1459
+ "task_name": "gsarti/flores_101_ita",
1460
+ "prompt_name": null,
1461
+ "byte_perplexity": 3.238337491305615
1462
+ },
1463
+ {
1464
+ "task_name": "gsarti/flores_101_ita",
1465
+ "prompt_name": null,
1466
+ "bits_per_byte": 1.695253347487448
1467
+ },
1468
+ {
1469
+ "task_name": "gsarti/flores_101_jpn",
1470
+ "prompt_name": null,
1471
+ "word_perplexity": 6.0024027118732196e+69
1472
+ },
1473
+ {
1474
+ "task_name": "gsarti/flores_101_jpn",
1475
+ "prompt_name": null,
1476
+ "byte_perplexity": 2.907038023970581
1477
+ },
1478
+ {
1479
+ "task_name": "gsarti/flores_101_jpn",
1480
+ "prompt_name": null,
1481
+ "bits_per_byte": 1.539549942005635
1482
+ },
1483
+ {
1484
+ "task_name": "gsarti/flores_101_jav",
1485
+ "prompt_name": null,
1486
+ "word_perplexity": 956961.3940329206
1487
+ },
1488
+ {
1489
+ "task_name": "gsarti/flores_101_jav",
1490
+ "prompt_name": null,
1491
+ "byte_perplexity": 7.460632752007581
1492
+ },
1493
+ {
1494
+ "task_name": "gsarti/flores_101_jav",
1495
+ "prompt_name": null,
1496
+ "bits_per_byte": 2.899297993680408
1497
+ },
1498
+ {
1499
+ "task_name": "gsarti/flores_101_kea",
1500
+ "prompt_name": null,
1501
+ "word_perplexity": 438558.0012817139
1502
+ },
1503
+ {
1504
+ "task_name": "gsarti/flores_101_kea",
1505
+ "prompt_name": null,
1506
+ "byte_perplexity": 9.281572608888562
1507
+ },
1508
+ {
1509
+ "task_name": "gsarti/flores_101_kea",
1510
+ "prompt_name": null,
1511
+ "bits_per_byte": 3.2143692668645976
1512
+ },
1513
+ {
1514
+ "task_name": "gsarti/flores_101_kam",
1515
+ "prompt_name": null,
1516
+ "word_perplexity": 4288601.196402131
1517
+ },
1518
+ {
1519
+ "task_name": "gsarti/flores_101_kam",
1520
+ "prompt_name": null,
1521
+ "byte_perplexity": 11.436917146974627
1522
+ },
1523
+ {
1524
+ "task_name": "gsarti/flores_101_kam",
1525
+ "prompt_name": null,
1526
+ "bits_per_byte": 3.515626316920499
1527
+ },
1528
+ {
1529
+ "task_name": "gsarti/flores_101_kan",
1530
+ "prompt_name": null,
1531
+ "word_perplexity": 5.3861539364992216e+16
1532
+ },
1533
+ {
1534
+ "task_name": "gsarti/flores_101_kan",
1535
+ "prompt_name": null,
1536
+ "byte_perplexity": 5.274956219477929
1537
+ },
1538
+ {
1539
+ "task_name": "gsarti/flores_101_kan",
1540
+ "prompt_name": null,
1541
+ "bits_per_byte": 2.3991591199422513
1542
+ },
1543
+ {
1544
+ "task_name": "gsarti/flores_101_kaz",
1545
+ "prompt_name": null,
1546
+ "word_perplexity": 89537342.10068764
1547
+ },
1548
+ {
1549
+ "task_name": "gsarti/flores_101_kaz",
1550
+ "prompt_name": null,
1551
+ "byte_perplexity": 3.5945005448756477
1552
+ },
1553
+ {
1554
+ "task_name": "gsarti/flores_101_kaz",
1555
+ "prompt_name": null,
1556
+ "bits_per_byte": 1.845791322405974
1557
+ }
1558
+ ],
1559
+ "versions": {
1560
+ "wic+GPT-3-prompt": 0,
1561
+ "wic+GPT-3-prompt-with-label": 0,
1562
+ "wic+affirmation_true_or_false": 0,
1563
+ "wic+grammar_homework": 0,
1564
+ "wic+polysemous": 0,
1565
+ "wic+question-context": 0,
1566
+ "wic+question-context-meaning": 0,
1567
+ "wic+question-context-meaning-with-label": 0,
1568
+ "wic+same_sense": 0,
1569
+ "wic+similar-sense": 0,
1570
+ "wsc+GPT-3 Style": 0,
1571
+ "wsc+I think they mean": 0,
1572
+ "wsc+Who or what is/are": 0,
1573
+ "wsc+by p they mean": 0,
1574
+ "wsc+does p stand for": 0,
1575
+ "wsc+does the pronoun refer to": 0,
1576
+ "wsc+in other words": 0,
1577
+ "wsc+p is/are r": 0,
1578
+ "wsc+replaced with": 0,
1579
+ "wsc+the pronoun refers to": 0,
1580
+ "wnli+confident": 1,
1581
+ "wnli+entailment explained": 1,
1582
+ "wnli+imply": 1,
1583
+ "wnli+justified": 1,
1584
+ "wnli+mean": 1,
1585
+ "gsarti/flores_101_afr+null": 0,
1586
+ "gsarti/flores_101_amh+null": 0,
1587
+ "gsarti/flores_101_ara+null": 0,
1588
+ "gsarti/flores_101_hye+null": 0,
1589
+ "gsarti/flores_101_asm+null": 0,
1590
+ "gsarti/flores_101_ast+null": 0,
1591
+ "gsarti/flores_101_azj+null": 0,
1592
+ "gsarti/flores_101_bel+null": 0,
1593
+ "gsarti/flores_101_ben+null": 0,
1594
+ "gsarti/flores_101_bos+null": 0,
1595
+ "gsarti/flores_101_bul+null": 0,
1596
+ "gsarti/flores_101_mya+null": 0,
1597
+ "gsarti/flores_101_cat+null": 0,
1598
+ "gsarti/flores_101_ceb+null": 0,
1599
+ "gsarti/flores_101_zho_simpl+null": 0,
1600
+ "gsarti/flores_101_zho_trad+null": 0,
1601
+ "gsarti/flores_101_hrv+null": 0,
1602
+ "gsarti/flores_101_ces+null": 0,
1603
+ "gsarti/flores_101_dan+null": 0,
1604
+ "gsarti/flores_101_nld+null": 0,
1605
+ "gsarti/flores_101_eng+null": 0,
1606
+ "gsarti/flores_101_est+null": 0,
1607
+ "gsarti/flores_101_tgl+null": 0,
1608
+ "gsarti/flores_101_fin+null": 0,
1609
+ "gsarti/flores_101_fra+null": 0,
1610
+ "gsarti/flores_101_ful+null": 0,
1611
+ "gsarti/flores_101_glg+null": 0,
1612
+ "gsarti/flores_101_lug+null": 0,
1613
+ "gsarti/flores_101_kat+null": 0,
1614
+ "gsarti/flores_101_deu+null": 0,
1615
+ "gsarti/flores_101_ell+null": 0,
1616
+ "gsarti/flores_101_guj+null": 0,
1617
+ "gsarti/flores_101_hau+null": 0,
1618
+ "gsarti/flores_101_heb+null": 0,
1619
+ "gsarti/flores_101_hin+null": 0,
1620
+ "gsarti/flores_101_hun+null": 0,
1621
+ "gsarti/flores_101_isl+null": 0,
1622
+ "gsarti/flores_101_ibo+null": 0,
1623
+ "gsarti/flores_101_ind+null": 0,
1624
+ "gsarti/flores_101_gle+null": 0,
1625
+ "gsarti/flores_101_ita+null": 0,
1626
+ "gsarti/flores_101_jpn+null": 0,
1627
+ "gsarti/flores_101_jav+null": 0,
1628
+ "gsarti/flores_101_kea+null": 0,
1629
+ "gsarti/flores_101_kam+null": 0,
1630
+ "gsarti/flores_101_kan+null": 0,
1631
+ "gsarti/flores_101_kaz+null": 0
1632
+ },
1633
+ "table_results": {
1634
+ "wic+GPT-3-prompt": {
1635
+ "task_name": "wic",
1636
+ "prompt_name": "GPT-3-prompt",
1637
+ "acc": 0.5,
1638
+ "acc_stderr": 0.01981072129375818,
1639
+ "acc_norm": 0.5,
1640
+ "acc_norm_stderr": 0.01981072129375818
1641
+ },
1642
+ "wic+GPT-3-prompt-with-label": {
1643
+ "task_name": "wic",
1644
+ "prompt_name": "GPT-3-prompt-with-label",
1645
+ "acc": 0.49216300940438873,
1646
+ "acc_stderr": 0.019808287657813832,
1647
+ "acc_norm": 0.5,
1648
+ "acc_norm_stderr": 0.01981072129375818
1649
+ },
1650
+ "wic+affirmation_true_or_false": {
1651
+ "task_name": "wic",
1652
+ "prompt_name": "affirmation_true_or_false",
1653
+ "acc": 0.5,
1654
+ "acc_stderr": 0.01981072129375818,
1655
+ "acc_norm": 0.5078369905956113,
1656
+ "acc_norm_stderr": 0.019808287657813832
1657
+ },
1658
+ "wic+grammar_homework": {
1659
+ "task_name": "wic",
1660
+ "prompt_name": "grammar_homework",
1661
+ "acc": 0.5094043887147336,
1662
+ "acc_stderr": 0.019807216763271497,
1663
+ "acc_norm": 0.49843260188087773,
1664
+ "acc_norm_stderr": 0.019810623954060382
1665
+ },
1666
+ "wic+polysemous": {
1667
+ "task_name": "wic",
1668
+ "prompt_name": "polysemous",
1669
+ "acc": 0.512539184952978,
1670
+ "acc_stderr": 0.019804490588592596,
1671
+ "acc_norm": 0.49843260188087773,
1672
+ "acc_norm_stderr": 0.019810623954060382
1673
+ },
1674
+ "wic+question-context": {
1675
+ "task_name": "wic",
1676
+ "prompt_name": "question-context",
1677
+ "acc": 0.5266457680250783,
1678
+ "acc_stderr": 0.019782570188812167,
1679
+ "acc_norm": 0.5031347962382445,
1680
+ "acc_norm_stderr": 0.019810331932097542
1681
+ },
1682
+ "wic+question-context-meaning": {
1683
+ "task_name": "wic",
1684
+ "prompt_name": "question-context-meaning",
1685
+ "acc": 0.5438871473354232,
1686
+ "acc_stderr": 0.019734259601993404,
1687
+ "acc_norm": 0.5015673981191222,
1688
+ "acc_norm_stderr": 0.019810623954060382
1689
+ },
1690
+ "wic+question-context-meaning-with-label": {
1691
+ "task_name": "wic",
1692
+ "prompt_name": "question-context-meaning-with-label",
1693
+ "acc": 0.5156739811912225,
1694
+ "acc_stderr": 0.019800984955347847,
1695
+ "acc_norm": 0.5015673981191222,
1696
+ "acc_norm_stderr": 0.019810623954060382
1697
+ },
1698
+ "wic+same_sense": {
1699
+ "task_name": "wic",
1700
+ "prompt_name": "same_sense",
1701
+ "acc": 0.5047021943573667,
1702
+ "acc_stderr": 0.019809845219259763,
1703
+ "acc_norm": 0.5,
1704
+ "acc_norm_stderr": 0.01981072129375818
1705
+ },
1706
+ "wic+similar-sense": {
1707
+ "task_name": "wic",
1708
+ "prompt_name": "similar-sense",
1709
+ "acc": 0.542319749216301,
1710
+ "acc_stderr": 0.01973963328373276,
1711
+ "acc_norm": 0.5,
1712
+ "acc_norm_stderr": 0.01981072129375818
1713
+ },
1714
+ "wsc+GPT-3 Style": {
1715
+ "task_name": "wsc",
1716
+ "prompt_name": "GPT-3 Style",
1717
+ "acc": 0.36538461538461536,
1718
+ "acc_stderr": 0.0474473339327792,
1719
+ "acc_norm": 0.36538461538461536,
1720
+ "acc_norm_stderr": 0.0474473339327792
1721
+ },
1722
+ "wsc+I think they mean": {
1723
+ "task_name": "wsc",
1724
+ "prompt_name": "I think they mean",
1725
+ "acc": 0.36538461538461536,
1726
+ "acc_stderr": 0.0474473339327792,
1727
+ "acc_norm": 0.36538461538461536,
1728
+ "acc_norm_stderr": 0.0474473339327792
1729
+ },
1730
+ "wsc+Who or what is/are": {
1731
+ "task_name": "wsc",
1732
+ "prompt_name": "Who or what is/are",
1733
+ "acc": 0.40384615384615385,
1734
+ "acc_stderr": 0.048346889526540184,
1735
+ "acc_norm": 0.36538461538461536,
1736
+ "acc_norm_stderr": 0.0474473339327792
1737
+ },
1738
+ "wsc+by p they mean": {
1739
+ "task_name": "wsc",
1740
+ "prompt_name": "by p they mean",
1741
+ "acc": 0.36538461538461536,
1742
+ "acc_stderr": 0.0474473339327792,
1743
+ "acc_norm": 0.36538461538461536,
1744
+ "acc_norm_stderr": 0.0474473339327792
1745
+ },
1746
+ "wsc+does p stand for": {
1747
+ "task_name": "wsc",
1748
+ "prompt_name": "does p stand for",
1749
+ "acc": 0.375,
1750
+ "acc_stderr": 0.04770204856076104,
1751
+ "acc_norm": 0.36538461538461536,
1752
+ "acc_norm_stderr": 0.0474473339327792
1753
+ },
1754
+ "wsc+does the pronoun refer to": {
1755
+ "task_name": "wsc",
1756
+ "prompt_name": "does the pronoun refer to",
1757
+ "acc": 0.5480769230769231,
1758
+ "acc_stderr": 0.049038186969314335,
1759
+ "acc_norm": 0.36538461538461536,
1760
+ "acc_norm_stderr": 0.0474473339327792
1761
+ },
1762
+ "wsc+in other words": {
1763
+ "task_name": "wsc",
1764
+ "prompt_name": "in other words",
1765
+ "acc": 0.36538461538461536,
1766
+ "acc_stderr": 0.0474473339327792,
1767
+ "acc_norm": 0.5288461538461539,
1768
+ "acc_norm_stderr": 0.04918440626354964
1769
+ },
1770
+ "wsc+p is/are r": {
1771
+ "task_name": "wsc",
1772
+ "prompt_name": "p is/are r",
1773
+ "acc": 0.36538461538461536,
1774
+ "acc_stderr": 0.0474473339327792,
1775
+ "acc_norm": 0.34615384615384615,
1776
+ "acc_norm_stderr": 0.04687634642174987
1777
+ },
1778
+ "wsc+replaced with": {
1779
+ "task_name": "wsc",
1780
+ "prompt_name": "replaced with",
1781
+ "acc": 0.6153846153846154,
1782
+ "acc_stderr": 0.047936688680750406,
1783
+ "acc_norm": 0.36538461538461536,
1784
+ "acc_norm_stderr": 0.0474473339327792
1785
+ },
1786
+ "wsc+the pronoun refers to": {
1787
+ "task_name": "wsc",
1788
+ "prompt_name": "the pronoun refers to",
1789
+ "acc": 0.36538461538461536,
1790
+ "acc_stderr": 0.0474473339327792,
1791
+ "acc_norm": 0.5865384615384616,
1792
+ "acc_norm_stderr": 0.04852294969729053
1793
+ },
1794
+ "wnli+confident": {
1795
+ "task_name": "wnli",
1796
+ "prompt_name": "confident",
1797
+ "acc": 0.43661971830985913,
1798
+ "acc_stderr": 0.0592793555841297,
1799
+ "acc_norm": 0.43661971830985913,
1800
+ "acc_norm_stderr": 0.0592793555841297
1801
+ },
1802
+ "wnli+entailment explained": {
1803
+ "task_name": "wnli",
1804
+ "prompt_name": "entailment explained",
1805
+ "acc": 0.39436619718309857,
1806
+ "acc_stderr": 0.058412510854444266,
1807
+ "acc_norm": 0.43661971830985913,
1808
+ "acc_norm_stderr": 0.0592793555841297
1809
+ },
1810
+ "wnli+imply": {
1811
+ "task_name": "wnli",
1812
+ "prompt_name": "imply",
1813
+ "acc": 0.4225352112676056,
1814
+ "acc_stderr": 0.05903984205682581,
1815
+ "acc_norm": 0.43661971830985913,
1816
+ "acc_norm_stderr": 0.0592793555841297
1817
+ },
1818
+ "wnli+justified": {
1819
+ "task_name": "wnli",
1820
+ "prompt_name": "justified",
1821
+ "acc": 0.43661971830985913,
1822
+ "acc_stderr": 0.0592793555841297,
1823
+ "acc_norm": 0.43661971830985913,
1824
+ "acc_norm_stderr": 0.0592793555841297
1825
+ },
1826
+ "wnli+mean": {
1827
+ "task_name": "wnli",
1828
+ "prompt_name": "mean",
1829
+ "acc": 0.6619718309859155,
1830
+ "acc_stderr": 0.05653887739133513,
1831
+ "acc_norm": 0.43661971830985913,
1832
+ "acc_norm_stderr": 0.0592793555841297
1833
+ },
1834
+ "gsarti/flores_101_afr+null": {
1835
+ "task_name": "gsarti/flores_101_afr",
1836
+ "prompt_name": "null",
1837
+ "word_perplexity": 139324.0466654445,
1838
+ "byte_perplexity": 7.049422805555328,
1839
+ "bits_per_byte": 2.8175051369933213
1840
+ },
1841
+ "gsarti/flores_101_amh+null": {
1842
+ "task_name": "gsarti/flores_101_amh",
1843
+ "prompt_name": "null",
1844
+ "word_perplexity": 105036774.30501972,
1845
+ "byte_perplexity": 4.172368790188039,
1846
+ "bits_per_byte": 2.0608666814101815
1847
+ },
1848
+ "gsarti/flores_101_ara+null": {
1849
+ "task_name": "gsarti/flores_101_ara",
1850
+ "prompt_name": "null",
1851
+ "word_perplexity": 674.8640314665696,
1852
+ "byte_perplexity": 1.8400375612633983,
1853
+ "bits_per_byte": 0.8797352167688847
1854
+ },
1855
+ "gsarti/flores_101_hye+null": {
1856
+ "task_name": "gsarti/flores_101_hye",
1857
+ "prompt_name": "null",
1858
+ "word_perplexity": 99262887.01092263,
1859
+ "byte_perplexity": 3.7481249397064547,
1860
+ "bits_per_byte": 1.906169044483402
1861
+ },
1862
+ "gsarti/flores_101_asm+null": {
1863
+ "task_name": "gsarti/flores_101_asm",
1864
+ "prompt_name": "null",
1865
+ "word_perplexity": 6763188828222.085,
1866
+ "byte_perplexity": 5.497254736157445,
1867
+ "bits_per_byte": 2.458711333673663
1868
+ },
1869
+ "gsarti/flores_101_ast+null": {
1870
+ "task_name": "gsarti/flores_101_ast",
1871
+ "prompt_name": "null",
1872
+ "word_perplexity": 10657.272913539553,
1873
+ "byte_perplexity": 4.260251728273795,
1874
+ "bits_per_byte": 2.0909386784329675
1875
+ },
1876
+ "gsarti/flores_101_azj+null": {
1877
+ "task_name": "gsarti/flores_101_azj",
1878
+ "prompt_name": "null",
1879
+ "word_perplexity": 45923924.18878753,
1880
+ "byte_perplexity": 7.691396328945705,
1881
+ "bits_per_byte": 2.9432455349850195
1882
+ },
1883
+ "gsarti/flores_101_bel+null": {
1884
+ "task_name": "gsarti/flores_101_bel",
1885
+ "prompt_name": "null",
1886
+ "word_perplexity": 23935692.781315073,
1887
+ "byte_perplexity": 3.7706591215465943,
1888
+ "bits_per_byte": 1.914816732584341
1889
+ },
1890
+ "gsarti/flores_101_ben+null": {
1891
+ "task_name": "gsarti/flores_101_ben",
1892
+ "prompt_name": "null",
1893
+ "word_perplexity": 2480418685142.412,
1894
+ "byte_perplexity": 5.074281765515423,
1895
+ "bits_per_byte": 2.3432036318231058
1896
+ },
1897
+ "gsarti/flores_101_bos+null": {
1898
+ "task_name": "gsarti/flores_101_bos",
1899
+ "prompt_name": "null",
1900
+ "word_perplexity": 229622.13691086147,
1901
+ "byte_perplexity": 6.343363734045183,
1902
+ "bits_per_byte": 2.665248069942796
1903
+ },
1904
+ "gsarti/flores_101_bul+null": {
1905
+ "task_name": "gsarti/flores_101_bul",
1906
+ "prompt_name": "null",
1907
+ "word_perplexity": 194851.13344620814,
1908
+ "byte_perplexity": 2.8553687444403257,
1909
+ "bits_per_byte": 1.5136770683283687
1910
+ },
1911
+ "gsarti/flores_101_mya+null": {
1912
+ "task_name": "gsarti/flores_101_mya",
1913
+ "prompt_name": "null",
1914
+ "word_perplexity": 5.887577237013639e+18,
1915
+ "byte_perplexity": 2.657561458464019,
1916
+ "bits_per_byte": 1.4101030557435918
1917
+ },
1918
+ "gsarti/flores_101_cat+null": {
1919
+ "task_name": "gsarti/flores_101_cat",
1920
+ "prompt_name": "null",
1921
+ "word_perplexity": 179.13123174533087,
1922
+ "byte_perplexity": 2.358207169698056,
1923
+ "bits_per_byte": 1.2376904653775254
1924
+ },
1925
+ "gsarti/flores_101_ceb+null": {
1926
+ "task_name": "gsarti/flores_101_ceb",
1927
+ "prompt_name": "null",
1928
+ "word_perplexity": 113330.67154113152,
1929
+ "byte_perplexity": 6.896481056329736,
1930
+ "bits_per_byte": 2.7858604115174295
1931
+ },
1932
+ "gsarti/flores_101_zho_simpl+null": {
1933
+ "task_name": "gsarti/flores_101_zho_simpl",
1934
+ "prompt_name": "null",
1935
+ "word_perplexity": 1.0554528210220222e+21,
1936
+ "byte_perplexity": 2.322457417595381,
1937
+ "bits_per_byte": 1.2156521449449949
1938
+ },
1939
+ "gsarti/flores_101_zho_trad+null": {
1940
+ "task_name": "gsarti/flores_101_zho_trad",
1941
+ "prompt_name": "null",
1942
+ "word_perplexity": 4.787781515987923e+24,
1943
+ "byte_perplexity": 2.5709177552415134,
1944
+ "bits_per_byte": 1.3622834584784203
1945
+ },
1946
+ "gsarti/flores_101_hrv+null": {
1947
+ "task_name": "gsarti/flores_101_hrv",
1948
+ "prompt_name": "null",
1949
+ "word_perplexity": 307789.1462790266,
1950
+ "byte_perplexity": 6.50559790827845,
1951
+ "bits_per_byte": 2.7016816564307984
1952
+ },
1953
+ "gsarti/flores_101_ces+null": {
1954
+ "task_name": "gsarti/flores_101_ces",
1955
+ "prompt_name": "null",
1956
+ "word_perplexity": 625101.1441414964,
1957
+ "byte_perplexity": 6.126526835715164,
1958
+ "bits_per_byte": 2.6150694333085327
1959
+ },
1960
+ "gsarti/flores_101_dan+null": {
1961
+ "task_name": "gsarti/flores_101_dan",
1962
+ "prompt_name": "null",
1963
+ "word_perplexity": 71695.50336412797,
1964
+ "byte_perplexity": 5.778786323448377,
1965
+ "bits_per_byte": 2.5307665257708245
1966
+ },
1967
+ "gsarti/flores_101_nld+null": {
1968
+ "task_name": "gsarti/flores_101_nld",
1969
+ "prompt_name": "null",
1970
+ "word_perplexity": 13951.877058430618,
1971
+ "byte_perplexity": 4.535651709856251,
1972
+ "bits_per_byte": 2.1813098607926804
1973
+ },
1974
+ "gsarti/flores_101_eng+null": {
1975
+ "task_name": "gsarti/flores_101_eng",
1976
+ "prompt_name": "null",
1977
+ "word_perplexity": 75.56480997823662,
1978
+ "byte_perplexity": 2.061283234268159,
1979
+ "bits_per_byte": 1.0435427545613876
1980
+ },
1981
+ "gsarti/flores_101_est+null": {
1982
+ "task_name": "gsarti/flores_101_est",
1983
+ "prompt_name": "null",
1984
+ "word_perplexity": 92602633.82439691,
1985
+ "byte_perplexity": 10.131736127467489,
1986
+ "bits_per_byte": 3.340809503762674
1987
+ },
1988
+ "gsarti/flores_101_tgl+null": {
1989
+ "task_name": "gsarti/flores_101_tgl",
1990
+ "prompt_name": "null",
1991
+ "word_perplexity": 87554.31770184237,
1992
+ "byte_perplexity": 6.256957969905079,
1993
+ "bits_per_byte": 2.645461413001105
1994
+ },
1995
+ "gsarti/flores_101_fin+null": {
1996
+ "task_name": "gsarti/flores_101_fin",
1997
+ "prompt_name": "null",
1998
+ "word_perplexity": 91621886.60145952,
1999
+ "byte_perplexity": 7.5129644427067355,
2000
+ "bits_per_byte": 2.9093822743068216
2001
+ },
2002
+ "gsarti/flores_101_fra+null": {
2003
+ "task_name": "gsarti/flores_101_fra",
2004
+ "prompt_name": "null",
2005
+ "word_perplexity": 89.45884576931464,
2006
+ "byte_perplexity": 2.0177390037335385,
2007
+ "bits_per_byte": 1.0127395726746855
2008
+ },
2009
+ "gsarti/flores_101_ful+null": {
2010
+ "task_name": "gsarti/flores_101_ful",
2011
+ "prompt_name": "null",
2012
+ "word_perplexity": 908715.1423017589,
2013
+ "byte_perplexity": 11.810263420287875,
2014
+ "bits_per_byte": 3.561969238361191
2015
+ },
2016
+ "gsarti/flores_101_glg+null": {
2017
+ "task_name": "gsarti/flores_101_glg",
2018
+ "prompt_name": "null",
2019
+ "word_perplexity": 1537.3193913761668,
2020
+ "byte_perplexity": 3.2214647330840154,
2021
+ "bits_per_byte": 1.6877168009728167
2022
+ },
2023
+ "gsarti/flores_101_lug+null": {
2024
+ "task_name": "gsarti/flores_101_lug",
2025
+ "prompt_name": "null",
2026
+ "word_perplexity": 32046806.791237485,
2027
+ "byte_perplexity": 9.285708185212261,
2028
+ "bits_per_byte": 3.2150119431528754
2029
+ },
2030
+ "gsarti/flores_101_kat+null": {
2031
+ "task_name": "gsarti/flores_101_kat",
2032
+ "prompt_name": "null",
2033
+ "word_perplexity": 1133105340.614723,
2034
+ "byte_perplexity": 2.5184571084900518,
2035
+ "bits_per_byte": 1.3325401608568794
2036
+ },
2037
+ "gsarti/flores_101_deu+null": {
2038
+ "task_name": "gsarti/flores_101_deu",
2039
+ "prompt_name": "null",
2040
+ "word_perplexity": 5647.282599404732,
2041
+ "byte_perplexity": 3.361758059911202,
2042
+ "bits_per_byte": 1.7492158999678582
2043
+ },
2044
+ "gsarti/flores_101_ell+null": {
2045
+ "task_name": "gsarti/flores_101_ell",
2046
+ "prompt_name": "null",
2047
+ "word_perplexity": 102751.5248402687,
2048
+ "byte_perplexity": 2.6139607239932805,
2049
+ "bits_per_byte": 1.3862374641150543
2050
+ },
2051
+ "gsarti/flores_101_guj+null": {
2052
+ "task_name": "gsarti/flores_101_guj",
2053
+ "prompt_name": "null",
2054
+ "word_perplexity": 133216198508.6925,
2055
+ "byte_perplexity": 5.125904532570054,
2056
+ "bits_per_byte": 2.357806609400009
2057
+ },
2058
+ "gsarti/flores_101_hau+null": {
2059
+ "task_name": "gsarti/flores_101_hau",
2060
+ "prompt_name": "null",
2061
+ "word_perplexity": 730749.6449046461,
2062
+ "byte_perplexity": 11.049458818357667,
2063
+ "bits_per_byte": 3.4659038057537184
2064
+ },
2065
+ "gsarti/flores_101_heb+null": {
2066
+ "task_name": "gsarti/flores_101_heb",
2067
+ "prompt_name": "null",
2068
+ "word_perplexity": 880255.4148832298,
2069
+ "byte_perplexity": 3.7036842387723694,
2070
+ "bits_per_byte": 1.8889611054621571
2071
+ },
2072
+ "gsarti/flores_101_hin+null": {
2073
+ "task_name": "gsarti/flores_101_hin",
2074
+ "prompt_name": "null",
2075
+ "word_perplexity": 453226793.5348556,
2076
+ "byte_perplexity": 4.581311639568996,
2077
+ "bits_per_byte": 2.195760704215568
2078
+ },
2079
+ "gsarti/flores_101_hun+null": {
2080
+ "task_name": "gsarti/flores_101_hun",
2081
+ "prompt_name": "null",
2082
+ "word_perplexity": 8545882.19823639,
2083
+ "byte_perplexity": 7.19531655942431,
2084
+ "bits_per_byte": 2.8470581600253615
2085
+ },
2086
+ "gsarti/flores_101_isl+null": {
2087
+ "task_name": "gsarti/flores_101_isl",
2088
+ "prompt_name": "null",
2089
+ "word_perplexity": 3947458.536983725,
2090
+ "byte_perplexity": 8.812045732299993,
2091
+ "bits_per_byte": 3.1394769822824644
2092
+ },
2093
+ "gsarti/flores_101_ibo+null": {
2094
+ "task_name": "gsarti/flores_101_ibo",
2095
+ "prompt_name": "null",
2096
+ "word_perplexity": 99576.38125028457,
2097
+ "byte_perplexity": 6.06807351892086,
2098
+ "bits_per_byte": 2.6012385649422316
2099
+ },
2100
+ "gsarti/flores_101_ind+null": {
2101
+ "task_name": "gsarti/flores_101_ind",
2102
+ "prompt_name": "null",
2103
+ "word_perplexity": 299.41864562936706,
2104
+ "byte_perplexity": 2.2193428661828962,
2105
+ "bits_per_byte": 1.1501325666473412
2106
+ },
2107
+ "gsarti/flores_101_gle+null": {
2108
+ "task_name": "gsarti/flores_101_gle",
2109
+ "prompt_name": "null",
2110
+ "word_perplexity": 1548851.5929806433,
2111
+ "byte_perplexity": 9.712259930753122,
2112
+ "bits_per_byte": 3.2798070331865063
2113
+ },
2114
+ "gsarti/flores_101_ita+null": {
2115
+ "task_name": "gsarti/flores_101_ita",
2116
+ "prompt_name": "null",
2117
+ "word_perplexity": 1951.0663459405935,
2118
+ "byte_perplexity": 3.238337491305615,
2119
+ "bits_per_byte": 1.695253347487448
2120
+ },
2121
+ "gsarti/flores_101_jpn+null": {
2122
+ "task_name": "gsarti/flores_101_jpn",
2123
+ "prompt_name": "null",
2124
+ "word_perplexity": 6.0024027118732196e+69,
2125
+ "byte_perplexity": 2.907038023970581,
2126
+ "bits_per_byte": 1.539549942005635
2127
+ },
2128
+ "gsarti/flores_101_jav+null": {
2129
+ "task_name": "gsarti/flores_101_jav",
2130
+ "prompt_name": "null",
2131
+ "word_perplexity": 956961.3940329206,
2132
+ "byte_perplexity": 7.460632752007581,
2133
+ "bits_per_byte": 2.899297993680408
2134
+ },
2135
+ "gsarti/flores_101_kea+null": {
2136
+ "task_name": "gsarti/flores_101_kea",
2137
+ "prompt_name": "null",
2138
+ "word_perplexity": 438558.0012817139,
2139
+ "byte_perplexity": 9.281572608888562,
2140
+ "bits_per_byte": 3.2143692668645976
2141
+ },
2142
+ "gsarti/flores_101_kam+null": {
2143
+ "task_name": "gsarti/flores_101_kam",
2144
+ "prompt_name": "null",
2145
+ "word_perplexity": 4288601.196402131,
2146
+ "byte_perplexity": 11.436917146974627,
2147
+ "bits_per_byte": 3.515626316920499
2148
+ },
2149
+ "gsarti/flores_101_kan+null": {
2150
+ "task_name": "gsarti/flores_101_kan",
2151
+ "prompt_name": "null",
2152
+ "word_perplexity": 5.3861539364992216e+16,
2153
+ "byte_perplexity": 5.274956219477929,
2154
+ "bits_per_byte": 2.3991591199422513
2155
+ },
2156
+ "gsarti/flores_101_kaz+null": {
2157
+ "task_name": "gsarti/flores_101_kaz",
2158
+ "prompt_name": "null",
2159
+ "word_perplexity": 89537342.10068764,
2160
+ "byte_perplexity": 3.5945005448756477,
2161
+ "bits_per_byte": 1.845791322405974
2162
+ }
2163
+ },
2164
+ "config": {
2165
+ "adaptive_seq_len": true,
2166
+ "num_fewshot": 0,
2167
+ "bootstrap_iters": 100000
2168
+ }
2169
+ }
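For the `gsarti/flores_101_*` entries above, `bits_per_byte` is just the base-2 logarithm of `byte_perplexity` (log2(7.0494) ≈ 2.8175 for Afrikaans), so either field can be recovered from the other. Below is a minimal sketch of loading this report and checking that relation; the filename `bslmeval.json` is an assumption, and the layout is the JSON above, whose `table_results` section merges each task's metrics into a single record:

```python
import json
import math

# Assumed filename; the structure is the results report above.
with open("bslmeval.json") as f:
    report = json.load(f)

# "table_results" maps "task+prompt" keys to merged metric records.
for key, rec in report["table_results"].items():
    if "byte_perplexity" in rec:
        # bits_per_byte = log2(byte_perplexity): the two stored fields agree.
        assert math.isclose(rec["bits_per_byte"],
                            math.log2(rec["byte_perplexity"]), rel_tol=1e-6)
        print(f"{rec['task_name']}: {rec['bits_per_byte']:.2f} bits/byte")
```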
bigscience/evaluation/results/tr11/conversion/json_to_markdown.py ADDED
@@ -0,0 +1,307 @@
1
+ """
2
+ Table example:
3
+
4
+ | Task | Language | Metric | BLOOM-176B | OPT-176B |
5
+ |:--------|:-----------------|:------------------------|-------------:|------------:|
6
+ | arc_challenge | eng | acc | 0.4112627986348123 | 0.4121160409556314 |
7
+
8
+
9
+ Metadata example:
10
+
11
+ model-index:
12
+ - name: bart-large-cnn-samsum
13
+ results:
14
+ - task:
15
+ type: summarization
16
+ name: Summarization
17
+ dataset:
18
+ name: 'SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization'
19
+ type: samsum
20
+ metrics:
21
+ - name: Validation ROGUE-1
22
+ type: rogue-1
23
+ value: 42.621
24
+ - name: Validation ROGUE-2
25
+ type: rogue-2
26
+ value: 21.9825
27
+ - name: Validation ROGUE-L
28
+ type: rogue-l
29
+ value: 33.034
30
+ - name: Test ROGUE-1
31
+ type: rogue-1
32
+ value: 41.3174
33
+ - name: Test ROGUE-2
34
+ type: rogue-2
35
+ value: 20.8716
36
+ - name: Test ROGUE-L
37
+ type: rogue-l
38
+ value: 32.1337
39
+ - task:
40
+ type: summarization
41
+ name: Summarization
42
+ dataset:
43
+ name: samsum
44
+ type: samsum
45
+ config: samsum
46
+ split: test
47
+ metrics:
48
+ - name: ROUGE-1
49
+ type: rouge
50
+ value: 41.3282
51
+ verified: true
52
+ - name: ROUGE-2
53
+ type: rouge
54
+ value: 20.8755
55
+ verified: true
56
+ - name: ROUGE-L
57
+ type: rouge
58
+ value: 32.1353
59
+ verified: true
60
+ - name: ROUGE-LSUM
61
+ type: rouge
62
+ value: 38.401
63
+ verified: true
64
+ - name: loss
65
+ type: loss
66
+ value: 1.4297215938568115
67
+ verified: true
68
+ - name: gen_len
69
+ type: gen_len
70
+ value: 60.0757
71
+ verified: true
72
+ """
73
+
74
+ import json
75
+ import statistics
76
+
77
+ FILE_NAMES = ["bslmeval", "humaneval_temp02", "humaneval_temp06", "humaneval_temp08"]
78
+
79
+ # Optionally subselect tasks
80
+ SELECTED_LIST = [
81
+ "winogrande"
82
+ ]
83
+
84
+ with open("bloom2b5/bslmeval.json", "r") as f:
85
+ bloom_bslmeval = json.load(f)
86
+
87
+ with open("opt/bslmeval.json", "r") as f:
88
+ opt_bslmeval = json.load(f)
89
+
90
+
91
+
92
+ results_formatted = {}
93
+ for task_name in bloom_bslmeval["results"]:
94
+ #if task_name not in SELECTED_LIST:
95
+ # continue
96
+ date_keys = list(bloom_bslmeval["results"][task_name].keys())
97
+ assert len(date_keys) == 1
98
+ metrics = bloom_bslmeval["results"][task_name][date_keys[0]]
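+ # each task holds exactly one timestamped run (hence the assert above); date_keys[0] unwraps it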
99
+
100
+ lang = "eng"
101
+ if "gsarti/flores_101_" in task_name:
102
+ lang = task_name.replace("gsarti/flores_101_", "").replace("+null", "")
103
+ elif "lambada_mt_de" in task_name:
104
+ lang = "deu"
105
+ elif "lambada_mt_en" in task_name:
106
+ lang = "eng"
107
+ elif "lambada_mt_es" in task_name:
108
+ lang = "esp"
109
+ elif "lambada_mt_it" in task_name:
110
+ lang = "ita"
111
+ elif "lambada" == task_name:
112
+ continue
113
+ elif "crows_pairs_french" in task_name:
114
+ lang = "fra"
115
+ elif "headqa" == task_name:
116
+ lang = "esp"
117
+
118
+ if "acc" in metrics:
119
+ main_metric_name = "acc ↑"
120
+ elif "byte_perplexity" in metrics:
121
+ main_metric_name = "byte_perplexity ↓"
122
+ elif "pass@100" in metrics:
123
+ main_metric_name = "pass@100 ↑"
124
+ elif "em" in metrics:
125
+ main_metric_name = "em ↑"
126
+
127
+ date_keys_opt = list(opt_bslmeval["results"][task_name].keys())
128
+ score_opt = opt_bslmeval["results"][task_name][date_keys_opt[0]][main_metric_name[:-2]]
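+ # main_metric_name ends in " ↑" or " ↓"; [:-2] strips the space+arrow to recover the raw metric key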
129
+
130
+ fin_task_name = metrics.get("task_name", task_name)
131
+
132
+ results_formatted.setdefault(fin_task_name, {})
133
+ results_formatted[fin_task_name].setdefault("prompts", [])
134
+ results_formatted[fin_task_name].setdefault("all_metrics", [])
135
+ results_formatted[fin_task_name].setdefault("main_metrics", [])
136
+
137
+ if "prompt_name" in metrics:
138
+ results_formatted[fin_task_name]["prompts"].append(metrics["prompt_name"])
139
+ results_formatted[fin_task_name]["name"] = fin_task_name
140
+ results_formatted[fin_task_name]["lang"] = lang
141
+ results_formatted[fin_task_name]["all_metrics"].append(metrics) # [{name: score}]
142
+ results_formatted[fin_task_name]["main_metrics"].append((main_metric_name, metrics[main_metric_name[:-2]], score_opt))
143
+ results_formatted[fin_task_name]["type"] = "text-generation"
144
+
145
+ # Take Median of scores
146
+ for k, v in results_formatted.items():
147
+ if "prompts" in v and len(v["prompts"]) > 1:
148
+ assert len(v["all_metrics"]) == len(v["main_metrics"])
149
+ num_scores = len(v["main_metrics"])
150
+
151
+ bloom_median = statistics.median([triplet[1] for triplet in v["main_metrics"]])
152
+ opt_median = statistics.median([triplet[2] for triplet in v["main_metrics"]])
153
+
154
+ results_formatted[k]["main_metrics"] = [(
155
+ v["main_metrics"][0][0],
156
+ bloom_median,
157
+ opt_median,
158
+ )]
159
+
160
+ results_formatted[k]["name"] = results_formatted[k]["name"] + f" (Median of {num_scores} prompts)"
161
+
162
+
163
+
164
+ def keep_best_score(new_eval, old_eval):
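+ # element-wise max per metric: used below to keep the best pass@k across sampling temperatures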
165
+ for k, v in new_eval.items():
166
+ old_eval[k] = max(old_eval[k], v)
167
+ return old_eval
168
+
169
+ for i, temp in enumerate(["02", "06", "08"]):
170
+ with open(f"bloom/humaneval_temp{temp}.json", "r") as f:
171
+ if i > 0:
172
+ keep_best_score(json.load(f), bloom_humaneval)
173
+ else:
174
+ bloom_humaneval = json.load(f)
175
+ with open(f"opt/humaneval_temp{temp}.json", "r") as f:
176
+ if i > 0:
177
+ keep_best_score(json.load(f), opt_humaneval)
178
+ else:
179
+ opt_humaneval = json.load(f)
180
+
181
+ results_formatted["humaneval"] = {
182
+ "name": "humaneval",
183
+ "lang": "python",
184
+ "all_metrics": [bloom_humaneval], # [{name: score}]
185
+ "main_metrics": [(f"{name} ↑", score, opt_humaneval[name]) for name, score in bloom_humaneval.items()],
186
+ "type": "text-generation"
187
+ }
188
+
189
+
190
+
191
+ # Reduce any remaining multi-prompt entries to medians (same pass as above; the aggregate/multilingual averages live in the commented-out block below)
192
+ for k, v in results_formatted.items():
193
+ if "prompts" in v and len(v["prompts"]) > 1 and len(v["main_metrics"]) > 1:
194
+ assert len(v["all_metrics"]) == len(v["main_metrics"]), f"{k}, {len(v['all_metrics'])}, {len(v['main_metrics'])}"
195
+ num_scores = len(v["main_metrics"])
196
+
197
+ bloom_median = statistics.median([triplet[1] for triplet in v["main_metrics"]])
198
+ opt_median = statistics.median([triplet[2] for triplet in v["main_metrics"]])
199
+
200
+ results_formatted[k]["main_metrics"] = [(
201
+ v["main_metrics"][0][0],
202
+ bloom_median,
203
+ opt_median,
204
+ )]
205
+
206
+ results_formatted[k]["name"] = results_formatted[k]["name"] + f" (Median of {num_scores} prompts)"
207
+
208
+ """Optional aggregated statistics
209
+ bloom_mean = statistics.mean([triplet[1] for k,v in results_formatted.items() for triplet in v["main_metrics"] if v["lang"] == "eng"])
210
+ opt_mean = statistics.mean([triplet[2] for k,v in results_formatted.items() for triplet in v["main_metrics"] if v["lang"] == "eng"])
211
+
212
+ results_formatted["mean_eng"] = {
213
+ "name": "mean_eng ↑",
214
+ "lang": "eng",
215
+ "all_metrics": [{"mean": bloom_mean}], # [{name: score}]
216
+ "main_metrics": [("mean", bloom_mean, opt_mean)],
217
+ "type": "text-generation"
218
+ }
219
+
220
+ bloom_mean = statistics.mean([triplet[1] for k,v in results_formatted.items() for triplet in v["main_metrics"] if "flores" in k])
221
+ opt_mean = statistics.mean([triplet[2] for k,v in results_formatted.items() for triplet in v["main_metrics"] if "flores" in k])
222
+
223
+ results_formatted["mean_multilingual"] = {
224
+ "name": "mean_multilingual (Flores) ↓",
225
+ "lang": "mul",
226
+ "all_metrics": [{"mean": bloom_mean}], # [{name: score}]
227
+ "main_metrics": [("mean", bloom_mean, opt_mean)],
228
+ "type": "text-generation"
229
+ }
230
+
231
+ main_metrics = ([triplet for k,v in results_formatted.items() for triplet in v["main_metrics"]])
232
+
233
+ bloom_best_on, opt_best_on = 0,0
234
+ for (name, bloom, opt) in main_metrics:
235
+ if name[:-2] in ["acc", "em"] or "pass" in name:
236
+ if bloom > opt:
237
+ bloom_best_on += 1
238
+ elif bloom < opt:
239
+ opt_best_on += 1
240
+ elif name[:-2] in ["byte_perplexity"]:
241
+ if bloom < opt:
242
+ bloom_best_on += 1
243
+ elif bloom > opt:
244
+ opt_best_on += 1
245
+ """
246
+ ### Markdown Table ###
247
+
248
+ # header, separator and row template must agree on column count:
+ # five columns, matching the five values formatted into each row below
+ HEADER = "| Task | Language | Metric | BLOOM | OPT |"
249
+ SEP = "|:----|:----|:----|----:|----:|"
250
+ ONE_LINE = "| {} | {} | {} | {} | {} |"
251
+
252
+ TABLE_STRING = "\n".join([HEADER, SEP])
253
+
254
+ for task_name, res_dict in results_formatted.items():
255
+ for (name, score, score_opt) in res_dict["main_metrics"]:
256
+ TABLE_STRING += "\n" + ONE_LINE.format(
257
+ res_dict["name"],
258
+ res_dict["lang"],
259
+ name,
260
+ round(score, 3),
261
+ round(score_opt, 3),
262
+ )
263
+
264
+ with open("./mdtable.txt", "w") as f:
265
+ f.write(TABLE_STRING)
266
+
267
+
268
+
269
+ ### Metadata ###
270
+
271
+ HEADER = "model-index:"
272
+ MODEL = "- name: bloom"
273
+ RES = " results:"
274
+
275
+ META_STRING = "\n".join([HEADER, MODEL, RES])
276
+
277
+ ONE_TASK = " - task:\n type: {}\n name: {}\n dataset:\n name: {}\n type: {}\n metrics:"
278
+ ONE_METRIC = " - name: {}\n type: {}\n value: {}\n verified: false"
279
+
280
+ for task_name, res_dict in results_formatted.items():
281
+ META_STRING += "\n" + ONE_TASK.format(
282
+ res_dict["type"],
283
+ res_dict["type"].replace("-", " "),
284
+ task_name,
285
+ task_name,
286
+ )
287
+ for (name, score, score_opt) in res_dict["main_metrics"]:
288
+ META_STRING += "\n" + ONE_METRIC.format(
289
+ name.split(" ")[0],
290
+ name.split(" ")[0],
291
+ score
292
+ )
293
+ """
294
+ for metrics in res_dict["all_metrics"]:
295
+ for metric_name, metric in metrics.items():
296
+ if isinstance(metric, str):
297
+ continue
298
+ META_STRING += "\n" + ONE_METRIC.format(
299
+ metric_name,
300
+ metric_name,
301
+ metric
302
+ )
303
+ """
304
+
305
+
306
+ with open("./mdmeta.txt", "w") as f:
307
+ f.write(META_STRING)
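Run as-is (with the bloom2b5/, opt/ and bloom/ result files in place), the script writes mdtable.txt and mdmeta.txt to the working directory; the two formats follow the table and model-index examples in the module docstring.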
bigscience/evaluation/results/tr11/opt/bslmeval.json ADDED
The diff for this file is too large to render. See raw diff
 
bigscience/evaluation/results/tr11/opt/humaneval_temp02.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.0, "pass@10": 0.0, "pass@100": 0.0}
bigscience/evaluation/results/tr11/opt/humaneval_temp06.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 3.0487804878048808e-05, "pass@10": 0.0003048780487804881, "pass@100": 0.003048780487804878}
bigscience/evaluation/results/tr11/opt/humaneval_temp08.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.0, "pass@10": 0.0, "pass@100": 0.0}
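These three opt/humaneval_temp*.json files hold OPT's pass@1, pass@10 and pass@100 estimates at sampling temperatures 0.2, 0.6 and 0.8; json_to_markdown.py above merges them with keep_best_score, so the reported value for each pass@k is the best across the three temperatures.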
bigscience/evaluation/results/tr11/scripts/download_bsevalharness.py ADDED
@@ -0,0 +1,21 @@
1
+ # Downloads the specified tasks in the evaluation harness
2
+ # This is particularly useful when running in environments where the GPU nodes
3
+ # do not have internet access. This way we can pre-download them and use the cached datasets during evaluation.
4
+
5
+ from lm_eval import tasks
6
+ from lm_eval.tasks import ALL_TASKS
7
+ import argparse
8
+ import os
9
+
10
+
11
+ parser = argparse.ArgumentParser(description='Download evaluation harness', allow_abbrev=False)
12
+ parser.add_argument('--task_list', type=str, default = "all", help='Either "all" or comma separated list of tasks to download.')
13
+ args = parser.parse_args()
14
+
15
+ def main():
16
+ task_list = ALL_TASKS if args.task_list == 'all' else args.task_list.split(',')
17
+ tasks.get_task_dict_promptsource(task_list)
18
+
19
+ if __name__ == '__main__':
20
+ main()
21
+
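Typical usage, on a login node with network access, would presumably be `python download_bsevalharness.py --task_list all` or a comma-separated subset such as `--task_list wnli,boolq`; subsequent offline runs then pick up the cached datasets via `HF_DATASETS_OFFLINE=1`.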
bigscience/evaluation/results/tr11/scripts/multi_eurlex_tmp.slurm ADDED
@@ -0,0 +1,63 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=multieurlex
3
+ #SBATCH --nodes=1
4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
5
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --gres=gpu:1 # number of gpus
8
+ #SBATCH --constraint=a100
9
+ #SBATCH --reservation=hug
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ conda activate muennighofflmevalgen
18
+
19
+ echo "START TIME: $(date)"
20
+
21
+ # defining the right environment variables
22
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
23
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
24
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
25
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
26
+ export HF_DATASETS_OFFLINE=1
27
+ export TRANSFORMERS_OFFLINE=1
28
+ export TOKENIZERS_PARALLELISM=false
29
+
30
+ # Converted transformer checkpoint
31
+ #MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3/bloom-7b1
32
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6b3-ml-t0-lmtoks341b-t0toks13b-xp3capmixv2lossseq
33
+
34
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
35
+
36
+ DATASETS_AND_CONFIGS=(
37
+ multi_eurlex_mt,multi,"version-fr-en-source+target"
38
+ multi_eurlex_mt,multi,"version-en-fr-source+target"
39
+ multi_eurlex_mt,multi,"a_good_translation-fr-en-source+target"
40
+ multi_eurlex_mt,multi,"a_good_translation-en-fr-source+target"
41
+ multi_eurlex_mt,multi,"prev_doc-en-fr"
42
+ multi_eurlex_mt,multi,"prev_doc-fr-en"
43
+ )
44
+
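+ # NOTE: this script indexes DATASETS_AND_CONFIGS with SLURM_ARRAY_TASK_ID, so it is
+ # presumably submitted as a job array, e.g. `sbatch --array=0-5 multi_eurlex_tmp.slurm`.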
45
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
46
+ echo $DATASET_AND_CONFIG
47
+
48
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
49
+
50
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
51
+ python main.py \
52
+ --model_api_name 'hf-causal' \
53
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
54
+ --device cuda \
55
+ --batch_size 16 \
56
+ --no_tracking \
57
+ --task_name $dataset_name \
58
+ --template_names $template_name \
59
+ --bootstrap_iters 10 \
60
+ --num_fewshot 0 \
61
+ --limit 500
62
+
63
+ echo "END TIME: $(date)"
bigscience/evaluation/results/tr11/scripts/report-to-csv.py ADDED
@@ -0,0 +1,58 @@
1
+ #!/usr/bin/env python
2
+
3
+ # this script converts results.json:
4
+ #
5
+ # "results": {
6
+ # "arc_challenge": {
7
+ # "acc": 0.24232081911262798,
8
+ # "acc_stderr": 0.01252159329580012,
9
+ # "acc_norm": 0.2764505119453925,
10
+ # "acc_norm_stderr": 0.013069662474252425
11
+ # },
12
+ #
13
+ # into a format expected by a spreadsheet, which is:
14
+ #
15
+ # task metric value err
16
+ # arc_challenge acc xxx yyy
17
+ # arc_challenge acc_norm xxx yyy
18
+ # arc_challenge f1 xxx yyy
19
+ #
20
+ # usage:
21
+ # report-to-csv.py results.json
22
+
23
+
24
+ import sys
25
+ import json
26
+ import io
27
+ import csv
28
+
29
+ results_file = sys.argv[1]
30
+
31
+ csv_file = results_file.replace(".json", ".csv")  # swap the .json extension for .csv
32
+
33
+ print(f"Converting {results_file} to {csv_file}")
34
+
35
+ with io.open(results_file, 'r', encoding='utf-8') as f:
36
+ results = json.load(f)
37
+
38
+ with io.open(csv_file, 'w', encoding='utf-8') as f:
39
+
40
+ writer = csv.writer(f)
41
+ writer.writerow(["task", "metric", "value", "err", "version"])
42
+
43
+ versions = results["versions"]
44
+
45
+ for k,v in sorted(results["results"].items()):
46
+ if k not in versions:
47
+ versions[k] = -1
48
+
49
+ if "acc" in v:
50
+ writer.writerow([k, "acc", v["acc"], v["acc_stderr"], versions[k]])
51
+ if "acc_norm" in v:
52
+ writer.writerow([k, "acc_norm", v["acc_norm"], v["acc_norm_stderr"], versions[k]])
53
+ if "f1" in v:
54
+ writer.writerow([k, "f1", v["f1"], v["f1_stderr"] if "f1_stderr" in v else "", versions[k]])
55
+ # if "ppl" in v:
56
+ # writer.writerow([k, "ppl", v["ppl"], v["ppl_stderr"], versions[k]])
57
+ # if "em" in v:
58
+ # writer.writerow([k, "em", v["em"], v["em_stderr"] if "em_stderr" in v else "", versions[k]])
bigscience/evaluation/results/tr11/scripts/run_bsevalharness_generation_176b.slurm ADDED
@@ -0,0 +1,128 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=genbseval
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=1
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ conda activate muennighofflmevalgen
20
+
21
+ echo "START TIME: $(date)"
22
+
23
+ # defining the right environment variables
24
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
25
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
26
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
27
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
28
+ export HF_DATASETS_OFFLINE=1
29
+ export TRANSFORMERS_OFFLINE=1
30
+ export TOKENIZERS_PARALLELISM=false
31
+
32
+ # Converted transformer checkpoint
33
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/uan68tv-model-conversion/bloom
34
+
35
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
36
+
37
+
38
+ DATASETS_AND_CONFIGS=(
39
+ GEM/wiki_lingua_ar,ar,"article_summary_ar"
40
+ GEM/wiki_lingua_ar,ar,"write_abstract_ar"
41
+ GEM/wiki_lingua_ar,ar,"summarize_above_ar"
42
+ GEM/wiki_lingua_ar,ar,"rephrase_ar"
43
+ GEM/wiki_lingua_ar,ar,"tldr_ar"
44
+ GEM/wiki_lingua_en,en,"article_summary_en"
45
+ GEM/wiki_lingua_en,en,"write_abstract_en"
46
+ GEM/wiki_lingua_en,en,"summarize_above_en"
47
+ GEM/wiki_lingua_en,en,"rephrase_en"
48
+ GEM/wiki_lingua_en,en,"tldr_en"
49
+ GEM/wiki_lingua_es,es,"article_summary_es"
50
+ GEM/wiki_lingua_es,es,"write_abstract_es"
51
+ GEM/wiki_lingua_es,es,"summarize_above_es"
52
+ GEM/wiki_lingua_es,es,"rephrase_es"
53
+ GEM/wiki_lingua_es,es,"tldr_es"
54
+ GEM/wiki_lingua_fr,fr,"article_summary_fr"
55
+ GEM/wiki_lingua_fr,fr,"write_abstract_fr"
56
+ GEM/wiki_lingua_fr,fr,"summarize_above_fr"
57
+ GEM/wiki_lingua_fr,fr,"rephrase_fr"
58
+ GEM/wiki_lingua_fr,fr,"tldr_fr"
59
+ GEM/wiki_lingua_hi,hi,"article_summary_hi"
60
+ GEM/wiki_lingua_hi,hi,"write_abstract_hi"
61
+ GEM/wiki_lingua_hi,hi,"summarize_above_hi"
62
+ GEM/wiki_lingua_hi,hi,"rephrase_hi"
63
+ GEM/wiki_lingua_hi,hi,"tldr_hi"
64
+ GEM/wiki_lingua_id,id,"article_summary_id"
65
+ GEM/wiki_lingua_id,id,"write_abstract_id"
66
+ GEM/wiki_lingua_id,id,"summarize_above_id"
67
+ GEM/wiki_lingua_id,id,"rephrase_id"
68
+ GEM/wiki_lingua_id,id,"tldr_id"
69
+ GEM/wiki_lingua_pt,pt,"article_summary_pt"
70
+ GEM/wiki_lingua_pt,pt,"write_abstract_pt"
71
+ GEM/wiki_lingua_pt,pt,"summarize_above_pt"
72
+ GEM/wiki_lingua_pt,pt,"rephrase_pt"
73
+ GEM/wiki_lingua_pt,pt,"tldr_pt"
74
+ GEM/wiki_lingua_vi,vi,"article_summary_vi"
75
+ GEM/wiki_lingua_vi,vi,"write_abstract_vi"
76
+ GEM/wiki_lingua_vi,vi,"summarize_above_vi"
77
+ GEM/wiki_lingua_vi,vi,"rephrase_vi"
78
+ GEM/wiki_lingua_vi,vi,"tldr_vi"
79
+ GEM/wiki_lingua_zh,zh,"article_summary_zh"
80
+ GEM/wiki_lingua_zh,zh,"write_abstract_zh"
81
+ GEM/wiki_lingua_zh,zh,"summarize_above_zh"
82
+ GEM/wiki_lingua_zh,zh,"rephrase_zh"
83
+ GEM/wiki_lingua_zh,zh,"tldr_zh"
84
+ )
85
+
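+ # NOTE: this array is overwritten by the reassignments below; only the last
+ # DATASETS_AND_CONFIGS (the web_nlg prompts) is actually used, and the earlier
+ # arrays are kept for reference.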
86
+ DATASETS_AND_CONFIGS=(
87
+ wmt14_fr_en,fr-en,"version-en-fr-target"
88
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
89
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
90
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
91
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
92
+ wmt14_fr_en,fr-en,"version-fr-en-target"
93
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
94
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
95
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
96
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
97
+ )
98
+
99
+ DATASETS_AND_CONFIGS=(
100
+ GEM/web_nlg_en,en,"PALM_prompt"
101
+ GEM/web_nlg_en,en,"explicit-graph-description-2"
102
+ GEM/web_nlg_en,en,"implicit-graph-description"
103
+ GEM/web_nlg_en,en,"non-explicit-description"
104
+ GEM/web_nlg_en,en,"use-category"
105
+ GEM/web_nlg_ru,ru,"PALM_prompt"
106
+ GEM/web_nlg_ru,ru,"explicit-graph-description-2-Russian"
107
+ GEM/web_nlg_ru,ru,"implicit-graph-description-Russian"
108
+ GEM/web_nlg_ru,ru,"non-explicit-description-Russian"
109
+ GEM/web_nlg_ru,ru,"use-category-Russian"
110
+ )
111
+
112
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
113
+ echo $DATASET_AND_CONFIG
114
+
115
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
116
+
117
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
118
+ python main.py \
119
+ --model_api_name 'hf-causal' \
120
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=bfloat16 \
121
+ --device cuda \
122
+ --batch_size 8 \
123
+ --no_tracking \
124
+ --task_name $dataset_name \
125
+ --template_names $template_name \
126
+ --bootstrap_iters 10
127
+
128
+ echo "END TIME: $(date)"
bigscience/evaluation/results/tr11/scripts/run_bsevalharness_generation_350m.slurm ADDED
@@ -0,0 +1,110 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=run_bsevalharness-generation-350m
3
+ #SBATCH --constraint=v100-32g
4
+ #SBATCH --nodes=1
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=10 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:1 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+ set -x -e
14
+
15
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
16
+ conda activate muennighofflmevalgen
17
+
18
+ echo "START TIME: $(date)"
19
+
20
+ # defining the right environment variables
21
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
22
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
23
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
24
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
25
+ export HF_DATASETS_OFFLINE=1
26
+ export TRANSFORMERS_OFFLINE=1
27
+ export TOKENIZERS_PARALLELISM=false
28
+
29
+ # Converted transformer checkpoint
30
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/350m/bloom-350m
31
+
32
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
33
+
34
+ # WMT19 ZH-EN does not work
35
+ DATASETS_AND_CONFIGS=(
36
+ GEM/wiki_lingua_ar,ar,"article_summary_ar"
37
+ GEM/wiki_lingua_ar,ar,"write_abstract_ar"
38
+ GEM/wiki_lingua_ar,ar,"summarize_above_ar"
39
+ GEM/wiki_lingua_ar,ar,"rephrase_ar"
40
+ GEM/wiki_lingua_ar,ar,"tldr_ar"
41
+ GEM/wiki_lingua_en,en,"article_summary_en"
42
+ GEM/wiki_lingua_en,en,"write_abstract_en"
43
+ GEM/wiki_lingua_en,en,"summarize_above_en"
44
+ GEM/wiki_lingua_en,en,"rephrase_en"
45
+ GEM/wiki_lingua_en,en,"tldr_en"
46
+ GEM/wiki_lingua_es,es,"article_summary_es"
47
+ GEM/wiki_lingua_es,es,"write_abstract_es"
48
+ GEM/wiki_lingua_es,es,"summarize_above_es"
49
+ GEM/wiki_lingua_es,es,"rephrase_es"
50
+ GEM/wiki_lingua_es,es,"tldr_es"
51
+ GEM/wiki_lingua_fr,fr,"article_summary_fr"
52
+ GEM/wiki_lingua_fr,fr,"write_abstract_fr"
53
+ GEM/wiki_lingua_fr,fr,"summarize_above_fr"
54
+ GEM/wiki_lingua_fr,fr,"rephrase_fr"
55
+ GEM/wiki_lingua_fr,fr,"tldr_fr"
56
+ GEM/wiki_lingua_hi,hi,"article_summary_hi"
57
+ GEM/wiki_lingua_hi,hi,"write_abstract_hi"
58
+ GEM/wiki_lingua_hi,hi,"summarize_above_hi"
59
+ GEM/wiki_lingua_hi,hi,"rephrase_hi"
60
+ GEM/wiki_lingua_hi,hi,"tldr_hi"
61
+ GEM/wiki_lingua_id,id,"article_summary_id"
62
+ GEM/wiki_lingua_id,id,"write_abstract_id"
63
+ GEM/wiki_lingua_id,id,"summarize_above_id"
64
+ GEM/wiki_lingua_id,id,"rephrase_id"
65
+ GEM/wiki_lingua_id,id,"tldr_id"
66
+ GEM/wiki_lingua_pt,pt,"article_summary_pt"
67
+ GEM/wiki_lingua_pt,pt,"write_abstract_pt"
68
+ GEM/wiki_lingua_pt,pt,"summarize_above_pt"
69
+ GEM/wiki_lingua_pt,pt,"rephrase_pt"
70
+ GEM/wiki_lingua_pt,pt,"tldr_pt"
71
+ GEM/wiki_lingua_vi,vi,"article_summary_vi"
72
+ GEM/wiki_lingua_vi,vi,"write_abstract_vi"
73
+ GEM/wiki_lingua_vi,vi,"summarize_above_vi"
74
+ GEM/wiki_lingua_vi,vi,"rephrase_vi"
75
+ GEM/wiki_lingua_vi,vi,"tldr_vi"
76
+ GEM/wiki_lingua_zh,zh,"article_summary_zh"
77
+ GEM/wiki_lingua_zh,zh,"write_abstract_zh"
78
+ GEM/wiki_lingua_zh,zh,"summarize_above_zh"
79
+ GEM/wiki_lingua_zh,zh,"rephrase_zh"
80
+ GEM/wiki_lingua_zh,zh,"tldr_zh"
81
+ )
82
+
83
+ #GEM/wiki_lingua_ar,ar,"article_summary_ar"
84
+ #GEM/wiki_lingua_ar,ar,"write_abstract_ar"
85
+ #GEM/wiki_lingua_ar,ar,"summarize_above_ar"
86
+ #GEM/wiki_lingua_ar,ar,"rephrase_ar"
87
+ #GEM/wiki_lingua_ar,ar,"tldr_ar"
88
+ #GEM/wiki_lingua_zh,zh,"article_summary_zh"
89
+ #GEM/wiki_lingua_zh,zh,"write_abstract_zh"
90
+ #GEM/wiki_lingua_zh,zh,"summarize_above_zh"
91
+ #GEM/wiki_lingua_zh,zh,"rephrase_zh"
92
+ #GEM/wiki_lingua_zh,zh,"tldr_zh"
93
+
94
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
95
+ echo $DATASET_AND_CONFIG
96
+
97
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
98
+
99
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
100
+ python main.py \
101
+ --model_api_name 'hf-causal' \
102
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
103
+ --device cuda \
104
+ --batch_size 16 \
105
+ --no_tracking \
106
+ --task_name $dataset_name \
107
+ --template_names $template_name \
108
+ --bootstrap_iters 10
109
+
110
+ echo "END TIME: $(date)"
bigscience/evaluation/results/tr11/scripts/run_bsevalharness_generation_6b3.slurm ADDED
@@ -0,0 +1,101 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=evaluate_t0
3
+ #SBATCH --nodes=1
4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
5
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --gres=gpu:1 # number of gpus
8
+ #SBATCH --constraint=a100
9
+ #SBATCH --reservation=hug
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ conda activate muennighofflmevalgen
18
+
19
+ echo "START TIME: $(date)"
20
+
21
+ # defining the right environment variables
22
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
23
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
24
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
25
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
26
+ export HF_DATASETS_OFFLINE=1
27
+ export TRANSFORMERS_OFFLINE=1
28
+ export TOKENIZERS_PARALLELISM=false
29
+
30
+ # Converted transformer checkpoint
31
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3/bloom-7b1
32
+
33
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
34
+
35
+ # WMT19 ZH-EN does not work
36
+ DATASETS_AND_CONFIGS=(
37
+ GEM/wiki_lingua_en,en,"article_summary_en"
38
+ GEM/wiki_lingua_en,en,"write_abstract_en"
39
+ GEM/wiki_lingua_en,en,"summarize_above_en"
40
+ GEM/wiki_lingua_en,en,"rephrase_en"
41
+ GEM/wiki_lingua_en,en,"tldr_en"
42
+ GEM/wiki_lingua_es,es,"article_summary_es"
43
+ GEM/wiki_lingua_es,es,"write_abstract_es"
44
+ GEM/wiki_lingua_es,es,"summarize_above_es"
45
+ GEM/wiki_lingua_es,es,"rephrase_es"
46
+ GEM/wiki_lingua_es,es,"tldr_es"
47
+ GEM/wiki_lingua_fr,fr,"article_summary_fr"
48
+ GEM/wiki_lingua_fr,fr,"write_abstract_fr"
49
+ GEM/wiki_lingua_fr,fr,"summarize_above_fr"
50
+ GEM/wiki_lingua_fr,fr,"rephrase_fr"
51
+ GEM/wiki_lingua_fr,fr,"tldr_fr"
52
+ GEM/wiki_lingua_hi,hi,"article_summary_hi"
53
+ GEM/wiki_lingua_hi,hi,"write_abstract_hi"
54
+ GEM/wiki_lingua_hi,hi,"summarize_above_hi"
55
+ GEM/wiki_lingua_hi,hi,"rephrase_hi"
56
+ GEM/wiki_lingua_hi,hi,"tldr_hi"
57
+ GEM/wiki_lingua_id,id,"article_summary_id"
58
+ GEM/wiki_lingua_id,id,"write_abstract_id"
59
+ GEM/wiki_lingua_id,id,"summarize_above_id"
60
+ GEM/wiki_lingua_id,id,"rephrase_id"
61
+ GEM/wiki_lingua_id,id,"tldr_id"
62
+ GEM/wiki_lingua_pt,pt,"article_summary_pt"
63
+ GEM/wiki_lingua_pt,pt,"write_abstract_pt"
64
+ GEM/wiki_lingua_pt,pt,"summarize_above_pt"
65
+ GEM/wiki_lingua_pt,pt,"rephrase_pt"
66
+ GEM/wiki_lingua_pt,pt,"tldr_pt"
67
+ GEM/wiki_lingua_vi,vi,"article_summary_vi"
68
+ GEM/wiki_lingua_vi,vi,"write_abstract_vi"
69
+ GEM/wiki_lingua_vi,vi,"summarize_above_vi"
70
+ GEM/wiki_lingua_vi,vi,"rephrase_vi"
71
+ GEM/wiki_lingua_vi,vi,"tldr_vi"
72
+ )
73
+
74
+ #GEM/wiki_lingua_ar,ar,"article_summary_ar"
75
+ #GEM/wiki_lingua_ar,ar,"write_abstract_ar"
76
+ #GEM/wiki_lingua_ar,ar,"summarize_above_ar"
77
+ #GEM/wiki_lingua_ar,ar,"rephrase_ar"
78
+ #GEM/wiki_lingua_ar,ar,"tldr_ar"
79
+ #GEM/wiki_lingua_zh,zh,"article_summary_zh"
80
+ #GEM/wiki_lingua_zh,zh,"write_abstract_zh"
81
+ #GEM/wiki_lingua_zh,zh,"summarize_above_zh"
82
+ #GEM/wiki_lingua_zh,zh,"rephrase_zh"
83
+ #GEM/wiki_lingua_zh,zh,"tldr_zh"
84
+
85
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
86
+ echo $DATASET_AND_CONFIG
87
+
88
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
89
+
90
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
91
+ python main.py \
92
+ --model_api_name 'hf-causal' \
93
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
94
+ --device cuda \
95
+ --batch_size 16 \
96
+ --no_tracking \
97
+ --task_name $dataset_name \
98
+ --template_names $template_name \
99
+ --bootstrap_iters 10
100
+
101
+ echo "END TIME: $(date)"
bigscience/evaluation/results/tr11/scripts/run_bsevalharness_generation_760m.slurm ADDED
@@ -0,0 +1,110 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=run_bsevalharness-generation-760m
3
+ #SBATCH --constraint=v100-32g
4
+ #SBATCH --nodes=1
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=10 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:1 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+ set -x -e
14
+
15
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
16
+ conda activate muennighofflmevalgen
17
+
18
+ echo "START TIME: $(date)"
19
+
20
+ # defining the right environment variables
21
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
22
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
23
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
24
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
25
+ export HF_DATASETS_OFFLINE=1
26
+ export TRANSFORMERS_OFFLINE=1
27
+ export TOKENIZERS_PARALLELISM=false
28
+
29
+ # Converted transformer checkpoint
30
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/760m/bloom-760m
31
+
32
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
33
+
34
+ # WMT19 ZH-EN does not work
35
+ DATASETS_AND_CONFIGS=(
36
+ GEM/wiki_lingua_ar,ar,"article_summary_ar"
37
+ GEM/wiki_lingua_ar,ar,"write_abstract_ar"
38
+ GEM/wiki_lingua_ar,ar,"summarize_above_ar"
39
+ GEM/wiki_lingua_ar,ar,"rephrase_ar"
40
+ GEM/wiki_lingua_ar,ar,"tldr_ar"
41
+ GEM/wiki_lingua_en,en,"article_summary_en"
42
+ GEM/wiki_lingua_en,en,"write_abstract_en"
43
+ GEM/wiki_lingua_en,en,"summarize_above_en"
44
+ GEM/wiki_lingua_en,en,"rephrase_en"
45
+ GEM/wiki_lingua_en,en,"tldr_en"
46
+ GEM/wiki_lingua_es,es,"article_summary_es"
47
+ GEM/wiki_lingua_es,es,"write_abstract_es"
48
+ GEM/wiki_lingua_es,es,"summarize_above_es"
49
+ GEM/wiki_lingua_es,es,"rephrase_es"
50
+ GEM/wiki_lingua_es,es,"tldr_es"
51
+ GEM/wiki_lingua_fr,fr,"article_summary_fr"
52
+ GEM/wiki_lingua_fr,fr,"write_abstract_fr"
53
+ GEM/wiki_lingua_fr,fr,"summarize_above_fr"
54
+ GEM/wiki_lingua_fr,fr,"rephrase_fr"
55
+ GEM/wiki_lingua_fr,fr,"tldr_fr"
56
+ GEM/wiki_lingua_hi,hi,"article_summary_hi"
57
+ GEM/wiki_lingua_hi,hi,"write_abstract_hi"
58
+ GEM/wiki_lingua_hi,hi,"summarize_above_hi"
59
+ GEM/wiki_lingua_hi,hi,"rephrase_hi"
60
+ GEM/wiki_lingua_hi,hi,"tldr_hi"
61
+ GEM/wiki_lingua_id,id,"article_summary_id"
62
+ GEM/wiki_lingua_id,id,"write_abstract_id"
63
+ GEM/wiki_lingua_id,id,"summarize_above_id"
64
+ GEM/wiki_lingua_id,id,"rephrase_id"
65
+ GEM/wiki_lingua_id,id,"tldr_id"
66
+ GEM/wiki_lingua_pt,pt,"article_summary_pt"
67
+ GEM/wiki_lingua_pt,pt,"write_abstract_pt"
68
+ GEM/wiki_lingua_pt,pt,"summarize_above_pt"
69
+ GEM/wiki_lingua_pt,pt,"rephrase_pt"
70
+ GEM/wiki_lingua_pt,pt,"tldr_pt"
71
+ GEM/wiki_lingua_vi,vi,"article_summary_vi"
72
+ GEM/wiki_lingua_vi,vi,"write_abstract_vi"
73
+ GEM/wiki_lingua_vi,vi,"summarize_above_vi"
74
+ GEM/wiki_lingua_vi,vi,"rephrase_vi"
75
+ GEM/wiki_lingua_vi,vi,"tldr_vi"
76
+ GEM/wiki_lingua_zh,zh,"article_summary_zh"
77
+ GEM/wiki_lingua_zh,zh,"write_abstract_zh"
78
+ GEM/wiki_lingua_zh,zh,"summarize_above_zh"
79
+ GEM/wiki_lingua_zh,zh,"rephrase_zh"
80
+ GEM/wiki_lingua_zh,zh,"tldr_zh"
81
+ )
82
+
83
+ #GEM/wiki_lingua_ar,ar,"article_summary_ar"
84
+ #GEM/wiki_lingua_ar,ar,"write_abstract_ar"
85
+ #GEM/wiki_lingua_ar,ar,"summarize_above_ar"
86
+ #GEM/wiki_lingua_ar,ar,"rephrase_ar"
87
+ #GEM/wiki_lingua_ar,ar,"tldr_ar"
88
+ #GEM/wiki_lingua_zh,zh,"article_summary_zh"
89
+ #GEM/wiki_lingua_zh,zh,"write_abstract_zh"
90
+ #GEM/wiki_lingua_zh,zh,"summarize_above_zh"
91
+ #GEM/wiki_lingua_zh,zh,"rephrase_zh"
92
+ #GEM/wiki_lingua_zh,zh,"tldr_zh"
93
+
94
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
95
+ echo $DATASET_AND_CONFIG
96
+
97
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
98
+
99
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
100
+ python main.py \
101
+ --model_api_name 'hf-causal' \
102
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
103
+ --device cuda \
104
+ --batch_size 16 \
105
+ --no_tracking \
106
+ --task_name $dataset_name \
107
+ --template_names $template_name \
108
+ --bootstrap_iters 10
109
+
110
+ echo "END TIME: $(date)"
bigscience/evaluation/results/tr11/scripts/run_bsevalharness_tr11-176b-ml.slurm ADDED
@@ -0,0 +1,122 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=run_bsevalharness-tr11-176b-ml
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --nodes=1
6
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
7
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
8
+ #SBATCH --hint=nomultithread # we get physical cores not logical
9
+ #SBATCH --gres=gpu:8 # number of gpus
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+ #SBATCH --reservation=hug
14
+
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-muennighofflmeval
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ # a unique identifier for the current eval, ideally corresponding to the model name
23
+ VARIANT="tr11-176b-ml-bsevalharness"
24
+
25
+
26
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step90000
27
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
28
+ export HF_DATASETS_OFFLINE=1
29
+ export TRANSFORMERS_OFFLINE=1
30
+
31
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
32
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
33
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
34
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
35
+
36
+ cd $MEGATRON_DEEPSPEED_REPO
37
+
38
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
39
+
40
+ PP_SIZE=8
41
+ TP_SIZE=1
42
+ SEQ_LEN=2048
43
+
44
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
45
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
46
+ EVAL_MICRO_BATCH_SIZE=1
47
+
48
+ #dummy arguments to make megatron happy.
49
+ MEGATRON_REQUIRED_ARGS=" \
50
+ --num-layers -1 \
51
+ --hidden-size -1 \
52
+ --num-attention-heads -1 \
53
+ --seq-length -1 \
54
+ --max-position-embeddings -1 \
55
+ "
56
+
57
+
58
+ ZERO_STAGE=0
59
+
60
+ config_json="./ds_config.json"
61
+
62
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
63
+ cat <<EOT > $config_json
64
+ {
65
+ "train_micro_batch_size_per_gpu": 1,
66
+ "train_batch_size": 1,
67
+ "gradient_clipping": 1.0,
68
+ "zero_optimization": {
69
+ "stage": $ZERO_STAGE
70
+ },
71
+ "bf16": {
72
+ "enabled": true
73
+ },
74
+ "steps_per_print": 2000,
75
+ "wall_clock_breakdown": false
76
+ }
77
+ EOT
78
+
79
+
80
+ CMD="./tasks/eval_harness/evaluate_bsevalharness.py \
81
+ --load $CHECKPOINT_PATH \
82
+ --results_path $VARIANT-results.json \
83
+ --tensor-model-parallel-size $TP_SIZE \
84
+ --pipeline-model-parallel-size $PP_SIZE \
85
+ --tokenizer-type PretrainedFromHF \
86
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
87
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
88
+ --no-load-optim \
89
+ --no-load-rng \
90
+ --bf16 \
91
+ --inference \
92
+ --seq-length $SEQ_LEN \
93
+ --task_list wnli \
94
+ --deepspeed \
95
+ --deepspeed_config ds_config.json \
96
+ --intermed_results \
97
+ --adaptive_seq_len \
98
+ --micro_bs_multiplier 16 \
99
+ --offloadearly \
100
+ $MEGATRON_REQUIRED_ARGS \
101
+ "
102
+
103
+ GPUS_PER_NODE=8
104
+ NNODES=$SLURM_NNODES
105
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
106
+ MASTER_PORT=6000
107
+ export LAUNCHER="python -u -m torch.distributed.run \
108
+ --nproc_per_node $GPUS_PER_NODE \
109
+ --nnodes $NNODES \
110
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
111
+ --rdzv_backend c10d \
112
+ --max_restarts 0 \
113
+ --tee 3 \
114
+ "
115
+
116
+ export CUDA_LAUNCH_BLOCKING=1
117
+
118
+ echo $LAUNCHER $CMD
119
+
120
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
121
+
122
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
bigscience/evaluation/results/tr11/scripts/run_bsevalharness_tr11b-1b3-ml.slurm ADDED
@@ -0,0 +1,122 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=run_bsevalharness-tr11b-1b3-ml
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --nodes=1
6
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
7
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
8
+ #SBATCH --hint=nomultithread # we get physical cores not logical
9
+ #SBATCH --gres=gpu:1 # number of gpus
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+ #SBATCH --reservation=hug
14
+
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-muennighofflmeval
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ # a unique identifier for the current eval, ideally corresponding to the model name
23
+ VARIANT="tr11b-1b3-ml-bsevalharness"
24
+
25
+
26
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11b-1B3-ml/checkpoints/main/global_step340500
27
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
28
+ export HF_DATASETS_OFFLINE=1
29
+ export TRANSFORMERS_OFFLINE=1
30
+
31
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
32
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasetseval
33
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
34
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
35
+ export TOKENIZERS_PARALLELISM=false
36
+
37
+ cd $MEGATRON_DEEPSPEED_REPO
38
+
39
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
40
+
41
+ PP_SIZE=1
42
+ TP_SIZE=1
43
+ SEQ_LEN=2048
44
+
45
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
46
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
47
+ EVAL_MICRO_BATCH_SIZE=1
48
+
49
+ #dummy arguments to make megatron happy.
50
+ MEGATRON_REQUIRED_ARGS=" \
51
+ --num-layers -1 \
52
+ --hidden-size -1 \
53
+ --num-attention-heads -1 \
54
+ --seq-length -1 \
55
+ --max-position-embeddings -1 \
56
+ "
57
+
58
+
59
+ ZERO_STAGE=0
60
+
61
+ config_json="./ds_config.json"
62
+
63
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
64
+ cat <<EOT > $config_json
65
+ {
66
+ "train_micro_batch_size_per_gpu": 1,
67
+ "train_batch_size": 1,
68
+ "gradient_clipping": 1.0,
69
+ "zero_optimization": {
70
+ "stage": $ZERO_STAGE
71
+ },
72
+ "bf16": {
73
+ "enabled": false
74
+ },
75
+ "steps_per_print": 2000,
76
+ "wall_clock_breakdown": false
77
+ }
78
+ EOT
79
+
80
+
81
+ CMD="./tasks/eval_harness/evaluate_bsevalharness.py \
82
+ --load $CHECKPOINT_PATH \
83
+ --results_path $VARIANT-results.json \
84
+ --tensor-model-parallel-size $TP_SIZE \
85
+ --pipeline-model-parallel-size $PP_SIZE \
86
+ --tokenizer-type PretrainedFromHF \
87
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
88
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
89
+ --no-load-optim \
90
+ --no-load-rng \
91
+ --inference \
92
+ --seq-length $SEQ_LEN \
93
+ --task_list axb,axg,boolq,cb,cola,copa,crows_pairs_english,crows_pairs_french,diabla,e2e_nlg_cleaned,mnli,mnli_mismatched,multirc,piaf,qqp,rte,sst,tydiqa_primary,tydiqa_secondary,wic,wsc,wnli,wino_bias_type1_anti,wino_bias_type1_pro,wino_bias_type2_anti,wino_bias_type2_pro,xquad_ar,xquad_en,gsarti/flores_101_afr,gsarti/flores_101_amh,gsarti/flores_101_ara,gsarti/flores_101_hye,gsarti/flores_101_asm,gsarti/flores_101_ast,gsarti/flores_101_azj,gsarti/flores_101_bel,gsarti/flores_101_ben,gsarti/flores_101_bos,gsarti/flores_101_bul,gsarti/flores_101_mya,gsarti/flores_101_cat,gsarti/flores_101_ceb,gsarti/flores_101_zho_simpl,gsarti/flores_101_zho_trad,gsarti/flores_101_hrv,gsarti/flores_101_ces,gsarti/flores_101_dan,gsarti/flores_101_nld,gsarti/flores_101_eng,gsarti/flores_101_est,gsarti/flores_101_tgl,gsarti/flores_101_fin,gsarti/flores_101_fra,gsarti/flores_101_ful,gsarti/flores_101_glg,gsarti/flores_101_lug,gsarti/flores_101_kat,gsarti/flores_101_deu,gsarti/flores_101_ell,gsarti/flores_101_guj,gsarti/flores_101_hau,gsarti/flores_101_heb,gsarti/flores_101_hin,gsarti/flores_101_hun,gsarti/flores_101_isl,gsarti/flores_101_ibo,gsarti/flores_101_ind,gsarti/flores_101_gle,gsarti/flores_101_ita,gsarti/flores_101_jpn,gsarti/flores_101_jav,gsarti/flores_101_kea,gsarti/flores_101_kam,gsarti/flores_101_kan,gsarti/flores_101_kaz,gsarti/flores_101_khm,gsarti/flores_101_kor,gsarti/flores_101_kir,gsarti/flores_101_lao,gsarti/flores_101_lav,gsarti/flores_101_lin,gsarti/flores_101_lit,gsarti/flores_101_luo,gsarti/flores_101_ltz,gsarti/flores_101_mkd,gsarti/flores_101_msa,gsarti/flores_101_mal,gsarti/flores_101_mlt,gsarti/flores_101_mri,gsarti/flores_101_mar,gsarti/flores_101_mon,gsarti/flores_101_npi,gsarti/flores_101_nso,gsarti/flores_101_nob,gsarti/flores_101_nya,gsarti/flores_101_oci,gsarti/flores_101_ory,gsarti/flores_101_orm,gsarti/flores_101_pus,gsarti/flores_101_fas,gsarti/flores_101_pol,gsarti/flores_101_por,gsarti/flores_101_pan,gsarti/flores_101_ron,gsarti/flores_101_rus,gsarti/flores_101_srp,gsarti/flores_101_sna,gsarti/flores_101_snd,gsarti/flores_101_slk,gsarti/flores_101_slv,gsarti/flores_101_som,gsarti/flores_101_ckb,gsarti/flores_101_spa,gsarti/flores_101_swh,gsarti/flores_101_swe,gsarti/flores_101_tgk,gsarti/flores_101_tam,gsarti/flores_101_tel,gsarti/flores_101_tha,gsarti/flores_101_tur,gsarti/flores_101_ukr,gsarti/flores_101_umb,gsarti/flores_101_urd,gsarti/flores_101_uzb,gsarti/flores_101_vie,gsarti/flores_101_cym,gsarti/flores_101_wol,gsarti/flores_101_xho,gsarti/flores_101_yor,gsarti/flores_101_zul \
94
+ --eval_fp32 \
95
+ --deepspeed \
96
+ --deepspeed_config ds_config.json \
97
+ --intermed_results \
98
+ --adaptive_seq_len \
99
+ --micro_bs_multiplier 8 \
100
+ $MEGATRON_REQUIRED_ARGS \
101
+ "
102
+
103
+ GPUS_PER_NODE=1
104
+ NNODES=$SLURM_NNODES
105
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
106
+ MASTER_PORT=6000
107
+ export LAUNCHER="python -u -m torch.distributed.run \
108
+ --nproc_per_node $GPUS_PER_NODE \
109
+ --nnodes $NNODES \
110
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
111
+ --rdzv_backend c10d \
112
+ --max_restarts 0 \
113
+ --tee 3 \
114
+ "
115
+
116
+ export CUDA_LAUNCH_BLOCKING=1
117
+
118
+ echo $LAUNCHER $CMD
119
+
120
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
121
+
122
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
bigscience/evaluation/results/tr11/scripts/run_bsevalharness_tr11d-750m-ml.slurm ADDED
@@ -0,0 +1,120 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=run_bsevalharness-tr11d-760m-ml
3
+ #SBATCH --constraint=v100-32g
4
+ #SBATCH --nodes=1
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=10 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:1 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-muennighofflmeval
17
+
18
+ echo "START TIME: $(date)"
19
+
20
+ # a unique identifier for the current eval, ideally corresponding to the model name
21
+ VARIANT="tr11d-760m-ml-bsevalharness"
22
+
23
+
24
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11d-760M-ml/checkpoints/main/global_step660750
25
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bslmeval/Megatron-DeepSpeed
26
+ export HF_DATASETS_OFFLINE=1
27
+ export TRANSFORMERS_OFFLINE=1
28
+
29
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
30
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
31
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
32
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
33
+ export TOKENIZERS_PARALLELISM=false
34
+
35
+ cd $MEGATRON_DEEPSPEED_REPO
36
+
37
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
38
+
39
+ PP_SIZE=1
40
+ TP_SIZE=1
41
+ SEQ_LEN=2048
42
+
43
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
44
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
45
+ EVAL_MICRO_BATCH_SIZE=1
46
+
47
+ #dummy arguments to make megatron happy.
48
+ MEGATRON_REQUIRED_ARGS=" \
49
+ --num-layers -1 \
50
+ --hidden-size -1 \
51
+ --num-attention-heads -1 \
52
+ --seq-length -1 \
53
+ --max-position-embeddings -1 \
54
+ "
55
+
56
+
57
+ ZERO_STAGE=0
58
+
59
+ config_json="./ds_config.json"
60
+
61
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
62
+ cat <<EOT > $config_json
63
+ {
64
+ "train_micro_batch_size_per_gpu": 1,
65
+ "train_batch_size": 1,
66
+ "gradient_clipping": 1.0,
67
+ "zero_optimization": {
68
+ "stage": $ZERO_STAGE
69
+ },
70
+ "bf16": {
71
+ "enabled": false
72
+ },
73
+ "steps_per_print": 2000,
74
+ "wall_clock_breakdown": false
75
+ }
76
+ EOT
77
+
78
+
79
+ CMD="./tasks/eval_harness/evaluate_bsevalharness.py \
80
+ --load $CHECKPOINT_PATH \
81
+ --results_path $VARIANT-results.json \
82
+ --tensor-model-parallel-size $TP_SIZE \
83
+ --pipeline-model-parallel-size $PP_SIZE \
84
+ --tokenizer-type PretrainedFromHF \
85
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
86
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
87
+ --no-load-optim \
88
+ --no-load-rng \
89
+ --inference \
90
+ --seq-length $SEQ_LEN \
91
+ --task_list axb,axg,boolq,cb,cola,copa,crows_pairs_english,crows_pairs_french,diabla,e2e_nlg_cleaned,mnli,mnli_mismatched,multirc,piaf,qqp,rte,sst,tydiqa_primary,tydiqa_secondary,wic,wsc,wnli,wino_bias_type1_anti,wino_bias_type1_pro,wino_bias_type2_anti,wino_bias_type2_pro,xquad_ar,xquad_en,gsarti/flores_101_afr,gsarti/flores_101_amh,gsarti/flores_101_ara,gsarti/flores_101_hye,gsarti/flores_101_asm,gsarti/flores_101_ast,gsarti/flores_101_azj,gsarti/flores_101_bel,gsarti/flores_101_ben,gsarti/flores_101_bos,gsarti/flores_101_bul,gsarti/flores_101_mya,gsarti/flores_101_cat,gsarti/flores_101_ceb,gsarti/flores_101_zho_simpl,gsarti/flores_101_zho_trad,gsarti/flores_101_hrv,gsarti/flores_101_ces,gsarti/flores_101_dan,gsarti/flores_101_nld,gsarti/flores_101_eng,gsarti/flores_101_est,gsarti/flores_101_tgl,gsarti/flores_101_fin,gsarti/flores_101_fra,gsarti/flores_101_ful,gsarti/flores_101_glg,gsarti/flores_101_lug,gsarti/flores_101_kat,gsarti/flores_101_deu,gsarti/flores_101_ell,gsarti/flores_101_guj,gsarti/flores_101_hau,gsarti/flores_101_heb,gsarti/flores_101_hin,gsarti/flores_101_hun,gsarti/flores_101_isl,gsarti/flores_101_ibo,gsarti/flores_101_ind,gsarti/flores_101_gle,gsarti/flores_101_ita,gsarti/flores_101_jpn,gsarti/flores_101_jav,gsarti/flores_101_kea,gsarti/flores_101_kam,gsarti/flores_101_kan,gsarti/flores_101_kaz,gsarti/flores_101_khm,gsarti/flores_101_kor,gsarti/flores_101_kir,gsarti/flores_101_lao,gsarti/flores_101_lav,gsarti/flores_101_lin,gsarti/flores_101_lit,gsarti/flores_101_luo,gsarti/flores_101_ltz,gsarti/flores_101_mkd,gsarti/flores_101_msa,gsarti/flores_101_mal,gsarti/flores_101_mlt,gsarti/flores_101_mri,gsarti/flores_101_mar,gsarti/flores_101_mon,gsarti/flores_101_npi,gsarti/flores_101_nso,gsarti/flores_101_nob,gsarti/flores_101_nya,gsarti/flores_101_oci,gsarti/flores_101_ory,gsarti/flores_101_orm,gsarti/flores_101_pus,gsarti/flores_101_fas,gsarti/flores_101_pol,gsarti/flores_101_por,gsarti/flores_101_pan,gsarti/flores_101_ron,gsarti/flores_101_rus,gsarti/flores_101_srp,gsarti/flores_101_sna,gsarti/flores_101_snd,gsarti/flores_101_slk,gsarti/flores_101_slv,gsarti/flores_101_som,gsarti/flores_101_ckb,gsarti/flores_101_spa,gsarti/flores_101_swh,gsarti/flores_101_swe,gsarti/flores_101_tgk,gsarti/flores_101_tam,gsarti/flores_101_tel,gsarti/flores_101_tha,gsarti/flores_101_tur,gsarti/flores_101_ukr,gsarti/flores_101_umb,gsarti/flores_101_urd,gsarti/flores_101_uzb,gsarti/flores_101_vie,gsarti/flores_101_cym,gsarti/flores_101_wol,gsarti/flores_101_xho,gsarti/flores_101_yor,gsarti/flores_101_zul \
92
+ --eval_fp32 \
93
+ --deepspeed \
94
+ --deepspeed_config ds_config.json \
95
+ --intermed_results \
96
+ --adaptive_seq_len \
97
+ --micro_bs_multiplier 4 \
98
+ $MEGATRON_REQUIRED_ARGS \
99
+ "
100
+
101
+ GPUS_PER_NODE=1
102
+ NNODES=$SLURM_NNODES
103
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
104
+ MASTER_PORT=6002
105
+ export LAUNCHER="python -u -m torch.distributed.run \
106
+ --nproc_per_node $GPUS_PER_NODE \
107
+ --nnodes $NNODES \
108
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
109
+ --rdzv_backend c10d \
110
+ --max_restarts 0 \
111
+ --tee 3 \
112
+ "
113
+
114
+ export CUDA_LAUNCH_BLOCKING=1
115
+
116
+ echo $LAUNCHER $CMD
117
+
118
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
119
+
120
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
bigscience/evaluation/results/tr11/scripts/run_bsevalharness_tr11f-6b3-ml.slurm ADDED
@@ -0,0 +1,121 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=run_bsevalharness-tr11f-6b3-ml
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --nodes=1
6
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
7
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
8
+ #SBATCH --hint=nomultithread # we get physical cores not logical
9
+ #SBATCH --gres=gpu:1 # number of gpus
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+ #SBATCH --reservation=hug
14
+
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-muennighofflmeval
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ # a unique identifier for the current eval, ideally corresponding to the model name
23
+ VARIANT="tr11f-6b3-ml-bsevalharness"
24
+
25
+
26
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11f-6B3-ml/checkpoints/main/global_step337500
27
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bslmeval/Megatron-DeepSpeed
28
+ export HF_DATASETS_OFFLINE=1
29
+ export TRANSFORMERS_OFFLINE=1
30
+
31
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
32
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasetseval
33
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
34
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
35
+ export TOKENIZERS_PARALLELISM=false
36
+
37
+ cd $MEGATRON_DEEPSPEED_REPO
38
+
39
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
40
+
41
+ PP_SIZE=1
42
+ TP_SIZE=1
43
+ SEQ_LEN=2048
44
+
45
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
46
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
47
+ EVAL_MICRO_BATCH_SIZE=1
48
+
49
+ #dummy arguments to make megatron happy.
50
+ MEGATRON_REQUIRED_ARGS=" \
51
+ --num-layers -1 \
52
+ --hidden-size -1 \
53
+ --num-attention-heads -1 \
54
+ --seq-length -1 \
55
+ --max-position-embeddings -1 \
56
+ "
57
+
58
+
59
+ ZERO_STAGE=0
60
+
61
+ config_json="./ds_config.json"
62
+
63
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
64
+ cat <<EOT > $config_json
65
+ {
66
+ "train_micro_batch_size_per_gpu": 1,
67
+ "train_batch_size": 1,
68
+ "gradient_clipping": 1.0,
69
+ "zero_optimization": {
70
+ "stage": $ZERO_STAGE
71
+ },
72
+ "bf16": {
73
+ "enabled": false
74
+ },
75
+ "steps_per_print": 2000,
76
+ "wall_clock_breakdown": false
77
+ }
78
+ EOT
79
+
80
+ CMD="./tasks/eval_harness/evaluate_bsevalharness.py \
81
+ --load $CHECKPOINT_PATH \
82
+ --results_path $VARIANT-results.json \
83
+ --tensor-model-parallel-size $TP_SIZE \
84
+ --pipeline-model-parallel-size $PP_SIZE \
85
+ --tokenizer-type PretrainedFromHF \
86
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
87
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
88
+ --no-load-optim \
89
+ --no-load-rng \
90
+ --inference \
91
+ --seq-length $SEQ_LEN \
92
+ --task_list axb,axg,boolq,cb,cola,copa,crows_pairs_english,crows_pairs_french,diabla,e2e_nlg_cleaned,mnli,mnli_mismatched,multirc,piaf,qqp,rte,sst,tydiqa_primary,tydiqa_secondary,wic,wsc,wnli,wino_bias_type1_anti,wino_bias_type1_pro,wino_bias_type2_anti,wino_bias_type2_pro,xquad_ar,xquad_en,gsarti/flores_101_afr,gsarti/flores_101_amh,gsarti/flores_101_ara,gsarti/flores_101_hye,gsarti/flores_101_asm,gsarti/flores_101_ast,gsarti/flores_101_azj,gsarti/flores_101_bel,gsarti/flores_101_ben,gsarti/flores_101_bos,gsarti/flores_101_bul,gsarti/flores_101_mya,gsarti/flores_101_cat,gsarti/flores_101_ceb,gsarti/flores_101_zho_simpl,gsarti/flores_101_zho_trad,gsarti/flores_101_hrv,gsarti/flores_101_ces,gsarti/flores_101_dan,gsarti/flores_101_nld,gsarti/flores_101_eng,gsarti/flores_101_est,gsarti/flores_101_tgl,gsarti/flores_101_fin,gsarti/flores_101_fra,gsarti/flores_101_ful,gsarti/flores_101_glg,gsarti/flores_101_lug,gsarti/flores_101_kat,gsarti/flores_101_deu,gsarti/flores_101_ell,gsarti/flores_101_guj,gsarti/flores_101_hau,gsarti/flores_101_heb,gsarti/flores_101_hin,gsarti/flores_101_hun,gsarti/flores_101_isl,gsarti/flores_101_ibo,gsarti/flores_101_ind,gsarti/flores_101_gle,gsarti/flores_101_ita,gsarti/flores_101_jpn,gsarti/flores_101_jav,gsarti/flores_101_kea,gsarti/flores_101_kam,gsarti/flores_101_kan,gsarti/flores_101_kaz,gsarti/flores_101_khm,gsarti/flores_101_kor,gsarti/flores_101_kir,gsarti/flores_101_lao,gsarti/flores_101_lav,gsarti/flores_101_lin,gsarti/flores_101_lit,gsarti/flores_101_luo,gsarti/flores_101_ltz,gsarti/flores_101_mkd,gsarti/flores_101_msa,gsarti/flores_101_mal,gsarti/flores_101_mlt,gsarti/flores_101_mri,gsarti/flores_101_mar,gsarti/flores_101_mon,gsarti/flores_101_npi,gsarti/flores_101_nso,gsarti/flores_101_nob,gsarti/flores_101_nya,gsarti/flores_101_oci,gsarti/flores_101_ory,gsarti/flores_101_orm,gsarti/flores_101_pus,gsarti/flores_101_fas,gsarti/flores_101_pol,gsarti/flores_101_por,gsarti/flores_101_pan,gsarti/flores_101_ron,gsarti/flores_101_rus,gsarti/flores_101_srp,gsarti/flores_101_sna,gsarti/flores_101_snd,gsarti/flores_101_slk,gsarti/flores_101_slv,gsarti/flores_101_som,gsarti/flores_101_ckb,gsarti/flores_101_spa,gsarti/flores_101_swh,gsarti/flores_101_swe,gsarti/flores_101_tgk,gsarti/flores_101_tam,gsarti/flores_101_tel,gsarti/flores_101_tha,gsarti/flores_101_tur,gsarti/flores_101_ukr,gsarti/flores_101_umb,gsarti/flores_101_urd,gsarti/flores_101_uzb,gsarti/flores_101_vie,gsarti/flores_101_cym,gsarti/flores_101_wol,gsarti/flores_101_xho,gsarti/flores_101_yor,gsarti/flores_101_zul \
93
+ --eval_fp32 \
94
+ --deepspeed \
95
+ --deepspeed_config ds_config.json \
96
+ --intermed_results \
97
+ --adaptive_seq_len \
98
+ --micro_bs_multiplier 8 \
99
+ $MEGATRON_REQUIRED_ARGS \
100
+ "
101
+
102
+ GPUS_PER_NODE=1
103
+ NNODES=$SLURM_NNODES
104
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
105
+ MASTER_PORT=6000
106
+ export LAUNCHER="python -u -m torch.distributed.run \
107
+ --nproc_per_node $GPUS_PER_NODE \
108
+ --nnodes $NNODES \
109
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
110
+ --rdzv_backend c10d \
111
+ --max_restarts 0 \
112
+ --tee 3 \
113
+ "
114
+
115
+ export CUDA_LAUNCH_BLOCKING=1
116
+
117
+ echo $LAUNCHER $CMD
118
+
119
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
120
+
121
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
bigscience/evaluation/results/tr11/scripts/run_evalharness_deepspeed.md ADDED
@@ -0,0 +1,158 @@
1
+ # How to run lm-eval on Megatron-DeepSpeed checkpoint using the original setup
2
+
3
+ This particular setup uses the normal deepspeed checkpoint and requires no conversion to Megatron-LM.
4
+
5
+ This doc assumes usage on JZ, so there are some peculiar requirements in places. Ignore these if you're not running this on JZ.
6
+
7
+ ## Prerequisites
8
+
9
+ 1. Install software
10
+
11
+ On a login console with external network:
12
+
13
+ Get the lm-eval harness (https://github.com/EleutherAI/lm-evaluation-harness) and `best-download==0.0.7`, which is needed to download some tasks.
14
+ ```
15
+ start-prod
16
+ pip install best-download==0.0.7
17
+ pip install git+https://github.com/EleutherAI/lm-evaluation-harness
18
+ ```
19
+
20
+ 2. Pre-download needed datasets
21
+
22
+ Set up some symlinks to work around lm-harness's issues with the relative position of the data:
23
+ ```
24
+ mkdir data
25
+ ln -s `pwd`/data tasks/eval_harness/data
26
+ ```
27
+ Also make sure `data` is not on one of the limited partitions like WORKSF.
28
+
29
+ Then install datasets for the tasks:
30
+ ```
31
+ python ./tasks/eval_harness/download.py --task_list \
32
+ arc_challenge,arc_easy,boolq,copa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc
33
+ ```
34
+ and make sure that `export HF_DATASETS_OFFLINE=1` is set in the script.
35
+
36
+ If there are things like custom tokenizers, pre-download those too, e.g.:
37
+
38
+ ```
39
+ python -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('bigscience/oscar_13_languages_alpha_weight')"
40
+ ```
41
+ and make sure that `export TRANSFORMERS_OFFLINE=1` is in the script.
42
+ You know there is a custom tokenizer if the training script had something like:
43
+
44
+ ```
45
+ --tokenizer-type PretrainedFromHF \
46
+ --tokenizer-name-or-path bigscience/oscar_13_languages_alpha_weight \
47
+ ```
48
+
49
+ 3. Prepare the slurm script
50
+
51
+ Prepare the run script, replacing `variant` with a unique identifier for the current eval so that multiple evals can run in parallel without all logging into the same `results.json` file, e.g., `tr9c-1B3-swiglu`:
52
+
53
+ ```
54
+ cp examples/run_evalharness_deepspeed.slurm run_evalharness-variant.slurm
55
+ ```
56
+
57
+ Now edit `run_evalharness-variant.slurm`.
58
+
59
+
60
+ Note that the eval code knows to pull the original training args from the checkpoint, so we don't need to pass any of those; we just need to set up the evaluation args.
61
+
62
+ Note that for the bigscience lm-eval-harness fork (https://github.com/bigscience-workshop/lm-evaluation-harness), the corresponding scripts are `evaluate_bsevalharness.py` & `run_bsevalharness_tr11-176b-ml.slurm`.
63
+
64
+ 1. Edit:
65
+
66
+ ```
67
+ PP_SIZE=1
68
+ TP_SIZE=1
69
+ ```
70
+ to match the eval topology. If the model fits into 1 GPU, then there is nothing to change.
71
+
72
+ The eval script will automatically reshape the model if it was of a different topology.
73
+
74
+
75
+ 2. Adjust the following to fit the chosen GPU. As of the last check, for a 1.3B model the settings are one of:
76
+ ```
77
+ EVAL_MICRO_BATCH_SIZE=6 # 16GB GPU 1.3B model
78
+ EVAL_MICRO_BATCH_SIZE=12 # 32GB GPU 1.3B model
79
+ ```
80
+
81
+ If you get OOM, lower it further.
82
+
83
+ 3. If not using the Deepspeed path, disable it by removing:
84
+
85
+ ```
86
+ --deepspeed \
87
+ --deepspeed_config ds_config.json \
88
+ ```
89
+
90
+ If you didn't disable it and the program crashes on checkpoint loading, unable to find some key, disable DeepSpeed as explained above.
91
+
92
+ 4. Additional flags
93
+
94
+ - To reduce the number of iterations for stderr estimation, use e.g. `--bootstrap_iters 2`. This saves 1-2 minutes per dataset.
95
+ - To print intermediate results when running multiple tasks use `--intermed_results`.
96
+ - To reduce the pipeline bubble when using PP, use the flag `--micro_bs_multiplier`. Reducing `--micro-batch-size` may be needed when increasing the multiplier.
97
+ - Running the 176B model with PP=8, `--micro_bs_multiplier 8` & `--micro-batch-size 4` produced the fastest results for PiQA on 1 node in 2min18s.
98
+
99
+ ## Eval
100
+
101
+ Currently it takes 2-3 hours to run a 1.3B model on a 32GB GPU (6-7h on a 16GB GPU), so a 20h slurm job should be enough.
102
+
103
+ When ready, launch:
104
+ ```
105
+ sbatch ./run_evalharness-variant.slurm
106
+ ```
107
+
108
+ To monitor progress:
109
+ ```
110
+ tail -f $VARIANT-eval-harness.log
111
+ ```
112
+ where the variant is what you set `$VARIANT` to in the slurm script.
113
+
114
+ The template is set up for 16GB GPUs since they are easier to come by. If you change to 32GB, adjust:
115
+ ```
116
+ #SBATCH --constraint=v100-32g
117
+ ...
118
+ EVAL_MICRO_BATCH_SIZE=12 # 32GB GPU 1.3B model
119
+ ```
120
+
121
+
122
+ Note that the original ETA at the start of the run can be ~10x longer than the actual run time. For example, it may suggest 18 hours but complete in 2 hours.
123
+
124
+
125
+ ## Short eval
126
+
127
+ If you just want to quickly test that everything can run to the end, edit `tasks/eval_harness/evaluate.py`, e.g. to run only 10 batches:
128
+ ```
129
+ - results = evaluator.evaluate(adaptor, task_dict, False, 0, None)
130
+ + results = evaluator.evaluate(adaptor, task_dict, False, 0, 10)
131
+ ```
132
+
133
+ (XXX: could be a cmd line option so that code won't need to be modified)
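A sketch of what that command-line option could look like (hypothetical wiring; the real `evaluate.py` parses its arguments through Megatron, so the exact integration may differ):

```python
import argparse

# Hypothetical --limit flag; None keeps the current full-run behaviour.
parser = argparse.ArgumentParser()
parser.add_argument("--limit", type=int, default=None,
                    help="evaluate only this many batches per task (for smoke tests)")
args = parser.parse_args(["--limit", "10"])  # simulate passing `--limit 10`

# Inside evaluate.py, the hard-coded constant would then become:
# results = evaluator.evaluate(adaptor, task_dict, False, 0, args.limit)
print(args.limit)  # -> 10
```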
134
+
135
+
136
+ ## Import into spreadsheet
137
+
138
+ https://docs.google.com/spreadsheets/d/1CI8Q9RCblLRzUOPJ6ViqBmo284-8ojluQ-CmaEuhuv0/edit?usp=sharing
139
+
140
+ Note that the spreadsheet format is quite different, so use this script:
141
+ ```
142
+ ./tasks/eval_harness/report-to-csv.py results.json
143
+ ```
144
+ to reformat the JSON results into CSV while changing their shape to match the spreadsheet format.
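The resulting CSV has one row per (task, metric) pair, e.g. a line like `arc_challenge,acc,0.2423,0.0125,0` (illustrative values taken from the example in the script's header).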
145
+
146
+ Since some records might be missing or extraneous, here is the best way to do it:
147
+
148
+ 1. Copy the data from the first 2 columns to some place under the main spreadsheet.
149
+
150
+ 2. Put the pointer in the 3rd column, next to where the first 2 columns were copied.
151
+
152
+ 3. Import `results.csv` using File -> Import -> Upload file, with:
153
+
154
+ Import location: Replace data at selected cell
155
+
156
+ 4. Now it should be easy to align the new records with the old ones: delete irrelevant records and use Insert -> Cells where data is missing until the first 2 columns match.
157
+
158
+ 5. Now create 2 new columns at the top of the main table; it should then be safe to copy-and-paste the 2-column data range (without the task/metric columns) into the newly created space.
bigscience/evaluation/results/tr11/scripts/run_evalharness_deepspeed.slurm ADDED
@@ -0,0 +1,98 @@
+ #!/bin/bash
+ #SBATCH --job-name=eval-harness-deepspeed
+ #SBATCH --constraint=v100-16g
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@gpu
+
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval so that multiple evals could run in parallel and not all log into the same "results.json" file.
+ VARIANT="tr9c-1B3-swiglu"
+
+ CHECKPOINT_PATH=/gpfsdsstore/projects/rech/six/commun/checkpoints/tr3m-1B3-emb-norm-pile/global_step296023
+ MEGATRON_DEEPSPEED_REPO=/gpfsssd/worksf/projects/rech/six/commun/code/eval/Megatron-DeepSpeed
+
+ # you want these 2 on JZ, and pre-download/cache any datasets/tokenizers/models
+ # but comment these out if you're running on a node with Internet access
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ # eval topology
+ PP_SIZE=1
+ TP_SIZE=1
+
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
+
+ EVAL_MICRO_BATCH_SIZE=6 # 16GB GPU 1.3B model
+ #EVAL_MICRO_BATCH_SIZE=12 # 32GB GPU 1.3B model
+
+
+ # dummy arguments to make megatron happy.
+ MEGATRON_REQUIRED_ARGS=" \
+ --num-layers -1 \
+ --hidden-size -1 \
+ --num-attention-heads -1 \
+ --seq-length -1 \
+ --max-position-embeddings -1
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+ cat <<EOT > $config_json
+ {
+ "train_micro_batch_size_per_gpu": 1,
+ "train_batch_size": 1,
+ "zero_optimization": { "stage": $ZERO_STAGE },
+ "fp16": { "enabled": true },
+ "steps_per_print": 2000,
+ "wall_clock_breakdown": false
+ }
+ EOT
+
+ CMD="./tasks/eval_harness/evaluate.py \
+ --load $CHECKPOINT_PATH \
+ --results_path $VARIANT-results.json \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ --vocab-file $VOCAB_FILE \
+ --merge-file $MERGE_FILE \
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+ --no-load-optim \
+ --no-load-rng \
+ --inference \
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ --seq-length $SEQ_LEN \
+ --adaptive_seq_len \
+ --eval_fp32 \
+ --task_list arc_challenge,arc_easy,boolq,copa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sst,webqs,wic,winogrande,wnli,wsc,triviaqa,sciq \
+ $MEGATRON_REQUIRED_ARGS \
+ "
+
+ N_GPUS=1
+ LAUNCHER="deepspeed --num_gpus $N_GPUS"
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
bigscience/evaluation/results/tr11/scripts/run_evalharness_tr11b-1b3-ml.slurm ADDED
@@ -0,0 +1,120 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11b-1b3-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11b-1b3-ml-evalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11b-1B3-ml/checkpoints/main/global_step340500
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make megatron happy.
+ MEGATRON_REQUIRED_ARGS=" \
+ --num-layers -1 \
+ --hidden-size -1 \
+ --num-attention-heads -1 \
+ --seq-length -1 \
+ --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+ "train_micro_batch_size_per_gpu": 1,
+ "train_batch_size": 1,
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": $ZERO_STAGE
+ },
+ "bf16": {
+ "enabled": false
+ },
+ "steps_per_print": 2000,
+ "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+ --load $CHECKPOINT_PATH \
+ --results_path $VARIANT-results.json \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+ --no-load-optim \
+ --no-load-rng \
+ --eval_fp32 \
+ --inference \
+ --seq-length $SEQ_LEN \
+ --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ --intermed_results \
+ --adaptive_seq_len \
+ --micro_bs_multiplier 8 \
+ $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+ --nproc_per_node $GPUS_PER_NODE \
+ --nnodes $NNODES \
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+ --rdzv_backend c10d \
+ --max_restarts 0 \
+ --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
bigscience/evaluation/results/tr11/scripts/run_evalharness_tr11c-2b5-ml.slurm ADDED
@@ -0,0 +1,120 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11c-2b5-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11c-2b5-ml-evalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11c-2B5-ml/checkpoints/main/global_step337250
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make megatron happy.
+ MEGATRON_REQUIRED_ARGS=" \
+ --num-layers -1 \
+ --hidden-size -1 \
+ --num-attention-heads -1 \
+ --seq-length -1 \
+ --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+ "train_micro_batch_size_per_gpu": 1,
+ "train_batch_size": 1,
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": $ZERO_STAGE
+ },
+ "bf16": {
+ "enabled": false
+ },
+ "steps_per_print": 2000,
+ "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+ --load $CHECKPOINT_PATH \
+ --results_path $VARIANT-results.json \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+ --no-load-optim \
+ --no-load-rng \
+ --eval_fp32 \
+ --inference \
+ --seq-length $SEQ_LEN \
+ --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ --intermed_results \
+ --adaptive_seq_len \
+ --micro_bs_multiplier 8 \
+ $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+ --nproc_per_node $GPUS_PER_NODE \
+ --nnodes $NNODES \
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+ --rdzv_backend c10d \
+ --max_restarts 0 \
+ --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log