kyujinpy committed
Commit 2b30f1a · 1 Parent(s): 2c89156

Upload README.md

Files changed (1):
  1. README.md +5 -5
README.md CHANGED
@@ -50,7 +50,7 @@ license: cc-by-nc-4.0
 | [Llama-2-Ko-7b 20B](https://huggingface.co/beomi/llama-2-ko-7b) | 0.7388 | 0.7626 | 0.7808 | 0.7979 |
 | [Llama-2-Ko-7b 40B](https://huggingface.co/beomi/llama-2-ko-7b) | 0.7436 | 0.7927 | 0.8037 | 0.8259 |
 | [Platypus2-13B](https://huggingface.co/garage-bAInd/Platypus2-13B) | 0.5916 | 0.6437 | 0.6474 | 0.6677 |
-| KO-platypus2-13B | NaN | NaN | NaN | NaN |
+| KO-platypus2-13B | 0.5820 | 0.6269 | 0.6267 | 0.6527 |
 > Question Answering (QA)
 
 ### HellaSwag (F1)
@@ -65,7 +65,7 @@ license: cc-by-nc-4.0
 | [Llama-2-Ko-7b 20B](https://huggingface.co/beomi/llama-2-ko-7b) | 0.4518 | 0.466751 | 0.4726 | 0.4828 |
 | [Llama-2-Ko-7b 40B](https://huggingface.co/beomi/llama-2-ko-7b) | 0.4562 | 0.4657 | 0.4698 | 0.4774 |
 | [Platypus2-13B](https://huggingface.co/garage-bAInd/Platypus2-13B) | 0.4136 | 0.4320 | 0.4315 | 0.4426 |
-| KO-platypus2-13B | NaN | NaN | NaN | NaN |
+| KO-platypus2-13B | 0.3912 | 0.4129 | 0.4144 | 0.4330 |
 > Natural Language Inference (NLI; 자연어 추론 평가)
 
 ### BoolQ (F1)
@@ -80,7 +80,7 @@ license: cc-by-nc-4.0
 | [Llama-2-Ko-7b 20B](https://huggingface.co/beomi/llama-2-ko-7b) | 0.3607 | 0.679743 | 0.6801 | 0.6622 |
 | [Llama-2-Ko-7b 40B](https://huggingface.co/beomi/llama-2-ko-7b) | 0.5786 | 0.6977 | 0.7084 | 0.7144 |
 | [Platypus2-13B](https://huggingface.co/garage-bAInd/Platypus2-13B) | 0.3428 | 0.7499 | 0.7957 | 0.7676 |
-| KO-platypus2-13B | NaN | NaN | NaN | NaN |
+| KO-platypus2-13B | 0.3539 | 0.7168 | 0.7328 | 0.7172 |
 > Question Answering (QA)
 
 ### SentiNeg (F1)
@@ -95,7 +95,7 @@ license: cc-by-nc-4.0
 | [Llama-2-Ko-7b 20B](https://huggingface.co/beomi/llama-2-ko-7b) | 0.4855 | 0.8295 | 0.8711 | 0.8513 |
 | [Llama-2-Ko-7b 40B](https://huggingface.co/beomi/llama-2-ko-7b) | 0.4594 | 0.7611 | 0.7276 | 0.9370 |
 | [Platypus2-13B](https://huggingface.co/garage-bAInd/Platypus2-13B) | 0.5969 | 0.7674 | 0.8032 | 0.8939 |
-| KO-platypus2-13B | NaN | NaN | NaN | NaN |
+| KO-platypus2-13B | 0.5216 | 0.8236 | 0.8487 | 0.8789 |
 > Classification
 
 # Implementation Code
@@ -104,7 +104,7 @@ license: cc-by-nc-4.0
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-repo = "kyujinpy/KOrca-Platypus2-13B"
+repo = "kyujinpy/KO-Platypus2-13B"
 ko_platypus = AutoModelForCausalLM.from_pretrained(
     repo,
     return_dict=True,
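
For reference, the corrected snippet in the last hunk can be exercised end to end roughly as follows. This is a minimal sketch: only the imports, the corrected `repo` value, and `return_dict=True` come from the diff; the `torch_dtype`, `device_map`, tokenizer loading, prompt, and `generate` call are illustrative assumptions, not taken from the README.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Corrected repo id from the diff (previously "kyujinpy/KOrca-Platypus2-13B").
repo = "kyujinpy/KO-Platypus2-13B"

# Load the model; return_dict=True appears in the diff context.
# torch_dtype and device_map are assumptions for fitting a 13B model on GPU.
ko_platypus = AutoModelForCausalLM.from_pretrained(
    repo,
    return_dict=True,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Tokenizer and a short generation call (illustrative, not from the diff).
tokenizer = AutoTokenizer.from_pretrained(repo)

# Example Korean prompt: "Introduce yourself in Korean."
inputs = tokenizer("한국어로 자기소개를 해줘.", return_tensors="pt").to(ko_platypus.device)
with torch.no_grad():
    output = ko_platypus.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```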