nishimura999
committed on
Update README.md
Browse files
README.md
CHANGED
@@ -23,8 +23,8 @@ This llama model was trained 2x faster with [Unsloth](https://github.com/unsloth
|
|
23 |
|
24 |
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
|
25 |
|
26 |
-
|
27 |
-
|
28 |
```python
|
29 |
from transformers import (
|
30 |
AutoModelForCausalLM,
|
@@ -36,7 +36,7 @@ from tqdm import tqdm
|
|
36 |
import json
|
37 |
```
|
38 |
|
39 |
-
|
40 |
```python
|
41 |
# Hugging Faceで取得したToken
|
42 |
HF_TOKEN = "{Your Hugging Face token}"
|
@@ -45,7 +45,7 @@ HF_TOKEN = "{Your hagging face token}"
|
|
45 |
model_name = "nishimura999/llm-jp-3-13b-finetune-v100"
|
46 |
```
|
47 |
|
48 |
-
|
49 |
```python
|
50 |
# QLoRA config
|
51 |
bnb_config = BitsAndBytesConfig(
|
@@ -55,7 +55,7 @@ bnb_config = BitsAndBytesConfig(
|
|
55 |
bnb_4bit_use_double_quant=False,
|
56 |
)
|
57 |
```
|
58 |
-
|
59 |
```python
|
60 |
# Load model
|
61 |
model = AutoModelForCausalLM.from_pretrained(
|
@@ -69,7 +69,7 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
69 |
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, token = HF_TOKEN)
|
70 |
```
|
71 |
|
72 |
-
|
73 |
```python
|
74 |
# データセットの読み込み。
|
75 |
datasets = []
|
@@ -83,7 +83,7 @@ with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
|
|
83 |
item = ""
|
84 |
```
|
85 |
|
86 |
-
|
87 |
```python
|
88 |
results = []
|
89 |
for data in tqdm(datasets):
|
@@ -108,7 +108,7 @@ for data in tqdm(datasets):
|
|
108 |
results.append({"task_id": data["task_id"], "input": input, "output": output})
|
109 |
```
|
110 |
|
111 |
-
|
112 |
```python
|
113 |
import re
|
114 |
model_name = re.sub(".*/", "", model_name)
|
|
|
23 |
|
24 |
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
|
25 |
|
26 |
+
# usage
|
27 |
+
## -import
|
28 |
```python
|
29 |
from transformers import (
|
30 |
AutoModelForCausalLM,
|
|
|
36 |
import json
|
37 |
```
|
38 |
|
39 |
+
## -setting
|
40 |
```python
|
41 |
# Hugging Faceで取得したToken
|
42 |
HF_TOKEN = "{Your Hugging Face token}"
|
|
|
45 |
model_name = "nishimura999/llm-jp-3-13b-finetune-v100"
|
46 |
```
|
47 |
|
48 |
+
## -config
|
49 |
```python
|
50 |
# QLoRA config
|
51 |
bnb_config = BitsAndBytesConfig(
|
|
|
55 |
bnb_4bit_use_double_quant=False,
|
56 |
)
|
57 |
```
|
58 |
+
## -load
|
59 |
```python
|
60 |
# Load model
|
61 |
model = AutoModelForCausalLM.from_pretrained(
|
|
|
69 |
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, token = HF_TOKEN)
|
70 |
```
|
71 |
|
72 |
+
## -dataset
|
73 |
```python
|
74 |
# データセットの読み込み。
|
75 |
datasets = []
|
|
|
83 |
item = ""
|
84 |
```
|
85 |
|
86 |
+
## -generate
|
87 |
```python
|
88 |
results = []
|
89 |
for data in tqdm(datasets):
|
|
|
108 |
results.append({"task_id": data["task_id"], "input": input, "output": output})
|
109 |
```
|
110 |
|
111 |
+
## -output
|
112 |
```python
|
113 |
import re
|
114 |
model_name = re.sub(".*/", "", model_name)
|