Maciek committed on
Commit 5f91064
1 Parent(s): ef22351

Fix Google Colab notebook 2024-05 (#1662) [skip ci]


* include mlflow installation in the colab notebook

Without explicitly installing mlflow, the `accelerate launch` command fails. The resulting install cell is sketched after this list.

* update the colab notebook to use the latest tinyllama config
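
For reference, this is the dependency cell as it stands after the change, with the explicit mlflow pin appended (a sketch reconstructed from the diff below; the pinned versions are the ones the notebook uses):

    !pip install torch=="2.1.2"
    !pip install -e git+https://github.com/OpenAccess-AI-Collective/axolotl#egg=axolotl
    !pip install flash-attn=="2.5.0"
    !pip install deepspeed=="0.13.1"
    !pip install mlflow=="2.13.0"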

examples/colab-notebooks/colab-axolotl-example.ipynb CHANGED
@@ -1,216 +1,224 @@
- {
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "AKjdG7tbTb-n"
- },
- "source": [
- "# Example notebook for running Axolotl on google colab"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "RcbNpOgWRcii"
- },
- "outputs": [],
- "source": [
- "import torch\n",
- "# Check so there is a gpu available, a T4(free tier) is enough to run this notebook\n",
- "assert (torch.cuda.is_available()==True)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "h3nLav8oTRA5"
- },
- "source": [
- "## Install Axolotl and dependencies"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "3c3yGAwnOIdi",
- "outputId": "e3777b5a-40ef-424f-e181-62dfecd1dd01"
- },
- "outputs": [],
- "source": [
- "!pip install torch==\"2.1.2\"\n",
- "!pip install -e git+https://github.com/OpenAccess-AI-Collective/axolotl#egg=axolotl\n",
- "!pip install flash-attn==\"2.5.0\"\n",
- "!pip install deepspeed==\"0.13.1\""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "BW2MFr7HTjub"
- },
- "source": [
- "## Create an yaml config file"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "9pkF2dSoQEUN"
- },
- "outputs": [],
- "source": [
- "import yaml\n",
- "\n",
- "# Your YAML string\n",
- "yaml_string = \"\"\"\n",
- "base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T\n",
- "model_type: LlamaForCausalLM\n",
- "tokenizer_type: LlamaTokenizer\n",
- "is_llama_derived_model: true\n",
- "\n",
- "load_in_8bit: false\n",
- "load_in_4bit: true\n",
- "strict: false\n",
- "\n",
- "datasets:\n",
- " - path: mhenrichsen/alpaca_2k_test\n",
- " type: alpaca\n",
- "dataset_prepared_path:\n",
- "val_set_size: 0.05\n",
- "output_dir: ./outputs/qlora-out\n",
- "\n",
- "adapter: qlora\n",
- "lora_model_dir:\n",
- "\n",
- "sequence_len: 1096\n",
- "sample_packing: true\n",
- "pad_to_sequence_len: true\n",
- "\n",
- "lora_r: 32\n",
- "lora_alpha: 16\n",
- "lora_dropout: 0.05\n",
- "lora_target_modules:\n",
- "lora_target_linear: true\n",
- "lora_fan_in_fan_out:\n",
- "\n",
- "wandb_project:\n",
- "wandb_entity:\n",
- "wandb_watch:\n",
- "wandb_name:\n",
- "wandb_log_model:\n",
- "\n",
- "mlflow_experiment_name: colab-example\n",
- "\n",
- "gradient_accumulation_steps: 1\n",
- "micro_batch_size: 1\n",
- "num_epochs: 4\n",
- "max_steps: 20\n",
- "optimizer: paged_adamw_32bit\n",
- "lr_scheduler: cosine\n",
- "learning_rate: 0.0002\n",
- "\n",
- "train_on_inputs: false\n",
- "group_by_length: false\n",
- "bf16: false\n",
- "fp16: true\n",
- "tf32: false\n",
- "\n",
- "gradient_checkpointing: true\n",
- "early_stopping_patience:\n",
- "resume_from_checkpoint:\n",
- "local_rank:\n",
- "logging_steps: 1\n",
- "xformers_attention:\n",
- "flash_attention: false\n",
- "\n",
- "warmup_steps: 10\n",
- "evals_per_epoch:\n",
- "saves_per_epoch:\n",
- "debug:\n",
- "deepspeed:\n",
- "weight_decay: 0.0\n",
- "fsdp:\n",
- "fsdp_config:\n",
- "special_tokens:\n",
- "\n",
- "\"\"\"\n",
- "\n",
- "# Convert the YAML string to a Python dictionary\n",
- "yaml_dict = yaml.safe_load(yaml_string)\n",
- "\n",
- "# Specify your file path\n",
- "file_path = 'test_axolotl.yaml'\n",
- "\n",
- "# Write the YAML file\n",
- "with open(file_path, 'w') as file:\n",
- " yaml.dump(yaml_dict, file)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "bidoj8YLTusD"
- },
- "source": [
- "## Launch the training"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "ydTI2Jk2RStU",
- "outputId": "d6d0df17-4b53-439c-c802-22c0456d301b"
- },
- "outputs": [],
- "source": [
- "# Buy using the ! the comand will be executed as a bash command\n",
- "!accelerate launch -m axolotl.cli.train /content/test_axolotl.yaml"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Play with inference"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Buy using the ! the comand will be executed as a bash command\n",
- "!accelerate launch -m axolotl.cli.inference /content/test_axolotl.yaml \\\n",
- " --qlora_model_dir=\"./qlora-out\" --gradio"
- ]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "gpuType": "T4",
- "provenance": []
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "name": "python"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
- }
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "AKjdG7tbTb-n"
+ },
+ "source": [
+ "# Example notebook for running Axolotl on google colab"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "RcbNpOgWRcii"
+ },
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "# Check so there is a gpu available, a T4(free tier) is enough to run this notebook\n",
+ "assert (torch.cuda.is_available()==True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "h3nLav8oTRA5"
+ },
+ "source": [
+ "## Install Axolotl and dependencies"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "3c3yGAwnOIdi",
+ "outputId": "e3777b5a-40ef-424f-e181-62dfecd1dd01"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install torch==\"2.1.2\"\n",
+ "!pip install -e git+https://github.com/OpenAccess-AI-Collective/axolotl#egg=axolotl\n",
+ "!pip install flash-attn==\"2.5.0\"\n",
+ "!pip install deepspeed==\"0.13.1\"\n",
+ "!pip install mlflow==\"2.13.0\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "BW2MFr7HTjub"
+ },
+ "source": [
+ "## Create an yaml config file"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "9pkF2dSoQEUN"
+ },
+ "outputs": [],
+ "source": [
+ "import yaml\n",
+ "\n",
+ "# Your YAML string\n",
+ "yaml_string = \"\"\"\n",
+ "base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T\n",
+ "model_type: LlamaForCausalLM\n",
+ "tokenizer_type: LlamaTokenizer\n",
+ "\n",
+ "load_in_8bit: false\n",
+ "load_in_4bit: true\n",
+ "strict: false\n",
+ "\n",
+ "datasets:\n",
+ " - path: mhenrichsen/alpaca_2k_test\n",
+ " type: alpaca\n",
+ "dataset_prepared_path:\n",
+ "val_set_size: 0.05\n",
+ "output_dir: ./outputs/qlora-out\n",
+ "\n",
+ "adapter: qlora\n",
+ "lora_model_dir:\n",
+ "\n",
+ "sequence_len: 4096\n",
+ "sample_packing: true\n",
+ "eval_sample_packing: false\n",
+ "pad_to_sequence_len: true\n",
+ "\n",
+ "lora_r: 32\n",
+ "lora_alpha: 16\n",
+ "lora_dropout: 0.05\n",
+ "lora_target_modules:\n",
+ "lora_target_linear: true\n",
+ "lora_fan_in_fan_out:\n",
+ "\n",
+ "wandb_project:\n",
+ "wandb_entity:\n",
+ "wandb_watch:\n",
+ "wandb_name:\n",
+ "wandb_log_model:\n",
+ "\n",
+ "gradient_accumulation_steps: 4\n",
+ "micro_batch_size: 2\n",
+ "num_epochs: 4\n",
+ "optimizer: paged_adamw_32bit\n",
+ "lr_scheduler: cosine\n",
+ "learning_rate: 0.0002\n",
+ "\n",
+ "train_on_inputs: false\n",
+ "group_by_length: false\n",
+ "bf16: auto\n",
+ "fp16:\n",
+ "tf32: false\n",
+ "\n",
+ "gradient_checkpointing: true\n",
+ "early_stopping_patience:\n",
+ "resume_from_checkpoint:\n",
+ "local_rank:\n",
+ "logging_steps: 1\n",
+ "xformers_attention:\n",
+ "flash_attention: true\n",
+ "\n",
+ "warmup_steps: 10\n",
+ "evals_per_epoch: 4\n",
+ "saves_per_epoch: 1\n",
+ "debug:\n",
+ "deepspeed:\n",
+ "weight_decay: 0.0\n",
+ "fsdp:\n",
+ "fsdp_config:\n",
+ "special_tokens:\n",
+ "\n",
+ "\"\"\"\n",
+ "\n",
+ "# Convert the YAML string to a Python dictionary\n",
+ "yaml_dict = yaml.safe_load(yaml_string)\n",
+ "\n",
+ "# Specify your file path\n",
+ "file_path = 'test_axolotl.yaml'\n",
+ "\n",
+ "# Write the YAML file\n",
+ "with open(file_path, 'w') as file:\n",
+ " yaml.dump(yaml_dict, file)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "bidoj8YLTusD"
+ },
+ "source": [
+ "## Launch the training"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "ydTI2Jk2RStU",
+ "outputId": "d6d0df17-4b53-439c-c802-22c0456d301b"
+ },
+ "outputs": [],
+ "source": [
+ "# Buy using the ! the comand will be executed as a bash command\n",
+ "!accelerate launch -m axolotl.cli.train /content/test_axolotl.yaml"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Play with inference"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Buy using the ! the comand will be executed as a bash command\n",
+ "!accelerate launch -m axolotl.cli.inference /content/test_axolotl.yaml \\\n",
+ " --qlora_model_dir=\"./qlora-out\" --gradio"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.1"
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
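
As context for the first change: the commit message reports that `accelerate launch` fails when mlflow is absent from the Colab runtime. A minimal pre-flight check one could run in a notebook cell before training; this is a hypothetical sketch, not part of the commit:

    # Hypothetical sanity check (not part of this commit): confirm mlflow is
    # importable before launching training, since the commit message reports
    # that `accelerate launch` fails when it is missing.
    import importlib.util

    if importlib.util.find_spec("mlflow") is None:
        raise RuntimeError("mlflow is not installed; run `!pip install mlflow` first")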