[INFO|parser.py:344] 2024-07-16 19:25:29,726 >> Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, compute dtype: torch.float16
[INFO|tokenization_utils_base.py:2108] 2024-07-16 19:25:32,413 >> loading file qwen.tiktoken from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen-1_8B-Chat/snapshots/1d0f68de57b88cfde81f3c3e537f24464d889081/qwen.tiktoken
[INFO|tokenization_utils_base.py:2108] 2024-07-16 19:25:32,413 >> loading file added_tokens.json from cache at None
[INFO|tokenization_utils_base.py:2108] 2024-07-16 19:25:32,413 >> loading file special_tokens_map.json from cache at None
[INFO|tokenization_utils_base.py:2108] 2024-07-16 19:25:32,413 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen-1_8B-Chat/snapshots/1d0f68de57b88cfde81f3c3e537f24464d889081/tokenizer_config.json
[INFO|tokenization_utils_base.py:2108] 2024-07-16 19:25:32,414 >> loading file tokenizer.json from cache at None
[INFO|template.py:268] 2024-07-16 19:25:32,773 >> Add eos token: <|im_end|>
[INFO|template.py:372] 2024-07-16 19:25:32,773 >> Add pad token: <|im_end|>
[INFO|loader.py:52] 2024-07-16 19:25:32,774 >> Loading dataset glaive_toolcall_en_demo.json...
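
The tokenizer files above are pulled from the Hugging Face Hub cache, and the template step then registers `<|im_end|>` as both the EOS and PAD token. Reproducing just the tokenizer load outside the training script is a one-liner; a minimal sketch (not the training code itself), noting that Qwen-1_8B-Chat ships a custom tiktoken-based `QWenTokenizer`, so `trust_remote_code=True` is required:

```python
from transformers import AutoTokenizer

# Qwen-1_8B-Chat uses a custom QWenTokenizer (qwen.tiktoken), hence trust_remote_code.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-1_8B-Chat", trust_remote_code=True)

# Quick sanity check: encode a plain string with the loaded vocabulary.
print(tokenizer.encode("Hello, Qwen!"))
```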
[INFO|configuration_utils.py:733] 2024-07-16 19:26:23,755 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen-1_8B-Chat/snapshots/1d0f68de57b88cfde81f3c3e537f24464d889081/config.json
[INFO|configuration_utils.py:733] 2024-07-16 19:26:24,494 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen-1_8B-Chat/snapshots/1d0f68de57b88cfde81f3c3e537f24464d889081/config.json
[INFO|configuration_utils.py:796] 2024-07-16 19:26:24,495 >> Model config QWenConfig {
  "_name_or_path": "Qwen/Qwen-1_8B-Chat",
  "architectures": [
    "QWenLMHeadModel"
  ],
  "attn_dropout_prob": 0.0,
  "auto_map": {
    "AutoConfig": "Qwen/Qwen-1_8B-Chat--configuration_qwen.QWenConfig",
    "AutoModelForCausalLM": "Qwen/Qwen-1_8B-Chat--modeling_qwen.QWenLMHeadModel"
  },
  "bf16": false,
  "emb_dropout_prob": 0.0,
  "fp16": false,
  "fp32": false,
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "kv_channels": 128,
  "layer_norm_epsilon": 1e-06,
  "max_position_embeddings": 8192,
  "model_type": "qwen",
  "no_bias": true,
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "onnx_safe": null,
  "rotary_emb_base": 10000,
  "rotary_pct": 1.0,
  "scale_attn_weights": true,
  "seq_length": 8192,
  "softmax_in_fp32": false,
  "tie_word_embeddings": false,
  "tokenizer_class": "QWenTokenizer",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "use_cache_kernel": false,
  "use_cache_quantization": false,
  "use_dynamic_ntk": true,
  "use_flash_attn": "auto",
  "use_logn_attn": true,
  "vocab_size": 151936
}
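
Several of the printed fields are internally consistent: a `hidden_size` of 2048 split across 16 attention heads gives 128 dimensions per head, which is exactly the reported `kv_channels`. A quick check against the cached config (a sketch, assuming the same `transformers` environment as the run above):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Qwen/Qwen-1_8B-Chat", trust_remote_code=True)

# Per-head dimension: 2048 hidden units / 16 heads = 128, matching kv_channels.
head_dim = config.hidden_size // config.num_attention_heads
assert head_dim == config.kv_channels == 128
```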
[INFO|modeling_utils.py:3474] 2024-07-16 19:26:26,974 >> loading weights file model.safetensors from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen-1_8B-Chat/snapshots/1d0f68de57b88cfde81f3c3e537f24464d889081/model.safetensors.index.json
[INFO|modeling_utils.py:1519] 2024-07-16 19:26:45,032 >> Instantiating QWenLMHeadModel model under default dtype torch.float16.
[INFO|configuration_utils.py:962] 2024-07-16 19:26:45,034 >> Generate config GenerationConfig {}
[INFO|modeling_utils.py:4280] 2024-07-16 19:26:51,937 >> All model checkpoint weights were used when initializing QWenLMHeadModel.
[INFO|modeling_utils.py:4288] 2024-07-16 19:26:51,937 >> All the weights of QWenLMHeadModel were initialized from the model checkpoint at Qwen/Qwen-1_8B-Chat.
If your task is similar to the task the model of the checkpoint was trained on, you can already use QWenLMHeadModel for predictions without further training.
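
The checkpoint is instantiated under `torch.float16`, matching the compute dtype reported at the very top of the log. Loading the same base model directly would look roughly like this (a minimal sketch; the explicit `.to("cuda:0")` simply mirrors the device reported by the log):

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-1_8B-Chat",
    trust_remote_code=True,      # QWenLMHeadModel lives in the repo's modeling_qwen.py
    torch_dtype=torch.float16,   # matches "Instantiating ... under default dtype torch.float16"
)
model.to("cuda:0")               # the log reports device: cuda:0
```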
[INFO|configuration_utils.py:917] 2024-07-16 19:26:52,423 >> loading configuration file generation_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen-1_8B-Chat/snapshots/1d0f68de57b88cfde81f3c3e537f24464d889081/generation_config.json
[INFO|configuration_utils.py:962] 2024-07-16 19:26:52,424 >> Generate config GenerationConfig {
  "chat_format": "chatml",
  "do_sample": true,
  "eos_token_id": 151643,
  "max_new_tokens": 512,
  "max_window_size": 6144,
  "pad_token_id": 151643,
  "repetition_penalty": 1.1,
  "top_k": 0,
  "top_p": 0.8
}
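
These sampling defaults ship with the checkpoint's `generation_config.json` and are picked up automatically by `model.generate`. They can also be inspected or overridden explicitly; a sketch, assuming the model from the loading snippet above:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("Qwen/Qwen-1_8B-Chat")
print(gen_config.top_p, gen_config.repetition_penalty)  # 0.8, 1.1

# Individual fields can be overridden per call, e.g. a shorter completion:
# outputs = model.generate(**inputs, generation_config=gen_config, max_new_tokens=128)
```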
[WARNING|checkpointing.py:70] 2024-07-16 19:26:52,440 >> You are using the old GC format, some features (e.g. BAdam) will be invalid.
[INFO|checkpointing.py:103] 2024-07-16 19:26:52,440 >> Gradient checkpointing enabled.
[INFO|attention.py:86] 2024-07-16 19:26:52,440 >> Using vanilla attention implementation.
[INFO|adapter.py:302] 2024-07-16 19:26:52,441 >> Upcasting trainable params to float32.
[INFO|adapter.py:158] 2024-07-16 19:26:52,441 >> Fine-tuning method: LoRA
[INFO|misc.py:51] 2024-07-16 19:26:52,442 >> Found linear modules: c_attn,c_proj,w1,w2
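
The trainer attaches LoRA adapters to every linear projection matching the names it found in the Qwen blocks: the attention projections (`c_attn`, `c_proj`) and the MLP projections (`w1`, `w2`, plus the MLP's own `c_proj`, which shares the `c_proj` name). With PEFT directly, an equivalent configuration would look roughly like this (a sketch; rank 8 and alpha 16 are assumed defaults, not values printed in the log, though rank 8 is consistent with the trainable-parameter count reported next):

```python
from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=8,                 # assumed default rank
    lora_alpha=16,       # assumed default alpha
    lora_dropout=0.0,
    target_modules=["c_attn", "c_proj", "w1", "w2"],  # as listed in the log
    task_type="CAUSAL_LM",
)

peft_model = get_peft_model(model, lora_config)  # `model` from the loading sketch above
peft_model.print_trainable_parameters()
```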
[INFO|loader.py:196] 2024-07-16 19:26:53,145 >> trainable params: 6,709,248 || all params: 1,843,537,920 || trainable%: 0.3639
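
The trainable-parameter count can be reproduced by hand: each LoRA adapter on a linear layer of shape `in × out` adds `r × (in + out)` parameters, and Qwen-1.8B has 24 layers with `c_attn` (2048→6144), the attention `c_proj` (2048→2048), `w1`/`w2` (2048→5504 each, since `intermediate_size` 11008 is split into two halves), and the MLP `c_proj` (5504→2048). A quick check, again assuming rank 8:

```python
# LoRA adds r * (in_features + out_features) parameters per targeted linear layer.
r, num_layers = 8, 24
targeted = [
    (2048, 6144),  # attn.c_attn  (QKV projection, 3 * hidden_size outputs)
    (2048, 2048),  # attn.c_proj
    (2048, 5504),  # mlp.w1
    (2048, 5504),  # mlp.w2
    (5504, 2048),  # mlp.c_proj   (also matched by the "c_proj" target name)
]
per_layer = sum(r * (i + o) for i, o in targeted)
print(num_layers * per_layer)           # 6709248
print(6709248 / 1843537920 * 100)       # ≈ 0.3639 %
```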
[INFO|trainer.py:641] 2024-07-16 19:26:53,161 >> Using auto half precision backend
[INFO|trainer.py:2078] 2024-07-16 19:26:54,481 >> ***** Running training *****
[INFO|trainer.py:2079] 2024-07-16 19:26:54,481 >> Num examples = 300
[INFO|trainer.py:2080] 2024-07-16 19:26:54,481 >> Num Epochs = 3
[INFO|trainer.py:2081] 2024-07-16 19:26:54,481 >> Instantaneous batch size per device = 2
[INFO|trainer.py:2084] 2024-07-16 19:26:54,481 >> Total train batch size (w. parallel, distributed & accumulation) = 16
[INFO|trainer.py:2085] 2024-07-16 19:26:54,481 >> Gradient Accumulation steps = 8
[INFO|trainer.py:2086] 2024-07-16 19:26:54,482 >> Total optimization steps = 54
[INFO|trainer.py:2087] 2024-07-16 19:26:54,484 >> Number of trainable parameters = 6,709,248
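
The batch-size and step counts are consistent with one another: 2 samples per device × 8 accumulation steps × 1 GPU gives the effective batch size of 16, and 300 examples for 3 epochs at that batch size yields 54 optimizer updates (the trailing partial accumulation window in each epoch is evidently not counted as an extra step in this run). In code:

```python
import math

num_examples, num_epochs = 300, 3
per_device_batch, grad_accum, num_gpus = 2, 8, 1

effective_batch = per_device_batch * grad_accum * num_gpus             # 16
micro_batches_per_epoch = math.ceil(num_examples / per_device_batch)   # 150
updates_per_epoch = micro_batches_per_epoch // grad_accum              # 18
print(effective_batch, updates_per_epoch * num_epochs)                 # 16 54
```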
[INFO|callbacks.py:310] 2024-07-16 19:27:37,133 >> {'loss': 0.6756, 'learning_rate': 4.8950e-05, 'epoch': 0.27, 'throughput': 1193.09}
[INFO|callbacks.py:310] 2024-07-16 19:28:17,112 >> {'loss': 0.6799, 'learning_rate': 4.5887e-05, 'epoch': 0.53, 'throughput': 1200.41}
[INFO|callbacks.py:310] 2024-07-16 19:28:56,484 >> {'loss': 0.6995, 'learning_rate': 4.1070e-05, 'epoch': 0.80, 'throughput': 1208.30}
[INFO|callbacks.py:310] 2024-07-16 19:29:36,117 >> {'loss': 0.6313, 'learning_rate': 3.4902e-05, 'epoch': 1.07, 'throughput': 1209.98}
[INFO|callbacks.py:310] 2024-07-16 19:30:17,804 >> {'loss': 0.5683, 'learning_rate': 2.7902e-05, 'epoch': 1.33, 'throughput': 1211.98}
[INFO|callbacks.py:310] 2024-07-16 19:30:57,971 >> {'loss': 0.4988, 'learning_rate': 2.0659e-05, 'epoch': 1.60, 'throughput': 1214.64}
[INFO|callbacks.py:310] 2024-07-16 19:31:38,903 >> {'loss': 0.5748, 'learning_rate': 1.3780e-05, 'epoch': 1.87, 'throughput': 1215.63}
[INFO|callbacks.py:310] 2024-07-16 19:32:15,869 >> {'loss': 0.5793, 'learning_rate': 7.8440e-06, 'epoch': 2.13, 'throughput': 1214.06}
[INFO|callbacks.py:310] 2024-07-16 19:32:55,941 >> {'loss': 0.5500, 'learning_rate': 3.3494e-06, 'epoch': 2.40, 'throughput': 1214.25}
[INFO|callbacks.py:310] 2024-07-16 19:33:38,528 >> {'loss': 0.5715, 'learning_rate': 6.7388e-07, 'epoch': 2.67, 'throughput': 1214.09}
[INFO|trainer.py:2329] 2024-07-16 19:34:11,515 >>
Training completed. Do not forget to share your model on huggingface.co/models =)
[INFO|trainer.py:3410] 2024-07-16 19:34:11,517 >> Saving model checkpoint to saves/Qwen-1.8B-Chat/lora/train_2024-07-16-18-56-15
[INFO|configuration_utils.py:733] 2024-07-16 19:34:12,032 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen-1_8B-Chat/snapshots/1d0f68de57b88cfde81f3c3e537f24464d889081/config.json
[INFO|configuration_utils.py:796] 2024-07-16 19:34:12,033 >> Model config QWenConfig {
  "architectures": [
    "QWenLMHeadModel"
  ],
  "attn_dropout_prob": 0.0,
  "auto_map": {
    "AutoConfig": "Qwen/Qwen-1_8B-Chat--configuration_qwen.QWenConfig",
    "AutoModelForCausalLM": "Qwen/Qwen-1_8B-Chat--modeling_qwen.QWenLMHeadModel"
  },
  "bf16": false,
  "emb_dropout_prob": 0.0,
  "fp16": false,
  "fp32": false,
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "kv_channels": 128,
  "layer_norm_epsilon": 1e-06,
  "max_position_embeddings": 8192,
  "model_type": "qwen",
  "no_bias": true,
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "onnx_safe": null,
  "rotary_emb_base": 10000,
  "rotary_pct": 1.0,
  "scale_attn_weights": true,
  "seq_length": 8192,
  "softmax_in_fp32": false,
  "tie_word_embeddings": false,
  "tokenizer_class": "QWenTokenizer",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "use_cache_kernel": false,
  "use_cache_quantization": false,
  "use_dynamic_ntk": true,
  "use_flash_attn": "auto",
  "use_logn_attn": true,
  "vocab_size": 151936
}
[INFO|tokenization_utils_base.py:2513] 2024-07-16 19:34:12,174 >> tokenizer config file saved in saves/Qwen-1.8B-Chat/lora/train_2024-07-16-18-56-15/tokenizer_config.json
[INFO|tokenization_utils_base.py:2522] 2024-07-16 19:34:12,174 >> Special tokens file saved in saves/Qwen-1.8B-Chat/lora/train_2024-07-16-18-56-15/special_tokens_map.json
[WARNING|ploting.py:89] 2024-07-16 19:34:12,511 >> No metric eval_loss to plot.
[WARNING|ploting.py:89] 2024-07-16 19:34:12,512 >> No metric eval_accuracy to plot.
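
The two warnings only mean that no evaluation set was configured, so there are no eval metrics to plot. The training loss logged by the callback above can still be plotted by hand; a sketch using those values verbatim, with the step numbers inferred from the logged epoch fractions (one entry every 5 steps):

```python
import matplotlib.pyplot as plt

# Loss values as reported by callbacks.py above.
steps = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]
loss = [0.6756, 0.6799, 0.6995, 0.6313, 0.5683, 0.4988, 0.5748, 0.5793, 0.5500, 0.5715]

plt.plot(steps, loss, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("Qwen-1.8B-Chat LoRA fine-tuning")
plt.savefig("training_loss.png")
```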
[INFO|modelcard.py:450] 2024-07-16 19:34:12,513 >> Dropping the following result as it does not have all the necessary fields:
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
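
Only the LoRA adapter weights and tokenizer files are written to the `saves/...` directory, so inference requires loading the base model first and attaching the adapter. A minimal sketch with PEFT, using the output directory reported above:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

adapter_dir = "saves/Qwen-1.8B-Chat/lora/train_2024-07-16-18-56-15"

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-1_8B-Chat", trust_remote_code=True, torch_dtype=torch.float16
)
model = PeftModel.from_pretrained(base, adapter_dir)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-1_8B-Chat", trust_remote_code=True)

# Optionally fold the adapter into the base weights for plain transformers inference.
merged = model.merge_and_unload()
```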