chenhugging committed on
Update README.md
README.md CHANGED
@@ -33,6 +33,11 @@ The following hyperparameters were used during training:
 - num_epochs: 1.0
 - mixed_precision_training: Native AMP
 
+### Training script
+
+CUDA_VISIBLE_DEVICES=0 python src/train_bash.py --stage sft --do_train True --model_name_or_path upstage/SOLAR-10.7B-v1.0 --template solar --finetuning_type lora --quantization_bit 4 --flash_attn True --dataset_dir data --dataset oncc_medqa_instruct --cutoff_len 1024 --learning_rate 0.0005 --num_train_epochs 1.0 --max_samples 5000 --per_device_train_batch_size 4 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --max_grad_norm 1.0 --logging_steps 10 --save_steps 100 --warmup_steps 10 --neftune_noise_alpha 0.5 --lora_rank 8 --lora_dropout 0.2 --lora_target wqkv --output_dir /workspace/solar-10b-ocn-v1 --fp16 True --plot_loss True
+
+
 ### Framework versions
 
 - PEFT 0.8.2
@@ -54,3 +59,4 @@ hf (pretrained=upstage/SOLAR-10.7B-v1.0,peft=chenhugging/solar-10b-ocn-v1,trust_
 |ocn   |Yaml |none | 0|acc | 0.83|± |0.0378|
 |aocnp |Yaml |none | 0|acc | 0.72|± |0.0451|
 
+
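The training script added in the first hunk produces a LoRA adapter on top of upstage/SOLAR-10.7B-v1.0. As a minimal sketch (not part of this commit), the adapter could be loaded for inference with `transformers` and `peft` roughly as follows; the prompt wording and the generation settings are illustrative assumptions, not taken from the model card:

```python
# Hypothetical usage sketch: load the SOLAR base model and attach the LoRA
# adapter produced by the training script above. Assumes torch, transformers,
# and peft are installed; dtype/device settings are illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "upstage/SOLAR-10.7B-v1.0"
adapter_id = "chenhugging/solar-10b-ocn-v1"

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id,
    torch_dtype=torch.float16,  # mirrors the --fp16 True training flag
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter_id)  # attach the LoRA weights
model.eval()

# The "solar" chat template used in training wraps turns roughly as below
# (assumed formatting, not confirmed by the card).
prompt = "### User:\nName one common beta blocker.\n\n### Assistant:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```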
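The table in the second hunk is lm-evaluation-harness output for the 0-shot `ocn` and `aocnp` tasks. A hedged reconstruction of how such numbers could be reproduced through the harness's Python API follows; the `ocn` and `aocnp` YAML task configs are assumed to be registered locally (they are not built-in tasks), and `trust_remote_code=True` is an assumed completion of the truncated `trust_` in the hunk header:

```python
# Hedged sketch of reproducing the accuracy table with EleutherAI's
# lm-evaluation-harness (v0.4+ Python API). Task names and the
# trust_remote_code flag are assumptions, not taken from the commit.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args=(
        "pretrained=upstage/SOLAR-10.7B-v1.0,"
        "peft=chenhugging/solar-10b-ocn-v1,"
        "trust_remote_code=True"
    ),
    tasks=["ocn", "aocnp"],
    num_fewshot=0,  # the table reports 0-shot accuracy
)
for task, metrics in results["results"].items():
    print(task, metrics)
```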