Nanobit committed
Commit a7a9a14 · unverified · parent e2786cc

fix(examples): remove is_*_derived as it's parsed automatically (#1297)

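The removed is_*_derived_model keys are redundant because the model family can be inferred from the base model's own Hugging Face config. Below is a minimal sketch of that idea in Python — hypothetical, not axolotl's actual internals; infer_model_family and DERIVED_FAMILIES are illustrative names — showing how a loader can read model_type from config.json instead of requiring a flag in the training YAML:

# Hypothetical sketch: infer the derived-model family from the HF config
# rather than from an is_*_derived_model key in the training YAML.
from transformers import AutoConfig

# Illustrative set of the families these examples used to declare by hand.
DERIVED_FAMILIES = {"llama", "mistral", "falcon", "qwen"}

def infer_model_family(base_model: str, trust_remote_code: bool = False) -> str | None:
    """Return the derived-model family for base_model, or None if unrecognized."""
    config = AutoConfig.from_pretrained(base_model, trust_remote_code=trust_remote_code)
    family = getattr(config, "model_type", None)
    return family if family in DERIVED_FAMILIES else None

# Example: infer_model_family("NousResearch/Llama-2-7b-hf") yields "llama",
# which is exactly what is_llama_derived_model: true used to state explicitly.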
devtools/dev_sharegpt.yml CHANGED
@@ -2,7 +2,6 @@
 base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: true
 load_in_4bit: false
examples/code-llama/13b/lora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-13b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: true
 load_in_4bit: false
examples/code-llama/13b/qlora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-13b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: true
examples/code-llama/34b/lora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-34b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: true
 load_in_4bit: false
examples/code-llama/34b/qlora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-34b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: true
examples/code-llama/7b/lora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: true
 load_in_4bit: false
examples/code-llama/7b/qlora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: true
examples/falcon/config-7b-lora.yml CHANGED
@@ -2,7 +2,7 @@ base_model: tiiuae/falcon-7b
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
-is_falcon_derived_model: true
+
 load_in_8bit: true
 load_in_4bit: false
 gptq: false
examples/falcon/config-7b-qlora.yml CHANGED
@@ -5,7 +5,7 @@ base_model: tiiuae/falcon-7b
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
-is_falcon_derived_model: true
+
 load_in_8bit: false
 # enable 4bit for QLoRA
 load_in_4bit: true
examples/falcon/config-7b.yml CHANGED
@@ -2,7 +2,7 @@ base_model: tiiuae/falcon-7b
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
-is_falcon_derived_model: true
+
 load_in_8bit: false
 load_in_4bit: false
 gptq: false
examples/llama-2/fft_optimized.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: false
examples/llama-2/gptq-lora.yml CHANGED
@@ -1,5 +1,4 @@
 base_model: TheBloke/Llama-2-7B-GPTQ
-is_llama_derived_model: false
 gptq: true
 gptq_disable_exllama: true
 model_type: AutoModelForCausalLM
examples/llama-2/loftq.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: false
examples/llama-2/lora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: true
 load_in_4bit: false
examples/llama-2/qlora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: true
examples/llama-2/relora.yml CHANGED
@@ -1,7 +1,7 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
+
 
 load_in_8bit: false
 load_in_4bit: true
examples/mistral/Mistral-7b-example/config.yml CHANGED
@@ -2,7 +2,6 @@
 base_model: mistralai/Mistral-7B-v0.1
 model_type: MistralForCausalLM
 tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: true
 
 load_in_8bit: true
 load_in_4bit: false
examples/mistral/config.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: mistralai/Mistral-7B-v0.1
 model_type: MistralForCausalLM
 tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: false
examples/mistral/qlora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: mistralai/Mistral-7B-v0.1
 model_type: MistralForCausalLM
 tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: true
examples/qwen/lora.yml CHANGED
@@ -2,7 +2,6 @@ base_model: Qwen/Qwen-7B
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 
-is_qwen_derived_model: true
 trust_remote_code: true
 
 load_in_8bit: true
examples/qwen/qlora.yml CHANGED
@@ -2,7 +2,6 @@ base_model: Qwen/Qwen-7B
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 
-is_qwen_derived_model: true
 trust_remote_code: true
 
 load_in_8bit: false
examples/tiny-llama/lora-mps.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: true
 load_in_4bit: false
examples/tiny-llama/lora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: true
 load_in_4bit: false
examples/tiny-llama/pretrain.yml CHANGED
@@ -2,7 +2,6 @@ base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: false
examples/tiny-llama/qlora.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
 
 load_in_8bit: false
 load_in_4bit: true
examples/yi-34B-chat/qlora.yml CHANGED
@@ -1,8 +1,7 @@
 base_model: 01-ai/Yi-34B-Chat
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: false
-is_llama_derived_model: true
+
 load_in_8bit: false
 load_in_4bit: true
 strict: false
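
For reference, these example files are consumed unchanged by the trainer; a cleaned-up config such as examples/llama-2/lora.yml is launched with the standard invocation from axolotl's README of this period (shown here for context):

accelerate launch -m axolotl.cli.train examples/llama-2/lora.yml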