# Merge configuration: model_stock merge of Qwen2.5-14B variants.

# Models to include in the merge.
models:
  - model: Qwen/Qwen2.5-14B-Instruct
  - model: Qwen/Qwen2.5-14B
  - model: Qwen/Qwen2.5-Coder-14B-Instruct

# Base model used as the reference point by the model_stock method.
base_model: Qwen/Qwen2.5-14B

merge_method: model_stock

parameters:
  normalize: true

# Target quantization for the merged model.
quantization:
  format: gguf
  target_size: 4k
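
# Usage sketch (assumptions: this file is saved as merge_config.yml, mergekit
# is installed, and the output directory name is illustrative):
#
#   mergekit-yaml merge_config.yml ./Qwen2.5-14B-merged --copy-tokenizer
#
# Conversion and quantization to gguf (per the quantization block above) would
# be a separate follow-up step, e.g. with llama.cpp's conversion tooling.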