TheBloke committed
Commit 5fda971 · Parent: ebf2f88

GPTQ model commit

config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "_name_or_path": "/workspace/process/llmware_dragon-mistral-7b-v0/source",
+  "architectures": [
+    "MistralForCausalLM"
+  ],
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.35.2",
+  "use_cache": true,
+  "vocab_size": 32000,
+  "quantization_config": {
+    "bits": 4,
+    "group_size": 128,
+    "damp_percent": 0.1,
+    "desc_act": true,
+    "sym": true,
+    "true_sequential": true,
+    "model_name_or_path": null,
+    "model_file_base_name": "model",
+    "quant_method": "gptq"
+  }
+}
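
Note: the embedded "quantization_config" block above is what lets recent
Transformers versions detect the GPTQ weights automatically. A minimal
loading sketch, assuming the optimum and auto-gptq packages are installed,
a CUDA device is available, and that this repo is published under the id
used below (an assumption, not recorded in the commit):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # repo id is an assumption - substitute the actual repo name
    model_id = "TheBloke/dragon-mistral-7B-v0-GPTQ"

    # from_pretrained reads quantization_config from config.json and
    # dispatches to the GPTQ kernels; device_map="auto" places the
    # 4-bit weights on the GPU
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
    tokenizer = AutoTokenizer.from_pretrained(model_id)
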
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.34.0.dev0"
+}
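
These generation defaults only pin the BOS/EOS token ids; everything else
falls back to library defaults ("_from_model_config": true). A small sketch
of how the file is consumed, reusing the same hypothetical repo id as above:

    from transformers import GenerationConfig

    gen_cfg = GenerationConfig.from_pretrained("TheBloke/dragon-mistral-7B-v0-GPTQ")
    print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 2
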
generation_test_hf_script.py ADDED
@@ -0,0 +1,89 @@
+
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+
+def load_rag_benchmark_tester_ds():
+
+    # pull 200 question rag benchmark test dataset from LLMWare HuggingFace repo
+    from datasets import load_dataset
+
+    ds_name = "llmware/rag_instruct_benchmark_tester"
+
+    dataset = load_dataset(ds_name)
+
+    print("update: loading RAG Benchmark test dataset - ", dataset)
+
+    test_set = []
+    for i, samples in enumerate(dataset["train"]):
+        test_set.append(samples)
+
+        # to view test set samples
+        # print("rag benchmark dataset test samples: ", i, samples)
+
+    return test_set
+
+
+def run_test(model_name, test_ds):
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    print("\nRAG Performance Test - 200 questions")
+    print("update: model - ", model_name)
+    print("update: device - ", device)
+
+    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+    model.to(device)
+
+    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+
+    for i, entries in enumerate(test_ds):
+
+        # prepare prompt packaging used in fine-tuning process
+        new_prompt = "<human>: " + entries["context"] + "\n" + entries["query"] + "\n" + "<bot>:"
+
+        inputs = tokenizer(new_prompt, return_tensors="pt")
+        start_of_output = len(inputs.input_ids[0])
+
+        # temperature: set at 0.3 for consistency of output
+        # max_new_tokens: set at 100 - may prematurely stop a few of the summaries
+
+        outputs = model.generate(
+            inputs.input_ids.to(device),
+            eos_token_id=tokenizer.eos_token_id,
+            pad_token_id=tokenizer.eos_token_id,
+            do_sample=True,
+            temperature=0.3,
+            max_new_tokens=100,
+        )
+
+        output_only = tokenizer.decode(outputs[0][start_of_output:], skip_special_tokens=True)
+
+        # quick/optional post-processing clean-up of potential fine-tuning artifacts
+
+        eot = output_only.find("<|endoftext|>")
+        if eot > -1:
+            output_only = output_only[:eot]
+
+        bot = output_only.find("<bot>:")
+        if bot > -1:
+            output_only = output_only[bot + len("<bot>:"):]
+
+        # end - post-processing
+
+        print("\n")
+        print(i, "llm_response - ", output_only)
+        print(i, "gold_answer - ", entries["answer"])
+
+    return 0
+
+
+if __name__ == "__main__":
+
+    test_ds = load_rag_benchmark_tester_ds()
+
+    model_name = "llmware/dragon-mistral-7b-v0"
+    output = run_test(model_name, test_ds)
+
+
generation_test_llmware_script.py ADDED
@@ -0,0 +1,70 @@
+
+from llmware.prompts import Prompt
+
+
+def load_rag_benchmark_tester_ds():
+
+    # pull 200 question rag benchmark test dataset from LLMWare HuggingFace repo
+    from datasets import load_dataset
+
+    ds_name = "llmware/rag_instruct_benchmark_tester"
+
+    dataset = load_dataset(ds_name)
+
+    print("update: loading RAG Benchmark test dataset - ", dataset)
+
+    test_set = []
+    for i, samples in enumerate(dataset["train"]):
+        test_set.append(samples)
+
+        # to view test set samples
+        # print("rag benchmark dataset test samples: ", i, samples)
+
+    return test_set
+
+
+def run_test(model_name, prompt_list):
+
+    print("\nupdate: Starting RAG Benchmark Inference Test - ", model_name)
+
+    # pull DRAGON / BLING model directly from catalog, i.e., no from_hf=True
+    prompter = Prompt().load_model(model_name)
+
+    for i, entries in enumerate(prompt_list):
+
+        prompt = entries["query"]
+        context = entries["context"]
+
+        response = prompter.prompt_main(prompt, context=context, prompt_name="default_with_context", temperature=0.3)
+
+        print("\nupdate: model inference output - ", i, response["llm_response"])
+        print("update: gold_answer - ", i, entries["answer"])
+
+        fc = prompter.evidence_check_numbers(response)
+        sc = prompter.evidence_comparison_stats(response)
+        sr = prompter.evidence_check_sources(response)
+
+        print("\nFact-Checking Tools")
+
+        # inner loop variables renamed to avoid shadowing 'entries' above
+        for fc_entry in fc:
+            for f, facts in enumerate(fc_entry["fact_check"]):
+                print("update: fact check - ", f, facts)
+
+        for sc_entry in sc:
+            print("update: comparison stats - ", sc_entry["comparison_stats"])
+
+        for sr_entry in sr:
+            for s, sources in enumerate(sr_entry["source_review"]):
+                print("update: sources - ", s, sources)
+
+    return 0
+
+
+if __name__ == "__main__":
+
+    core_test_set = load_rag_benchmark_tester_ds()
+
+    # one of the 7 gpu dragon models
+    gpu_model_name = "llmware/dragon-mistral-7b-v0"
+
+    output = run_test(gpu_model_name, core_test_set)
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f098226f0b8cc3e5b61a75261eb9afc7f6dbef6b9e3fb10ad8f6f1ccede1be54
+size 4158662280
quantize_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "bits": 4,
+  "group_size": 128,
+  "damp_percent": 0.1,
+  "desc_act": true,
+  "sym": true,
+  "true_sequential": true,
+  "model_name_or_path": null,
+  "model_file_base_name": "model"
+}
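
For reference, these settings map one-to-one onto Transformers' GPTQConfig,
so an equivalent quantization could be reproduced roughly as follows (a
sketch, assuming auto-gptq is installed and a GPU is available; the
calibration dataset is not recorded in this file, so "c4" below is an
assumption):

    from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

    base_model = "llmware/dragon-mistral-7b-v0"
    tokenizer = AutoTokenizer.from_pretrained(base_model)

    quant_config = GPTQConfig(
        bits=4,
        group_size=128,
        damp_percent=0.1,
        desc_act=True,
        sym=True,
        true_sequential=True,
        dataset="c4",  # assumption: calibration set not recorded here
        tokenizer=tokenizer,
    )

    # quantizes the FP16 weights on load
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        quantization_config=quant_config,
        device_map="auto",
    )
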
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
+}
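
Note on the settings above: "add_bos_token": true with "add_eos_token": false
means every encoded sequence gets <s> (id 1) prepended but no trailing </s>.
A quick check, reusing the same hypothetical repo id as earlier:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("TheBloke/dragon-mistral-7B-v0-GPTQ")
    ids = tok("hello world").input_ids
    print(ids[0] == tok.bos_token_id)   # True - BOS prepended
    print(ids[-1] == tok.eos_token_id)  # False - no EOS appended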