samidh committed
Commit 50155f5 · verified · 1 Parent(s): 6727ac2

Update app.py

Files changed (1):
  app.py +12 -3
app.py CHANGED
@@ -5,15 +5,24 @@ import os
 
 import torch
 from peft import PeftConfig, PeftModel
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
-os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
 
 base_model_name = "google/gemma-7b"
 adapter_model_name = "samidh/cope-g7bq-2c-hs.s1.5fpc.9-sx.s1.5.9o-VL.s1.5.9-HR.s5-SH.s5-l5e5-e3-d25-r8"
 
-model = AutoModelForCausalLM.from_pretrained(base_model_name, token=os.environ['HF_TOKEN'], device_map="auto")
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    #bnb_4bit_quant_type="nf4",
+    #bnb_4bit_compute_dtype=torch.bfloat16,
+    #bnb_4bit_use_double_quant=True
+)
+
+model = AutoModelForCausalLM.from_pretrained(base_model_name,
+                                             token=os.environ['HF_TOKEN'],
+                                             quantization_config=bnb_config,
+                                             device_map="auto")
 model = PeftModel.from_pretrained(model, adapter_model_name, token=os.environ['HF_TOKEN'])
 model.merge_and_unload()
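
For context, the change above switches the Gemma base model to 4-bit loading via a BitsAndBytesConfig before the LoRA adapter is attached and merged. The following is a minimal inference sketch, not part of the commit: it assumes the new app.py code above has already run, that HF_TOKEN is set in the environment, and that the prompt text and generation settings are placeholders chosen for illustration.

# Usage sketch (assumptions: snippet above has executed, HF_TOKEN is set,
# `model` is the merged PEFT model; prompt and decoding settings are illustrative).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=os.environ['HF_TOKEN'])

prompt = "Example input for the classifier."  # hypothetical prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=8)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))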