Hugging Face Spaces — running on L4 hardware.
Commit: "Update app.py" (Browse files)
Changed file: app.py
@@ -9,8 +9,10 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

 device = 'cuda' if torch.cuda.is_available() else 'cpu'

-base_model_name = "google/gemma-7b"
-adapter_model_name = "samidh/cope-g7bq-2c-hs.s1.5fpc.9-sx.s1.5.9o-VL.s1.5.9-HR.s5-SH.s5-l5e5-e3-d25-r8"
+#base_model_name = "google/gemma-7b"
+base_model_name = "google/gemma-2-9b"
+#adapter_model_name = "samidh/cope-g7bq-2c-hs.s1.5fpc.9-sx.s1.5.9o-VL.s1.5.9-HR.s5-SH.s5-l5e5-e3-d25-r8"
+adapter_model_name = "cope-project/cope-g2.9b-2c-hs.s1.5fpc.9-sx.s1.5.9o-vl.s1.5.9-hr.s5-sh.s5.l5e5-e3-d0-r8"

 bnb_config = BitsAndBytesConfig(
     load_in_8bit=True,