Update app.py
app.py CHANGED
@@ -8,6 +8,7 @@ from peft import PeftConfig, PeftModel
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
+os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
 
 base_model_name = "google/gemma-7b"
 adapter_model_name = "samidh/cope-g7bq-2c-hs.s1.5fpc.9-sx.s1.5.9o-VL.s1.5.9-HR.s5-SH.s5-l5e5-e3-d25-r8"
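For context, a minimal sketch of how the top of app.py might look after this change. Only the lines visible in the diff are taken from the actual file; the import statements and their ordering are assumptions. PYTORCH_CUDA_ALLOC_CONF is read when PyTorch's CUDA caching allocator initializes (lazily, at the first GPU allocation), so the setting generally needs to be in place before any tensors or model weights are moved to the GPU.

# Sketch of the top of app.py after this change (assumed layout; only the
# lines shown in the diff above are taken from the real file).
import os

import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Set the allocator option before any CUDA allocation happens. With
# expandable_segments enabled, the caching allocator can grow existing
# segments instead of reserving new fixed-size blocks, which can reduce
# fragmentation-related OOMs when loading a 7B base model plus a PEFT adapter.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

base_model_name = "google/gemma-7b"
adapter_model_name = "samidh/cope-g7bq-2c-hs.s1.5fpc.9-sx.s1.5.9o-VL.s1.5.9-HR.s5-SH.s5-l5e5-e3-d25-r8"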