AlekseyCalvin committed
Update app.py
app.py
CHANGED
@@ -4,7 +4,7 @@ import logging
 import torch
 from PIL import Image
 import spaces
-from diffusers import DiffusionPipeline
+from diffusers import DiffusionPipeline, AutoencoderTiny
 import copy
 import random
 import time
@@ -14,8 +14,12 @@ with open('loras.json', 'r') as f:
     loras = json.load(f)
 
 # Initialize the base model
+dtype = torch.float16
 base_model = "AlekseyCalvin/HistoricColorSoonr_Schnell"
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
+pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
+torch.cuda.empty_cache()
+
 
 MAX_SEED = 2**32-1
 
@@ -56,7 +60,7 @@ def update_selection(evt: gr.SelectData, width, height):
         height,
     )
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=50)
 def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
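For reference, here is a minimal sketch of the initialization and generation path as it stands after this commit. It assumes the surrounding app.py follows the usual Gradio LoRA-gallery pattern; everything inside generate_image below the generator line (the pipe(...) call, its num_inference_steps, guidance_scale and joint_attention_kwargs arguments, and the return value) is an assumption for illustration, not taken from the diff.

import torch
import spaces
from diffusers import DiffusionPipeline, AutoencoderTiny

# Initialize the base model in fp16 and swap in the tiny FLUX VAE (taef1).
dtype = torch.float16
base_model = "AlekseyCalvin/HistoricColorSoonr_Schnell"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
torch.cuda.empty_cache()  # release memory held by the replaced full-size VAE

MAX_SEED = 2**32 - 1

@spaces.GPU(duration=50)  # request a ZeroGPU slot for up to 50 seconds per call
def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
    pipe.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(seed)
    # Assumed sketch of the rest of the function; only the two lines above appear in the diff.
    image = pipe(
        prompt=f"{prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    return image, seed

Swapping the stock VAE for madebyollin/taef1 (a tiny autoencoder distilled for FLUX.1) and calling torch.cuda.empty_cache() trades a little decode fidelity for a smaller memory footprint and faster decoding, which helps the fp16 pipeline fit in GPU memory and keeps each call within the 50-second @spaces.GPU budget.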