Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -80,9 +80,9 @@ HF_TOKEN = os.getenv("HF_TOKEN")
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

 def load_and_prepare_model():
-    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(device=device, dtype=torch.bfloat16)
-    vaeRV = AutoencoderKL.from_pretrained("
-    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear"
+    #vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(device=device, dtype=torch.bfloat16)
+    vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", safety_checker=None, use_safetensors=False).to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
+    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
     pipe = StableDiffusionXLPipeline.from_pretrained(
         'ford442/RealVisXL_V5.0_BF16',
         #torch_dtype=torch.bfloat16,
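This hunk swaps the VAE source: the stock stabilityai/sdxl-vae load is commented out and the VAE bundled with SG161222/RealVisXL_V5.0 is loaded in bfloat16 instead, while the Euler ancestral scheduler keeps coming from ford442/RealVisXL_V5.0_BF16 with a scaled_linear beta schedule. As a rough sketch of how these pieces are conventionally wired together in diffusers (the rest of load_and_prepare_model() is not visible in this hunk, so the subfolder="vae" argument and the vae=/scheduler= overrides below are assumptions, not the Space's exact code):

# Sketch only: conventional diffusers assembly of the components named in this hunk.
# Repo IDs come from the diff; subfolder="vae" and the vae=/scheduler= overrides are assumptions.
import torch
from diffusers import AutoencoderKL, EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def load_and_prepare_model():
    # RealVis VAE in bfloat16, moved to the GPU before the pipeline is built
    vae = AutoencoderKL.from_pretrained(
        "SG161222/RealVisXL_V5.0", subfolder="vae"
    ).to(device, torch.bfloat16)
    # Euler ancestral scheduler with the scaled_linear beta schedule, as in the diff
    sched = EulerAncestralDiscreteScheduler.from_pretrained(
        "ford442/RealVisXL_V5.0_BF16", subfolder="scheduler", beta_schedule="scaled_linear"
    )
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "ford442/RealVisXL_V5.0_BF16",
        vae=vae,
        scheduler=sched,
    )
    return pipe.to(device)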
@@ -134,9 +134,9 @@ def save_image(img):
     return unique_name

 def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
-    filename= f'
+    filename= f'rv_C_{timestamp}.txt'
     with open(filename, "w") as f:
-        f.write(f"Realvis 5.0 (Tester
+        f.write(f"Realvis 5.0 (Tester C) \n")
         f.write(f"Date/time: {timestamp} \n")
         f.write(f"Prompt: {prompt} \n")
         f.write(f"Steps: {num_inference_steps} \n")
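The note file written by uploadNote() is renamed here to the rv_C_{timestamp}.txt pattern, and its first line now reads "Realvis 5.0 (Tester C)". How timestamp is produced is not part of this diff; assuming the caller passes epoch seconds, the new artifact names would look like this:

# Hypothetical illustration of the rv_C_<timestamp> naming introduced in this commit;
# the Space's real timestamp format is not shown in the diff, epoch seconds are assumed.
import time

timestamp = str(int(time.time()))       # e.g. "1716912345" (assumption)
note_path = f"rv_C_{timestamp}.txt"     # note file written by uploadNote()
image_path = f"rv_C_{timestamp}.png"    # image saved later by generate_30/60/90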
@@ -145,10 +145,6 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"Use Model Dtype: no \n")
         f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
         f.write(f"Model VAE: sdxl-vae-bf16 before cuda then attn_proc / scale factor 8 \n")
-        f.write(f"Model UNET: sexy_beauty model \n")
-        f.write(f"Model HiDiffusion OFF \n")
-        f.write(f"Model do_resize ON \n")
-        f.write(f"added torch to prereq and changed accellerate \n")
     upload_to_ftp(filename)

 @spaces.GPU(duration=30)
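This hunk trims four stale description lines from the note (the UNET, HiDiffusion, do_resize and dependency remarks), after which the note is pushed out through upload_to_ftp(). That helper is defined elsewhere in app.py and is not part of this diff; a minimal sketch of such an uploader, assuming plain ftplib with placeholder environment variable names, would be:

# Minimal sketch of an FTP upload helper; the host/user/password variable names are
# placeholders and not taken from the Space's actual code.
import os
from ftplib import FTP

def upload_to_ftp(filename: str) -> None:
    host = os.getenv("FTP_HOST")        # assumed env var names
    user = os.getenv("FTP_USER")
    password = os.getenv("FTP_PASS")
    with FTP(host) as ftp:
        ftp.login(user=user, passwd=password)
        with open(filename, "rb") as fh:
            # STOR uploads the local file under the same name on the server
            ftp.storbinary(f"STOR {os.path.basename(filename)}", fh)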
@@ -184,7 +180,7 @@ def generate_30(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"
+    sd_image_path = f"rv_C_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     unique_name = str(uuid.uuid4()) + ".png"
@@ -224,7 +220,7 @@ def generate_60(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"
+    sd_image_path = f"rv_C_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     unique_name = str(uuid.uuid4()) + ".png"
@@ -264,7 +260,7 @@ def generate_90(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"
+    sd_image_path = f"rv_C_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     unique_name = str(uuid.uuid4()) + ".png"
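generate_30, generate_60 and generate_90 receive the same one-line change: the rendered image is now saved under the rv_C_{timestamp}.png name before being uploaded. Pieced together from the visible context lines, the shared tail of these functions amounts to the sketch below; the contents of options and whatever happens after unique_name is assigned are not shown in the diff:

# Reconstruction of the shared save-and-upload tail of generate_30/60/90, based only on
# the context lines visible in these hunks; uploadNote and upload_to_ftp are the helpers
# defined earlier in app.py, and `options` is assumed to hold the pipeline call arguments.
import uuid

def _finish_generation(pipe, options, prompt, num_inference_steps, guidance_scale, timestamp):
    uploadNote(prompt, num_inference_steps, guidance_scale, timestamp)
    batch_options = options.copy()
    rv_image = pipe(**batch_options).images[0]      # first (only) image of the batch
    sd_image_path = f"rv_C_{timestamp}.png"         # new naming from this commit
    # Pillow PNG save with zlib compression disabled (compress_level=0)
    rv_image.save(sd_image_path, optimize=False, compress_level=0)
    upload_to_ftp(sd_image_path)
    unique_name = str(uuid.uuid4()) + ".png"        # its later use is outside these hunks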