cocktailpeanut committed
Commit 3c2cdca · 1 Parent(s): fcc8012
Files changed (2):
  1. app.py +16 -11
  2. requirements.txt +6 -6
app.py CHANGED
@@ -1,7 +1,7 @@
 from email.policy import default
 from json import encoder
 import gradio as gr
-import spaces
+#import spaces
 import numpy as np
 import torch
 import requests
@@ -311,7 +311,8 @@ def set_attention_processor(unet,id_length,is_ipadapter = False):
             hidden_size = unet.config.block_out_channels[block_id]
         if cross_attention_dim is None:
             if name.startswith("up_blocks") :
-                attn_procs[name] = SpatialAttnProcessor2_0(id_length = id_length)
+                device = devicetorch.get(torch)
+                attn_procs[name] = SpatialAttnProcessor2_0(id_length = id_length, device=device)
                 total_count +=1
             else:
                 attn_procs[name] = AttnProcessor()
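Note: this hunk threads an explicit device into SpatialAttnProcessor2_0 instead of letting the processor assume CUDA. The class itself is defined elsewhere in app.py and is not shown in this commit, so the constructor below is only a hypothetical sketch of the shape such a change implies:

import torch

class SpatialAttnProcessor2_0:  # sketch only; the real definition lives elsewhere in app.py
    def __init__(self, id_length=4, device="cpu", dtype=torch.float16):
        self.id_length = id_length
        self.device = device      # "cuda", "mps", or "cpu", as picked at startup
        self.dtype = dtype

    def _zeros(self, *shape):
        # internal buffers follow the stored device instead of a hard-coded "cuda"
        return torch.zeros(*shape, device=self.device, dtype=self.dtype)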
@@ -418,7 +419,8 @@ cur_step = 0
 id_length = 4
 total_length = 5
 cur_model_type = ""
-device="cuda"
+#device="cuda"
+device = devicetorch.get(torch)
 global attn_procs,unet
 attn_procs = {}
 ###
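The hard-coded device="cuda" gives way to devicetorch.get(torch), which is expected to return whichever backend the local machine actually has. A hand-rolled equivalent without the extra dependency (an assumption about the helper's behaviour, not its actual implementation) could look like this:

import torch

def pick_device() -> str:
    # Rough stand-in for devicetorch.get(torch): prefer CUDA, then Apple MPS,
    # otherwise fall back to the CPU.
    if torch.cuda.is_available():
        return "cuda"
    mps = getattr(torch.backends, "mps", None)
    if mps is not None and mps.is_available():
        return "mps"
    return "cpu"

device = pick_device()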
@@ -441,27 +443,27 @@ use_safetensors= False
 ###
 pipe2 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
     models_dict["Unstable"], torch_dtype=torch.float16, use_safetensors=use_safetensors)
-pipe2 = pipe2.to("cpu")
+pipe2 = pipe2.to(device)
 pipe2.load_photomaker_adapter(
     os.path.dirname(photomaker_path),
     subfolder="",
     weight_name=os.path.basename(photomaker_path),
     trigger_word="img" # define the trigger word
 )
-pipe2 = pipe2.to("cpu")
+pipe2 = pipe2.to(device)
 pipe2.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
 pipe2.fuse_lora()
 
 pipe4 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
     models_dict["RealVision"], torch_dtype=torch.float16, use_safetensors=True)
-pipe4 = pipe4.to("cpu")
+pipe4 = pipe4.to(device)
 pipe4.load_photomaker_adapter(
     os.path.dirname(photomaker_path),
     subfolder="",
     weight_name=os.path.basename(photomaker_path),
     trigger_word="img" # define the trigger word
 )
-pipe4 = pipe4.to("cpu")
+pipe4 = pipe4.to(device)
 pipe4.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
 pipe4.fuse_lora()
 
@@ -502,7 +504,7 @@ def change_visiale_by_model_type(_model_type):
 
 
 ######### Image Generation ##############
-@spaces.GPU(duration=120)
+#@spaces.GPU(duration=120)
 def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_name, _Ip_Adapter_Strength ,_style_strength_ratio, guidance_scale, seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt,prompt_array,G_height,G_width,_comic_type):
     _model_type = "Photomaker" if _model_type == "Using Ref Images" else "original"
     if _model_type == "Photomaker" and "img" not in general_prompt:
@@ -557,7 +559,8 @@ def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_nam
     if start_merge_step > 30:
         start_merge_step = 30
     print(f"start_merge_step:{start_merge_step}")
-    generator = torch.Generator(device="cuda").manual_seed(seed_)
+    device = devicetorch.get(torch)
+    generator = torch.Generator(device=device).manual_seed(seed_)
     sa32, sa64 = sa32_, sa64_
     id_length = id_length_
     clipped_prompts = prompts[:]
@@ -566,7 +569,8 @@ def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_nam
     print(prompts)
     id_prompts = prompts[:id_length]
     real_prompts = prompts[id_length:]
-    torch.cuda.empty_cache()
+    devicetorch.empty_cache(torch)
+    #torch.cuda.empty_cache()
     write = True
     cur_step = 0
 
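In the same spirit, torch.cuda.empty_cache() becomes devicetorch.empty_cache(torch), so clearing the allocator cache no longer assumes a CUDA build. A backend-aware stand-in (again an assumption about what the helper does, not its actual source) might be:

import torch

def empty_cache_any(device: str) -> None:
    # Only call the cache-clearing API that exists for the active backend.
    if device == "cuda" and torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif device == "mps" and hasattr(torch, "mps"):
        torch.mps.empty_cache()
    # nothing to clear on plain CPU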
@@ -804,4 +808,5 @@ with gr.Blocks(css=css) as demo:
 
     # demo.load(None, None, None, _js=load_js)
 
-    demo.launch(server_name="0.0.0.0", share = True if use_va else False)
+    #demo.launch(server_name="0.0.0.0", share = True if use_va else False)
+    demo.launch()
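Dropping server_name="0.0.0.0" and the share flag means Gradio now binds only to localhost with no public tunnel, which suits a machine-local install. The previous behaviour can still be requested per launch, for example:

# local-only, the new default in this commit
demo.launch()

# reachable from the LAN and via a temporary public share link
# demo.launch(server_name="0.0.0.0", share=True)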
 
 
requirements.txt CHANGED
@@ -1,14 +1,14 @@
-xformers==0.0.20
-torch==2.0.1
-torchvision==0.15.2
+#xformers==0.0.20
+#torch==2.0.1
+#torchvision==0.15.2
 diffusers==0.25.0
 transformers==4.36.2
 huggingface-hub==0.20.2
-spaces==0.19.4
+#spaces==0.19.4
 numpy
 accelerate
 safetensors
 omegaconf
 peft
-spaces==0.19.4
-Pillow==9.5.0
+#spaces==0.19.4
+Pillow==9.5.0
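With the CUDA-pinned xformers/torch/torchvision entries (and the Hugging Face spaces package) commented out, the app relies on whatever PyTorch build is already installed for the local hardware. A small diagnostic, not part of the repo, to confirm what that build can actually see:

import torch

# Print the installed torch build and the accelerators it reports,
# to check that the locally installed wheel matches the hardware.
print("torch", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
mps = getattr(torch.backends, "mps", None)
print("MPS available:", bool(mps and mps.is_available()))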