Anonymous committed
Commit 2cc3d41
Parent: b8f1e2b

clean code
pipeline_freescale.py CHANGED
@@ -33,6 +33,7 @@ from inspect import isfunction
 from functools import partial
 import numpy as np
 
+import torch.nn.functional as F
 from diffusers.models.attention import BasicTransformerBlock
 from scale_attention import ori_forward, scale_forward
 
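The only change here is a new functional alias. Presumably (the hunk itself shows nothing beyond the import) `F` backs functional ops elsewhere in the pipeline, such as upsampling latents between FreeScale's resolution stages; a minimal sketch of that pattern, with made-up shapes rather than anything read from the pipeline:

import torch
import torch.nn.functional as F

# Hypothetical illustration: upscale a 4-channel SDXL-style latent from
# 128x128 to 256x256 between resolution stages. Shapes are illustrative,
# not taken from the repository.
latents = torch.randn(1, 4, 128, 128)
upscaled = F.interpolate(latents, scale_factor=2, mode="bicubic")
print(upscaled.shape)  # torch.Size([1, 4, 256, 256])

The same import lands in pipeline_freescale_turbo.py below.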
pipeline_freescale_turbo.py CHANGED
@@ -33,6 +33,7 @@ from inspect import isfunction
 from functools import partial
 import numpy as np
 
+import torch.nn.functional as F
 from diffusers.models.attention import BasicTransformerBlock
 from scale_attention_turbo import ori_forward, scale_forward
 
@@ -815,7 +816,7 @@ class StableDiffusionXLPipeline_Turbo(DiffusionPipeline, FromSingleFileMixin, Lo
             height, width = resolutions_list[0]
             target_sizes = resolutions_list[1:]
             if not restart_steps:
-                restart_steps = [1] * len(target_sizes)
+                restart_steps = [int(num_inference_steps*0.5)] * len(target_sizes)
         else:
             height = height or self.default_sample_size * self.vae_scale_factor
             width = width or self.default_sample_size * self.vae_scale_factor
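The second hunk changes the default restart schedule: where every target resolution previously got a single restart step, the default is now half of `num_inference_steps` per target, so it scales with the (typically small) step count of a turbo run. A worked example with illustrative values, not taken from the diff:

num_inference_steps = 4                      # e.g. a typical SDXL-Turbo step count
target_sizes = [(2048, 2048), (4096, 4096)]  # two hypothetical upscaling stages

old_default = [1] * len(target_sizes)                               # [1, 1]
new_default = [int(num_inference_steps * 0.5)] * len(target_sizes)  # [2, 2]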
scale_attention.py CHANGED
@@ -2,8 +2,7 @@ from typing import Any, Dict, Optional
 
 import torch
 import torch.nn.functional as F
-from torch import nn
-from einops import rearrange, repeat
+from einops import rearrange
 import random
 
 def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
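This hunk drops imports the module evidently no longer uses (`nn` and `repeat`), keeping only `rearrange`; the identical cleanup follows in scale_attention_turbo.py. As a hypothetical illustration of the kind of call the retained import supports (not taken from this repo), scale-attention code commonly flattens spatial feature maps into token sequences and back:

import torch
from einops import rearrange

# Hypothetical example: flatten a feature map into attention tokens, then
# restore the spatial layout. Shapes are illustrative.
x = torch.randn(2, 64, 32, 32)                 # (batch, channels, h, w)
tokens = rearrange(x, "b c h w -> b (h w) c")  # (2, 1024, 64)
x_back = rearrange(tokens, "b (h w) c -> b c h w", h=32, w=32)
assert torch.equal(x, x_back)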
scale_attention_turbo.py CHANGED
@@ -2,8 +2,7 @@ from typing import Any, Dict, Optional
 
 import torch
 import torch.nn.functional as F
-from torch import nn
-from einops import rearrange, repeat
+from einops import rearrange
 import random
 
 def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):