# NOTE: web-page extraction residue ("Spaces: Runtime error", file-size banner,
# commit 3aaaaf0, line-number gutter) commented out so this file parses as Python.
# import spaces
import random
import gradio as gr
import numpy as np
import torch
from PIL import Image
def setup_seed(seed):
    """Seed every RNG in play (``random``, NumPy, torch CPU and all CUDA
    devices) and force deterministic cuDNN kernels, so repeated runs with
    the same seed produce the same image."""
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
    torch.backends.cudnn.deterministic = True
# Pick the compute device once at import time: first CUDA card if present,
# otherwise fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
### PeRFlow-T2I
# Load the PeRFlow-distilled SDXL (DreamShaper) checkpoint in fp16.
from diffusers import StableDiffusionXLPipeline
pipe = StableDiffusionXLPipeline.from_pretrained("hansyan/perflow-sdxl-dreamshaper", torch_dtype=torch.float16, use_safetensors=True, variant="v0-fix")
# Swap in the project's piecewise-rectified-flow scheduler (few-step sampling).
from src.scheduler_perflow import PeRFlowScheduler
pipe.scheduler = PeRFlowScheduler.from_config(pipe.scheduler.config, prediction_type="ddim_eps", num_time_windows=4)
# NOTE(review): hard-codes "cuda:0" rather than the `device` computed above —
# this will fail on CPU-only machines; presumably intended for GPU Spaces only.
pipe.to("cuda:0", torch.float16)
# pipe_t2i = None
### gradio
# @spaces.GPU
def generate(text, num_inference_steps, cfg_scale, seed):
    """Run the PeRFlow-SDXL pipeline on a text prompt and return a PIL image.

    Args:
        text: user prompt; a fixed quality-boosting prefix is prepended.
        num_inference_steps: denoising step count (coerced to int; the UI
            passes a dropdown value).
        cfg_scale: classifier-free guidance scale (coerced to float).
        seed: random seed (coerced to int) so results are reproducible.

    Returns:
        PIL.Image.Image: the generated 1024x1024 RGB image.

    Raises:
        ValueError: if `seed` or `num_inference_steps` is not numeric.
    """
    setup_seed(int(seed))
    num_inference_steps = int(num_inference_steps)
    cfg_scale = float(cfg_scale)
    prompt_prefix = "photorealistic, uhd, high resolution, high quality, highly detailed; "
    neg_prompt = "distorted, blur, low-quality, haze, out of focus"
    text = prompt_prefix + text
    samples = pipe(
        prompt=[text],
        negative_prompt=[neg_prompt],
        height=1024,
        width=1024,
        num_inference_steps=num_inference_steps,
        guidance_scale=cfg_scale,
        output_type='pt',
    ).images
    # (1, C, H, W) float tensor in [0, 1] -> (H, W, C) float array in [0, 255].
    samples = samples.squeeze(0).permute(1, 2, 0).cpu().numpy() * 255.
    # Clip before the uint8 cast: any value even marginally outside [0, 255]
    # would otherwise wrap around (e.g. 257.0 -> 1), producing speckle artifacts.
    samples = np.clip(samples, 0, 255).astype(np.uint8)
    # Drop a possible alpha channel and hand back an RGB PIL image.
    return Image.fromarray(samples[:, :, :3])
# layout
# Custom CSS: center all h1/h2/h3 headings and cap the app width at 768px.
css = """
h1 {
text-align: center;
display:block;
}
h2 {
text-align: center;
display:block;
}
h3 {
text-align: center;
display:block;
}
.gradio-container {
max-width: 768px !important;
}
"""
# Build the Gradio UI: a prompt box, sampling controls, a generate button,
# and the output image, all driven by the same `generate` callback.
with gr.Blocks(title="PeRFlow-SDXL", css=css) as interface:
    gr.Markdown(
        """
# PeRFlow-SDXL
GitHub: [https://github.com/magic-research/piecewise-rectified-flow](https://github.com/magic-research/piecewise-rectified-flow) <br/>
Models: [https://huggingface.co/hansyan/perflow-sdxl-dreamshaper](https://huggingface.co/hansyan/perflow-sdxl-dreamshaper)
<br/>
"""
    )
    with gr.Column():
        text = gr.Textbox(
            label="Input Prompt",
            value="masterpiece, A closeup face photo of girl, wearing a rain coat, in the street, heavy rain, bokeh",
        )
        with gr.Row():
            num_inference_steps = gr.Dropdown(label='Num Inference Steps', choices=[4, 5, 6, 7, 8], value=6, interactive=True)
            cfg_scale = gr.Dropdown(label='CFG scale', choices=[1.5, 2.0, 2.5], value=2.0, interactive=True)
            seed = gr.Textbox(label="Random Seed", value=42)
        submit = gr.Button(scale=1, variant='primary')
    # with gr.Column():
    #     with gr.Row():
    output_image = gr.Image(label='Generated Image')
    gr.Markdown(
        """
Here are some examples provided:
- “masterpiece, A closeup face photo of girl, wearing a rain coat, in the street, heavy rain, bokeh”
- “RAW photo, a handsome man, wearing a black coat, outside, closeup face”
- “RAW photo, a red luxury car, studio light”
- “masterpiece, A beautiful cat bask in the sun”
"""
    )
    # activate: every trigger (prompt submit, seed submit, button click)
    # routes through the same callback with the same inputs/outputs.
    _wiring = dict(
        fn=generate,
        inputs=[text, num_inference_steps, cfg_scale, seed],
        outputs=[output_image],
    )
    text.submit(**_wiring)
    seed.submit(**_wiring)
    submit.click(**_wiring)

if __name__ == '__main__':
    interface.queue(max_size=10)
    # interface.launch()
    interface.launch(share=True)