# Show-o Gradio demo: gradio/app_w_clip.py
import os
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "False"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
import numpy as np
import gradio as gr
import torch
from PIL import Image
from omegaconf import OmegaConf
from transformers import AutoTokenizer
import torch.nn.functional as F
from transformers import CLIPImageProcessor
import sys
sys.path.insert(0, ".")
from training import conversation as conversation_lib
from prompting_utils import UniversalPrompting, create_attention_mask_predict_next, create_attention_mask_for_mmu_vit
from training_utils import image_transform
from models import Showo, MAGVITv2, get_mask_chedule, CLIPVisionTower
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
conversation_lib.default_conversation = conversation_lib.conv_templates["phi1.5"]
SYSTEM_PROMPT = "A chat between a curious user and an artificial intelligence assistant. " \
"The assistant gives helpful, detailed, and polite answers to the user's questions."
SYSTEM_PROMPT_LEN = 28
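# This demo uses two Show-o checkpoints:
#   - a discrete checkpoint (MAGVIT-v2 image tokenizer) for text-to-image
#     generation, text-guided inpainting, and extrapolation;
#   - a continuous checkpoint (CLIP-ViT vision tower) for multimodal
#     understanding (the chat panel).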
def load_discrete_checkpoint():
config = OmegaConf.load("configs/showo_demo.yaml")
tokenizer = AutoTokenizer.from_pretrained(config.model.showo.llm_model_path, padding_side="left")
uni_prompting = UniversalPrompting(tokenizer, max_text_len=config.dataset.preprocessing.max_seq_length,
special_tokens=("<|soi|>", "<|eoi|>", "<|sov|>", "<|eov|>", "<|t2i|>", "<|mmu|>",
"<|t2v|>", "<|v2v|>", "<|lvg|>"),
ignore_id=-100, cond_dropout_prob=config.training.cond_dropout_prob)
vq_model = MAGVITv2()
vq_model = vq_model.from_pretrained(config.model.vq_model.vq_model_name).to(device)
vq_model.requires_grad_(False)
vq_model.eval()
model = Showo.from_pretrained(config.model.showo.pretrained_model_path).to(device)
model.eval()
mask_token_id = model.config.mask_token_id
return config, uni_prompting, tokenizer, vq_model, model, mask_token_id
config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen, mask_token_id = load_discrete_checkpoint()
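# Checkpoint with a CLIP-ViT vision tower, used for multimodal understanding.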
def load_continuous_checkpoint():
config = OmegaConf.load("configs/showo_demo_w_clip_vit.yaml")
tokenizer = AutoTokenizer.from_pretrained(config.model.showo.llm_model_path, padding_side="left")
uni_prompting = UniversalPrompting(tokenizer, max_text_len=config.dataset.preprocessing.max_seq_length,
special_tokens=(
"<|soi|>", "<|eoi|>", "<|sov|>", "<|eov|>", "<|t2i|>", "<|mmu|>", "<|t2v|>",
"<|v2v|>", "<|lvg|>"),
ignore_id=-100, cond_dropout_prob=config.training.cond_dropout_prob)
vision_tower_name = "openai/clip-vit-large-patch14-336"
vision_tower = CLIPVisionTower(vision_tower_name).to(device)
clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower_name)
model = Showo.from_pretrained(config.model.showo.pretrained_model_path).to(device)
model.eval()
return config, uni_prompting, tokenizer, model, vision_tower, clip_image_processor
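# The understanding checkpoint is loaded lazily on the first chat request
# (see multimodal_understanding), so the demo starts with only the generation model.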
config_mmu = uni_prompting_mmu = tokenizer_mmu = model_mmu = vision_tower = clip_image_processor = None
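# Text-to-image generation: start from fully masked image tokens, decode them
# iteratively with (optional) classifier-free guidance, then detokenize with MAGVIT-v2.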
def text_to_image_generation(input_text, guidance_scale, generation_timesteps):
config, uni_prompting, tokenizer, vq_model, model = config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen
prompts = [input_text]
config.training.batch_size = config.batch_size = 1
config.training.guidance_scale = config.guidance_scale = guidance_scale
config.training.generation_timesteps = config.generation_timesteps = generation_timesteps
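    # Every image position starts as the mask token; t2i_generate fills them in.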
image_tokens = torch.ones((len(prompts), config.model.showo.num_vq_tokens),
dtype=torch.long, device=device) * mask_token_id
input_ids, _ = uni_prompting((prompts, image_tokens), 't2i_gen')
if config.training.guidance_scale > 0:
uncond_input_ids, _ = uni_prompting(([''] * len(prompts), image_tokens), 't2i_gen')
attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
rm_pad_in_image=True)
else:
attention_mask = create_attention_mask_predict_next(input_ids,
pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
rm_pad_in_image=True)
uncond_input_ids = None
if config.get("mask_schedule", None) is not None:
schedule = config.mask_schedule.schedule
args = config.mask_schedule.get("params", {})
mask_schedule = get_mask_chedule(schedule, **args)
else:
mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))
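    # Iteratively unmask image tokens over `generation_timesteps` steps,
    # following the selected mask schedule.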
with torch.no_grad():
gen_token_ids = model.t2i_generate(
input_ids=input_ids,
uncond_input_ids=uncond_input_ids,
attention_mask=attention_mask,
guidance_scale=config.training.guidance_scale,
temperature=config.training.get("generation_temperature", 1.0),
timesteps=config.training.generation_timesteps,
noise_schedule=mask_schedule,
noise_type=config.training.get("noise_type", "mask"),
seq_len=config.model.showo.num_vq_tokens,
uni_prompting=uni_prompting,
config=config,
)
gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
images = vq_model.decode_code(gen_token_ids)
images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
images *= 255.0
images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
return images[0]
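# Text-guided inpainting: tokenize the input image, overwrite the tokens under
# the mask with the mask token, and regenerate only those positions.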
def text_guided_inpainting(input_text, inpainting_image, inpainting_mask, guidance_scale, generation_timesteps):
config, uni_prompting, tokenizer, vq_model, model = config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen
prompt = [input_text]
config.training.batch_size = config.batch_size = 1
config.training.guidance_scale = config.guidance_scale = guidance_scale
config.training.generation_timesteps = config.generation_timesteps = generation_timesteps
inpainting_image = image_transform(inpainting_image, resolution=config.dataset.params.resolution).to(device)
inpainting_mask = image_transform(inpainting_mask, resolution=config.dataset.params.resolution, normalize=False)
inpainting_image = inpainting_image.unsqueeze(0).repeat(config.training.batch_size, 1, 1, 1)
inpainting_mask = inpainting_mask.unsqueeze(0).to(device)
inpainting_mask = F.interpolate(inpainting_mask, size=config.dataset.params.resolution // 16, mode='bicubic')
inpainting_mask = inpainting_mask.repeat(config.training.batch_size, 1, 1, 1)
inpainting_mask[inpainting_mask < 0.5] = 0
inpainting_mask[inpainting_mask >= 0.5] = 1
inpainting_mask = inpainting_mask.reshape(config.training.batch_size, -1)
inpainting_mask = inpainting_mask.to(torch.bool)
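    # Tokenize the image and replace the masked latent positions with the mask
    # token so the model regenerates them conditioned on the prompt.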
inpainting_image_tokens = vq_model.get_code(inpainting_image) + len(uni_prompting.text_tokenizer)
inpainting_image_tokens[inpainting_mask] = mask_token_id
input_ids, _ = uni_prompting((prompt, inpainting_image_tokens), 't2i_gen')
if config.training.guidance_scale > 0:
uncond_input_ids, _ = uni_prompting(([''] * len(prompt), inpainting_image_tokens), 't2i_gen')
attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
rm_pad_in_image=True)
else:
attention_mask = create_attention_mask_predict_next(input_ids,
pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
rm_pad_in_image=True)
uncond_input_ids = None
if config.get("mask_schedule", None) is not None:
schedule = config.mask_schedule.schedule
args = config.mask_schedule.get("params", {})
mask_schedule = get_mask_chedule(schedule, **args)
else:
mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))
with torch.no_grad():
gen_token_ids = model.t2i_generate(
input_ids=input_ids,
uncond_input_ids=uncond_input_ids,
attention_mask=attention_mask,
guidance_scale=config.training.guidance_scale,
temperature=config.training.get("generation_temperature", 1.0),
timesteps=config.training.generation_timesteps,
noise_schedule=mask_schedule,
noise_type=config.training.get("noise_type", "mask"),
seq_len=config.model.showo.num_vq_tokens,
uni_prompting=uni_prompting,
config=config,
)
gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
images = vq_model.decode_code(gen_token_ids)
images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
images *= 255.0
images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
return images[0]
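# Text-guided extrapolation (outpainting): repeatedly append a masked half-grid
# strip on the chosen side and regenerate it conditioned on the kept tokens.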
def text_guided_extrapolation(input_img, input_text, left_ext, right_ext, guidance_scale, generation_timesteps):
config, uni_prompting, tokenizer, vq_model, model = config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen
config.offset = 0
config.training.batch_size = config.batch_size = 1
config.training.guidance_scale = config.guidance_scale = guidance_scale
config.training.generation_timesteps = config.generation_timesteps = generation_timesteps
extra_direction = ['right'] * int(right_ext) + ['left'] * int(left_ext)
prompt = [input_text] * len(extra_direction)
W = config.dataset.params.resolution // 16
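    # W is the side length of the latent token grid; each loop iteration keeps
    # part of the current grid and masks a new strip to be generated.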
for id, (prt, direction) in enumerate(zip(prompt, extra_direction)):
prt = [prt] * config.training.batch_size
if id == 0:
# extrapolation_image = Image.open(config.image_path).convert("RGB")
extrapolation_image = input_img
extrapolation_image = image_transform(extrapolation_image,
resolution=config.dataset.params.resolution).to(device)
B, _, _ = extrapolation_image.shape
extrapolation_image = extrapolation_image.unsqueeze(0)
extrapolation_image_tokens = vq_model.get_code(extrapolation_image) + len(uni_prompting.text_tokenizer)
extrapolation_image_tokens = extrapolation_image_tokens.reshape(1,
config.dataset.params.resolution // 16,
config.dataset.params.resolution // 16)
extrapolation_image_tokens = extrapolation_image_tokens.repeat(config.training.batch_size, 1, 1)
else:
extrapolation_image_tokens = gen_token_ids + len(uni_prompting.text_tokenizer)
image_left_part = extrapolation_image_tokens[:, :, :-(W // 2 - config.offset)] - len(
uni_prompting.text_tokenizer)
image_right_part = extrapolation_image_tokens[:, :, W // 2 - config.offset:] - len(uni_prompting.text_tokenizer)
image_up_part = extrapolation_image_tokens[:, :-(W // 2 - config.offset), :] - len(uni_prompting.text_tokenizer)
image_down_part = extrapolation_image_tokens[:, W // 2 - config.offset:, :] - len(uni_prompting.text_tokenizer)
if direction in ['left', 'right']:
extrapolation_mask = torch.zeros((config.training.batch_size,
config.dataset.params.resolution // 16,
config.dataset.params.resolution // 16 // 2 + config.offset),
dtype=torch.int64, device=device) + mask_token_id
else:
extrapolation_mask = torch.zeros((config.training.batch_size,
config.dataset.params.resolution // 16 // 2 + config.offset,
config.dataset.params.resolution // 16),
dtype=torch.int64, device=device) + mask_token_id
if direction == 'left':
extrapolation_image_tokens = torch.cat(
[extrapolation_mask, extrapolation_image_tokens[:, :, :W // 2 - config.offset]], dim=-1)
elif direction == 'right':
extrapolation_image_tokens = torch.cat(
[extrapolation_image_tokens[:, :, -(W // 2 - config.offset):], extrapolation_mask], dim=-1)
elif direction == 'up':
extrapolation_image_tokens = torch.cat(
[extrapolation_mask, extrapolation_image_tokens[:, :W // 2 - config.offset, :]], dim=-2)
else:
extrapolation_image_tokens = torch.cat(
[extrapolation_image_tokens[:, -(W // 2 - config.offset):, :], extrapolation_mask], dim=-2)
extrapolation_image_tokens = extrapolation_image_tokens.reshape(config.training.batch_size, -1)
input_ids, _ = uni_prompting((prt, extrapolation_image_tokens), 't2i_gen')
if config.training.guidance_scale > 0:
uncond_input_ids, _ = uni_prompting(([''] * len(prt), extrapolation_image_tokens), 't2i_gen')
attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
rm_pad_in_image=True)
else:
attention_mask = create_attention_mask_predict_next(input_ids,
pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
rm_pad_in_image=True)
uncond_input_ids = None
if config.get("mask_schedule", None) is not None:
schedule = config.mask_schedule.schedule
args = config.mask_schedule.get("params", {})
mask_schedule = get_mask_chedule(schedule, **args)
else:
mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))
with torch.no_grad():
gen_token_ids = model.t2i_generate(
input_ids=input_ids,
uncond_input_ids=uncond_input_ids,
attention_mask=attention_mask,
guidance_scale=config.training.guidance_scale,
temperature=config.training.get("generation_temperature", 1.0),
timesteps=config.training.generation_timesteps,
noise_schedule=mask_schedule,
noise_type=config.training.get("noise_type", "mask"),
seq_len=config.model.showo.num_vq_tokens,
uni_prompting=uni_prompting,
config=config,
)
gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
gen_token_ids = gen_token_ids.reshape(config.training.batch_size,
config.dataset.params.resolution // 16,
config.dataset.params.resolution // 16)
if direction == 'left':
gen_token_ids = torch.cat([gen_token_ids, image_right_part], dim=-1)
elif direction == 'right':
gen_token_ids = torch.cat([image_left_part, gen_token_ids], dim=-1)
elif direction == 'up':
gen_token_ids = torch.cat([gen_token_ids, image_down_part], dim=-2)
else:
            gen_token_ids = torch.cat([image_up_part, gen_token_ids], dim=-2)
_, h, w = gen_token_ids.shape
gen_token_ids = gen_token_ids.reshape(config.training.batch_size, -1)
images = vq_model.decode_code(gen_token_ids, shape=(h, w))
images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
images *= 255.0
images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
return images[0]
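# Multimodal understanding (chat): encode the image with the CLIP vision tower,
# project it into the LLM embedding space, splice it between <|soi|> and <|eoi|>,
# and decode the answer autoregressively.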
def multimodal_understanding(input_img, input_text, chat_history):
global config_mmu, uni_prompting_mmu, tokenizer_mmu, model_mmu, vision_tower, clip_image_processor
if model_mmu is None:
config_mmu, uni_prompting_mmu, tokenizer_mmu, model_mmu, vision_tower, clip_image_processor = load_continuous_checkpoint()
config, uni_prompting, tokenizer, model = config_mmu, uni_prompting_mmu, tokenizer_mmu, model_mmu
image_ori = input_img
pixel_values = clip_image_processor.preprocess(image_ori, return_tensors="pt")["pixel_values"][0]
batch_size = 1
question = input_text
top_k = 1 # retain only the top_k most likely tokens, clamp others to have 0 probability
conv = conversation_lib.default_conversation.copy()
conv.append_message(conv.roles[0], question)
conv.append_message(conv.roles[1], None)
prompt_question = conv.get_prompt()
question_input = []
question_input.append(prompt_question.strip())
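    # Tokenize the fixed system prompt; it is expected to span SYSTEM_PROMPT_LEN tokens.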
input_ids_system = [uni_prompting.text_tokenizer(SYSTEM_PROMPT, return_tensors="pt", padding="longest").input_ids
for _ in range(batch_size)]
input_ids_system = torch.stack(input_ids_system, dim=0)
    assert input_ids_system.shape[-1] == SYSTEM_PROMPT_LEN
input_ids_system = input_ids_system.to(device)
input_ids_system = input_ids_system[0]
input_ids = [uni_prompting.text_tokenizer(prompt, return_tensors="pt", padding="longest").input_ids
for prompt in question_input]
input_ids = torch.stack(input_ids)
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids, batch_first=True, padding_value=uni_prompting.text_tokenizer.pad_token_id
)
    input_ids = input_ids.to(device).squeeze(0)
input_ids_llava = torch.cat([
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|mmu|>']).to(device),
input_ids_system,
(torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|soi|>']).to(device),
# place your img embedding here
(torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|eoi|>']).to(device),
input_ids,
], dim=1).long()
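    # Encode the image, project it to the LLM embedding width, and splice the
    # image embeddings between the <|soi|> and <|eoi|> text embeddings.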
    images_embeddings = vision_tower(pixel_values[None].to(device))  # keep pixels on the model's device
images_embeddings = model.mm_projector(images_embeddings)
text_embeddings = model.showo.model.embed_tokens(input_ids_llava)
# Full input seq
part1 = text_embeddings[:, :2 + SYSTEM_PROMPT_LEN, :]
part2 = text_embeddings[:, 2 + SYSTEM_PROMPT_LEN:, :]
input_embeddings = torch.cat((part1, images_embeddings, part2), dim=1)
attention_mask_llava = create_attention_mask_for_mmu_vit(input_embeddings,
system_prompt_len=SYSTEM_PROMPT_LEN)
cont_toks_list = model.mmu_generate(input_embeddings=input_embeddings,
attention_mask=attention_mask_llava[0].unsqueeze(0),
max_new_tokens=100,
top_k=top_k,
# eot_token=uni_prompting.sptids_dict['<|eot|>']
eot_token=tokenizer.eos_token_id
)
cont_toks_list = torch.stack(cont_toks_list).squeeze()[None]
output_text = uni_prompting.text_tokenizer.batch_decode(cont_toks_list, skip_special_tokens=True)
output_text = output_text[0].strip()
chat_history.append((input_text, output_text))
return "", chat_history
with gr.Blocks() as demo:
gr.HTML("""
<h1 class="display-2 fw-bold title">
<a style="color: #70a8dc;">S</a><a style="color: #6fb051;">h</a><a style="color: #e06766;">o</a><a style="color: #f7b26b;">w</a>-o
</h1>
<p>This is the official Gradio demo for Show-o, a single transformer that unifies multimodal understanding and generation.</p>
<strong>Paper:</strong> <a href="https://arxiv.org/abs/2408.12528" target="_blank">Show-o: One Single Transformer To Unify Multimodal Understanding and Generation</a>
<br/>
<strong>Project Website:</strong> <a href="https://showlab.github.io/Show-o/" target="_blank">Show-o Website</a>
<br/>
<strong>Code and Models:</strong> <a href="https://github.com/showlab/Show-o" target="_blank">GitHub</a>
<br/>
<br/>
""")
with gr.Row():
with gr.Column():
text_prompt_t2i = gr.Textbox(
label="Text prompt",
lines=2,
placeholder="Input the text prompt here for image generation."
)
guidance_scale_t2i = gr.Slider(
label="guidance scale",
minimum=0,
maximum=5,
step=0.05,
value=1.75
)
generation_timesteps_t2i = gr.Slider(
label="timesteps",
minimum=1,
maximum=30,
step=1,
value=18
)
generated_img_t2i = gr.Image(
label="Output image"
)
submit_btn_t2i = gr.Button("Generate: Text-to-image")
submit_btn_t2i.click(text_to_image_generation,
[text_prompt_t2i, guidance_scale_t2i, generation_timesteps_t2i],
[generated_img_t2i])
with gr.Row():
inpainting_input_img = gr.Image(
label="Input image",
type="pil",
)
inpainting_input_mask = gr.Image(
label="Inpainting mask",
image_mode="L",
type="pil",
)
with gr.Column():
text_prompt_inpainting = gr.Textbox(
label="Text prompt",
lines=2,
placeholder="Input the text prompt here for image inpainting."
)
guidance_scale_inpainting = gr.Slider(
label="guidance scale",
minimum=0,
maximum=5,
step=0.05,
value=1.75
)
generation_timesteps_inpainting = gr.Slider(
label="timesteps",
minimum=1,
maximum=30,
step=1,
value=16
)
generated_img_inpainting = gr.Image(
label="Output image"
)
submit_btn_inpainting = gr.Button("Generate: Text-guided Inpainting")
submit_btn_inpainting.click(text_guided_inpainting,
[text_prompt_inpainting, inpainting_input_img, inpainting_input_mask,
guidance_scale_inpainting, generation_timesteps_inpainting],
[generated_img_inpainting])
with gr.Row():
extra_input_img = gr.Image(
label="Input image",
type="pil",
image_mode="RGB",
)
with gr.Column():
text_prompt_extrapolation = gr.Textbox(
label="Text prompt",
lines=1,
placeholder="Input the text prompt here for image extrapolation."
)
guidance_scale_extrapolation = gr.Slider(
label="guidance scale",
minimum=0,
maximum=5,
step=0.05,
value=1.75
)
generation_timesteps_extrapolation = gr.Slider(
label="timesteps",
minimum=1,
maximum=30,
step=1,
value=16
)
left_extrapolation = gr.Slider(
label="left extrapolation",
minimum=0,
maximum=5,
step=1,
value=1
)
right_extrapolation = gr.Slider(
label="right extrapolation",
minimum=0,
maximum=5,
step=1,
value=1
)
generated_img_extrapolation = gr.Image(
label="Output image"
)
submit_btn_inpainting = gr.Button("Generate: Text-guided Extrapolation")
submit_btn_inpainting.click(text_guided_extrapolation,
[extra_input_img, text_prompt_extrapolation, left_extrapolation, right_extrapolation,
guidance_scale_extrapolation, generation_timesteps_extrapolation],
[generated_img_extrapolation])
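    # Multimodal understanding (chat) panel.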
with gr.Row():
with gr.Row():
chat_input_img = gr.Image(
label="Input image",
type="pil",
image_mode="RGB",
)
with gr.Column():
chatbot = gr.Chatbot()
msg = gr.Textbox(label="Press Enter to send a message for chat")
clear = gr.ClearButton([msg, chatbot])
msg.submit(multimodal_understanding, [chat_input_img, msg, chatbot], [msg, chatbot])
demo.launch()