Update app.py
app.py
CHANGED
@@ -5,7 +5,7 @@ import torch
 from PIL import Image
 import os
 
-from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
+from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor, pipeline
 from kolors.pipelines.pipeline_stable_diffusion_xl_chatglm_256_ipadapter import StableDiffusionXLPipeline
 from kolors.models.modeling_chatglm import ChatGLMModel
 from kolors.models.tokenization_chatglm import ChatGLMTokenizer
@@ -53,6 +53,8 @@ if hasattr(pipe.unet, 'encoder_hid_proj'):
 
 pipe.load_ip_adapter(f'{root_dir}/weights/Kolors-IP-Adapter-Plus', subfolder="", weight_name=["ip_adapter_plus_general.bin"])
 
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
@@ -61,6 +63,9 @@ def infer(prompt, ip_adapter_image, ip_adapter_scale=0.5, negative_prompt="", se
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
+    # Translate prompt if it's in Korean
+    translated_prompt = translator(prompt, src_lang="ko", tgt_lang="en")[0]['translation_text']
+
     generator = torch.Generator(device="cuda").manual_seed(seed)
     pipe.to("cuda")
     image_encoder.to("cuda")
@@ -68,7 +73,7 @@ def infer(prompt, ip_adapter_image, ip_adapter_scale=0.5, negative_prompt="", se
     pipe.set_ip_adapter_scale([ip_adapter_scale])
 
     image = pipe(
-        prompt=
+        prompt=translated_prompt,
         ip_adapter_image=[ip_adapter_image],
         negative_prompt=negative_prompt,
         height=height,
@@ -82,9 +87,9 @@ def infer(prompt, ip_adapter_image, ip_adapter_scale=0.5, negative_prompt="", se
     return image, seed
 
 examples = [
-    ["
-    ["
-    ["
+    ["A dog", "minta.jpeg", 0.4],
+    ["A capybara", "king-min.png", 0.5],
+    ["A cat", "blue_hair.png", 0.5],
     ["", "meow.jpeg", 1.0],
 ]
 
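One note on the change above: as written, the new code routes every prompt through the Korean-to-English model, even prompts that are already English (the inline comment says "if it's in Korean", but there is no language check). Below is a minimal sketch of such a guard; it is not part of this commit. It reuses the `translator` pipeline created in the diff, mirrors the same call form, and relies on a simple Hangul-range heuristic rather than a real language detector, so treat `maybe_translate` as an illustrative helper, not the app's actual API.

import re

def maybe_translate(prompt: str) -> str:
    # Heuristic guard (not in the commit): only send the prompt through the
    # ko->en model when it contains Hangul syllables; otherwise return it
    # unchanged so English prompts are not re-translated.
    if re.search(r"[\uac00-\ud7a3]", prompt):
        return translator(prompt, src_lang="ko", tgt_lang="en")[0]["translation_text"]
    return prompt

Inside infer, `translated_prompt = maybe_translate(prompt)` would then replace the unconditional translation call.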