halimbahae committed on
Commit ecc3c41 · verified
1 Parent(s): 1134d2d

Update app.py

Files changed (1)
  1. app.py +12 -33
app.py CHANGED
@@ -3,8 +3,6 @@ import numpy as np
 import random
 from diffusers import DiffusionPipeline
 import torch
-from PIL import Image, ImageDraw, ImageFont
-

 device = "cuda" if torch.cuda.is_available() else "cpu"

@@ -27,39 +25,20 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance

     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
+
     generator = torch.Generator().manual_seed(seed)
-
+
     image = pipe(
-        prompt=style_prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator
-    ).images[0]
-
-    # Ensure image is in uint8 format
-    image = (255 * np.clip(image, 0, 1)).astype(np.uint8)
-
-    # Convert the image to PIL format for overlaying the watermark
-    pil_image = Image.fromarray(image)
-
-    # Add watermark
-    watermark_text = "Bibou.jpeg"
-    font = ImageFont.truetype("arial.ttf", size=30)  # Adjust font and size as needed
-    draw = ImageDraw.Draw(pil_image)
-    text_width, text_height = draw.textsize(watermark_text, font=font)
-    margin = 10
-    opacity = 0.6
-    draw.text((pil_image.width - text_width - margin, pil_image.height - text_height - margin), watermark_text, font=font, fill=(255, 255, 255, int(255 * opacity)))
-
-    # Convert back to numpy array for Gradio display
-    watermarked_image = np.array(pil_image)
-
-    return watermarked_image
-
+        prompt = style_prompt,
+        negative_prompt = negative_prompt,
+        guidance_scale = guidance_scale,
+        num_inference_steps = num_inference_steps,
+        width = width,
+        height = height,
+        generator = generator
+    ).images[0]
+
+    return image
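Net effect of the two hunks: the PIL watermark overlay is dropped and infer now returns the first pipeline image unchanged. The sketch below reassembles the post-commit flow from the added and context lines above; everything the diff does not show (the model id, MAX_SEED, how style_prompt is built, and the tail of the infer signature, which is truncated in the hunk header) is an assumption for illustration only.

# Minimal sketch of app.py after this commit, reassembled from the diff above.
# Everything the diff does not show (the model id, MAX_SEED, how style_prompt is
# built, and the tail of the infer signature) is an assumption for illustration.
import random

import numpy as np
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
MAX_SEED = np.iinfo(np.int32).max  # assumed; defined elsewhere in app.py

# Model id is a placeholder; the actual checkpoint is loaded elsewhere in app.py.
pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo").to(device)

def infer(prompt, negative_prompt, seed, randomize_seed, width, height,
          guidance_scale, num_inference_steps):  # signature tail assumed
    style_prompt = prompt  # app.py builds style_prompt from the user prompt; pass-through assumed

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    # The pipeline returns PIL images by default, so the first one can be handed
    # straight back to the Gradio Image component without any numpy round-trip.
    image = pipe(
        prompt = style_prompt,
        negative_prompt = negative_prompt,
        guidance_scale = guidance_scale,
        num_inference_steps = num_inference_steps,
        width = width,
        height = height,
        generator = generator
    ).images[0]

    return image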
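A side note on the removed block: it called ImageDraw.textsize(), which was deprecated in Pillow 9.2 and removed in Pillow 10, and it loaded a hard-coded "arial.ttf" that is usually absent from Spaces images. If the watermark were ever reinstated, a hedged sketch against current Pillow could look like the following; the font fallback and the RGBA compositing are choices of this sketch, not part of the original code.

# Hypothetical helper: the watermark overlay removed by this commit, rewritten
# against current Pillow. textbbox() replaces the removed textsize(); the font
# fallback and RGBA compositing are assumptions of this sketch.
from PIL import Image, ImageDraw, ImageFont

def add_watermark(pil_image, text="Bibou.jpeg", opacity=0.6, margin=10):
    base = pil_image.convert("RGBA")
    overlay = Image.new("RGBA", base.size, (0, 0, 0, 0))
    draw = ImageDraw.Draw(overlay)

    try:
        font = ImageFont.truetype("arial.ttf", size=30)  # as in the removed code
    except OSError:
        font = ImageFont.load_default()  # Spaces images usually lack arial.ttf

    # textbbox() returns (left, top, right, bottom) of the rendered text.
    left, top, right, bottom = draw.textbbox((0, 0), text, font=font)
    text_width, text_height = right - left, bottom - top

    position = (base.width - text_width - margin, base.height - text_height - margin)
    draw.text(position, text, font=font, fill=(255, 255, 255, int(255 * opacity)))

    # Composite the semi-transparent text onto the image and drop the alpha channel.
    return Image.alpha_composite(base, overlay).convert("RGB")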