JimmyLee05 commited on
Commit
d7593eb
·
1 Parent(s): 5aa30f0
Files changed (2) hide show
  1. README.md +34 -7
  2. app.py +12 -17
README.md CHANGED
@@ -1,11 +1,38 @@
1
-
2
  ---
3
- title: animeganv2
4
- emoji: 🔥
5
- colorFrom: indigo
6
- colorTo: indigo
7
  sdk: gradio
8
- sdk_version: 3.12.0
9
- app_file: run.py
10
  pinned: false
11
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: AnimeGANv2
3
+ emoji:
4
+ colorFrom: yellow
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 3.1.3
8
+ app_file: app.py
9
  pinned: false
10
  ---
11
+
12
+ # Configuration
13
+
14
+ `title`: _string_
15
+ Display title for the Space
16
+
17
+ `emoji`: _string_
18
+ Space emoji (emoji-only character allowed)
19
+
20
+ `colorFrom`: _string_
21
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
22
+
23
+ `colorTo`: _string_
24
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
25
+
26
+ `sdk`: _string_
27
+ Can be either `gradio` or `streamlit`
28
+
29
+ `sdk_version` : _string_
30
+ Version of the selected SDK to use (this Space pins `gradio` 3.1.3 above).
31
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
32
+
33
+ `app_file`: _string_
34
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code).
35
+ Path is relative to the root of the repository.
36
+
37
+ `pinned`: _boolean_
38
+ Whether the Space stays on top of your list.
app.py CHANGED
@@ -1,38 +1,33 @@
1
- import gradio as gr
2
  from PIL import Image
3
  import torch
 
 
 
4
 
5
  model2 = torch.hub.load(
6
  "AK391/animegan2-pytorch:main",
7
  "generator",
8
  pretrained=True,
 
9
  progress=False
10
  )
11
- model1 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1")
 
 
12
  face2paint = torch.hub.load(
13
  'AK391/animegan2-pytorch:main', 'face2paint',
14
- size=512,side_by_side=False
15
  )
16
-
17
  def inference(img, ver):
18
  if ver == 'version 2 (🔺 robustness,🔻 stylization)':
19
  out = face2paint(model2, img)
20
  else:
21
  out = face2paint(model1, img)
22
  return out
23
-
24
  title = "AnimeGANv2"
25
  description = "Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below."
26
  article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>"
27
- examples=[['groot.jpeg','version 2 (🔺 robustness,🔻 stylization)'],['gongyoo.jpeg','version 1 (🔺 stylization, 🔻 robustness)']]
28
-
29
- demo = gr.Interface(
30
- fn=inference,
31
- inputs=[gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version')],
32
- outputs=gr.outputs.Image(type="pil"),
33
- title=title,
34
- description=description,
35
- article=article,
36
- examples=examples)
37
-
38
- demo.launch()
 
 
1
  from PIL import Image
2
  import torch
3
+ import gradio as gr
4
+
5
+
6
 
7
# Pre-trained AnimeGANv2 generators pulled from torch.hub; everything runs on CPU
# (Spaces CPU hardware — the device="cpu" kwargs keep hub loading off CUDA).
model2 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained=True,
    device="cpu",
    progress=False,
)

# Version 1 weights ("face_paint_512_v1"): per the UI labels below, stronger
# stylization but less robust than the default v2 checkpoint.
model1 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained="face_paint_512_v1",
    device="cpu",
)

# Callable from the same repo that applies a generator to a PIL image.
# NOTE(review): presumably crops/resizes the face to 512px before inference —
# confirm against the animegan2-pytorch hubconf.
face2paint = torch.hub.load(
    'AK391/animegan2-pytorch:main',
    'face2paint',
    size=512,
    device="cpu",
    side_by_side=False,
)
 
21
def inference(img, ver):
    """Stylize a portrait with AnimeGANv2.

    img: PIL image uploaded through the Gradio UI.
    ver: radio-button label selecting which checkpoint to apply.
    Returns the stylized PIL image produced by face2paint.
    """
    # The v2 label selects the robust default checkpoint; any other value
    # (i.e. the v1 label) falls through to the stronger-stylization model.
    chosen = model2 if ver == 'version 2 (🔺 robustness,🔻 stylization)' else model1
    return face2paint(chosen, img)
27
+
28
# --- Gradio page metadata -------------------------------------------------
title = "AnimeGANv2"
description = "Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>"

# Radio labels shared by the version selector and the example rows.
_V1 = 'version 1 (🔺 stylization, 🔻 robustness)'
_V2 = 'version 2 (🔺 robustness,🔻 stylization)'

# Clickable example rows: [image file, model-version label].
examples = [
    ['groot.jpeg', _V2],
    ['bill.png', _V1],
    ['tony.png', _V1],
    ['elon.png', _V2],
    ['IU.png', _V1],
    ['billie.png', _V2],
    ['will.png', _V2],
    ['beyonce.png', _V1],
    ['gongyoo.jpeg', _V1],
]

# Build the interface and start serving (blocks until the Space stops).
demo = gr.Interface(
    inference,
    [
        gr.inputs.Image(type="pil"),
        gr.inputs.Radio([_V1, _V2], type="value", default=_V2, label='version'),
    ],
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    allow_flagging=False,
    allow_screenshot=False,
)
demo.launch()