mnh
committed on
Commit
·
94a8d69
1
Parent(s):
1764399
hello
Browse files- app.py +4 -0
- requirements.txt +2 -1
app.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
import clip
|
|
|
4 |
|
5 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
6 |
model, preprocess = clip.load("ViT-B/32", device=device)
|
@@ -10,6 +11,9 @@ def predict(image):
|
|
10 |
labels = "Japanese, Chinese, Roman, Greek, Etruscan, Scandinavian, Celtic, Medieval, Victorian, Neoclassic, Romanticism, Art Nouveau, Art deco"
|
11 |
labels = labels.split(',')
|
12 |
|
|
|
|
|
|
|
13 |
image = preprocess(image).unsqueeze(0).to(device)
|
14 |
text = clip.tokenize([f"a character of origin {c}" for c in labels]).to(device)
|
15 |
|
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
import clip
|
4 |
+
from PIL import Image, ImageEnhance
|
5 |
|
6 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
7 |
model, preprocess = clip.load("ViT-B/32", device=device)
|
|
|
11 |
labels = "Japanese, Chinese, Roman, Greek, Etruscan, Scandinavian, Celtic, Medieval, Victorian, Neoclassic, Romanticism, Art Nouveau, Art deco"
|
12 |
labels = labels.split(',')
|
13 |
|
14 |
+
converter = ImageEnhance.Color(image)
|
15 |
+
image = converter.enhance(0.5)
|
16 |
+
image = image.convert("L")
|
17 |
image = preprocess(image).unsqueeze(0).to(device)
|
18 |
text = clip.tokenize([f"a character of origin {c}" for c in labels]).to(device)
|
19 |
|
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
|
|
1 |
git+https://github.com/openai/CLIP
|
2 |
torch
|
3 |
-
Jinja2
|
|
|
|
1 |
git+https://github.com/openai/CLIP
|
2 |
torch
|
3 |
+
Jinja2
|
4 |
+
Pillow
|