omerXfaruq committed · Commit 50cd9e5 · 1 Parent(s): 01fa2e1

tweaks
app.py CHANGED
@@ -11,7 +11,9 @@ model = AutoModel.from_pretrained(model_ckpt)
 hidden_dim = model.config.hidden_size
 dataset = load_dataset("BounharAbdelaziz/Face-Aging-Dataset")
 
-
+TOP_K = 1000
+BASE_COUNT=4
+MAX_COUNT = 30
 
 with gr.Blocks() as demo:
     gr.Markdown(
@@ -36,8 +38,8 @@ with gr.Blocks() as demo:
         inputs = extractor(images=image, return_tensors="pt")
         outputs = model(**inputs)
         embed = outputs.last_hidden_state[0][0]
-        result = await index.query(vector=embed.tolist(), top_k=
-        return [dataset["train"][int(vector.id)]["image"] for vector in result]
+        result = await index.query(vector=embed.tolist(), top_k=TOP_K)
+        return [dataset["train"][int(vector.id)]["image"] for vector in result[:BASE_COUNT]]
 
     gr.Examples(
         examples=[
@@ -55,31 +57,46 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column(scale=1):
             adv_input_image = gr.Image(type="pil")
-            adv_image_count = gr.Slider(1,
+            adv_image_count = gr.Slider(1, MAX_COUNT, 10, label="Image Count")
             adv_button = gr.Button("Submit")
 
         with gr.Column(scale=2):
-
+            adv_output_images = gr.Gallery()
 
     async def find_similar_faces(image, count):
+        if image is None:
+            return None
         inputs = extractor(images=image, return_tensors="pt")
         outputs = model(**inputs)
         embed = outputs.last_hidden_state[0][0]
         result = await index.query(
-            vector=embed.tolist(), top_k=
+            vector=embed.tolist(), top_k=TOP_K
         )
-        return [dataset["train"][int(vector.id)]["image"] for vector in result]
+        return [dataset["train"][int(vector.id)]["image"] for vector in result[:int(count)]]
 
     adv_button.click(
         fn=find_similar_faces,
         inputs=[adv_input_image, adv_image_count],
-        outputs=[
+        outputs=[adv_output_images],
     )
-    adv_input_image.
+    adv_input_image.change(
         fn=find_similar_faces,
         inputs=[adv_input_image, adv_image_count],
-        outputs=[
+        outputs=[adv_output_images],
    )
+    gr.Examples(
+        examples=[
+            [dataset["train"][6]["image"], MAX_COUNT],
+            [dataset["train"][7]["image"], MAX_COUNT],
+            [dataset["train"][8]["image"], MAX_COUNT],
+        ],
+        inputs=[adv_input_image, adv_image_count],
+        outputs=adv_output_images,
+        fn=find_similar_faces,
+        cache_examples=False,
+    )
+
+
 
 if __name__ == "__main__":
     demo.queue(default_concurrency_limit=40)
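For context on the two retrieval hunks above: the handler embeds the query face once, asks the vector index for a generous TOP_K of neighbours, and then only slices the returned list down to the number of images actually shown (BASE_COUNT on the basic tab, the slider value on the advanced one). Below is a minimal standalone sketch of that path. It assumes a transformers AutoFeatureExtractor alongside the AutoModel visible in the diff, an Upstash Vector AsyncIndex (a guess based on the awaited index.query(...) call and the vector.id field; the client setup is not part of this diff), and a placeholder model_ckpt, so treat the names as illustrative rather than the app's exact configuration.

import torch
from datasets import load_dataset
from transformers import AutoFeatureExtractor, AutoModel
from upstash_vector import AsyncIndex  # assumed client; the diff only shows index.query(...)

model_ckpt = "google/vit-base-patch16-224-in21k"  # placeholder checkpoint, not from the diff
extractor = AutoFeatureExtractor.from_pretrained(model_ckpt)
model = AutoModel.from_pretrained(model_ckpt)

dataset = load_dataset("BounharAbdelaziz/Face-Aging-Dataset")
index = AsyncIndex.from_env()  # reads UPSTASH_VECTOR_REST_URL / _TOKEN, if Upstash is indeed used

TOP_K = 1000     # neighbours requested from the index
BASE_COUNT = 4   # how many of them the basic tab displays

async def find_similar_faces(image, count=BASE_COUNT):
    # Embed the query image: first token ([CLS] for ViT-style backbones) of the last hidden state.
    inputs = extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    embed = outputs.last_hidden_state[0][0]

    # One query with a fixed, generous top_k; the UI decides how many results to show.
    result = await index.query(vector=embed.tolist(), top_k=TOP_K)
    return [dataset["train"][int(v.id)]["image"] for v in result[:int(count)]]

# Usage from a script (after `import asyncio`): asyncio.run(find_similar_faces(some_pil_image, 10))

Keeping top_k pinned to TOP_K means the slider only changes how much of the result list is displayed; the index is always asked the same question regardless of the requested count.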
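The third hunk finishes wiring the advanced tab: the Slider gains an upper bound and a label, a Gallery receives the results, the None guard keeps empty inputs from reaching the index, and the same handler is bound both to the Submit button and to image changes, with uncached gr.Examples registered at the end. Here is a self-contained sketch of that Blocks wiring; the handler is a stub so the snippet runs without the model, dataset, or index, gr.Examples is omitted because it needs the dataset images, and the flat layout plus the final launch() call are assumptions rather than the app's exact structure.

import gradio as gr

MAX_COUNT = 30

async def find_similar_faces(image, count):
    if image is None:            # guard introduced in this commit: no image, no query
        return None
    return [image] * int(count)  # stub: the real app returns dataset images found via the index

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            adv_input_image = gr.Image(type="pil")
            adv_image_count = gr.Slider(1, MAX_COUNT, 10, label="Image Count")
            adv_button = gr.Button("Submit")
        with gr.Column(scale=2):
            adv_output_images = gr.Gallery()

    # Both a click on Submit and a change of the input image trigger the same handler.
    adv_button.click(
        fn=find_similar_faces,
        inputs=[adv_input_image, adv_image_count],
        outputs=[adv_output_images],
    )
    adv_input_image.change(
        fn=find_similar_faces,
        inputs=[adv_input_image, adv_image_count],
        outputs=[adv_output_images],
    )

if __name__ == "__main__":
    # Queue requests so concurrent users share a bounded worker pool.
    demo.queue(default_concurrency_limit=40)
    demo.launch()

Binding the handler to both events means the results refresh whether the user presses Submit or simply drops in a new image, and cache_examples=False in the committed code avoids precomputing gallery outputs for the examples at startup.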