Update app.py
app.py CHANGED
@@ -90,7 +90,10 @@ def main(_):
     image_list = gr.State([])
     gr.Markdown('# LVM Demo')
     gr.Markdown(f'Serving model: {FLAGS.checkpoint}')
-
+
+    gr.Markdown('**There are mainly two visual prompting modes: sequential prompting and analogy prompting.**')
+    gr.Markdown('**For analogy prompting: describe the task with few-shot examples, i.e. pairs of (x, y) inputs where x is the input image and y is the "annotated" image, then add one query image at the end. Download the few-shot examples dataset at [this link](https://livejohnshopkins-my.sharepoint.com/:f:/g/personal/ybai20_jh_edu/Ei0xiLdFFqJPnwAlFWar29EBUAvB0O3CVaJykZl-f11KDQ?e=Bx9SXZ); you can simply change the query image at the end for testing.**')
+    gr.Markdown('**For sequential prompting, input a sequence of consecutive frames and let the model generate the next one. Please refer to the default examples below.**')
     gr.Markdown('## Inputs')
     with gr.Row():
       upload_drag = gr.File(
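For context, the hunk above sits inside the Gradio Blocks layout built in `main`. The sketch below shows roughly how the added `gr.Markdown` banners fit around the existing widgets; it assumes only what the diff shows (`image_list`, the banner text, `upload_drag`), while the checkpoint path, the `gr.File` arguments, and `launch()` are placeholders rather than the Space's actual code.

```python
# Illustrative sketch only: how the added gr.Markdown banners sit in the
# Blocks layout around line 90 of app.py. Names not shown in the diff
# (CHECKPOINT, the File arguments, launch()) are assumptions.
import gradio as gr

CHECKPOINT = '/path/to/lvm-checkpoint'  # stand-in for FLAGS.checkpoint

with gr.Blocks() as demo:
    image_list = gr.State([])  # holds the current prompt sequence of images
    gr.Markdown('# LVM Demo')
    gr.Markdown(f'Serving model: {CHECKPOINT}')

    # The three usage notes introduced by this commit.
    gr.Markdown('**There are mainly two visual prompting modes: sequential prompting and analogy prompting.**')
    gr.Markdown('**For analogy prompting: upload few-shot (x, y) pairs followed by one query image.**')
    gr.Markdown('**For sequential prompting, input consecutive frames and let the model generate the next one.**')

    gr.Markdown('## Inputs')
    with gr.Row():
        upload_drag = gr.File(
            file_count='multiple',  # accept several prompt images at once
            label='Upload images',
        )

if __name__ == '__main__':
    demo.launch()
```

With a layout like this, analogy prompting corresponds to uploading images in the order x1, y1, ..., xk, yk, query, while sequential prompting is simply a run of consecutive frames; the real app.py additionally wires `upload_drag` and `image_list` into the LVM generation logic.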