YangZhoumill commited on
Commit
b257e01
·
1 Parent(s): 8c13a7b

pre-release

Browse files
.gitignore CHANGED
@@ -167,4 +167,8 @@ cython_debug/
167
  /*.ipynb
168
 
169
  GenAI-Arena-hf-logs/vote_log/*
170
- ksort-logs/
 
 
 
 
 
167
  /*.ipynb
168
 
169
  GenAI-Arena-hf-logs/vote_log/*
170
+ ksort-logs/
171
+
172
+
173
+ *.mp4
174
+ cache_video/
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  import os
3
  from serve.gradio_web import *
4
  from serve.gradio_web_video import build_side_by_side_video_ui_anony
5
- from serve.leaderboard import build_leaderboard_tab, build_leaderboard_video_tab
6
  from model.model_manager import ModelManager
7
  from pathlib import Path
8
  from serve.constants import SERVER_PORT, ROOT_PATH, ELO_RESULTS_DIR
@@ -35,6 +35,8 @@ def build_combine_demo(models, elo_results_file, leaderboard_table_file):
35
 
36
  with gr.Tab("Generation Arena (battle)", id=1):
37
  build_side_by_side_video_ui_anony(models)
 
 
38
  return demo
39
 
40
 
 
2
  import os
3
  from serve.gradio_web import *
4
  from serve.gradio_web_video import build_side_by_side_video_ui_anony
5
+ from serve.leaderboard import build_leaderboard_tab, build_leaderboard_video_tab, build_leaderboard_contributor
6
  from model.model_manager import ModelManager
7
  from pathlib import Path
8
  from serve.constants import SERVER_PORT, ROOT_PATH, ELO_RESULTS_DIR
 
35
 
36
  with gr.Tab("Generation Arena (battle)", id=1):
37
  build_side_by_side_video_ui_anony(models)
38
+ with gr.Tab("Contributor", id=2):
39
+ build_leaderboard_contributor()
40
  return demo
41
 
42
 
contributor.json ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "contributor": [
3
+ {
4
+ "Rank": 1,
5
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Jing",
6
+ "\ud83c\udfdb\ufe0f Affiliation": "CASIA",
7
+ "\ud83d\uddf3\ufe0f Votes": 200
8
+ },
9
+ {
10
+ "Rank": 2,
11
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Zhikai",
12
+ "\ud83c\udfdb\ufe0f Affiliation": "CASIA",
13
+ "\ud83d\uddf3\ufe0f Votes": 180
14
+ },
15
+ {
16
+ "Rank": 3,
17
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Xuewen",
18
+ "\ud83c\udfdb\ufe0f Affiliation": "CASIA",
19
+ "\ud83d\uddf3\ufe0f Votes": 180
20
+ },
21
+ {
22
+ "Rank": 4,
23
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Joe",
24
+ "\ud83c\udfdb\ufe0f Affiliation": "Berkeley",
25
+ "\ud83d\uddf3\ufe0f Votes": 150
26
+ },
27
+ {
28
+ "Rank": 5,
29
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Jianquan",
30
+ "\ud83c\udfdb\ufe0f Affiliation": "CASIA",
31
+ "\ud83d\uddf3\ufe0f Votes": 100
32
+ },
33
+ {
34
+ "Rank": 6,
35
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Zhen",
36
+ "\ud83c\udfdb\ufe0f Affiliation": "Berkeley",
37
+ "\ud83d\uddf3\ufe0f Votes": 50
38
+ },
39
+ {
40
+ "Rank": 7,
41
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Collov Labs",
42
+ "\ud83c\udfdb\ufe0f Affiliation": "Collov Labs",
43
+ "\ud83d\uddf3\ufe0f Votes": 12
44
+ },
45
+ {
46
+ "Rank": 8,
47
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Jinbin Bai",
48
+ "\ud83c\udfdb\ufe0f Affiliation": "NUS",
49
+ "\ud83d\uddf3\ufe0f Votes": 3
50
+ },
51
+ {
52
+ "Rank": 9,
53
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Yinglong",
54
+ "\ud83c\udfdb\ufe0f Affiliation": "Meituan",
55
+ "\ud83d\uddf3\ufe0f Votes": 3
56
+ },
57
+ {
58
+ "Rank": 10,
59
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Gray_squirrel",
60
+ "\ud83c\udfdb\ufe0f Affiliation": "",
61
+ "\ud83d\uddf3\ufe0f Votes": 2
62
+ },
63
+ {
64
+ "Rank": 11,
65
+ "\ud83d\udc68\u200d\u2696\ufe0f Name": "Mingfei Guo",
66
+ "\ud83c\udfdb\ufe0f Affiliation": "",
67
+ "\ud83d\uddf3\ufe0f Votes": 1
68
+ }
69
+ ]
70
+ }
model/matchmaker.py CHANGED
@@ -4,6 +4,8 @@ from trueskill import TrueSkill
4
  import paramiko
5
  import io, os
6
  import sys
 
 
7
  sys.path.append('../')
8
  from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD, SSH_SKILL
9
  trueskill_env = TrueSkill()
@@ -79,7 +81,7 @@ def load_json_via_sftp():
79
  class RunningPivot(object):
80
  running_pivot = []
81
 
82
- not_run = [20,21,22]
83
 
84
  def matchmaker(num_players, k_group=4):
85
  trueskill_env = TrueSkill()
@@ -112,6 +114,8 @@ def matchmaker(num_players, k_group=4):
112
 
113
  # Group players
114
  model_ids = [selected_player] + opponents
 
 
115
 
116
  return model_ids
117
 
 
4
  import paramiko
5
  import io, os
6
  import sys
7
+ import random
8
+
9
  sys.path.append('../')
10
  from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD, SSH_SKILL
11
  trueskill_env = TrueSkill()
 
81
  class RunningPivot(object):
82
  running_pivot = []
83
 
84
+ not_run = [12,13,14,15,16,17,18,19,20,21,22, 25,26] #23,24,
85
 
86
  def matchmaker(num_players, k_group=4):
87
  trueskill_env = TrueSkill()
 
114
 
115
  # Group players
116
  model_ids = [selected_player] + opponents
117
+
118
+ random.shuffle(model_ids)
119
 
120
  return model_ids
121
 
model/matchmaker_video.py CHANGED
@@ -4,6 +4,8 @@ from trueskill import TrueSkill
4
  import paramiko
5
  import io, os
6
  import sys
 
 
7
  sys.path.append('../')
8
  from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD, SSH_VIDEO_SKILL
9
  trueskill_env = TrueSkill()
@@ -94,10 +96,34 @@ def matchmaker_video(num_players, k_group=4):
94
 
95
  # Exclude self, select opponent with highest UCB score
96
  ucb_scores[selected_player] = -float('inf') # minimize the score for the selected player to exclude it
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  opponents = np.argsort(ucb_scores)[-k_group + 1:].tolist()
98
 
99
  # Group players
100
  model_ids = [selected_player] + opponents
 
 
101
 
102
  return model_ids
103
 
 
4
  import paramiko
5
  import io, os
6
  import sys
7
+ import random
8
+
9
  sys.path.append('../')
10
  from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD, SSH_VIDEO_SKILL
11
  trueskill_env = TrueSkill()
 
96
 
97
  # Exclude self, select opponent with highest UCB score
98
  ucb_scores[selected_player] = -float('inf') # minimize the score for the selected player to exclude it
99
+
100
+ excluded_players_1 = [num_players-1, num_players-4]
101
+ excluded_players_2 = [num_players-2, num_players-3, num_players-5]
102
+ excluded_players = excluded_players_1 + excluded_players_2
103
+ if selected_player in excluded_players_1:
104
+ for player in excluded_players:
105
+ ucb_scores[player] = -float('inf')
106
+ if selected_player in excluded_players_2:
107
+ for player in excluded_players_1:
108
+ ucb_scores[player] = -float('inf')
109
+ else:
110
+ excluded_ucb_scores = {player: ucb_scores[player] for player in excluded_players}
111
+ max_player = max(excluded_ucb_scores, key=excluded_ucb_scores.get)
112
+ if max_player in excluded_players_1:
113
+ for player in excluded_players:
114
+ if player != max_player:
115
+ ucb_scores[player] = -float('inf')
116
+ else:
117
+ for player in excluded_players_1:
118
+ ucb_scores[player] = -float('inf')
119
+
120
+
121
  opponents = np.argsort(ucb_scores)[-k_group + 1:].tolist()
122
 
123
  # Group players
124
  model_ids = [selected_player] + opponents
125
+
126
+ random.shuffle(model_ids)
127
 
128
  return model_ids
129
 
model/model_manager.py CHANGED
@@ -9,7 +9,9 @@ from PIL import Image
9
  from openai import OpenAI
10
  from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, load_pipeline
11
  from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum
12
- from serve.upload import get_random_mscoco_prompt
 
 
13
 
14
  class ModelManager:
15
  def __init__(self):
@@ -85,15 +87,17 @@ class ModelManager:
85
  else:
86
  model_names = [model_A, model_B, model_C, model_D]
87
 
 
88
  with concurrent.futures.ThreadPoolExecutor() as executor:
89
  futures = [executor.submit(self.generate_image_ig, prompt, model) if model.startswith("huggingface")
90
- else executor.submit(self.generate_image_ig_api, prompt, model) for model in model_names]
91
  results = [future.result() for future in futures]
92
 
 
93
  return results[0], results[1], results[2], results[3], \
94
  model_names[0], model_names[1], model_names[2], model_names[3]
95
 
96
- def generate_video_ig_parallel_anony(self, prompt_num, model_A, model_B, model_C, model_D):
97
  if model_A == "" and model_B == "" and model_C == "" and model_D == "":
98
  # model_names = random.sample([model for model in self.model_vg_list], 4)
99
 
@@ -104,14 +108,31 @@ class ModelManager:
104
  print(model_names)
105
  else:
106
  model_names = [model_A, model_B, model_C, model_D]
107
-
108
- cache_dir = os.path.join("/mnt/data/lizhikai/ksort_video_cache/", f'cache_{prompt_num}')
109
- results = []
110
  for name in model_names:
111
- model_source, model_name, model_type = name.split("_")
112
- video_path = os.path.join(cache_dir, f'{model_name}.mp4')
113
- print(video_path)
114
- results.append(video_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
116
  # with concurrent.futures.ThreadPoolExecutor() as executor:
117
  # futures = [executor.submit(self.generate_image_ig, prompt, model) if model.startswith("huggingface")
@@ -119,7 +140,7 @@ class ModelManager:
119
  # results = [future.result() for future in futures]
120
 
121
  return results[0], results[1], results[2], results[3], \
122
- model_names[0], model_names[1], model_names[2], model_names[3]
123
 
124
  def generate_image_ig_museum_parallel_anony(self, model_A, model_B, model_C, model_D):
125
  if model_A == "" and model_B == "" and model_C == "" and model_D == "":
 
9
  from openai import OpenAI
10
  from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, load_pipeline
11
  from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum
12
+ from serve.upload import get_random_mscoco_prompt, get_random_video_prompt, get_ssh_random_video_prompt
13
+ from serve.constants import SSH_CACHE_OPENSOURCE, SSH_CACHE_ADVANCE, SSH_CACHE_PIKA, SSH_CACHE_SORA, SSH_CACHE_IMAGE
14
+
15
 
16
  class ModelManager:
17
  def __init__(self):
 
87
  else:
88
  model_names = [model_A, model_B, model_C, model_D]
89
 
90
+
91
  with concurrent.futures.ThreadPoolExecutor() as executor:
92
  futures = [executor.submit(self.generate_image_ig, prompt, model) if model.startswith("huggingface")
93
+ else executor.submit(self.generate_image_ig_api, prompt, model) for model in model_names]
94
  results = [future.result() for future in futures]
95
 
96
+
97
  return results[0], results[1], results[2], results[3], \
98
  model_names[0], model_names[1], model_names[2], model_names[3]
99
 
100
+ def generate_video_vg_parallel_anony(self, model_A, model_B, model_C, model_D):
101
  if model_A == "" and model_B == "" and model_C == "" and model_D == "":
102
  # model_names = random.sample([model for model in self.model_vg_list], 4)
103
 
 
108
  print(model_names)
109
  else:
110
  model_names = [model_A, model_B, model_C, model_D]
111
+
112
+ root_dir = SSH_CACHE_OPENSOURCE
 
113
  for name in model_names:
114
+ if "Runway-Gen3" in name or "Runway-Gen2" in name or "Pika-v1.0" in name:
115
+ root_dir = SSH_CACHE_ADVANCE
116
+ elif "Pika-beta" in name:
117
+ root_dir = SSH_CACHE_PIKA
118
+ elif "Sora" in name and "OpenSora" not in name:
119
+ root_dir = SSH_CACHE_SORA
120
+
121
+ local_dir = "./cache_video"
122
+ prompt, results = get_ssh_random_video_prompt(root_dir, local_dir, model_names)
123
+ cache_dir = local_dir
124
+ # cache_dir, prompt = get_random_video_prompt(root_dir)
125
+ # results = []
126
+ # for name in model_names:
127
+ # model_source, model_name, model_type = name.split("_")
128
+ # # if model_name in ["Runway-Gen3", "Pika-beta", "Pika-v1.0"]:
129
+ # # file_name = cache_dir.split("/")[-1]
130
+ # # video_path = os.path.join(cache_dir, f'{file_name}.mp4')
131
+ # # else:
132
+ # # video_path = os.path.join(cache_dir, f'{model_name}.mp4')
133
+ # video_path = os.path.join(cache_dir, f'{model_name}.mp4')
134
+ # print(video_path)
135
+ # results.append(video_path)
136
 
137
  # with concurrent.futures.ThreadPoolExecutor() as executor:
138
  # futures = [executor.submit(self.generate_image_ig, prompt, model) if model.startswith("huggingface")
 
140
  # results = [future.result() for future in futures]
141
 
142
  return results[0], results[1], results[2], results[3], \
143
+ model_names[0], model_names[1], model_names[2], model_names[3], prompt, cache_dir
144
 
145
  def generate_image_ig_museum_parallel_anony(self, model_A, model_B, model_C, model_D):
146
  if model_A == "" and model_B == "" and model_C == "" and model_D == "":
model/models/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
- from .imagenhub_models import load_imagenhub_model
2
  # from .playground_api import load_playground_model
3
- from .fal_api_models import load_fal_model
4
  # from .videogenhub_models import load_videogenhub_model
5
  from .huggingface_models import load_huggingface_model
6
  from .replicate_api_models import load_replicate_model
@@ -36,6 +36,9 @@ IMAGE_GENERATION_MODELS = [
36
  'openai_Dalle-3_text2image',
37
  'other_Midjourney-v6.0_text2image',
38
  'other_Midjourney-v5.0_text2image',
 
 
 
39
  ]
40
 
41
 
@@ -48,12 +51,16 @@ IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZe
48
  # 'videogenhub_LaVie_generation', 'videogenhub_VideoCrafter2_generation',
49
  # 'videogenhub_ModelScope_generation', 'videogenhub_OpenSora_generation']
50
  VIDEO_GENERATION_MODELS = ['replicate_Zeroscope-v2-xl_text2video',
51
- # 'replicate_Damo-Text-to-Video_text2video',
52
- 'replicate_Animate-Diff_text2video',
53
- 'replicate_OpenSora_text2video',
54
- 'replicate_LaVie_text2video',
55
- 'replicate_VideoCrafter2_text2video',
56
- 'replicate_Stable-Video-Diffusion_text2video',
 
 
 
 
57
  ]
58
 
59
  def load_pipeline(model_name):
 
1
+ # from .imagenhub_models import load_imagenhub_model
2
  # from .playground_api import load_playground_model
3
+ # from .fal_api_models import load_fal_model
4
  # from .videogenhub_models import load_videogenhub_model
5
  from .huggingface_models import load_huggingface_model
6
  from .replicate_api_models import load_replicate_model
 
36
  'openai_Dalle-3_text2image',
37
  'other_Midjourney-v6.0_text2image',
38
  'other_Midjourney-v5.0_text2image',
39
+ "replicate_FLUX.1-schnell_text2image",
40
+ "replicate_FLUX.1-pro_text2image",
41
+ "replicate_FLUX.1-dev_text2image",
42
  ]
43
 
44
 
 
51
  # 'videogenhub_LaVie_generation', 'videogenhub_VideoCrafter2_generation',
52
  # 'videogenhub_ModelScope_generation', 'videogenhub_OpenSora_generation']
53
  VIDEO_GENERATION_MODELS = ['replicate_Zeroscope-v2-xl_text2video',
54
+ 'replicate_Animate-Diff_text2video',
55
+ 'replicate_OpenSora_text2video',
56
+ 'replicate_LaVie_text2video',
57
+ 'replicate_VideoCrafter2_text2video',
58
+ 'replicate_Stable-Video-Diffusion_text2video',
59
+ 'other_Runway-Gen3_text2video',
60
+ 'other_Pika-beta_text2video',
61
+ 'other_Pika-v1.0_text2video',
62
+ 'other_Runway-Gen2_text2video',
63
+ 'other_Sora_text2video',
64
  ]
65
 
66
  def load_pipeline(model_name):
model/models/generate_image_cache.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from huggingface_models import load_huggingface_model
2
+ from replicate_api_models import load_replicate_model
3
+ from openai_api_models import load_openai_model
4
+ from other_api_models import load_other_model
5
+ import concurrent.futures
6
+ import os
7
+ import io, time
8
+ import requests
9
+ import json
10
+ from PIL import Image
11
+
12
+
13
+ IMAGE_GENERATION_MODELS = [
14
+ # 'replicate_SDXL_text2image',
15
+ # 'replicate_SD-v3.0_text2image',
16
+ # 'replicate_SD-v2.1_text2image',
17
+ # 'replicate_SD-v1.5_text2image',
18
+ # 'replicate_SDXL-Lightning_text2image',
19
+ # 'replicate_Kandinsky-v2.0_text2image',
20
+ # 'replicate_Kandinsky-v2.2_text2image',
21
+ # 'replicate_Proteus-v0.2_text2image',
22
+ # 'replicate_Playground-v2.0_text2image',
23
+ # 'replicate_Playground-v2.5_text2image',
24
+ # 'replicate_Dreamshaper-xl-turbo_text2image',
25
+ # 'replicate_SDXL-Deepcache_text2image',
26
+ # 'replicate_Openjourney-v4_text2image',
27
+ # 'replicate_LCM-v1.5_text2image',
28
+ # 'replicate_Realvisxl-v3.0_text2image',
29
+ # 'replicate_Realvisxl-v2.0_text2image',
30
+ # 'replicate_Pixart-Sigma_text2image',
31
+ # 'replicate_SSD-1b_text2image',
32
+ # 'replicate_Open-Dalle-v1.1_text2image',
33
+ # 'replicate_Deepfloyd-IF_text2image',
34
+ # 'huggingface_SD-turbo_text2image',
35
+ # 'huggingface_SDXL-turbo_text2image',
36
+ # 'huggingface_Stable-cascade_text2image',
37
+ # 'openai_Dalle-2_text2image',
38
+ # 'openai_Dalle-3_text2image',
39
+ 'other_Midjourney-v6.0_text2image',
40
+ 'other_Midjourney-v5.0_text2image',
41
+ # "replicate_FLUX.1-schnell_text2image",
42
+ # "replicate_FLUX.1-pro_text2image",
43
+ # "replicate_FLUX.1-dev_text2image",
44
+ ]
45
+
46
+ Prompts = [
47
+ # 'An aerial view of someone walking through a forest alone in the style of Romanticism.',
48
+ # 'With dark tones and backlit resolution, this oil painting depicts a thunderstorm over a cityscape.',
49
+ # 'The rendering depicts a futuristic train station with volumetric lighting in an Art Nouveau style.',
50
+ # 'An Impressionist illustration depicts a river winding through a meadow.', # featuring a thick black outline
51
+ # 'Photo of a black and white picture of a person facing the sunset from a bench.',
52
+ # 'The skyline of a city is painted in bright, high-resolution colors.',
53
+ # 'A sketch shows two robots talking to each other, featuring a surreal look and narrow aspect ratio.',
54
+ # 'An abstract Dadaist collage in neon tones and 4K resolutions of a post-apocalyptic world.',
55
+ # 'With abstract elements and a rococo style, the painting depicts a garden in high resolution.',
56
+ # 'A picture of a senior man walking in the rain and looking directly at the camera from a medium distance.',
57
+ ]
58
+
59
+ def load_pipeline(model_name):
60
+ model_source, model_name, model_type = model_name.split("_")
61
+ if model_source == "replicate":
62
+ pipe = load_replicate_model(model_name, model_type)
63
+ elif model_source == "huggingface":
64
+ pipe = load_huggingface_model(model_name, model_type)
65
+ elif model_source == "openai":
66
+ pipe = load_openai_model(model_name, model_type)
67
+ elif model_source == "other":
68
+ pipe = load_other_model(model_name, model_type)
69
+ else:
70
+ raise ValueError(f"Model source {model_source} not supported")
71
+ return pipe
72
+
73
+ def generate_image_ig_api(prompt, model_name):
74
+ pipe = load_pipeline(model_name)
75
+ result = pipe(prompt=prompt)
76
+ return result
77
+
78
+ save_names = []
79
+ for name in IMAGE_GENERATION_MODELS:
80
+ model_source, model_name, model_type = name.split("_")
81
+ save_names.append(model_name)
82
+
83
+ for i, prompt in enumerate(Prompts):
84
+ print("save the {} prompt".format(i+1))
85
+ with concurrent.futures.ThreadPoolExecutor() as executor:
86
+ futures = [executor.submit(generate_image_ig_api, prompt, model) for model in IMAGE_GENERATION_MODELS]
87
+ results = [future.result() for future in futures]
88
+
89
+ root_dir = '/rscratch/zhendong/lizhikai/ksort/ksort_image_cache/'
90
+ save_dir = os.path.join(root_dir, f'output-{i+4}')
91
+ if not os.path.exists(save_dir):
92
+ os.makedirs(save_dir, exist_ok=True)
93
+ with open(os.path.join(save_dir, "prompt.txt"), 'w', encoding='utf-8') as file:
94
+ file.write(prompt)
95
+
96
+ for j, result in enumerate(results):
97
+ result = result.resize((512, 512))
98
+ file_path = os.path.join(save_dir, f'{save_names[j]}.jpg')
99
+ result.save(file_path, format="JPEG")
model/models/generate_video_cache.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ file_path = '/home/lizhikai/webvid_prompt100.txt'
4
+ str_list = []
5
+ with open(file_path, 'r', encoding='utf-8') as file:
6
+ for line in file:
7
+ str_list.append(line.strip())
8
+ if len(str_list) == 100:
9
+ break
10
+
11
+ def generate_image_ig_api(prompt, model_name):
12
+ model_source, model_name, model_type = model_name.split("_")
13
+ pipe = load_replicate_model(model_name, model_type)
14
+ result = pipe(prompt=prompt)
15
+ return result
16
+ model_names = ['replicate_Zeroscope-v2-xl_text2video',
17
+ # 'replicate_Damo-Text-to-Video_text2video',
18
+ 'replicate_Animate-Diff_text2video',
19
+ 'replicate_OpenSora_text2video',
20
+ 'replicate_LaVie_text2video',
21
+ 'replicate_VideoCrafter2_text2video',
22
+ 'replicate_Stable-Video-Diffusion_text2video',
23
+ ]
24
+ save_names = []
25
+ for name in model_names:
26
+ model_source, model_name, model_type = name.split("_")
27
+ save_names.append(model_name)
28
+
29
+ for i, prompt in enumerate(str_list):
30
+ print("save the {} prompt".format(i+1))
31
+ # if i+1 < 97:
32
+ # continue
33
+ with concurrent.futures.ThreadPoolExecutor() as executor:
34
+ futures = [executor.submit(generate_image_ig_api, prompt, model) for model in model_names]
35
+ results = [future.result() for future in futures]
36
+
37
+ root_dir = '/mnt/data/lizhikai/ksort_video_cache/'
38
+ save_dir = os.path.join(root_dir, f'cache_{i+1}')
39
+ if not os.path.exists(save_dir):
40
+ os.makedirs(save_dir, exist_ok=True)
41
+ with open(os.path.join(save_dir, "prompt.txt"), 'w', encoding='utf-8') as file:
42
+ file.write(prompt)
43
+
44
+ # 下载视频并保存
45
+ repeat_num = 5
46
+ for j, url in enumerate(results):
47
+ while 1:
48
+ time.sleep(1)
49
+ response = requests.get(url, stream=True)
50
+ if response.status_code == 200:
51
+ file_path = os.path.join(save_dir, f'{save_names[j]}.mp4')
52
+ with open(file_path, 'wb') as file:
53
+ for chunk in response.iter_content(chunk_size=8192):
54
+ file.write(chunk)
55
+ print(f"视频 {j} 已保存到 {file_path}")
56
+ break
57
+ else:
58
+ repeat_num = repeat_num - 1
59
+ if repeat_num == 0:
60
+ print(f"视频 {j} 保存失败")
61
+ # raise ValueError("Video request failed.")
62
+ continue
model/models/openai_api_models.py CHANGED
@@ -53,7 +53,7 @@ def load_openai_model(model_name, model_type):
53
 
54
 
55
  if __name__ == "__main__":
56
- pipe = load_openai_model('Dalle-2', 'text2image')
57
  result = pipe(prompt='draw a tiger')
58
  print(result)
59
 
 
53
 
54
 
55
  if __name__ == "__main__":
56
+ pipe = load_openai_model('Dalle-3', 'text2image')
57
  result = pipe(prompt='draw a tiger')
58
  print(result)
59
 
model/models/other_api_models.py CHANGED
@@ -79,35 +79,37 @@ def load_other_model(model_name, model_type):
79
  return OtherModel(model_name, model_type)
80
 
81
  if __name__ == "__main__":
82
-
83
- # pipe = load_other_model("Midjourney-v5.0", "text2image")
84
- # result = pipe(prompt="a good girl")
85
- # print(result)
86
-
87
  import http.client
88
  import json
89
- key = os.environ.get('MIDJOURNEY_KEY')
90
- prompt = "a good girl"
91
 
92
- conn = http.client.HTTPSConnection("xdai.online")
93
- payload = json.dumps({
94
- "messages": [
95
- {
96
- "role": "user",
97
- "content": "{}".format(prompt)
98
- }
99
- ],
100
- "stream": True,
101
- "model": "luma-video",
102
- # "model": "pika-text-to-video",
103
- })
104
- headers = {
105
- 'Authorization': "Bearer {}".format(key),
106
- 'Content-Type': 'application/json'
107
- }
108
- conn.request("POST", "/v1/chat/completions", payload, headers)
109
- res = conn.getresponse()
110
- data = res.read()
111
- info = data.decode("utf-8")
112
- print(data.decode("utf-8"))
 
 
 
 
 
 
 
 
 
113
 
 
79
  return OtherModel(model_name, model_type)
80
 
81
  if __name__ == "__main__":
 
 
 
 
 
82
  import http.client
83
  import json
 
 
84
 
85
+ pipe = load_other_model("Midjourney-v6.0", "text2image")
86
+ result = pipe(prompt="An Impressionist illustration depicts a river winding through a meadow ")
87
+ print(result)
88
+ exit()
89
+
90
+
91
+ # key = os.environ.get('MIDJOURNEY_KEY')
92
+ # prompt = "a good girl"
93
+
94
+ # conn = http.client.HTTPSConnection("xdai.online")
95
+ # payload = json.dumps({
96
+ # "messages": [
97
+ # {
98
+ # "role": "user",
99
+ # "content": "{}".format(prompt)
100
+ # }
101
+ # ],
102
+ # "stream": True,
103
+ # "model": "luma-video",
104
+ # # "model": "pika-text-to-video",
105
+ # })
106
+ # headers = {
107
+ # 'Authorization': "Bearer {}".format(key),
108
+ # 'Content-Type': 'application/json'
109
+ # }
110
+ # conn.request("POST", "/v1/chat/completions", payload, headers)
111
+ # res = conn.getresponse()
112
+ # data = res.read()
113
+ # info = data.decode("utf-8")
114
+ # print(data.decode("utf-8"))
115
 
model/models/replicate_api_models.py CHANGED
@@ -35,6 +35,9 @@ Replicate_MODEl_NAME_MAP = {
35
  "LaVie": "cjwbw/lavie:0bca850c4928b6c30052541fa002f24cbb4b677259c461dd041d271ba9d3c517",
36
  "VideoCrafter2": "lucataco/video-crafter:7757c5775e962c618053e7df4343052a21075676d6234e8ede5fa67c9e43bce0",
37
  "Stable-Video-Diffusion": "sunfjun/stable-video-diffusion:d68b6e09eedbac7a49e3d8644999d93579c386a083768235cabca88796d70d82",
 
 
 
38
  }
39
 
40
  class ReplicateModel():
@@ -62,10 +65,11 @@ class ReplicateModel():
62
  result_url = output[0]
63
  else:
64
  result_url = output
65
- print(result_url)
66
  response = requests.get(result_url)
67
  result = Image.open(io.BytesIO(response.content))
68
  return result
 
69
  elif self.model_type == "text2video":
70
  assert "prompt" in kwargs, "prompt is required for text2image model"
71
  if self.model_name == "Zeroscope-v2-xl":
@@ -183,8 +187,10 @@ if __name__ == "__main__":
183
  import replicate
184
  import time
185
  import concurrent.futures
186
- import os
187
  import requests
 
 
188
  # model_name = 'replicate_zeroscope-v2-xl_text2video'
189
  # model_name = 'replicate_Damo-Text-to-Video_text2video'
190
  # model_name = 'replicate_Animate-Diff_text2video'
@@ -197,101 +203,131 @@ if __name__ == "__main__":
197
  # prompt = "Clown fish swimming in a coral reef, beautiful, 8k, perfect, award winning, national geographic"
198
  # result = pipe(prompt=prompt)
199
 
200
- # url = "https://replicate.delivery/yhqm/ucuUNHBetmQVJiogeNiDpxJoccKxPvxzTXQPZO53azfHfwvMB/out.mp4"
201
- # response = requests.get(url, stream=True)
202
- # if response.status_code == 200:
203
- # file_path = os.path.join("/mnt/data/lizhikai/ksort_video_cache/", '1.mp4')
204
- # with open(file_path, 'wb') as file:
205
- # for chunk in response.iter_content(chunk_size=8192):
206
- # file.write(chunk)
207
- # print(f"视频 {j} 已保存到 {file_path}")
208
 
209
- file_path = '/home/lizhikai/webvid_prompt100.txt'
210
- str_list = []
211
- with open(file_path, 'r', encoding='utf-8') as file:
212
- for line in file:
213
- str_list.append(line.strip())
214
- if len(str_list) == 100:
215
- break
 
 
 
 
 
 
216
 
217
- def generate_image_ig_api(prompt, model_name):
218
- model_source, model_name, model_type = model_name.split("_")
219
- pipe = load_replicate_model(model_name, model_type)
220
- result = pipe(prompt=prompt)
221
- return result
222
- model_names = ['replicate_Zeroscope-v2-xl_text2video',
223
- # 'replicate_Damo-Text-to-Video_text2video',
224
- 'replicate_Animate-Diff_text2video',
225
- 'replicate_OpenSora_text2video',
226
- 'replicate_LaVie_text2video',
227
- 'replicate_VideoCrafter2_text2video',
228
- 'replicate_Stable-Video-Diffusion_text2video',
229
- ]
230
- save_names = []
231
- for name in model_names:
232
- model_source, model_name, model_type = name.split("_")
233
- save_names.append(model_name)
234
 
235
- for i, prompt in enumerate(str_list):
236
- print("save the {} prompt".format(i+1))
237
- # if i+1 < 97:
238
- # continue
239
- with concurrent.futures.ThreadPoolExecutor() as executor:
240
- futures = [executor.submit(generate_image_ig_api, prompt, model) for model in model_names]
241
- results = [future.result() for future in futures]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
242
 
243
- root_dir = '/mnt/data/lizhikai/ksort_video_cache/'
244
- save_dir = os.path.join(root_dir, f'cache_{i+1}')
245
- if not os.path.exists(save_dir):
246
- os.makedirs(save_dir, exist_ok=True)
247
- with open(os.path.join(save_dir, "prompt.txt"), 'w', encoding='utf-8') as file:
248
- file.write(prompt)
 
 
249
 
250
- # 下载视频并保存
251
- repeat_num = 5
252
- for j, url in enumerate(results):
253
- while 1:
254
- time.sleep(1)
255
- response = requests.get(url, stream=True)
256
- if response.status_code == 200:
257
- file_path = os.path.join(save_dir, f'{save_names[j]}.mp4')
258
- with open(file_path, 'wb') as file:
259
- for chunk in response.iter_content(chunk_size=8192):
260
- file.write(chunk)
261
- print(f"视频 {j} 已保存到 {file_path}")
262
- break
263
- else:
264
- repeat_num = repeat_num - 1
265
- if repeat_num == 0:
266
- print(f"视频 {j} 保存失败")
267
- # raise ValueError("Video request failed.")
268
- continue
269
 
270
 
271
- # input = {
272
- # "seed": 1,
273
- # "width": 512,
274
- # "height": 512,
275
- # "grid_size": 1,
276
- # "prompt": "anime astronaut riding a horse on mars"
277
- # }
278
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
279
 
280
- # for name, address in Replicate_MODEl_NAME_MAP.items():
281
- # print('*'*50)
282
- # print(name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
283
 
284
- # t1 = time.time()
285
- # output = replicate.run(
286
- # address,
287
- # input=input
288
- # )
289
- # # for item in output:
290
- # # print(item)
291
- # print(output)
292
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
293
 
294
- # t2 = time.time()
295
- # print(t2-t1)
296
- # print('*'*50)
297
-
 
35
  "LaVie": "cjwbw/lavie:0bca850c4928b6c30052541fa002f24cbb4b677259c461dd041d271ba9d3c517",
36
  "VideoCrafter2": "lucataco/video-crafter:7757c5775e962c618053e7df4343052a21075676d6234e8ede5fa67c9e43bce0",
37
  "Stable-Video-Diffusion": "sunfjun/stable-video-diffusion:d68b6e09eedbac7a49e3d8644999d93579c386a083768235cabca88796d70d82",
38
+ "FLUX.1-schnell": "black-forest-labs/flux-schnell",
39
+ "FLUX.1-pro": "black-forest-labs/flux-pro",
40
+ "FLUX.1-dev": "black-forest-labs/flux-dev",
41
  }
42
 
43
  class ReplicateModel():
 
65
  result_url = output[0]
66
  else:
67
  result_url = output
68
+ print(self.model_name, result_url)
69
  response = requests.get(result_url)
70
  result = Image.open(io.BytesIO(response.content))
71
  return result
72
+
73
  elif self.model_type == "text2video":
74
  assert "prompt" in kwargs, "prompt is required for text2image model"
75
  if self.model_name == "Zeroscope-v2-xl":
 
187
  import replicate
188
  import time
189
  import concurrent.futures
190
+ import os, shutil, re
191
  import requests
192
+ from moviepy.editor import VideoFileClip
193
+
194
  # model_name = 'replicate_zeroscope-v2-xl_text2video'
195
  # model_name = 'replicate_Damo-Text-to-Video_text2video'
196
  # model_name = 'replicate_Animate-Diff_text2video'
 
203
  # prompt = "Clown fish swimming in a coral reef, beautiful, 8k, perfect, award winning, national geographic"
204
  # result = pipe(prompt=prompt)
205
 
206
+ # # 文件复制
207
+ source_folder = '/mnt/data/lizhikai/ksort_video_cache/Pika-v1.0add/'
208
+ destination_folder = '/mnt/data/lizhikai/ksort_video_cache/Advance/'
 
 
 
 
 
209
 
210
+ special_char = 'output'
211
+ for dirpath, dirnames, filenames in os.walk(source_folder):
212
+ for dirname in dirnames:
213
+ des_dirname = "output-"+dirname[-3:]
214
+ print(des_dirname)
215
+ if special_char in dirname:
216
+ model_name = ["Pika-v1.0"]
217
+ for name in model_name:
218
+ source_file_path = os.path.join(source_folder, os.path.join(dirname, name+".mp4"))
219
+ print(source_file_path)
220
+ destination_file_path = os.path.join(destination_folder, os.path.join(des_dirname, name+".mp4"))
221
+ print(destination_file_path)
222
+ shutil.copy(source_file_path, destination_file_path)
223
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224
 
225
+ # 视频裁剪
226
+ # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Runway-Gen3/'
227
+ # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Runway-Gen2/'
228
+ # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Pika-Beta/'
229
+ # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Pika-v1/'
230
+ # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Sora/'
231
+ # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Pika-v1.0add/'
232
+ # special_char = 'output'
233
+ # num = 0
234
+ # for dirpath, dirnames, filenames in os.walk(root_dir):
235
+ # for dirname in dirnames:
236
+ # # If the folder name contains the specified marker substring
237
+ # if special_char in dirname:
238
+ # num = num+1
239
+ # print(num)
240
+ # if num < 0:
241
+ # continue
242
+ # video_path = os.path.join(root_dir, (os.path.join(dirname, f"{dirname}.mp4")))
243
+ # out_video_path = os.path.join(root_dir, (os.path.join(dirname, f"Pika-v1.0.mp4")))
244
+ # print(video_path)
245
+ # print(out_video_path)
246
 
247
+ # video = VideoFileClip(video_path)
248
+ # width, height = video.size
249
+ # center_x, center_y = width // 2, height // 2
250
+ # new_width, new_height = 512, 512
251
+ # cropped_video = video.crop(x_center=center_x, y_center=center_y, width=min(width, height), height=min(width, height))
252
+ # resized_video = cropped_video.resize(newsize=(new_width, new_height))
253
+ # resized_video.write_videofile(out_video_path, codec='libx264', fps=video.fps)
254
+ # os.remove(video_path)
255
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
256
 
257
 
258
+ # file_path = '/home/lizhikai/webvid_prompt100.txt'
259
+ # str_list = []
260
+ # with open(file_path, 'r', encoding='utf-8') as file:
261
+ # for line in file:
262
+ # str_list.append(line.strip())
263
+ # if len(str_list) == 100:
264
+ # break
265
 
266
+ # Generation code
267
+ # def generate_image_ig_api(prompt, model_name):
268
+ # model_source, model_name, model_type = model_name.split("_")
269
+ # pipe = load_replicate_model(model_name, model_type)
270
+ # result = pipe(prompt=prompt)
271
+ # return result
272
+ # model_names = ['replicate_Zeroscope-v2-xl_text2video',
273
+ # # 'replicate_Damo-Text-to-Video_text2video',
274
+ # 'replicate_Animate-Diff_text2video',
275
+ # 'replicate_OpenSora_text2video',
276
+ # 'replicate_LaVie_text2video',
277
+ # 'replicate_VideoCrafter2_text2video',
278
+ # 'replicate_Stable-Video-Diffusion_text2video',
279
+ # ]
280
+ # save_names = []
281
+ # for name in model_names:
282
+ # model_source, model_name, model_type = name.split("_")
283
+ # save_names.append(model_name)
284
 
285
+ # # Walk the root directory and its subdirectories
286
+ # # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Runway-Gen3/'
287
+ # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Runway-Gen2/'
288
+ # # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Pika-Beta/'
289
+ # # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Pika-v1/'
290
+ # # root_dir = '/mnt/data/lizhikai/ksort_video_cache/Sora/'
291
+ # special_char = 'output'
292
+ # num = 0
293
+ # for dirpath, dirnames, filenames in os.walk(root_dir):
294
+ # for dirname in dirnames:
295
+ # # If the folder name contains the specified marker substring
296
+ # if special_char in dirname:
297
+ # num = num+1
298
+ # print(num)
299
+ # if num < 0:
300
+ # continue
301
+ # str_list = []
302
+ # prompt_path = os.path.join(root_dir, (os.path.join(dirname, "prompt.txt")))
303
+ # print(prompt_path)
304
+ # with open(prompt_path, 'r', encoding='utf-8') as file:
305
+ # for line in file:
306
+ # str_list.append(line.strip())
307
+ # prompt = str_list[0]
308
+ # print(prompt)
309
 
310
+ # with concurrent.futures.ThreadPoolExecutor() as executor:
311
+ # futures = [executor.submit(generate_image_ig_api, prompt, model) for model in model_names]
312
+ # results = [future.result() for future in futures]
 
 
 
 
 
313
 
314
+ # # Download the videos and save them
315
+ # repeat_num = 5
316
+ # for j, url in enumerate(results):
317
+ # while 1:
318
+ # time.sleep(1)
319
+ # response = requests.get(url, stream=True)
320
+ # if response.status_code == 200:
321
+ # file_path = os.path.join(os.path.join(root_dir, dirname), f'{save_names[j]}.mp4')
322
+ # with open(file_path, 'wb') as file:
323
+ # for chunk in response.iter_content(chunk_size=8192):
324
+ # file.write(chunk)
325
+ # print(f"视频 {j} 已保存到 {file_path}")
326
+ # break
327
+ # else:
328
+ # repeat_num = repeat_num - 1
329
+ # if repeat_num == 0:
330
+ # print(f"视频 {j} 保存失败")
331
+ # # raise ValueError("Video request failed.")
332
+ # continue
333
 
 
 
 
 
serve/Ksort.py CHANGED
@@ -167,7 +167,7 @@ def vote_ssh_submit(states, textbox, rank, user_name, user_institution):
167
  from .update_skill import update_skill
168
  update_skill(rank, [x.model_name for x in states])
169
 
170
- def vote_video_ssh_submit(states, textbox, prompt_num, rank, user_name, user_institution):
171
  conv_id = states[0].conv_id
172
  output_dir = create_remote_directory(conv_id, video=True)
173
 
@@ -175,7 +175,7 @@ def vote_video_ssh_submit(states, textbox, prompt_num, rank, user_name, user_ins
175
  "models_name": [x.model_name for x in states],
176
  "video_rank": [x for x in rank],
177
  "prompt": [textbox],
178
- "prompt_num": [prompt_num],
179
  "video_path": [x.output for x in states],
180
  "user_info": {"name": [user_name], "institution": [user_institution]},
181
  }
@@ -206,9 +206,9 @@ def submit_response_igm(
206
  gr.Markdown(state3.model_name, visible=True)
207
  ) + (disable_btn,)
208
  def submit_response_vg(
209
- state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, prompt_num, rank, user_name, user_institution, request: gr.Request
210
  ):
211
- vote_video_ssh_submit([state0, state1, state2, state3], textbox, prompt_num, rank, user_name, user_institution)
212
  if model_selector0 == "":
213
  return (disable_btn,) * 6 + (
214
  gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
@@ -247,11 +247,11 @@ def submit_response_rank_igm(
247
  else:
248
  return (enable_btn,) * 16 + (enable_btn,) * 3 + ("wrong",) + (gr.Markdown("", visible=False),) * 4
249
  def submit_response_rank_vg(
250
- state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, prompt_num, rank, right_vote_text, user_name, user_institution, request: gr.Request
251
  ):
252
  print(rank)
253
  if right_vote_text == "right":
254
- vote_video_ssh_submit([state0, state1, state2, state3], textbox, prompt_num, rank, user_name, user_institution)
255
  if model_selector0 == "":
256
  return (disable_btn,) * 16 + (disable_btn,) * 3 + ("wrong",) + (
257
  gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
 
167
  from .update_skill import update_skill
168
  update_skill(rank, [x.model_name for x in states])
169
 
170
+ def vote_video_ssh_submit(states, textbox, prompt_path, rank, user_name, user_institution):
171
  conv_id = states[0].conv_id
172
  output_dir = create_remote_directory(conv_id, video=True)
173
 
 
175
  "models_name": [x.model_name for x in states],
176
  "video_rank": [x for x in rank],
177
  "prompt": [textbox],
178
+ "prompt_path": [prompt_path],
179
  "video_path": [x.output for x in states],
180
  "user_info": {"name": [user_name], "institution": [user_institution]},
181
  }
 
206
  gr.Markdown(state3.model_name, visible=True)
207
  ) + (disable_btn,)
208
  def submit_response_vg(
209
+ state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, prompt_path, rank, user_name, user_institution, request: gr.Request
210
  ):
211
+ vote_video_ssh_submit([state0, state1, state2, state3], textbox, prompt_path, rank, user_name, user_institution)
212
  if model_selector0 == "":
213
  return (disable_btn,) * 6 + (
214
  gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
 
247
  else:
248
  return (enable_btn,) * 16 + (enable_btn,) * 3 + ("wrong",) + (gr.Markdown("", visible=False),) * 4
249
  def submit_response_rank_vg(
250
+ state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, prompt_path, rank, right_vote_text, user_name, user_institution, request: gr.Request
251
  ):
252
  print(rank)
253
  if right_vote_text == "right":
254
+ vote_video_ssh_submit([state0, state1, state2, state3], textbox, prompt_path, rank, user_name, user_institution)
255
  if model_selector0 == "":
256
  return (disable_btn,) * 16 + (disable_btn,) * 3 + ("wrong",) + (
257
  gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
serve/gradio_web.py CHANGED
@@ -48,11 +48,11 @@ def set_ssh():
48
 
49
  def build_side_by_side_ui_anony(models):
50
  notice_markdown = """
51
- # ⚔️ K-Sort-Arena (Text-to-Image) ⚔️
52
  ## 📜 Rules
53
- - Input prompt to Four anonymous models (e.g., SD, SDXL, OpenJourney in Text-guided Image Generation Model) and vote on the outputs!
54
- - Two voting mode: Rand Mode and Best Mode. Please feel free to switch them!
55
- - Users are encouraged to make evaluations based on subjective preferences. As an aid, they can refer to the following criteria: Alignment (50%) + Aesthetics (50%).
56
  - Alignment includes: Entity Matching (30%) + Style Matching (20%);
57
  - Aesthetics includes: Photorealism (30%) + Light and Shadow (10%) + Absence of Artifacts (10%).
58
 
@@ -197,7 +197,7 @@ def build_side_by_side_ui_anony(models):
197
  with gr.Row():
198
  vote_textbox = gr.Textbox(
199
  show_label=False,
200
- placeholder="👉 Enter your rank",
201
  container=True,
202
  elem_id="input_box",
203
  visible=False,
@@ -222,7 +222,7 @@ def build_side_by_side_ui_anony(models):
222
  # share_btn = gr.Button(value="📷 Share")
223
  with gr.Blocks():
224
  with gr.Row(elem_id="centered-text"): #
225
- user_info = gr.Markdown("User information", visible=True, elem_id="centered-text") #, elem_id="centered-text"
226
  # with gr.Blocks():
227
  # name = gr.Markdown("Name", visible=True)
228
  user_name = gr.Textbox(show_label=False,placeholder="👉 Enter your name (optional)", elem_classes="custom-width")
 
48
 
49
  def build_side_by_side_ui_anony(models):
50
  notice_markdown = """
51
+ # ⚔️ K-Sort Arena (Text-to-Image Generation) ⚔️
52
  ## 📜 Rules
53
+ - Input a prompt for four anonymized models (e.g., SD, SDXL, OpenJourney for Text-guided Image Generation) and vote on their outputs.
54
+ - Two voting modes available: Rank Mode and Best Mode. Switch freely between modes. Please note that ties are always allowed. In ranking mode, users can input rankings like 1 3 3 1. Any invalid rankings, such as 1 4 4 1, will be automatically corrected during post-processing.
55
+ - Users are encouraged to make evaluations based on subjective preferences. Evaluation criteria: Alignment (50%) + Aesthetics (50%).
56
  - Alignment includes: Entity Matching (30%) + Style Matching (20%);
57
  - Aesthetics includes: Photorealism (30%) + Light and Shadow (10%) + Absence of Artifacts (10%).
58
 
 
197
  with gr.Row():
198
  vote_textbox = gr.Textbox(
199
  show_label=False,
200
+ placeholder="👉 Enter your rank (you can use buttons above, or directly type here, e.g. 1 2 3 4)",
201
  container=True,
202
  elem_id="input_box",
203
  visible=False,
 
222
  # share_btn = gr.Button(value="📷 Share")
223
  with gr.Blocks():
224
  with gr.Row(elem_id="centered-text"): #
225
+ user_info = gr.Markdown("User information (to appear on the contributor leaderboard)", visible=True, elem_id="centered-text") #, elem_id="centered-text"
226
  # with gr.Blocks():
227
  # name = gr.Markdown("Name", visible=True)
228
  user_name = gr.Textbox(show_label=False,placeholder="👉 Enter your name (optional)", elem_classes="custom-width")
serve/gradio_web_video.py CHANGED
@@ -10,7 +10,7 @@ from .vote_utils import (
10
  tievote_last_response_igm as tievote_last_response,
11
  bothbad_vote_last_response_igm as bothbad_vote_last_response,
12
  share_click_igm as share_click,
13
- generate_igv_annoy,
14
  share_js
15
  )
16
  from .Ksort import (
@@ -32,19 +32,20 @@ from .Ksort import (
32
 
33
  from functools import partial
34
  from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD
35
- from serve.upload import get_random_webvid_prompt, create_ssh_client
36
  from serve.update_skill import create_ssh_skill_client
37
  from model.matchmaker import create_ssh_matchmaker_client
38
 
39
  def build_side_by_side_video_ui_anony(models):
40
  notice_markdown = """
41
- # ⚔️ K-Sort-Arena (Text-to-Video) ⚔️
42
  ## 📜 Rules
43
- - Input prompt to Four anonymous models (e.g., OpenSora, StableVideoDiffusion in Text-guided Video Generation Model) and vote on the outputs!
44
- - Two voting mode: Rand Mode and Best Mode. Please feel free to switch them!
45
- - Users are encouraged to make evaluations based on subjective preferences. As an aid, they can refer to the following criteria: Alignment (50%) + Aesthetics (50%).
46
- - Alignment includes: Video Content Matching (30%) + Inter-frame Consistency (20%);
47
  - Aesthetics includes: Photorealism (30%) + Physical Correctness (10%) + Absence of Artifacts (10%).
 
48
 
49
  ## 👇 Generating now!
50
 
@@ -57,7 +58,7 @@ def build_side_by_side_video_ui_anony(models):
57
  state2 = gr.State()
58
  state3 = gr.State()
59
 
60
- gen_func = partial(generate_igv_annoy, models.generate_video_ig_parallel_anony)
61
  # gen_func_random = partial(generate_igm_annoy_museum, models.generate_image_ig_museum_parallel_anony)
62
 
63
  gr.Markdown(notice_markdown, elem_id="notice_markdown")
@@ -187,7 +188,7 @@ def build_side_by_side_video_ui_anony(models):
187
  with gr.Row():
188
  vote_textbox = gr.Textbox(
189
  show_label=False,
190
- placeholder="👉 Enter your rank",
191
  container=True,
192
  elem_id="input_box",
193
  visible=False,
@@ -198,19 +199,19 @@ def build_side_by_side_video_ui_anony(models):
198
  with gr.Row():
199
  textbox = gr.Textbox(
200
  show_label=False,
201
- placeholder="👉 Generate the rand prompt and Send",
202
  container=True,
203
  elem_id="input_box",
204
  interactive=False,
205
  )
206
 
207
- send_btn = gr.Button(value="Send", variant="primary", scale=0, elem_id="btnblue")
208
- draw_btn = gr.Button(value="🎲 Random Prompt", variant="primary", scale=0, elem_id="btnblue")
209
  with gr.Row():
210
  clear_btn = gr.Button(value="🎲 New Round", interactive=False)
211
  with gr.Blocks():
212
  with gr.Row(elem_id="centered-text"): #
213
- user_info = gr.Markdown("User information", visible=True, elem_id="centered-text") #, elem_id="centered-text"
214
  # with gr.Blocks():
215
  # name = gr.Markdown("Name", visible=True)
216
  user_name = gr.Textbox(show_label=False,placeholder="👉 Enter your name (optional)", elem_classes="custom-width")
@@ -218,7 +219,7 @@ def build_side_by_side_video_ui_anony(models):
218
  # institution = gr.Markdown("Institution", visible=True)
219
  user_institution = gr.Textbox(show_label=False,placeholder="👉 Enter your affiliation (optional)", elem_classes="custom-width")
220
 
221
- prompt_num = gr.Number(value=0, visible=False, interactive=False) # record the num of prompt
222
  #gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
223
  example1_text = gr.Textbox(value="Chef with recipe book watching young cook preparing dish in the kitchen.", visible=False, interactive=False)
224
  example2_text = gr.Textbox(value="A baker turns freshly baked loaves of sourdough bread.", visible=False, interactive=False)
@@ -270,15 +271,31 @@ def build_side_by_side_video_ui_anony(models):
270
  vote_mode = gr.Textbox(value="Rank", visible=False, interactive=False)
271
  right_vote_text = gr.Textbox(value="wrong", visible=False, interactive=False)
272
 
273
- send_btn.click(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
274
  disable_video_order_buttons,
275
- inputs=[textbox, example1_text, example2_text, example3_text, example4_text],
276
- outputs=[textbox, send_btn, draw_btn, clear_btn, prompt_num]
277
  ).then(
278
- gen_func, # 修改为调取函数而不是生成函数
279
- inputs=[state0, state1, state2, state3, textbox, prompt_num, model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
280
  outputs=[state0, state1, state2, state3, generate_ig0, generate_ig1, generate_ig2, generate_ig3, chatbot_left, chatbot_left1, chatbot_right, chatbot_right1, \
281
- model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
282
  api_name="send_btn_annony"
283
  ).then(
284
  enable_vote_mode_buttons,
@@ -286,13 +303,6 @@ def build_side_by_side_video_ui_anony(models):
286
  outputs=vote_order_list
287
  )
288
 
289
- draw_btn.click(
290
- get_random_webvid_prompt,
291
- inputs=None,
292
- outputs=[prompt_num, textbox],
293
- api_name="draw_btn_annony"
294
- )
295
-
296
  clear_btn.click(
297
  clear_history_side_by_side_anony,
298
  inputs=None,
@@ -336,7 +346,7 @@ def build_side_by_side_video_ui_anony(models):
336
  outputs=[vote_textbox, right_vote_text, rank]
337
  ).then(
338
  submit_response_rank_vg,
339
- inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rank, right_vote_text, user_name, user_institution],
340
  outputs=[A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
341
  vote_textbox, vote_submit_btn, vote_mode_btn, right_vote_text, \
342
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
@@ -354,7 +364,7 @@ def build_side_by_side_video_ui_anony(models):
354
  outputs=[vote_textbox, right_vote_text, rank]
355
  ).then(
356
  submit_response_rank_vg,
357
- inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rank, right_vote_text, user_name, user_institution],
358
  outputs=[A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
359
  vote_textbox, vote_submit_btn, vote_mode_btn, right_vote_text, \
360
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
@@ -364,35 +374,35 @@ def build_side_by_side_video_ui_anony(models):
364
 
365
  leftvote_btn.click(
366
  submit_response_vg,
367
- inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankA, user_name, user_institution],
368
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
369
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
370
  vote_mode_btn]
371
  )
372
  left1vote_btn.click(
373
  submit_response_vg,
374
- inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankB, user_name, user_institution],
375
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
376
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
377
  vote_mode_btn]
378
  )
379
  rightvote_btn.click(
380
  submit_response_vg,
381
- inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankC, user_name, user_institution],
382
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
383
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
384
  vote_mode_btn]
385
  )
386
  right1vote_btn.click(
387
  submit_response_vg,
388
- inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankD, user_name, user_institution],
389
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
390
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
391
  vote_mode_btn]
392
  )
393
  tie_btn.click(
394
  submit_response_vg,
395
- inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankTie, user_name, user_institution],
396
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
397
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
398
  vote_mode_btn]
@@ -411,6 +421,7 @@ def build_side_by_side_video_ui_anony(models):
411
  inputs = [rank],
412
  outputs = [vote_textbox]
413
  )
 
414
  A2_btn.click(
415
  reset_btn_rank,
416
  inputs=[window1_text, rank, A2_btn, vote_level],
 
10
  tievote_last_response_igm as tievote_last_response,
11
  bothbad_vote_last_response_igm as bothbad_vote_last_response,
12
  share_click_igm as share_click,
13
+ generate_vg_annoy,
14
  share_js
15
  )
16
  from .Ksort import (
 
32
 
33
  from functools import partial
34
  from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD
35
+ from serve.upload import create_ssh_client
36
  from serve.update_skill import create_ssh_skill_client
37
  from model.matchmaker import create_ssh_matchmaker_client
38
 
39
  def build_side_by_side_video_ui_anony(models):
40
  notice_markdown = """
41
+ # ⚔️ K-Sort Arena (Text-to-Video Generation) ⚔️
42
  ## 📜 Rules
43
+ - Input a prompt for four anonymized models (e.g., Sora, Pika, OpenSora, StableVideoDiffusion for Text-guided Video Generation) and vote on their outputs.
44
+ - Two voting modes available: Rank Mode and Best Mode. Switch freely between modes. Please note that ties are always allowed. In ranking mode, users can input rankings like 1 3 3 1. Any invalid rankings, such as 1 4 4 1, will be automatically corrected during post-processing.
45
+ - Users are encouraged to make evaluations based on subjective preferences. Evaluation criteria: Alignment (50%) + Aesthetics (50%).
46
+ - Alignment includes: Video Content Matching (20%) + Movement Matching (15%) + Inter-frame Consistency (15%);
47
  - Aesthetics includes: Photorealism (30%) + Physical Correctness (10%) + Absence of Artifacts (10%).
48
+ - Users can hover over the videos to see a replay button and click to zoom out to full-screen.
49
 
50
  ## 👇 Generating now!
51
 
 
58
  state2 = gr.State()
59
  state3 = gr.State()
60
 
61
+ gen_func = partial(generate_vg_annoy, models.generate_video_vg_parallel_anony)
62
  # gen_func_random = partial(generate_igm_annoy_museum, models.generate_image_ig_museum_parallel_anony)
63
 
64
  gr.Markdown(notice_markdown, elem_id="notice_markdown")
 
188
  with gr.Row():
189
  vote_textbox = gr.Textbox(
190
  show_label=False,
191
+ placeholder="👉 Enter your rank (you can use buttons above, or directly type here, e.g. 1 2 3 4)",
192
  container=True,
193
  elem_id="input_box",
194
  visible=False,
 
199
  with gr.Row():
200
  textbox = gr.Textbox(
201
  show_label=False,
202
+ placeholder="👉 Generate a random prompt and send to video generation",
203
  container=True,
204
  elem_id="input_box",
205
  interactive=False,
206
  )
207
 
208
+ send_btn = gr.Button(value="Send", variant="primary", scale=0, elem_id="btnblue", visible=False, interactive=False)
209
+ draw_btn = gr.Button(value="🎲 Random Sample", variant="primary", scale=0, elem_id="btnblue")
210
  with gr.Row():
211
  clear_btn = gr.Button(value="🎲 New Round", interactive=False)
212
  with gr.Blocks():
213
  with gr.Row(elem_id="centered-text"): #
214
+ user_info = gr.Markdown("User information (to appear on the contributor leaderboard)", visible=True, elem_id="centered-text") #, elem_id="centered-text"
215
  # with gr.Blocks():
216
  # name = gr.Markdown("Name", visible=True)
217
  user_name = gr.Textbox(show_label=False,placeholder="👉 Enter your name (optional)", elem_classes="custom-width")
 
219
  # institution = gr.Markdown("Institution", visible=True)
220
  user_institution = gr.Textbox(show_label=False,placeholder="👉 Enter your affiliation (optional)", elem_classes="custom-width")
221
 
222
+ prompt_path = gr.Textbox(value="", visible=False, interactive=False)
223
  #gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
224
  example1_text = gr.Textbox(value="Chef with recipe book watching young cook preparing dish in the kitchen.", visible=False, interactive=False)
225
  example2_text = gr.Textbox(value="A baker turns freshly baked loaves of sourdough bread.", visible=False, interactive=False)
 
271
  vote_mode = gr.Textbox(value="Rank", visible=False, interactive=False)
272
  right_vote_text = gr.Textbox(value="wrong", visible=False, interactive=False)
273
 
274
+ # send_btn.click(
275
+ # disable_video_order_buttons,
276
+ # inputs=[textbox, example1_text, example2_text, example3_text, example4_text],
277
+ # outputs=[textbox, send_btn, draw_btn, clear_btn, prompt_path]
278
+ # ).then(
279
+ # gen_func, # changed to fetch cached results instead of generating
280
+ # inputs=[state0, state1, state2, state3, textbox, prompt_path, model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
281
+ # outputs=[state0, state1, state2, state3, generate_ig0, generate_ig1, generate_ig2, generate_ig3, chatbot_left, chatbot_left1, chatbot_right, chatbot_right1, \
282
+ # model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
283
+ # api_name="send_btn_annony"
284
+ # ).then(
285
+ # enable_vote_mode_buttons,
286
+ # inputs=[vote_mode, textbox],
287
+ # outputs=vote_order_list
288
+ # )
289
+
290
+ draw_btn.click(
291
  disable_video_order_buttons,
292
+ inputs=[],
293
+ outputs=[textbox, send_btn, draw_btn, clear_btn]
294
  ).then(
295
+ gen_func,
296
+ inputs=[state0, state1, state2, state3, model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
297
  outputs=[state0, state1, state2, state3, generate_ig0, generate_ig1, generate_ig2, generate_ig3, chatbot_left, chatbot_left1, chatbot_right, chatbot_right1, \
298
+ model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, textbox, prompt_path],
299
  api_name="send_btn_annony"
300
  ).then(
301
  enable_vote_mode_buttons,
 
303
  outputs=vote_order_list
304
  )
305
 
 
 
 
 
 
 
 
306
  clear_btn.click(
307
  clear_history_side_by_side_anony,
308
  inputs=None,
 
346
  outputs=[vote_textbox, right_vote_text, rank]
347
  ).then(
348
  submit_response_rank_vg,
349
+ inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_path, rank, right_vote_text, user_name, user_institution],
350
  outputs=[A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
351
  vote_textbox, vote_submit_btn, vote_mode_btn, right_vote_text, \
352
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
 
364
  outputs=[vote_textbox, right_vote_text, rank]
365
  ).then(
366
  submit_response_rank_vg,
367
+ inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_path, rank, right_vote_text, user_name, user_institution],
368
  outputs=[A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
369
  vote_textbox, vote_submit_btn, vote_mode_btn, right_vote_text, \
370
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
 
374
 
375
  leftvote_btn.click(
376
  submit_response_vg,
377
+ inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_path, rankA, user_name, user_institution],
378
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
379
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
380
  vote_mode_btn]
381
  )
382
  left1vote_btn.click(
383
  submit_response_vg,
384
+ inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_path, rankB, user_name, user_institution],
385
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
386
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
387
  vote_mode_btn]
388
  )
389
  rightvote_btn.click(
390
  submit_response_vg,
391
+ inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_path, rankC, user_name, user_institution],
392
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
393
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
394
  vote_mode_btn]
395
  )
396
  right1vote_btn.click(
397
  submit_response_vg,
398
+ inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_path, rankD, user_name, user_institution],
399
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
400
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
401
  vote_mode_btn]
402
  )
403
  tie_btn.click(
404
  submit_response_vg,
405
+ inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_path, rankTie, user_name, user_institution],
406
  outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
407
  model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
408
  vote_mode_btn]
 
421
  inputs = [rank],
422
  outputs = [vote_textbox]
423
  )
424
+
425
  A2_btn.click(
426
  reset_btn_rank,
427
  inputs=[window1_text, rank, A2_btn, vote_level],
serve/leaderboard.py CHANGED
@@ -36,13 +36,13 @@ from datetime import datetime
36
 
37
  def make_leaderboard_md():
38
  leaderboard_md = f"""
39
- # 🏆 K-Sort-Arena Leaderboard (Text-to-Image)
40
  """
41
  return leaderboard_md
42
 
43
  def make_leaderboard_video_md():
44
  leaderboard_md = f"""
45
- # 🏆 K-Sort-Arena Leaderboard (Text-to-Video)
46
  """
47
  return leaderboard_md
48
 
@@ -55,15 +55,15 @@ def make_arena_leaderboard_md(total_models, total_votes, last_updated):
55
  # last_updated = last_updated.strftime("%Y-%m-%d")
56
 
57
  leaderboard_md = f"""
58
- Total #models: **{total_models}** (anonymous). Total #votes: **{total_votes}** (Equivalent to **{total_votes*6}** votes for one-on-one games).
59
- \n Last updated: {last_updated}.
60
  """
61
 
62
  return leaderboard_md
63
 
64
 
65
  def make_disclaimer_md():
66
- disclaimer_md = f'''
67
  <div id="modal" style="display:none; position:fixed; top:50%; left:50%; transform:translate(-50%, -50%); padding:20px; background:white; box-shadow:0 0 10px rgba(0,0,0,0.5); z-index:1000;">
68
  <p style="font-size:24px;"><strong>Disclaimer</strong></p>
69
  <p style="font-size:18px;"><b>Purpose and Scope</b></b></p>
@@ -85,12 +85,10 @@ def make_disclaimer_md():
85
  <p><b>For any questions or to report issues, please contact us at [email protected].</b></p>
86
  </div>
87
  <div id="overlay" style="display:none; position:fixed; top:0; left:0; width:100%; height:100%; background:rgba(0,0,0,0.5); z-index:999;" onclick="document.getElementById('modal').style.display='none'; document.getElementById('overlay').style.display='none'"></div>
88
- <p> ⚠️ This platform is designed for academic usage, for details please refer to <a href="#" id="open_link" onclick="document.getElementById('modal').style.display='block'; document.getElementById('overlay').style.display='block'">disclaimer</a>.</p>
89
  '''
90
-
91
  return disclaimer_md
92
 
93
-
94
  def make_arena_leaderboard_data(results):
95
  import pandas as pd
96
  df = pd.DataFrame(results)
@@ -106,19 +104,20 @@ def build_leaderboard_tab(score_result_file = 'sorted_score_list.json'):
106
 
107
  md = make_leaderboard_md()
108
  md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
109
- gr.HTML(make_disclaimer_md)
110
 
111
- with gr.Tab("Arena Score", id=0):
112
- md = make_arena_leaderboard_md(total_models, total_votes, last_updated)
113
- gr.Markdown(md, elem_id="leaderboard_markdown")
114
- md = make_arena_leaderboard_data(score_results)
115
- gr.Dataframe(md)
116
 
117
  gr.Markdown(
118
- """ ## The leaderboard is updated frequently and continues to incorporate new models.
119
  """,
120
  elem_id="leaderboard_markdown",
121
  )
 
 
122
  from .utils import acknowledgment_md, html_code
123
  with gr.Blocks():
124
  gr.Markdown(acknowledgment_md)
@@ -134,19 +133,50 @@ def build_leaderboard_video_tab(score_result_file = 'sorted_score_list_video.jso
134
 
135
  md = make_leaderboard_video_md()
136
  md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
137
- gr.HTML(make_disclaimer_md)
 
 
 
 
 
 
 
138
 
139
- with gr.Tab("Arena Score", id=0):
140
- md = make_arena_leaderboard_md(total_models, total_votes, last_updated)
141
- gr.Markdown(md, elem_id="leaderboard_markdown")
142
- md = make_arena_leaderboard_data(score_results)
143
- gr.Dataframe(md)
144
 
145
  gr.Markdown(
146
- """ ## The leaderboard is updated frequently and continues to incorporate new models.
147
  """,
148
  elem_id="leaderboard_markdown",
149
  )
150
  from .utils import acknowledgment_md, html_code
151
  with gr.Blocks():
152
  gr.Markdown(acknowledgment_md)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
  def make_leaderboard_md():
38
  leaderboard_md = f"""
39
+ # 🏆 K-Sort Arena Leaderboard (Text-to-Image Generation)
40
  """
41
  return leaderboard_md
42
 
43
  def make_leaderboard_video_md():
44
  leaderboard_md = f"""
45
+ # 🏆 K-Sort Arena Leaderboard (Text-to-Video Generation)
46
  """
47
  return leaderboard_md
48
 
 
55
  # last_updated = last_updated.strftime("%Y-%m-%d")
56
 
57
  leaderboard_md = f"""
58
+ Total models: **{total_models}** (anonymized), Total votes: **{total_votes}** (equivalent to **{total_votes*6}** pairwise comparisons)
59
+ \n Last updated: {last_updated}
60
  """
61
 
62
  return leaderboard_md
63
 
64
 
65
  def make_disclaimer_md():
66
+ disclaimer_md = '''
67
  <div id="modal" style="display:none; position:fixed; top:50%; left:50%; transform:translate(-50%, -50%); padding:20px; background:white; box-shadow:0 0 10px rgba(0,0,0,0.5); z-index:1000;">
68
  <p style="font-size:24px;"><strong>Disclaimer</strong></p>
69
  <p style="font-size:18px;"><b>Purpose and Scope</b></b></p>
 
85
  <p><b>For any questions or to report issues, please contact us at [email protected].</b></p>
86
  </div>
87
  <div id="overlay" style="display:none; position:fixed; top:0; left:0; width:100%; height:100%; background:rgba(0,0,0,0.5); z-index:999;" onclick="document.getElementById('modal').style.display='none'; document.getElementById('overlay').style.display='none'"></div>
88
+ <p> This platform is designed for academic usage, for details please refer to <a href="#" id="open_link" onclick="document.getElementById('modal').style.display='block'; document.getElementById('overlay').style.display='block'">disclaimer</a>.</p>
89
  '''
 
90
  return disclaimer_md
91
 
 
92
  def make_arena_leaderboard_data(results):
93
  import pandas as pd
94
  df = pd.DataFrame(results)
 
104
 
105
  md = make_leaderboard_md()
106
  md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
 
107
 
108
+ # with gr.Tab("Arena Score", id=0):
109
+ md = make_arena_leaderboard_md(total_models, total_votes, last_updated)
110
+ gr.Markdown(md, elem_id="leaderboard_markdown")
111
+ md = make_arena_leaderboard_data(score_results)
112
+ gr.Dataframe(md)
113
 
114
  gr.Markdown(
115
+ """ ### The leaderboard is regularly updated and continuously incorporates new models.
116
  """,
117
  elem_id="leaderboard_markdown",
118
  )
119
+ with gr.Blocks():
120
+ gr.HTML(make_disclaimer_md)
121
  from .utils import acknowledgment_md, html_code
122
  with gr.Blocks():
123
  gr.Markdown(acknowledgment_md)
 
133
 
134
  md = make_leaderboard_video_md()
135
  md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
136
+ # with gr.Blocks():
137
+ # gr.HTML(make_disclaimer_md)
138
+
139
+ # with gr.Tab("Arena Score", id=0):
140
+ md = make_arena_leaderboard_md(total_models, total_votes, last_updated)
141
+ gr.Markdown(md, elem_id="leaderboard_markdown")
142
+ md = make_arena_leaderboard_data(score_results)
143
+ gr.Dataframe(md)
144
 
145
+ notice_markdown_sora = """
146
+ - Note: As Sora's video generation function is not publicly available, we used sample videos from their official website. This may lead to a biased assessment of Sora's capabilities, as these samples likely represent Sora's best outputs. Therefore, Sora's position on our leaderboard should be considered as its upper bound. We are working on methods to conduct more comprehensive and fair comparisons in the future.
147
+ """
148
+
149
+ gr.Markdown(notice_markdown_sora, elem_id="notice_markdown_sora")
150
 
151
  gr.Markdown(
152
+ """ ### The leaderboard is regularly updated and continuously incorporates new models.
153
  """,
154
  elem_id="leaderboard_markdown",
155
  )
156
  from .utils import acknowledgment_md, html_code
157
  with gr.Blocks():
158
  gr.Markdown(acknowledgment_md)
159
+
160
+
161
+ def build_leaderboard_contributor(file = 'contributor.json'):
162
+ md = f"""
163
+ # 🏆 Contributor Leaderboard
164
+ The submission of user information is entirely optional. This information is used solely for contribution statistics. We respect and safeguard users' privacy choices.
165
+ To maintain a clean and concise leaderboard, please ensure consistency in submitted names and affiliations. For example, use 'Berkeley' consistently rather than alternating with 'UC Berkeley'.
166
+ """
167
+ md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
168
+
169
+ # md = make_arena_leaderboard_md(total_models, total_votes, last_updated)
170
+ # gr.Markdown(md, elem_id="leaderboard_markdown")
171
+
172
+ with open(file, "r") as json_file:
173
+ data = json.load(json_file)
174
+ score_results = data["contributor"]
175
+ md = make_arena_leaderboard_data(score_results)
176
+ gr.Dataframe(md)
177
+
178
+ gr.Markdown(
179
+ """ ### The leaderboard is regularly updated.
180
+ """,
181
+ elem_id="leaderboard_markdown",
182
+ )
serve/upload.py CHANGED
@@ -1,6 +1,6 @@
1
  import paramiko
2
  import numpy as np
3
- import io, os
4
  import gradio as gr
5
  from PIL import Image
6
  import requests
@@ -78,16 +78,75 @@ def get_random_mscoco_prompt():
78
 
79
  random_line = random.choice(lines).strip()
80
  return random_line
81
- def get_random_webvid_prompt():
82
 
83
- file_path = './webvid_prompt.txt'
84
- with open(file_path, 'r', encoding='utf-8') as file:
85
- lines = file.readlines()
86
- line_number = random.randint(0, len(lines) - 1)
87
- print(line_number + 1)
88
- random_line = lines[line_number].strip()
89
- print(random_line)
90
- return line_number + 1, random_line
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
  def create_remote_directory(remote_directory, video=False):
93
  global ssh_client
 
1
  import paramiko
2
  import numpy as np
3
+ import io, os, stat
4
  import gradio as gr
5
  from PIL import Image
6
  import requests
 
78
 
79
  random_line = random.choice(lines).strip()
80
  return random_line
 
81
 
82
+ def get_random_video_prompt(root_dir):
83
+ subdirs = [os.path.join(root_dir, d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d))]
84
+ if not subdirs:
85
+ raise NotImplementedError
86
+ selected_dir = random.choice(subdirs)
87
+ prompt_path = os.path.join(selected_dir, 'prompt.txt')
88
+
89
+ if os.path.exists(prompt_path):
90
+ str_list = []
91
+ with open(prompt_path, 'r', encoding='utf-8') as file:
92
+ for line in file:
93
+ str_list.append(line.strip())
94
+ prompt = str_list[0]
95
+ else:
96
+ raise NotImplementedError
97
+ return selected_dir, prompt
98
+
99
+ def get_ssh_random_video_prompt(root_dir, local_dir, model_names):
100
+ def is_directory(sftp, path):
101
+ try:
102
+ return stat.S_ISDIR(sftp.stat(path).st_mode)
103
+ except IOError:
104
+ return False
105
+ ssh = paramiko.SSHClient()
106
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
107
+ try:
108
+ ssh.connect(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
109
+ sftp = ssh.open_sftp()
110
+
111
+ remote_subdirs = sftp.listdir(root_dir)
112
+ remote_subdirs = [d for d in remote_subdirs if is_directory(sftp, os.path.join(root_dir, d))]
113
+
114
+ if not remote_subdirs:
115
+ print(f"No subdirectories found in {root_dir}")
116
+ raise NotImplementedError
117
+
118
+ chosen_subdir = random.choice(remote_subdirs)
119
+ chosen_subdir_path = os.path.join(root_dir, chosen_subdir)
120
+ print(f"Chosen subdirectory: {chosen_subdir_path}")
121
+
122
+ prompt_path = 'prompt.txt'
123
+ results = [prompt_path]
124
+ for name in model_names:
125
+ model_source, model_name, model_type = name.split("_")
126
+ video_path = f'{model_name}.mp4'
127
+ print(video_path)
128
+ results.append(video_path)
129
+
130
+ local_path = []
131
+ for tar_file in results:
132
+ remote_file_path = os.path.join(chosen_subdir_path, tar_file)
133
+ local_file_path = os.path.join(local_dir, tar_file)
134
+ sftp.get(remote_file_path, local_file_path)
135
+ local_path.append(local_file_path)
136
+ print(f"Downloaded {remote_file_path} to {local_file_path}")
137
+
138
+ if os.path.exists(local_path[0]):
139
+ str_list = []
140
+ with open(local_path[0], 'r', encoding='utf-8') as file:
141
+ for line in file:
142
+ str_list.append(line.strip())
143
+ prompt = str_list[0]
144
+ else:
145
+ raise NotImplementedError
146
+ except Exception as e:
147
+ print(f"An error occurred: {e}")
148
+ raise NotImplementedError
149
+ return prompt, local_path[1:]
150
 
151
  def create_remote_directory(remote_directory, video=False):
152
  global ssh_client
serve/utils.py CHANGED
@@ -209,20 +209,8 @@ def disable_order_buttons(textbox, video=False):
209
  return (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True))
210
  else:
211
  return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True))
212
- def disable_video_order_buttons(textbox, example1_text, example2_text, example3_text, example4_text):
213
- example_list = [example1_text, example2_text, example3_text, example4_text]
214
- if not textbox.strip():
215
- return (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(visible=False))
216
- else:
217
- if textbox == example_list[0]:
218
- return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(value=8))
219
- elif textbox == example_list[1]:
220
- return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(value=3))
221
- elif textbox == example_list[2]:
222
- return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(value=93))
223
- elif textbox == example_list[3]:
224
- return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(value=84))
225
- return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(visible=False))
226
 
227
  def clear_history():
228
  return None, "", None
 
209
  return (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True))
210
  else:
211
  return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True))
212
+ def disable_video_order_buttons():
213
+ return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True))
 
 
 
 
 
 
 
 
 
 
 
 
214
 
215
  def clear_history():
216
  return None, "", None
serve/vote_utils.py CHANGED
@@ -871,9 +871,8 @@ def generate_igm_annoy(gen_func, state0, state1, state2, state3, text, model_nam
871
  # save_any_image(state.output, f)
872
  # save_image_file_on_log_server(output_file)
873
 
874
- def generate_igv_annoy(gen_func, state0, state1, state2, state3, text, prompt_num, model_name0, model_name1, model_name2, model_name3, request: gr.Request):
875
- if not text.strip():
876
- return (gr.update(visible=False),) * 16
877
  if state0 is None:
878
  state0 = ImageStateIG(model_name0)
879
  if state1 is None:
@@ -892,8 +891,8 @@ def generate_igv_annoy(gen_func, state0, state1, state2, state3, text, prompt_nu
892
  model_name2 = ""
893
  model_name3 = ""
894
 
895
- generated_video0, generated_video1, generated_video2, generated_video3, model_name0, model_name1, model_name2, model_name3 \
896
- = gen_func(prompt_num, model_name0, model_name1, model_name2, model_name3)
897
  state0.prompt = text
898
  state1.prompt = text
899
  state2.prompt = text
@@ -912,7 +911,8 @@ def generate_igv_annoy(gen_func, state0, state1, state2, state3, text, prompt_nu
912
  yield state0, state1, state2, state3, generated_video0, generated_video1, generated_video2, generated_video3, \
913
  generated_video0, generated_video1, generated_video2, generated_video3, \
914
  gr.Markdown(f"### Model A: {model_name0}", visible=False), gr.Markdown(f"### Model B: {model_name1}", visible=False), \
915
- gr.Markdown(f"### Model C: {model_name2}", visible=False), gr.Markdown(f"### Model D: {model_name3}", visible=False)
 
916
 
917
 
918
  def generate_igm_annoy_museum(gen_func, state0, state1, state2, state3, model_name0, model_name1, model_name2, model_name3, request: gr.Request):
 
871
  # save_any_image(state.output, f)
872
  # save_image_file_on_log_server(output_file)
873
 
874
+ def generate_vg_annoy(gen_func, state0, state1, state2, state3, model_name0, model_name1, model_name2, model_name3, request: gr.Request):
875
+
 
876
  if state0 is None:
877
  state0 = ImageStateIG(model_name0)
878
  if state1 is None:
 
891
  model_name2 = ""
892
  model_name3 = ""
893
 
894
+ generated_video0, generated_video1, generated_video2, generated_video3, model_name0, model_name1, model_name2, model_name3, text, prompt_path \
895
+ = gen_func(model_name0, model_name1, model_name2, model_name3)
896
  state0.prompt = text
897
  state1.prompt = text
898
  state2.prompt = text
 
911
  yield state0, state1, state2, state3, generated_video0, generated_video1, generated_video2, generated_video3, \
912
  generated_video0, generated_video1, generated_video2, generated_video3, \
913
  gr.Markdown(f"### Model A: {model_name0}", visible=False), gr.Markdown(f"### Model B: {model_name1}", visible=False), \
914
+ gr.Markdown(f"### Model C: {model_name2}", visible=False), gr.Markdown(f"### Model D: {model_name3}", visible=False), \
915
+ text, prompt_path
916
 
917
 
918
  def generate_igm_annoy_museum(gen_func, state0, state1, state2, state3, model_name0, model_name1, model_name2, model_name3, request: gr.Request):
sorted_score_list.json CHANGED
@@ -1,196 +1,217 @@
1
  {
2
- "total_models": 27,
3
- "total_votes": 965,
4
- "last_updated": "2024-07-29",
5
  "sorted_score_list": [
6
  {
7
- "Rank": 0,
8
  "\ud83e\udd16 Model": "Midjourney-v6.0",
9
  "\u2b50 Score (\u03bc/\u03c3)": "32.36 (34.75/0.796)",
10
  "\ud83d\uddf3\ufe0f Votes": 425.0,
11
- "Organization": "Midjourney"
12
  },
13
  {
14
- "Rank": 1,
15
  "\ud83e\udd16 Model": "Midjourney-v5.0",
16
  "\u2b50 Score (\u03bc/\u03c3)": "31.36 (33.74/0.791)",
17
  "\ud83d\uddf3\ufe0f Votes": 438.0,
18
- "Organization": "Midjourney"
19
  },
20
  {
21
- "Rank": 2,
22
  "\ud83e\udd16 Model": "SD-v3.0",
23
  "\u2b50 Score (\u03bc/\u03c3)": "29.33 (31.7/0.788)",
24
  "\ud83d\uddf3\ufe0f Votes": 426.0,
25
- "Organization": "Stability AI"
26
  },
27
  {
28
- "Rank": 3,
29
  "\ud83e\udd16 Model": "Dalle-3",
30
  "\u2b50 Score (\u03bc/\u03c3)": "28.55 (30.94/0.795)",
31
  "\ud83d\uddf3\ufe0f Votes": 427.0,
32
- "Organization": "OpenAI"
33
- },
34
- {
35
- "Rank": 4,
36
- "\ud83e\udd16 Model": "Pixart-Sigma",
37
- "\u2b50 Score (\u03bc/\u03c3)": "26.33 (28.7/0.79)",
38
- "\ud83d\uddf3\ufe0f Votes": 441.0,
39
- "Organization": "PixArt-Alpha"
40
  },
41
  {
42
  "Rank": 5,
43
  "\ud83e\udd16 Model": "Proteus-v0.2",
44
- "\u2b50 Score (\u03bc/\u03c3)": "26.15 (28.52/0.79)",
45
- "\ud83d\uddf3\ufe0f Votes": 423.0,
46
- "Organization": "DataAutoGPT3"
47
  },
48
  {
49
  "Rank": 6,
50
- "\ud83e\udd16 Model": "Dreamshaper-xl",
51
- "\u2b50 Score (\u03bc/\u03c3)": "25.66 (28.02/0.786)",
52
- "\ud83d\uddf3\ufe0f Votes": 434.0,
53
- "Organization": "Lykon"
54
  },
55
  {
56
  "Rank": 7,
57
  "\ud83e\udd16 Model": "Open-Dalle-v1.1",
58
- "\u2b50 Score (\u03bc/\u03c3)": "25.51 (27.85/0.782)",
59
- "\ud83d\uddf3\ufe0f Votes": 429.0,
60
- "Organization": "DataAutoGPT3"
61
  },
62
  {
63
  "Rank": 8,
 
 
 
 
 
 
 
64
  "\ud83e\udd16 Model": "Deepfloyd-IF",
65
  "\u2b50 Score (\u03bc/\u03c3)": "25.22 (27.58/0.789)",
66
  "\ud83d\uddf3\ufe0f Votes": 426.0,
67
- "Organization": "DeepFloyd"
68
  },
69
  {
70
- "Rank": 9,
71
  "\ud83e\udd16 Model": "Realvisxl-v3.0",
72
  "\u2b50 Score (\u03bc/\u03c3)": "24.9 (27.26/0.785)",
73
  "\ud83d\uddf3\ufe0f Votes": 424.0,
74
- "Organization": "Realistic Vision"
75
  },
76
  {
77
- "Rank": 10,
78
  "\ud83e\udd16 Model": "Realvisxl-v2.0",
79
  "\u2b50 Score (\u03bc/\u03c3)": "24.75 (27.11/0.785)",
80
  "\ud83d\uddf3\ufe0f Votes": 436.0,
81
- "Organization": "Realistic Vision"
82
  },
83
  {
84
- "Rank": 11,
85
  "\ud83e\udd16 Model": "Kandinsky-v2.2",
86
  "\u2b50 Score (\u03bc/\u03c3)": "24.48 (26.83/0.786)",
87
  "\ud83d\uddf3\ufe0f Votes": 426.0,
88
- "Organization": "AI-Forever"
89
  },
90
  {
91
- "Rank": 12,
92
  "\ud83e\udd16 Model": "Dalle-2",
93
  "\u2b50 Score (\u03bc/\u03c3)": "23.04 (25.4/0.787)",
94
  "\ud83d\uddf3\ufe0f Votes": 423.0,
95
- "Organization": "OpenAI"
96
  },
97
  {
98
- "Rank": 13,
99
  "\ud83e\udd16 Model": "Playground-v2.5",
100
  "\u2b50 Score (\u03bc/\u03c3)": "21.98 (24.36/0.795)",
101
  "\ud83d\uddf3\ufe0f Votes": 430.0,
102
- "Organization": "Playground AI"
103
  },
104
  {
105
- "Rank": 14,
106
  "\ud83e\udd16 Model": "Kandinsky-v2.0",
107
  "\u2b50 Score (\u03bc/\u03c3)": "21.86 (24.23/0.791)",
108
  "\ud83d\uddf3\ufe0f Votes": 427.0,
109
- "Organization": "AI-Forever"
110
  },
111
  {
112
- "Rank": 15,
113
  "\ud83e\udd16 Model": "SDXL-turbo",
114
  "\u2b50 Score (\u03bc/\u03c3)": "21.05 (23.42/0.79)",
115
  "\ud83d\uddf3\ufe0f Votes": 436.0,
116
- "Organization": "Stability AI"
117
  },
118
  {
119
- "Rank": 16,
120
  "\ud83e\udd16 Model": "Playground-v2.0",
121
  "\u2b50 Score (\u03bc/\u03c3)": "20.71 (23.1/0.795)",
122
  "\ud83d\uddf3\ufe0f Votes": 449.0,
123
- "Organization": "Playground AI"
124
  },
125
  {
126
- "Rank": 17,
127
  "\ud83e\udd16 Model": "Openjourney-v4",
128
  "\u2b50 Score (\u03bc/\u03c3)": "20.4 (22.76/0.789)",
129
  "\ud83d\uddf3\ufe0f Votes": 426.0,
130
- "Organization": "Prompthero"
131
  },
132
  {
133
- "Rank": 18,
134
  "\ud83e\udd16 Model": "SD-v2.1",
135
  "\u2b50 Score (\u03bc/\u03c3)": "20.19 (22.57/0.792)",
136
  "\ud83d\uddf3\ufe0f Votes": 424.0,
137
- "Organization": "Stability AI"
138
  },
139
  {
140
- "Rank": 19,
141
  "\ud83e\udd16 Model": "LCM-v1.5",
142
  "\u2b50 Score (\u03bc/\u03c3)": "19.79 (22.18/0.797)",
143
  "\ud83d\uddf3\ufe0f Votes": 429.0,
144
- "Organization": "Tsinghua"
145
  },
146
  {
147
- "Rank": 20,
148
  "\ud83e\udd16 Model": "SDXL",
149
  "\u2b50 Score (\u03bc/\u03c3)": "19.78 (22.15/0.789)",
150
  "\ud83d\uddf3\ufe0f Votes": 441.0,
151
- "Organization": "Stability AI"
152
  },
153
  {
154
- "Rank": 21,
155
  "\ud83e\udd16 Model": "SSD-1b",
156
  "\u2b50 Score (\u03bc/\u03c3)": "18.53 (20.91/0.794)",
157
  "\ud83d\uddf3\ufe0f Votes": 427.0,
158
- "Organization": "Segmind"
159
  },
160
  {
161
- "Rank": 22,
162
  "\ud83e\udd16 Model": "SD-v1.5",
163
  "\u2b50 Score (\u03bc/\u03c3)": "18.52 (20.92/0.799)",
164
  "\ud83d\uddf3\ufe0f Votes": 437.0,
165
- "Organization": "Stability AI"
166
  },
167
  {
168
- "Rank": 23,
169
  "\ud83e\udd16 Model": "SD-turbo",
170
  "\u2b50 Score (\u03bc/\u03c3)": "18.41 (20.8/0.794)",
171
  "\ud83d\uddf3\ufe0f Votes": 423.0,
172
- "Organization": "Stability AI"
173
  },
174
  {
175
- "Rank": 24,
176
  "\ud83e\udd16 Model": "Stable-cascade",
177
  "\u2b50 Score (\u03bc/\u03c3)": "15.27 (17.64/0.789)",
178
  "\ud83d\uddf3\ufe0f Votes": 426.0,
179
- "Organization": "Stability AI"
180
  },
181
  {
182
- "Rank": 25,
183
  "\ud83e\udd16 Model": "SDXL-Lightning",
184
  "\u2b50 Score (\u03bc/\u03c3)": "15.05 (17.43/0.794)",
185
  "\ud83d\uddf3\ufe0f Votes": 428.0,
186
- "Organization": "ByteDance"
187
  },
188
  {
189
- "Rank": 26,
190
  "\ud83e\udd16 Model": "SDXL-Deepcache",
191
  "\u2b50 Score (\u03bc/\u03c3)": "10.0 (12.46/0.821)",
192
  "\ud83d\uddf3\ufe0f Votes": 423.0,
193
- "Organization": "NUS"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
  }
195
  ]
196
  }
 
1
  {
2
+ "total_models": 30,
3
+ "total_votes": 966,
4
+ "last_updated": "2024-08-12",
5
  "sorted_score_list": [
6
  {
7
+ "Rank": 1,
8
  "\ud83e\udd16 Model": "Midjourney-v6.0",
9
  "\u2b50 Score (\u03bc/\u03c3)": "32.36 (34.75/0.796)",
10
  "\ud83d\uddf3\ufe0f Votes": 425.0,
11
+ "\ud83c\udfdb\ufe0f Organization": "Midjourney"
12
  },
13
  {
14
+ "Rank": 2,
15
  "\ud83e\udd16 Model": "Midjourney-v5.0",
16
  "\u2b50 Score (\u03bc/\u03c3)": "31.36 (33.74/0.791)",
17
  "\ud83d\uddf3\ufe0f Votes": 438.0,
18
+ "\ud83c\udfdb\ufe0f Organization": "Midjourney"
19
  },
20
  {
21
+ "Rank": 3,
22
  "\ud83e\udd16 Model": "SD-v3.0",
23
  "\u2b50 Score (\u03bc/\u03c3)": "29.33 (31.7/0.788)",
24
  "\ud83d\uddf3\ufe0f Votes": 426.0,
25
+ "\ud83c\udfdb\ufe0f Organization": "Stability AI"
26
  },
27
  {
28
+ "Rank": 4,
29
  "\ud83e\udd16 Model": "Dalle-3",
30
  "\u2b50 Score (\u03bc/\u03c3)": "28.55 (30.94/0.795)",
31
  "\ud83d\uddf3\ufe0f Votes": 427.0,
32
+ "\ud83c\udfdb\ufe0f Organization": "OpenAI"
 
 
 
 
 
 
 
33
  },
34
  {
35
  "Rank": 5,
36
  "\ud83e\udd16 Model": "Proteus-v0.2",
37
+ "\u2b50 Score (\u03bc/\u03c3)": "26.42 (28.78/0.789)",
38
+ "\ud83d\uddf3\ufe0f Votes": 426.0,
39
+ "\ud83c\udfdb\ufe0f Organization": "DataAutoGPT3"
40
  },
41
  {
42
  "Rank": 6,
43
+ "\ud83e\udd16 Model": "Pixart-Sigma",
44
+ "\u2b50 Score (\u03bc/\u03c3)": "26.32 (28.69/0.79)",
45
+ "\ud83d\uddf3\ufe0f Votes": 444.0,
46
+ "\ud83c\udfdb\ufe0f Organization": "PixArt-Alpha"
47
  },
48
  {
49
  "Rank": 7,
50
  "\ud83e\udd16 Model": "Open-Dalle-v1.1",
51
+ "\u2b50 Score (\u03bc/\u03c3)": "25.52 (27.86/0.782)",
52
+ "\ud83d\uddf3\ufe0f Votes": 432.0,
53
+ "\ud83c\udfdb\ufe0f Organization": "DataAutoGPT3"
54
  },
55
  {
56
  "Rank": 8,
57
+ "\ud83e\udd16 Model": "Dreamshaper-xl",
58
+ "\u2b50 Score (\u03bc/\u03c3)": "25.4 (27.76/0.786)",
59
+ "\ud83d\uddf3\ufe0f Votes": 437.0,
60
+ "\ud83c\udfdb\ufe0f Organization": "Lykon"
61
+ },
62
+ {
63
+ "Rank": 9,
64
  "\ud83e\udd16 Model": "Deepfloyd-IF",
65
  "\u2b50 Score (\u03bc/\u03c3)": "25.22 (27.58/0.789)",
66
  "\ud83d\uddf3\ufe0f Votes": 426.0,
67
+ "\ud83c\udfdb\ufe0f Organization": "DeepFloyd"
68
  },
69
  {
70
+ "Rank": 10,
71
  "\ud83e\udd16 Model": "Realvisxl-v3.0",
72
  "\u2b50 Score (\u03bc/\u03c3)": "24.9 (27.26/0.785)",
73
  "\ud83d\uddf3\ufe0f Votes": 424.0,
74
+ "\ud83c\udfdb\ufe0f Organization": "Realistic Vision"
75
  },
76
  {
77
+ "Rank": 11,
78
  "\ud83e\udd16 Model": "Realvisxl-v2.0",
79
  "\u2b50 Score (\u03bc/\u03c3)": "24.75 (27.11/0.785)",
80
  "\ud83d\uddf3\ufe0f Votes": 436.0,
81
+ "\ud83c\udfdb\ufe0f Organization": "Realistic Vision"
82
  },
83
  {
84
+ "Rank": 12,
85
  "\ud83e\udd16 Model": "Kandinsky-v2.2",
86
  "\u2b50 Score (\u03bc/\u03c3)": "24.48 (26.83/0.786)",
87
  "\ud83d\uddf3\ufe0f Votes": 426.0,
88
+ "\ud83c\udfdb\ufe0f Organization": "AI-Forever"
89
  },
90
  {
91
+ "Rank": 13,
92
  "\ud83e\udd16 Model": "Dalle-2",
93
  "\u2b50 Score (\u03bc/\u03c3)": "23.04 (25.4/0.787)",
94
  "\ud83d\uddf3\ufe0f Votes": 423.0,
95
+ "\ud83c\udfdb\ufe0f Organization": "OpenAI"
96
  },
97
  {
98
+ "Rank": 14,
99
  "\ud83e\udd16 Model": "Playground-v2.5",
100
  "\u2b50 Score (\u03bc/\u03c3)": "21.98 (24.36/0.795)",
101
  "\ud83d\uddf3\ufe0f Votes": 430.0,
102
+ "\ud83c\udfdb\ufe0f Organization": "Playground AI"
103
  },
104
  {
105
+ "Rank": 15,
106
  "\ud83e\udd16 Model": "Kandinsky-v2.0",
107
  "\u2b50 Score (\u03bc/\u03c3)": "21.86 (24.23/0.791)",
108
  "\ud83d\uddf3\ufe0f Votes": 427.0,
109
+ "\ud83c\udfdb\ufe0f Organization": "AI-Forever"
110
  },
111
  {
112
+ "Rank": 16,
113
  "\ud83e\udd16 Model": "SDXL-turbo",
114
  "\u2b50 Score (\u03bc/\u03c3)": "21.05 (23.42/0.79)",
115
  "\ud83d\uddf3\ufe0f Votes": 436.0,
116
+ "\ud83c\udfdb\ufe0f Organization": "Stability AI"
117
  },
118
  {
119
+ "Rank": 17,
120
  "\ud83e\udd16 Model": "Playground-v2.0",
121
  "\u2b50 Score (\u03bc/\u03c3)": "20.71 (23.1/0.795)",
122
  "\ud83d\uddf3\ufe0f Votes": 449.0,
123
+ "\ud83c\udfdb\ufe0f Organization": "Playground AI"
124
  },
125
  {
126
+ "Rank": 18,
127
  "\ud83e\udd16 Model": "Openjourney-v4",
128
  "\u2b50 Score (\u03bc/\u03c3)": "20.4 (22.76/0.789)",
129
  "\ud83d\uddf3\ufe0f Votes": 426.0,
130
+ "\ud83c\udfdb\ufe0f Organization": "Prompthero"
131
  },
132
  {
133
+ "Rank": 19,
134
  "\ud83e\udd16 Model": "SD-v2.1",
135
  "\u2b50 Score (\u03bc/\u03c3)": "20.19 (22.57/0.792)",
136
  "\ud83d\uddf3\ufe0f Votes": 424.0,
137
+ "\ud83c\udfdb\ufe0f Organization": "Stability AI"
138
  },
139
  {
140
+ "Rank": 20,
141
  "\ud83e\udd16 Model": "LCM-v1.5",
142
  "\u2b50 Score (\u03bc/\u03c3)": "19.79 (22.18/0.797)",
143
  "\ud83d\uddf3\ufe0f Votes": 429.0,
144
+ "\ud83c\udfdb\ufe0f Organization": "Tsinghua"
145
  },
146
  {
147
+ "Rank": 21,
148
  "\ud83e\udd16 Model": "SDXL",
149
  "\u2b50 Score (\u03bc/\u03c3)": "19.78 (22.15/0.789)",
150
  "\ud83d\uddf3\ufe0f Votes": 441.0,
151
+ "\ud83c\udfdb\ufe0f Organization": "Stability AI"
152
  },
153
  {
154
+ "Rank": 22,
155
  "\ud83e\udd16 Model": "SSD-1b",
156
  "\u2b50 Score (\u03bc/\u03c3)": "18.53 (20.91/0.794)",
157
  "\ud83d\uddf3\ufe0f Votes": 427.0,
158
+ "\ud83c\udfdb\ufe0f Organization": "Segmind"
159
  },
160
  {
161
+ "Rank": 23,
162
  "\ud83e\udd16 Model": "SD-v1.5",
163
  "\u2b50 Score (\u03bc/\u03c3)": "18.52 (20.92/0.799)",
164
  "\ud83d\uddf3\ufe0f Votes": 437.0,
165
+ "\ud83c\udfdb\ufe0f Organization": "Stability AI"
166
  },
167
  {
168
+ "Rank": 24,
169
  "\ud83e\udd16 Model": "SD-turbo",
170
  "\u2b50 Score (\u03bc/\u03c3)": "18.41 (20.8/0.794)",
171
  "\ud83d\uddf3\ufe0f Votes": 423.0,
172
+ "\ud83c\udfdb\ufe0f Organization": "Stability AI"
173
  },
174
  {
175
+ "Rank": 25,
176
  "\ud83e\udd16 Model": "Stable-cascade",
177
  "\u2b50 Score (\u03bc/\u03c3)": "15.27 (17.64/0.789)",
178
  "\ud83d\uddf3\ufe0f Votes": 426.0,
179
+ "\ud83c\udfdb\ufe0f Organization": "Stability AI"
180
  },
181
  {
182
+ "Rank": 26,
183
  "\ud83e\udd16 Model": "SDXL-Lightning",
184
  "\u2b50 Score (\u03bc/\u03c3)": "15.05 (17.43/0.794)",
185
  "\ud83d\uddf3\ufe0f Votes": 428.0,
186
+ "\ud83c\udfdb\ufe0f Organization": "ByteDance"
187
  },
188
  {
189
+ "Rank": 27,
190
  "\ud83e\udd16 Model": "SDXL-Deepcache",
191
  "\u2b50 Score (\u03bc/\u03c3)": "10.0 (12.46/0.821)",
192
  "\ud83d\uddf3\ufe0f Votes": 423.0,
193
+ "\ud83c\udfdb\ufe0f Organization": "NUS"
194
+ },
195
+ {
196
+ "Rank": 28,
197
+ "\ud83e\udd16 Model": "FLUX.1-schnell",
198
+ "\u2b50 Score (\u03bc/\u03c3)": "0.0 (25.0/8.333)",
199
+ "\ud83d\uddf3\ufe0f Votes": 0.0,
200
+ "\ud83c\udfdb\ufe0f Organization": "Black Forest Labs"
201
+ },
202
+ {
203
+ "Rank": 29,
204
+ "\ud83e\udd16 Model": "FLUX.1-pro",
205
+ "\u2b50 Score (\u03bc/\u03c3)": "0.0 (25.0/8.333)",
206
+ "\ud83d\uddf3\ufe0f Votes": 0.0,
207
+ "\ud83c\udfdb\ufe0f Organization": "Black Forest Labs"
208
+ },
209
+ {
210
+ "Rank": 30,
211
+ "\ud83e\udd16 Model": "FLUX.1-dev",
212
+ "\u2b50 Score (\u03bc/\u03c3)": "0.0 (25.0/8.333)",
213
+ "\ud83d\uddf3\ufe0f Votes": 0.0,
214
+ "\ud83c\udfdb\ufe0f Organization": "Black Forest Labs"
215
  }
216
  ]
217
  }
sorted_score_list_video.json CHANGED
@@ -1,49 +1,84 @@
1
  {
2
- "total_models": 6,
3
- "total_votes": 143,
4
- "last_updated": "2024-07-31",
5
  "sorted_score_list": [
6
- {
7
- "Rank": 0,
8
- "\ud83e\udd16 Model": "OpenSora",
9
- "\u2b50 Score (\u03bc/\u03c3)": "26.15 (28.54/0.796)",
10
- "\ud83d\uddf3\ufe0f Votes": 264.0,
11
- "Organization": "HPC-AI"
12
- },
13
  {
14
  "Rank": 1,
15
- "\ud83e\udd16 Model": "LaVie",
16
- "\u2b50 Score (\u03bc/\u03c3)": "25.85 (28.23/0.793)",
17
- "\ud83d\uddf3\ufe0f Votes": 243.0,
18
- "Organization": "Shanghai AI Lab"
19
  },
20
  {
21
  "Rank": 2,
22
- "\ud83e\udd16 Model": "VideoCrafter2",
23
- "\u2b50 Score (\u03bc/\u03c3)": "23.8 (26.23/0.811)",
24
- "\ud83d\uddf3\ufe0f Votes": 321.0,
25
- "Organization": "Tencent"
26
  },
27
  {
28
  "Rank": 3,
29
- "\ud83e\udd16 Model": "AnimateDiff",
30
- "\u2b50 Score (\u03bc/\u03c3)": "23.06 (25.5/0.813)",
31
- "\ud83d\uddf3\ufe0f Votes": 375.0,
32
- "Organization": "CUHK etc."
33
  },
34
  {
35
  "Rank": 4,
36
- "\ud83e\udd16 Model": "StableVideoDiffusion",
37
- "\u2b50 Score (\u03bc/\u03c3)": "23.03 (25.51/0.827)",
38
- "\ud83d\uddf3\ufe0f Votes": 267.0,
39
- "Organization": "Stability AI"
40
  },
41
  {
42
  "Rank": 5,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  "\ud83e\udd16 Model": "Zeroscope-v2-xl",
44
- "\u2b50 Score (\u03bc/\u03c3)": "11.91 (15.16/1.084)",
45
- "\ud83d\uddf3\ufe0f Votes": 246.0,
46
- "Organization": "Cerspense"
47
  }
48
  ]
49
  }
 
1
  {
2
+ "total_models": 11,
3
+ "total_votes": 332,
4
+ "last_updated": "2024-08-13",
5
  "sorted_score_list": [
 
 
 
 
 
 
 
6
  {
7
  "Rank": 1,
8
+ "\ud83e\udd16 Model": "Sora",
9
+ "\u2b50 Score (\u03bc/\u03c3)": "35.95 (40.65/1.565)",
10
+ "\ud83d\uddf3\ufe0f Votes": 105.0,
11
+ "\ud83c\udfdb\ufe0f Organization": "OpenAI"
12
  },
13
  {
14
  "Rank": 2,
15
+ "\ud83e\udd16 Model": "Runway-Gen3",
16
+ "\u2b50 Score (\u03bc/\u03c3)": "32.5 (35.75/1.086)",
17
+ "\ud83d\uddf3\ufe0f Votes": 122.0,
18
+ "\ud83c\udfdb\ufe0f Organization": "Runway"
19
  },
20
  {
21
  "Rank": 3,
22
+ "\ud83e\udd16 Model": "Pika-v1.0",
23
+ "\u2b50 Score (\u03bc/\u03c3)": "29.89 (32.7/0.94)",
24
+ "\ud83d\uddf3\ufe0f Votes": 119.0,
25
+ "\ud83c\udfdb\ufe0f Organization": "Pika"
26
  },
27
  {
28
  "Rank": 4,
29
+ "\ud83e\udd16 Model": "Pika-beta",
30
+ "\u2b50 Score (\u03bc/\u03c3)": "28.52 (31.28/0.919)",
31
+ "\ud83d\uddf3\ufe0f Votes": 119.0,
32
+ "\ud83c\udfdb\ufe0f Organization": "Pika"
33
  },
34
  {
35
  "Rank": 5,
36
+ "\ud83e\udd16 Model": "Runway-Gen2",
37
+ "\u2b50 Score (\u03bc/\u03c3)": "27.8 (30.44/0.879)",
38
+ "\ud83d\uddf3\ufe0f Votes": 111.0,
39
+ "\ud83c\udfdb\ufe0f Organization": "Runway"
40
+ },
41
+ {
42
+ "Rank": 6,
43
+ "\ud83e\udd16 Model": "OpenSora",
44
+ "\u2b50 Score (\u03bc/\u03c3)": "27.11 (29.57/0.818)",
45
+ "\ud83d\uddf3\ufe0f Votes": 709.0,
46
+ "\ud83c\udfdb\ufe0f Organization": "HPC-AI"
47
+ },
48
+ {
49
+ "Rank": 7,
50
+ "\ud83e\udd16 Model": "LaVie",
51
+ "\u2b50 Score (\u03bc/\u03c3)": "25.67 (28.13/0.818)",
52
+ "\ud83d\uddf3\ufe0f Votes": 664.0,
53
+ "\ud83c\udfdb\ufe0f Organization": "Shanghai AI Lab"
54
+ },
55
+ {
56
+ "Rank": 8,
57
+ "\ud83e\udd16 Model": "VideoCrafter2",
58
+ "\u2b50 Score (\u03bc/\u03c3)": "22.36 (24.88/0.842)",
59
+ "\ud83d\uddf3\ufe0f Votes": 717.0,
60
+ "\ud83c\udfdb\ufe0f Organization": "Tencent"
61
+ },
62
+ {
63
+ "Rank": 9,
64
+ "\ud83e\udd16 Model": "StableVideoDiffusion",
65
+ "\u2b50 Score (\u03bc/\u03c3)": "21.87 (24.39/0.841)",
66
+ "\ud83d\uddf3\ufe0f Votes": 510.0,
67
+ "\ud83c\udfdb\ufe0f Organization": "Stability AI"
68
+ },
69
+ {
70
+ "Rank": 10,
71
+ "\ud83e\udd16 Model": "AnimateDiff",
72
+ "\u2b50 Score (\u03bc/\u03c3)": "21.03 (23.56/0.844)",
73
+ "\ud83d\uddf3\ufe0f Votes": 480.0,
74
+ "\ud83c\udfdb\ufe0f Organization": "CUHK etc."
75
+ },
76
+ {
77
+ "Rank": 11,
78
  "\ud83e\udd16 Model": "Zeroscope-v2-xl",
79
+ "\u2b50 Score (\u03bc/\u03c3)": "12.54 (15.71/1.055)",
80
+ "\ud83d\uddf3\ufe0f Votes": 348.0,
81
+ "\ud83c\udfdb\ufe0f Organization": "Cerspense"
82
  }
83
  ]
84
  }
webvid_prompt.txt DELETED
@@ -1,100 +0,0 @@
1
- Cloudy moscow kremlin time lapse
2
- Sharp knife to cut delicious smoked fish
3
- A baker turns freshly baked loaves of sourdough bread
4
- Shot of beautiful dinnerware and cutlery on a banquet table at restaurant
5
- Corn husks in field for agriculture farming
6
- Pear fruits hanging on a branch of a tree rulevogo
7
- Peaceful girl doing yoga meditate virabhasana sunrise coast slow motion
8
- Chef with recipe book watching young cook preparing dish in the kitchen
9
- Man walking in deep snow under the branches
10
- Fist patting down sand in an orange bucket
11
- Old decrepit house in the center of the city surrounded by new buildings
12
- Green grape plants in the middle of nature
13
- Climbers go up to the mountain pass
14
- Bodrum holiday resort seashore marina
15
- A flight over a beautiful stream in the forest
16
- Attractive young couple at home laughing at funny internet joke online using digital tablet having fun
17
- A small animation for the design video about redcurrant
18
- Time lapse of sky clouds storm
19
- Happy young caucasian baby boy playing toy bucket spade sandy beach with father while mother takes photograph sun lens flare shot on red epic
20
- A young woman having dental treatment
21
- Huge waterfall in chilean scenery huilo huilo
22
- Yellow bird and nest on the branch
23
- Little girl playing in the garden in summer
24
- Hands of girl fall out sweets from bowl on white floor
25
- Woman hairdresser put on curlers to long hair plus size woman
26
- Pov of two hands and young woman putting the propellers on a drone
27
- Video of watering a flower pot
28
- Woman look at the city in hong kong
29
- Sexy woman posing sitting on chair
30
- Coconat palm tree on sky tropical view
31
- Dog sitting tied to a post and waiting for the owner
32
- Asian female cyclist blogger live stream happily to show her ride on the road
33
- Pretty student learning in computer class at the university
34
- Storm clouds and rainbow over bordeaux vineyards
35
- Aerial view of roof tops of old city
36
- Mountain area with fog and mist time lapse
37
- The famous waterfall of niagara falls in canada
38
- Low section of ice fisherman fishing in tent
39
- Medical students in training assisting surgeon in hospital operating room
40
- Passing by a cruise ship in the evening in ha long bay
41
- London skyline with building site at sunset
42
- The red deer female close up in the summertime
43
- Chinese new year flat icon chinese decor lantern
44
- The waiters in the restaurant put the glasses on buffet table
45
- Makeup artist dips brush into a makeup kit
46
- Panning down on team of businesswomen and businessmen in an office working and talking about new projects
47
- Time lapse in the mountains of brazil tree line
48
- Sick man in bed measuring temperature feeling fever medium shot
49
- Santa claus sitting in a chair with a little girl dreaming about her christmas presents
50
- Abused woman lying in the bathtub screams in despair
51
- Young mime is juggling in the park
52
- Countryside timelapse in spain
53
- Green branches of a coniferous tree with needles fill the entire screen
54
- Closeup of pouring healthy black chia seeds into a bowl
55
- South korea high resolution victory concept
56
- Inspecting south african passport
57
- Attractive business woman reading documents in busy office female executive working on company project deadline
58
- Slow dolly in towards beautiful waterfall through bright fall colors
59
- Digital animation of american flag waving while background shows silhouette of hill with tree during sunset
60
- Hud multipath target monitoring
61
- Happy young businessman thinking while using phone
62
- Thick morning fog in the summer forest
63
- Group of children play a game of reaction speed in scientific museum
64
- Abstract chrisanthemum background
65
- Inside processor of a battery chip
66
- Big and small frog sitting in the water and swimming away
67
- Street lamp fallen down on ground with shattered glass from snow storm
68
- Rustic cabin in early autumn in the great smoky mountains with dark shadows and some autumn color as seen from a window pane from another historical cabin
69
- Night vision view of timber wolf
70
- Pseudo galena by pyrrhotite hematitiziran
71
- Gorgeous couple in love have a romantic walk in magnificent budapest blurred background at night
72
- Close up thoughtful pretty young woman relaxing at the beach resort
73
- Happy senior woman working on laptop computer
74
- Singing nightingale on a tree branch
75
- Pouring of rice water from jug into glass on table
76
- Flaming gorge national park in summer in utah with handheld shot of brown cow grazing closeup on grass herd near ranch
77
- Aerial flyby miami beach condominiums
78
- Jungle covered mountain on panay island in the philippines
79
- Close up portrait of two caucasian young girl and boy playing together on digital tablet
80
- White spools are getting wound by textile equipment
81
- The wedge newport beach california waves crashing aerial footage
82
- Peak at kaengkrachan national park thailand
83
- Christmas bauble dropping and bouncing beside crackers and presents in slow motion
84
- Happy extended family on the sofa at home
85
- Closeup shot of young female confectioner decorating delicious handmade cake placed on rotating stand with freshly baked meringues
86
- Non smoking sign in the airplane cabin
87
- Swan opening up wings and flapping them to dry off while grooming
88
- A series of explosions in the winter tundra
89
- Man actor wearing a tiger clothe preparing his acting with a tambourine
90
- Abstract shape pattern texture moving background
91
- Above the thermal station two pipes smoking in the gray sky
92
- Vacation water villas on tropical island
93
- Dense fog glows orange and covers hills at dawn
94
- People walking on the beach in timelapse
95
- Aerial view of wind turbines energy production in yellow fields
96
- Preparing and mixing a fresh salad
97
- A jetsurfer taking various turns while jet surfing on water
98
- Happy little girl jumping in swimming pool and learning how to swim with help of father
99
- Student boy wash the school board
100
- Office workers showing work on laptop