ZhangYuanhan committed
Update README.md
README.md CHANGED
@@ -129,7 +129,7 @@ base_model:
 
 ## Model Summary
 
-
+In contrast to lmms-lab/LLaVA-NeXT-Video-7B-Qwen2, this is a 7B model trained on [LLaVA-NeXT-Video-178K](https://huggingface.co/datasets/lmms-lab/LLaVA-NeXT-Video-SFT-Data) only, based on the Qwen2 language model with a context window of 32K tokens.
 
 This model supports at most 110 frames.
 
@@ -183,19 +183,20 @@ def load_video(self, video_path, max_frames_num,fps=1,force_sample=False):
     spare_frames = vr.get_batch(frame_idx).asnumpy()
     # import pdb;pdb.set_trace()
     return spare_frames,frame_time,video_time
 pretrained = "lmms-lab/LLaVA-NeXT-Video-7B-Qwen2-Video-Only"
 model_name = "llava_qwen"
 device = "cuda"
 device_map = "auto"
-tokenizer, model, image_processor, max_length = load_pretrained_model(pretrained, None, model_name, device_map=device_map) # Add any other thing you want to pass in llava_model_args
+tokenizer, model, image_processor, max_length = load_pretrained_model(pretrained, None, model_name, torch_dtype="bfloat16", device_map=device_map) # Add any other thing you want to pass in llava_model_args
 model.eval()
 video_path = "XXXX"
-max_frames_num = "
+max_frames_num = 64
 video,frame_time,video_time = load_video(video_path, max_frames_num, 1, force_sample=True)
 video = image_processor.preprocess(video, return_tensors="pt")["pixel_values"].cuda().bfloat16()
 video = [video]
 conv_template = "qwen_1_5" # Make sure you use correct chat template for different models
-
+time_instruction = f"The video lasts for {video_time:.2f} seconds, and {len(video[0])} frames are uniformly sampled from it. These frames are located at {frame_time}. Please answer the following questions related to this video."
+question = DEFAULT_IMAGE_TOKEN + f"{time_instruction}\nPlease describe this video in detail."
 conv = copy.deepcopy(conv_templates[conv_template])
 conv.append_message(conv.roles[0], question)
 conv.append_message(conv.roles[1], None)
@@ -204,12 +205,12 @@ input_ids = tokenizer_image_token(prompt_question, tokenizer, IMAGE_TOKEN_INDEX,
 cont = model.generate(
     input_ids,
     images=video,
     modalities=["video"],
     do_sample=False,
     temperature=0,
     max_new_tokens=4096,
 )
-text_outputs = tokenizer.batch_decode(cont, skip_special_tokens=True)
+text_outputs = tokenizer.batch_decode(cont, skip_special_tokens=True)[0].strip()
 print(text_outputs)
 ```
 
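The inference snippet touched by this commit is not self-contained: it shows only the tail of `load_video`, uses `DEFAULT_IMAGE_TOKEN`, `conv_templates`, and `tokenizer_image_token` without imports, and the hunk header's `def load_video(self, ...)` carries a vestigial `self` that would break the call `load_video(video_path, max_frames_num, 1, force_sample=True)`. Below is a minimal sketch of the missing pieces, assuming the decord-based uniform sampling and import paths used in the LLaVA-NeXT repository; the `load_video` body is reconstructed from the tail shown in the diff and is an assumption, not part of this commit.

```python
# Sketch of the parts the diff omits (assumed, following LLaVA-NeXT conventions).
import copy
import numpy as np
from decord import VideoReader, cpu

from llava.model.builder import load_pretrained_model
from llava.mm_utils import tokenizer_image_token
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from llava.conversation import conv_templates


def load_video(video_path, max_frames_num, fps=1, force_sample=False):
    """Sample up to max_frames_num frames and report their timestamps (assumed body)."""
    if max_frames_num == 0:
        return np.zeros((1, 336, 336, 3)), "", 0
    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
    total_frame_num = len(vr)
    video_time = total_frame_num / vr.get_avg_fps()
    # Take roughly one frame every `fps` seconds of video...
    step = max(1, round(vr.get_avg_fps() / fps))
    frame_idx = list(range(0, total_frame_num, step))
    frame_time = [i / vr.get_avg_fps() for i in frame_idx]
    # ...but fall back to uniform sampling when the budget is exceeded (or forced).
    if len(frame_idx) > max_frames_num or force_sample:
        frame_idx = np.linspace(0, total_frame_num - 1, max_frames_num, dtype=int).tolist()
        frame_time = [i / vr.get_avg_fps() for i in frame_idx]
    frame_time = ",".join([f"{t:.2f}s" for t in frame_time])
    spare_frames = vr.get_batch(frame_idx).asnumpy()
    return spare_frames, frame_time, video_time
```

The lines between the hunks (for example `prompt_question = conv.get_prompt()` and the `tokenizer_image_token` call visible in the last hunk header) are unchanged context from the README and are not repeated here.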
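The summary states the model supports at most 110 frames, while the snippet samples 64. If you raise `max_frames_num`, it presumably should stay within that budget; a small hedged sketch (the 110 value comes from the summary above, and `requested_frames` is a hypothetical variable, not from the card):

```python
# Keep uniform sampling within the 110-frame budget stated in the model summary.
MAX_SUPPORTED_FRAMES = 110   # from "This model supports at most 110 frames."
requested_frames = 128       # hypothetical request that would exceed the budget
max_frames_num = min(requested_frames, MAX_SUPPORTED_FRAMES)
video, frame_time, video_time = load_video(video_path, max_frames_num, 1, force_sample=True)
```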