Update README.md
README.md CHANGED

@@ -650,7 +650,7 @@ import torch
 from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
 from qwen_vl_utils import process_vision_info

-model_path = "erax/EraX-VL-7B-
+model_path = "erax-ai/EraX-VL-7B-V2.0-Preview"

 model = Qwen2VLForConditionalGeneration.from_pretrained(
     model_path,
@@ -712,13 +712,13 @@ inputs = inputs.to("cuda")
 # Generation configs
 generation_config = model.generation_config
 generation_config.do_sample = True
-generation_config.temperature =
+generation_config.temperature = 0.01
 generation_config.top_k = 1
-generation_config.top_p = 0.
+generation_config.top_p = 0.1
 generation_config.min_p = 0.1
-generation_config.best_of =
+generation_config.best_of = 1
 generation_config.max_new_tokens = 2048
-generation_config.repetition_penalty = 1.
+generation_config.repetition_penalty = 1.05

 # Inference
 generated_ids = model.generate(**inputs, generation_config=generation_config)
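For context, a minimal end-to-end sketch of how the updated snippet fits together. Only the lines in the two hunks above come from the diff; the dtype, device placement, message contents, and image path below are assumptions filled in from the standard Qwen2-VL / qwen_vl_utils usage pattern, not from this commit.

import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

# New model id from this commit.
model_path = "erax-ai/EraX-VL-7B-V2.0-Preview"

model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,  # assumption: dtype/device are outside this hunk
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_path)

# Placeholder input: any local image path or URL works here.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "sample.jpg"},  # hypothetical image
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
).to("cuda")

# Generation configs, with the values this commit fills in.
generation_config = model.generation_config
generation_config.do_sample = True
generation_config.temperature = 0.01
generation_config.top_k = 1
generation_config.top_p = 0.1
generation_config.min_p = 0.1
generation_config.best_of = 1  # vLLM-style knob; plain transformers generate() does not use it
generation_config.max_new_tokens = 2048
generation_config.repetition_penalty = 1.05

# Inference: generate, strip the prompt tokens, and decode.
generated_ids = model.generate(**inputs, generation_config=generation_config)
generated_ids_trimmed = [
    out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)
]
print(processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True)[0])

Note that with top_k = 1 the sampler keeps only the single most likely token at each step, so despite do_sample = True these settings amount to near-greedy decoding; the very low temperature and top_p mostly reinforce that, with repetition_penalty = 1.05 as a mild guard against loops.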