erax committed on
Commit
08f58a9
·
verified ·
1 Parent(s): 3c14c23

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -650,7 +650,7 @@ import torch
650
  from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
651
  from qwen_vl_utils import process_vision_info
652
 
653
- model_path = "erax/EraX-VL-7B-V1.5"
654
 
655
  model = Qwen2VLForConditionalGeneration.from_pretrained(
656
  model_path,
@@ -712,13 +712,13 @@ inputs = inputs.to("cuda")
712
  # Generation configs
713
  generation_config = model.generation_config
714
  generation_config.do_sample = True
715
- generation_config.temperature = 1.0
716
  generation_config.top_k = 1
717
- generation_config.top_p = 0.9
718
  generation_config.min_p = 0.1
719
- generation_config.best_of = 5
720
  generation_config.max_new_tokens = 2048
721
- generation_config.repetition_penalty = 1.06
722
 
723
  # Inference
724
  generated_ids = model.generate(**inputs, generation_config=generation_config)
 
650
  from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
651
  from qwen_vl_utils import process_vision_info
652
 
653
+ model_path = "erax-ai/EraX-VL-7B-V2.0-Preview"
654
 
655
  model = Qwen2VLForConditionalGeneration.from_pretrained(
656
  model_path,
 
712
  # Generation configs
713
  generation_config = model.generation_config
714
  generation_config.do_sample = True
715
+ generation_config.temperature = 0.01
716
  generation_config.top_k = 1
717
+ generation_config.top_p = 0.1
718
  generation_config.min_p = 0.1
719
+ generation_config.best_of = 1
720
  generation_config.max_new_tokens = 2048
721
+ generation_config.repetition_penalty = 1.05
722
 
723
  # Inference
724
  generated_ids = model.generate(**inputs, generation_config=generation_config)