Ramji committed on
Commit
e34e79e
·
verified ·
1 Parent(s): 56be026

remove cuda

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -89,7 +89,7 @@ def load_image(image_file, input_size=448, max_num=12):
89
  def prediction(model, image_file, question):
90
  question = f"<image>\n{question}"
91
  # set the max number of tiles in `max_num`
92
- pixel_values = load_image(image_file, max_num=12).to(torch.bfloat16).cuda()
93
  generation_config = dict(max_new_tokens=1024, do_sample=False)
94
 
95
  response = model.chat(tokenizer, pixel_values, question, generation_config)
@@ -103,7 +103,7 @@ intern_model = AutoModel.from_pretrained(
103
  torch_dtype=torch.bfloat16,
104
  low_cpu_mem_usage=True,
105
  use_flash_attn=False,
106
- trust_remote_code=True).eval().cuda()
107
  tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
108
 
109
  # Title of the Streamlit app
 
89
  def prediction(model, image_file, question):
90
  question = f"<image>\n{question}"
91
  # set the max number of tiles in `max_num`
92
+ pixel_values = load_image(image_file, max_num=12).to(torch.bfloat16)
93
  generation_config = dict(max_new_tokens=1024, do_sample=False)
94
 
95
  response = model.chat(tokenizer, pixel_values, question, generation_config)
 
103
  torch_dtype=torch.bfloat16,
104
  low_cpu_mem_usage=True,
105
  use_flash_attn=False,
106
+ trust_remote_code=True).eval()
107
  tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
108
 
109
  # Title of the Streamlit app