RPW committed on
Commit
37c32fd
·
verified ·
1 Parent(s): 6de0aed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -9
app.py CHANGED
@@ -4,17 +4,19 @@ import torch
4
  MODEL_NAME = "RPW/NIH-1.2_Llama-3.2-11B-Vision-Instruct"
5
  device = "cuda" if torch.cuda.is_available() else "cpu"
6
 
 
 
7
  # Load the tokenizer and model
8
  tokenizer = RobertaTokenizer.from_pretrained(MODEL_NAME)
9
  model = RobertaForSequenceClassification.from_pretrained(MODEL_NAME).to(device)
10
 
11
- # Example usage: Tokenizing some input text
12
- input_text = "Describe this image."
13
- inputs = tokenizer(input_text, return_tensors="pt").to(device)
14
-
15
- # Generating output
16
- outputs = model(**inputs)
17
- logits = outputs.logits
18
 
19
- # ใช้ softmax หรือการแปลงค่าอื่นๆ ถ้าต้องการผลลัพธ์ที่เหมาะสมกับการ classification
20
- print(logits)
 
 
4
  MODEL_NAME = "RPW/NIH-1.2_Llama-3.2-11B-Vision-Instruct"
5
  device = "cuda" if torch.cuda.is_available() else "cpu"
6
 
7
+ instruction = "You are an expert radiographer. Describe accurately what you see in this image."
8
+
9
  # Load the tokenizer and model
10
  tokenizer = RobertaTokenizer.from_pretrained(MODEL_NAME)
11
  model = RobertaForSequenceClassification.from_pretrained(MODEL_NAME).to(device)
12
 
13
+ input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
14
+ inputs = tokenizer(
15
+ image,
16
+ input_text,
17
+ add_special_tokens=False,
18
+ return_tensors="pt").to("cuda")
 
19
 
20
+ text_streamer = TextStreamer(tokenizer, skip_prompt=True)
21
+ _ = model.generate(**inputs, streamer=text_streamer, max_new_tokens=128,
22
+ use_cache=True, temperature=1.5, min_p=0.1)