import comet_ml

comet_ml.init(anonymous=True, project_name="3: OWL-ViT + SAM")
exp = comet_ml.Experiment()

# To display the image
from PIL import Image

# Fetch the logged dataset artifact and download it locally
# so that the image path used below resolves
logged_artifact = exp.get_artifact("L3-data", "anmorgan24")
local_artifact = logged_artifact.download("./")

# Display the image
raw_image = Image.open("L3_data/dogs.jpg")
raw_image

from transformers import pipeline

OWL_checkpoint = "./models/google/owlvit-base-patch32"

# Load the model
detector = pipeline(
    model=OWL_checkpoint,
    task="zero-shot-object-detection"
)

# What you want to identify in the image
text_prompt = "dog"

output = detector(
    raw_image,
    candidate_labels=[text_prompt]
)

# Print the output to identify the bounding boxes detected
# (a list of dicts, each with "score", "label", and "box" keys)
output

from utils import preprocess_outputs

input_scores, input_labels, input_boxes = preprocess_outputs(output)

from utils import show_boxes_and_labels_on_image

# Show the image with the bounding boxes
show_boxes_and_labels_on_image(
    raw_image,
    input_boxes[0],
    input_labels,
    input_scores
)

# Model distillation
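
# --- Illustrative sketch: the `utils` helpers used above --------------------
# The `utils` module appears to be provided alongside this notebook, so its
# actual implementation is not shown here. As a rough, hypothetical sketch:
# `preprocess_outputs` would unpack the Hugging Face zero-shot-object-detection
# output (a list of dicts with "score", "label", and "box" keys) into parallel
# lists, and `show_boxes_and_labels_on_image` would draw those boxes with
# matplotlib. Everything below is an assumption for illustration only, not the
# notebook's actual code.

import matplotlib.pyplot as plt
import matplotlib.patches as patches


def preprocess_outputs(output):
    # Split the pipeline's detections into scores, labels, and boxes.
    input_scores = [det["score"] for det in output]
    input_labels = [det["label"] for det in output]
    # Boxes as [xmin, ymin, xmax, ymax], wrapped in an extra list (a batch of
    # one image) to match the `input_boxes[0]` indexing used above.
    input_boxes = [[
        [det["box"]["xmin"], det["box"]["ymin"],
         det["box"]["xmax"], det["box"]["ymax"]]
        for det in output
    ]]
    return input_scores, input_labels, input_boxes


def show_boxes_and_labels_on_image(image, boxes, labels, scores):
    # Overlay each detected box with its label and confidence score.
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.imshow(image)
    for box, label, score in zip(boxes, labels, scores):
        xmin, ymin, xmax, ymax = box
        ax.add_patch(patches.Rectangle(
            (xmin, ymin), xmax - xmin, ymax - ymin,
            linewidth=2, edgecolor="red", facecolor="none"
        ))
        ax.text(xmin, ymin - 5, f"{label}: {score:.2f}", color="red")
    ax.axis("off")
    plt.show()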