DepthPro
Models and Spaces using the DepthPro model for Monocular Depth Estimation, Image Segmentation, and Image Super-Resolution.
Install the required libraries:
pip install -q numpy pillow torch torchvision
pip install -q git+https://github.com/geetu040/transformers.git@depth-pro-projects#egg=transformers
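You can verify the environment before continuing (a minimal sanity check using only standard PyTorch and transformers attributes):
import torch
import transformers
# confirm the custom transformers build imports and whether a GPU is visible
print(transformers.__version__, torch.__version__, torch.cuda.is_available())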
Import the required libraries:
import requests
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from huggingface_hub import hf_hub_download
import matplotlib.pyplot as plt
# custom installation from this PR: https://github.com/huggingface/transformers/pull/34583
# !pip install git+https://github.com/geetu040/transformers.git@depth-pro-projects#egg=transformers
from transformers import DepthProConfig, DepthProImageProcessorFast, DepthProForDepthEstimation
Load the model and image processor:
checkpoint = "geetu040/DepthPro"
revision = "project"
image_processor = DepthProImageProcessorFast.from_pretrained(checkpoint, revision=revision)
model = DepthProForDepthEstimation.from_pretrained(checkpoint, revision=revision)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
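As a quick sanity check after loading, you can report the model size; the commented half-precision variant below uses the standard torch_dtype argument of from_pretrained (fp16 accuracy for this checkpoint is an assumption, untested here):
# parameter count: plain PyTorch, works for any nn.Module
print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")
# optional: load in half precision on a GPU to reduce memory
# model = DepthProForDepthEstimation.from_pretrained(
#     checkpoint, revision=revision, torch_dtype=torch.float16
# ).to(device)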
Inference:
# inference
url = "https://huggingface.co/spaces/geetu040/DepthPro_Segmentation_Human/resolve/main/assets/examples/man_with_arms_open.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image = image.convert("RGB")
# prepare image for the model
inputs = image_processor(images=image, return_tensors="pt")
inputs = {k: v.to(device) for k, v in inputs.items()}
with torch.no_grad():
    outputs = model(**inputs)
# interpolate to original size
post_processed_output = image_processor.post_process_depth_estimation(
    outputs, target_sizes=[(image.height, image.width)],
)
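# note: the network runs at a fixed internal resolution set by the image processor;
# post_process_depth_estimation resizes the prediction back to target_sizes
print(post_processed_output[0]["predicted_depth"].shape)  # -> (image.height, image.width)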
# convert the depth map to an 8-bit grayscale image
depth = post_processed_output[0]["predicted_depth"]
depth = (depth - depth.min()) / (depth.max() - depth.min())  # min-max normalize to [0, 1]
depth = depth * 255.
depth = depth.detach().cpu().numpy()
depth = Image.fromarray(depth.astype("uint8"))
# visualize the prediction
fig, axes = plt.subplots(1, 2, figsize=(20, 20))
axes[0].imshow(image)
axes[0].set_title(f'Image {image.size}')
axes[0].axis('off')
axes[1].imshow(depth)
axes[1].set_title(f'Depth {depth.size}')
axes[1].axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
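Because DepthPro predicts metric depth, the map can also be back-projected into a 3-D point cloud with a simple pinhole camera model. The sketch below assumes a hypothetical focal length of 0.5 * width pixels and a principal point at the image center; it does not use the model's own focal-length estimate:
import numpy as np

depth_m = post_processed_output[0]["predicted_depth"].detach().cpu().numpy()
h, w = depth_m.shape
fx = fy = 0.5 * w          # hypothetical focal length in pixels (assumption)
cx, cy = w / 2.0, h / 2.0  # principal point at the image center (assumption)
u, v = np.meshgrid(np.arange(w), np.arange(h))
# pinhole back-projection: X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy
points = np.stack([(u - cx) * depth_m / fx, (v - cy) * depth_m / fy, depth_m], axis=-1)
points = points.reshape(-1, 3)
print(points.shape)  # (h * w, 3): one XYZ point per pixel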