depth-pro: Apple monocular depth estimation

Depth Pro is Apple's monocular depth estimation model.


Project repository: ml-depth-pro

depth-pro onnx export


  1. Existing ONNX model on Hugging Face
    depth_pro.onnx
  2. ONNX export script
    Note: it is best to export with opset_version >= 17; otherwise the model overflows when converted to TensorRT and run with FP16 inference:
    Running layernorm after self-attention in FP16 may cause overflow. Forcing layernorm layers to run in FP32 precision can help with preserving accuracy.
  • fp16
    A regular FP32 export cannot be saved into a single file because the model exceeds 2 GB, so FP16 is used here (an external-data workaround for FP32 follows the fp32 script below).
import torch
from depth_pro import create_model_and_transforms, load_rgb

# Build the model in FP16 on GPU
model, transform = create_model_and_transforms(
    device=torch.device('cuda:0'),
    precision=torch.float16
)
model.eval()

# Dummy input matching Depth Pro's fixed 1536x1536 network input
x = torch.randn(1, 3, 1536, 1536, device='cuda:0', dtype=torch.float16)

with torch.no_grad():
    torch.onnx.export(
        model,
        x,
        "model/depth_pro.onnx",
        export_params=True,
        opset_version=17,  # >= 17 to avoid FP16 overflow in TensorRT
        do_constant_folding=True,
        input_names=['input'],
        output_names=['depth', 'fov'],
        keep_initializers_as_inputs=None,
    )
  • fp32
import torch
from depth_pro import create_model_and_transforms, load_rgb

# Build the model in the default FP32 precision on GPU
model, transform = create_model_and_transforms(
    device=torch.device('cuda:0')
)
model.eval()

x = torch.randn(1, 3, 1536, 1536, device='cuda:0')

with torch.no_grad():
    torch.onnx.export(
        model,
        x,
        "model/depth_pro.onnx",
        export_params=True,
        opset_version=17,
        do_constant_folding=True,
        input_names=['input'],
        output_names=['depth', 'fov'],
        keep_initializers_as_inputs=None,
    )
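
If the full-precision model is still needed, the 2 GB limit comes from ONNX's protobuf container, and the standard workaround is to store the weights as external data next to the graph (recent PyTorch versions already spill weights into external files automatically when an export exceeds 2 GB). Below is a minimal consolidation sketch using the onnx package; the file names are illustrative:

import onnx

# Load the FP32 export (any external weight files are picked up
# automatically), then re-save with all tensors consolidated into
# a single sidecar file to work around the 2 GB protobuf limit.
model = onnx.load("model/depth_pro.onnx")
onnx.save_model(
    model,
    "model/depth_pro_fp32.onnx",
    save_as_external_data=True,
    all_tensors_to_one_file=True,
    location="depth_pro_fp32.onnx.data",  # written next to the .onnx file
)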

depth-pro onnxruntime inference


import cv2
import numpy as np
import onnxruntime
from matplotlib import pyplot as plt
import PIL.Image
from scipy.ndimage import zoom

def preprocess_image(image, precision='float16'):
    # Resize to the network's 1536x1536 input and scale to [0, 1]
    image = cv2.resize(image, (1536, 1536))
    image = image.astype(np.float32) / 255.0

    # Normalize to [-1, 1]
    image = (image - 0.5) / 0.5

    # Cast to the precision expected by the exported model
    if precision == 'float16':
        image = image.astype(np.float16)
    elif precision == 'float32':
        image = image.astype(np.float32)
    elif precision == 'float64':
        image = image.astype(np.float64)

    image = np.transpose(image, (2, 0, 1))  # HWC -> CHW

    return image

def resize_inverse_depth(inverse_depth, H, W, interpolation_mode='bilinear'):
    # Input is NCHW; read off the original spatial size
    original_shape = inverse_depth.shape
    _, _, original_height, original_width = original_shape

    # Per-axis zoom factors; only height and width are rescaled
    scale_factor_y = H / original_height
    scale_factor_x = W / original_width
    scale_factors = (1, 1, scale_factor_y, scale_factor_x)

    # Interpolate with scipy.ndimage.zoom
    if interpolation_mode == 'bilinear':
        resized_depth = zoom(inverse_depth, scale_factors, order=1)
    elif interpolation_mode == 'nearest':
        resized_depth = zoom(inverse_depth, scale_factors, order=0)
    else:
        raise ValueError(f"Unsupported interpolation mode: {interpolation_mode}")

    return resized_depth

def compute_depth(W, H, fov_deg, canonical_inverse_depth, f_px=None, resize=False, interpolation_mode='bilinear'):
    # Recover the focal length in pixels from the predicted field of view
    if f_px is None:
        f_px = 0.5 * W / np.tan(0.5 * np.radians(fov_deg))
    # Rescale the canonical inverse depth to the actual image width
    inverse_depth = canonical_inverse_depth * (W / f_px)

    if resize:
        inverse_depth = resize_inverse_depth(inverse_depth, H, W, interpolation_mode)
    depth = 1.0 / np.clip(inverse_depth, 1e-4, 1e4)
    return depth

def extract_foreground(image, depth_map, depth_threshold=3.0):
    foreground_mask = depth_map < depth_threshold
    foreground_mask = foreground_mask.squeeze()
    foreground_image = np.zeros_like(image)
    foreground_image[foreground_mask] = image[foreground_mask]
    return foreground_image

# Path to the exported ONNX model (adjust to your export location)
session = onnxruntime.InferenceSession("models/depth_pro_1.onnx", None)

input_name = session.get_inputs()[0].name

image = cv2.imread("images/bus.jpg")[..., ::-1]  # BGR -> RGB
h, w, _ = image.shape

input_data = preprocess_image(image)
input_data = np.expand_dims(input_data, axis=0)  # add batch dimension

raw_result = session.run(None, {input_name: input_data})
canonical_inverse_depth = raw_result[0]  # canonical inverse depth map
fov_deg = raw_result[1]                  # predicted field of view in degrees

depth = compute_depth(w, h, fov_deg, canonical_inverse_depth, resize=True)

# Clamp the visualization range to [0.1 m, 250 m]
inverse_depth = 1 / depth
max_invdepth_vizu = min(inverse_depth.max(), 1 / 0.1)
min_invdepth_vizu = max(1 / 250, inverse_depth.min())

inverse_depth_normalized = (inverse_depth - min_invdepth_vizu) / (max_invdepth_vizu - min_invdepth_vizu)

inverse_depth_normalized = np.squeeze(inverse_depth_normalized)

cmap = plt.get_cmap("turbo")

color_depth = (cmap(inverse_depth_normalized)[..., :3] * 255).astype(np.uint8)

color_map_output_file = "result/result.jpg"
PIL.Image.fromarray(color_depth).save(color_map_output_file, format="JPEG", quality=90)

[Figure: colorized inverse-depth map saved to result/result.jpg]
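
The extract_foreground helper above is defined but never called. As a quick usage sketch (the 3 m threshold is illustrative), it blanks out every pixel the model places beyond the given distance:

# Keep only pixels estimated to be closer than 3 m; the rest stays black
foreground = extract_foreground(image, np.squeeze(depth), depth_threshold=3.0)
PIL.Image.fromarray(foreground).save("result/foreground.jpg", format="JPEG", quality=90)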

depth-pro tensorrt inference

Project repository: ml-depth-pro-trt10

  1. Export with opset_version >= 17; otherwise the model overflows when converted to TensorRT and run with FP16 inference, and the FP16 engine cannot produce correct results.

  2. FP16 inference now runs successfully; see the engine-build sketch below.

[Figure: FP16 TensorRT inference result]
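
For reference, here is a minimal FP16 engine-build sketch using the TensorRT 10 Python API. Paths are illustrative, and the optional precision-constraint loop pins normalization layers to FP32 per the overflow note above (it should be unnecessary when the ONNX was exported with opset >= 17):

import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
network = builder.create_network(0)  # TensorRT 10: networks are always explicit-batch
parser = trt.OnnxParser(network, logger)

# Parse the exported ONNX graph
with open("model/depth_pro.onnx", "rb") as f:
    if not parser.parse(f.read()):
        for i in range(parser.num_errors):
            print(parser.get_error(i))
        raise RuntimeError("failed to parse ONNX model")

config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.FP16)  # allow FP16 kernels

# Optional: force normalization layers to run in FP32 to avoid
# FP16 overflow (LayerType.NORMALIZATION requires TensorRT >= 8.6)
config.set_flag(trt.BuilderFlag.OBEY_PRECISION_CONSTRAINTS)
for i in range(network.num_layers):
    layer = network.get_layer(i)
    if layer.type == trt.LayerType.NORMALIZATION:
        layer.precision = trt.float32
        layer.set_output_type(0, trt.float32)

# Build and serialize the engine
engine = builder.build_serialized_network(network, config)
with open("model/depth_pro_fp16.engine", "wb") as f:
    f.write(engine)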