SfM Algorithm: Triangulation

A typical SfM pipeline runs: feature extraction, feature matching, estimation of the essential/fundamental matrix, and finally triangulation. When a robot arm is used to observe the surroundings, however, the pose change between frames is already available from the arm, so it does not need to be recovered via the fundamental matrix.
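For reference, the bypassed step would look roughly as follows in OpenCV (a minimal sketch, not part of the original code; pts1/pts2 are matched Nx2 pixel-coordinate arrays and K is the camera intrinsic matrix, all assumed given):

import cv2


def pose_from_matches(pts1, pts2, K):
    # Essential matrix from matched pixels, with RANSAC for outlier rejection
    E, mask = cv2.findEssentialMat(pts1, pts2, K, method=cv2.RANSAC, prob=0.999, threshold=1.0)
    # Decompose E into the relative pose between the two views
    _, R, t, _ = cv2.recoverPose(E, pts1, pts2, K, mask=mask)
    return R, t  # t is only known up to scale; the arm poses below give metric scale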

In other words, the arm's information is used directly for depth estimation. Known: the hand-eye calibration, the camera extrinsics (available from the arm), and the target's pixel position in two shots. Wanted: the target's depth. From the few tests run so far the results look credible, though the error has not been evaluated quantitatively.
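In terms of homogeneous transforms, write $T_{c \to g}$ for the hand-eye calibration (camera frame to gripper frame) and $T_{g \to b}^{(i)}$ for the arm end-effector pose at viewpoint $i$. The relative pose between the two camera positions, with the viewpoint-1 camera frame taken as the world frame, is then

$$T_{\mathrm{cam1} \to \mathrm{cam2}} = \left(T_{c \to g}\right)^{-1} \left(T_{g \to b}^{(2)}\right)^{-1} T_{g \to b}^{(1)}\, T_{c \to g},$$

which is what `get_world2point2_R_t` in the code below computes.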

The formulas used are as follows:
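For a 3D point $X$ in homogeneous coordinates, each view $i$ with projection matrix $M_i = K\,[R_i \mid t_i]$ and observed pixel $(u_i, v_i)$ contributes two linear constraints, giving the DLT (direct linear transform) system

$$A X = 0, \qquad A = \begin{bmatrix} u_1 m_1^{3\top} - m_1^{1\top} \\ v_1 m_1^{3\top} - m_1^{2\top} \\ u_2 m_2^{3\top} - m_2^{1\top} \\ v_2 m_2^{3\top} - m_2^{2\top} \end{bmatrix},$$

where $m_i^{k\top}$ denotes the $k$-th row of $M_i$. $X$ is recovered as the right singular vector of $A$ associated with the smallest singular value, then de-homogenized by dividing by its fourth component. This is what `triangulation` below implements; `triangulation_opencv` cross-checks it against `cv2.triangulatePoints`.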

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time        :2022/7/27 14:59
# @Author      :weiz
# @ProjectName :robotVision3
# @File        :triangulation.py
# @Description :
import cv2
import numpy as np

from triangulation_data import *


def get_M(R, t):
    """
    获取旋转平移的矩阵
    :param R:
    :param t:
    :return:
    """
    M = [[R[0][0], R[0][1], R[0][2], t[0]],
         [R[1][0], R[1][1], R[1][2], t[1]],
         [R[2][0], R[2][1], R[2][2], t[2]]]
    return np.array(M)


def get_M_homo(R, t):
    """
    获取旋转平移的齐次矩阵
    :param R:
    :param t:
    :return:
    """
    M = [[R[0][0], R[0][1], R[0][2], t[0]],
         [R[1][0], R[1][1], R[1][2], t[1]],
         [R[2][0], R[2][1], R[2][2], t[2]],
         [0,       0,       0,       1]]
    return np.array(M)


def get_world2point2_R_t(R_camera2gripper, t_camera2gripper, point1_gripper, point2_gripper):
    """
    获取关键点1坐标系到关键点2坐标系的 R,t
    :param R_camera2gripper:手眼标定的R
    :param t_camera2gripper:手眼标定的t
    :param point1_gripper:视点1时刻机械臂末端的位姿[x,y,z,rx,ry,rz]
    :param point2_gripper:视点2时刻机械臂末端的位姿[x,y,z,rx,ry,rz]
    :return:
    """
    # Convert the arm poses from rotation-vector form to rotation matrices
    R_gripper2base_point1, _ = cv2.Rodrigues(np.array(point1_gripper[3:6]))
    t_gripper2base_point1 = np.array(point1_gripper[0:3])

    R_gripper2base_point2, _ = cv2.Rodrigues(np.array(point2_gripper[3:6]))
    t_gripper2base_point2 = np.array(point2_gripper[0:3])

    # Invert the hand-eye transform to get gripper -> camera
    R_gripper2camera = np.linalg.inv(np.array(R_camera2gripper))
    t_gripper2camera = -np.dot(R_gripper2camera, t_camera2gripper)

    # Invert the viewpoint-2 arm pose to get base -> gripper at viewpoint 2
    R_base2gripper_point2 = np.linalg.inv(np.array(R_gripper2base_point2))
    t_base2gripper_point2 = -np.dot(R_base2gripper_point2, t_gripper2base_point2)

    # Chain the homogeneous transforms (applied right to left):
    # camera(view 1) -> gripper(view 1) -> base -> gripper(view 2) -> camera(view 2)
    M = get_M_homo(R_camera2gripper, t_camera2gripper)
    M = np.matmul(get_M_homo(R_gripper2base_point1, t_gripper2base_point1), M)
    M = np.matmul(get_M_homo(R_base2gripper_point2, t_base2gripper_point2), M)
    M = np.matmul(get_M_homo(R_gripper2camera, t_gripper2camera), M)

    R_world2point2 = M[0:3, 0:3]
    t_world2point2 = M[0:3, 3]

    return R_world2point2, t_world2point2, M


def triangulation(point1, point2, ipm, R_world2point2, t_world2point2):
    """
    三角测量:利用机械臂位姿信息直接进行三维坐标估计
    :param point1:该坐标系为世界坐标系
    :param point2:
    :param ipm:相机内参
    :param R_world2point2:
    :param t_world2point2:
    :return:
    """
    ipm = np.array(ipm)
    # Projection matrices: view 1 is the reference frame, so M1 = K[I|0] and M2 = K[R|t]
    M1 = np.matmul(ipm, get_M(np.eye(3, 3), [0, 0, 0]))
    M2 = np.matmul(ipm, get_M(R_world2point2, t_world2point2))
    # Build the DLT system A·X = 0, two rows per view
    A = []
    A.append(point1[0] * M1[2] - M1[0])
    A.append(point1[1] * M1[2] - M1[1])
    A.append(point2[0] * M2[2] - M2[0])
    A.append(point2[1] * M2[2] - M2[1])
    # The solution is the right singular vector with the smallest singular value,
    # i.e. the last row of Vh
    U, sigma, Vh = np.linalg.svd(np.array(A))
    # De-homogenize: divide the first three components by the fourth
    p_2 = [Vh[3][0] / Vh[3][3], Vh[3][1] / Vh[3][3], Vh[3][2] / Vh[3][3]]

    # Sanity check: reproject the estimated point into both views and sum the
    # pixel errors against the observed points
    total_error = 0
    point1_pro, _ = cv2.projectPoints(np.array([p_2]), np.eye(3), np.array([[0.], [0.], [0.]]), ipm, None)
    point2_pro, _ = cv2.projectPoints(np.array([p_2]), R_world2point2, t_world2point2, ipm, None)
    total_error += cv2.norm(np.array([[point1]], dtype=np.float64), point1_pro, cv2.NORM_L2)
    total_error += cv2.norm(np.array([[point2]], dtype=np.float64), point2_pro, cv2.NORM_L2)
    print(total_error)

    return p_2


def triangulation_opencv(point1, point2, ipm, R_world2point2, t_world2point2):
    """
    The same triangulation via cv2.triangulatePoints, as a cross-check of the
    hand-written DLT version above.
    """
    point1 = np.array([point1], dtype=np.float32)
    point2 = np.array([point2], dtype=np.float32)

    # Projection matrices M1 = K[I|0] and M2 = K[R|t]
    M1 = np.zeros((3, 4))
    M2 = np.zeros((3, 4))
    M1[0:3, 0:3] = np.eye(3)
    M2[0:3, 0:3] = np.float32(R_world2point2)
    M2[:, 3] = np.float32(t_world2point2)
    M1 = np.matmul(ipm, M1)
    M2 = np.matmul(ipm, M2)
    # cv2.triangulatePoints expects 2xN point arrays and returns 4xN homogeneous points
    p_homo = cv2.triangulatePoints(M1, M2, point1.T, point2.T)
    p = []
    # Homogeneous -> Euclidean: divide the first three components by the fourth
    for i in range(len(p_homo[0])):
        col = p_homo[:, i]
        col /= col[3]
        p.append([col[0], col[1], col[2]])
    return p[0]


# Hand-eye calibration result: camera frame -> gripper (end-effector) frame
R_camera2gripper = [[-0.999266726198567, -0.016016251269208765, -0.0347777171142492],
                    [-0.03487664683144269, 0.005939423009193905, 0.99937397542667],
                    [-0.015799665129108013, 0.9998540908300567, -0.00649366092498505]]
t_camera2gripper = [0.057164748537088694, 0.10581519613132581, 0.1255394553568957]
# Camera intrinsic matrix
ipm = np.array([[535.05, 0, 653.005], [0, 535.01, 386.191], [0, 0, 1]])

if __name__ == "__main__":
    R_world2point2, t_world2point2, M = get_world2point2_R_t(R_camera2gripper, t_camera2gripper, point1_gripper, point2_gripper)
    p = triangulation(point1, point2, ipm, R_world2point2, t_world2point2)
    print(p)

    p = triangulation_opencv(point1, point2, ipm, R_world2point2, t_world2point2)
    print(p)
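Both implementations return the same point (up to numerical noise), expressed in the viewpoint-1 camera frame. If the target's "depth" is taken to be the z-coordinate in that frame (an assumption; it appears to be what the trailing comments in the data file record), it can be read off directly:

# assumption: depth = z-coordinate of the triangulated point in the view-1 camera frame
print("depth:", p[2])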

Data (triangulation_data.py):

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time        :2022/7/29 9:28
# @Author      :weiz
# @ProjectName :Sfm-python-master
# @File        :triangulation_data.py
# @Description :
# Copyright (C) 2021-2025 Jiangxi Institute Of Intelligent Industry Technology Innovation
import numpy as np
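
# Data format: each sample pairs the target's pixel coordinates in an image
# with the arm end-effector pose [x, y, z, rx, ry, rz] at that viewpoint
# (rx, ry, rz is a rotation vector, as consumed by cv2.Rodrigues). The number
# in the trailing comment appears to be the reference depth for that pair.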


point1 = [738, 389]  # 0.5410932
point1_gripper = [0.5122332451863898, 0.5128182769039566, 0.9630757531852082, -0.7115522167741687, -1.7916510038378088, 1.7013068394360287]
point2 = [797, 374]
point2_gripper = [0.3820852469525631, 0.5235437570806256, 0.9711089613285708, -0.7071726699555351, -1.9459688091738208, 1.7263544531858268]

# point1 = [797, 374]  # 0.54940385
# point1_gripper = [0.3820852469525631, 0.5235437570806256, 0.9711089613285708, -0.7071726699555351, -1.9459688091738208, 1.7263544531858268]
# point2 = [715, 343]
# point2_gripper = [0.5056936674804589, 0.4650883838281726, 1.0634729391460807, -0.6999271853483486, -1.749012465435611, 1.6882826312357782]

# point1 = [715, 343]  # 0.64878213
# point1_gripper = [0.5056936674804589, 0.4650883838281726, 1.0634729391460807, -0.6999271853483486, -1.749012465435611, 1.6882826312357782]
# point2 = [733, 368]
# point2_gripper = [0.5485518509025477, 0.5157901201998042, 0.9837568437877331, -0.6609672121210775, -1.7084733902118903, 1.7261963960916609]

# point1 = [733, 368]  # 0.5727386
# point1_gripper = [0.5485518509025477, 0.5157901201998042, 0.9837568437877331, -0.6609672121210775, -1.7084733902118903, 1.7261963960916609]
# point2 = [738, 389]
# point2_gripper = [0.5122332451863898, 0.5128182769039566, 0.9630757531852082, -0.7115522167741687, -1.7916510038378088, 1.7013068394360287]


def pixel_normalization(point1, point2):
    """
    Normalize the stacked pixel coordinates of the two points to zero mean and
    unit standard deviation. Not used by the arm-pose triangulation above.
    """
    points_norm = np.array([point1, point2])
    points_norm = (points_norm - np.mean(points_norm)) / np.std(points_norm)
    return points_norm[0], points_norm[1]


if __name__ == "__main__":
    p1, p2 = pixel_normalization(point1, point2)
    print(p1)
    print(p2)


Reposted from blog.csdn.net/qq_31112205/article/details/126466206