代码部分
话不多说直接上代码:
新建文件 getdata.py
import cv2
import os

# Index used to number the next saved image pair.
id_image = 0
# NOTE(review): device index 1 — change to 0 if the stereo camera enumerates first.
camera = cv2.VideoCapture(1)
# Termination criteria for the sub-pixel corner refinement.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
# Number of inner chessboard corners (columns, rows) — change for other boards.
chessid = (6, 6)
left_save_root = './calibration/left'
right_save_root = './calibration/right'
os.makedirs(left_save_root, exist_ok=True)
os.makedirs(right_save_root, exist_ok=True)

while True:
    ret, frame = camera.read()
    if not ret:
        # Bugfix: a failed grab used to pass None into cvtColor and crash.
        break
    # The stereo camera delivers both views side by side in a single frame,
    # so split it down the middle. Crop indexing is [y0:y1, x0:x1].
    splity = frame.shape[1] // 2
    frame2 = frame[:, :splity]   # left half of the combined frame
    frame1 = frame[:, splity:]   # right half of the combined frame
    # Chessboard detection requires grayscale input.
    grayR = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    grayL = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    # Look for the full chessboard pattern in each view.
    retR, cornersR = cv2.findChessboardCorners(grayR, chessid, None)
    retL, cornersL = cv2.findChessboardCorners(grayL, chessid, None)
    cv2.imshow('imgR', frame1)
    cv2.imshow('imgL', frame2)
    # Only when both views found the board: refine, draw and offer to save.
    if retR and retL:
        corners2R = cv2.cornerSubPix(grayR, cornersR, (11, 11), (-1, -1), criteria)
        corners2L = cv2.cornerSubPix(grayL, cornersL, (11, 11), (-1, -1), criteria)
        # Draw the detected corners on the grayscale previews.
        cv2.drawChessboardCorners(grayR, chessid, corners2R, retR)
        cv2.drawChessboardCorners(grayL, chessid, corners2L, retL)
        cv2.imshow('VideoR', grayR)
        cv2.imshow('VideoL', grayL)
        # waitKey(0) blocks here: press S to save the pair, any other key to skip.
        if cv2.waitKey(0) & 0xFF == ord('s'):
            print('S PRESSES')
            str_id_image = str(id_image)
            print('Images ' + str_id_image + ' saved for right and left cameras')
            # NOTE(review): frame1 (displayed as imgR) is written to the *left*
            # folder and frame2 (imgL) to the *right* one. Kept as-is because the
            # downstream calibration was produced this way — verify the physical
            # camera orientation before changing it.
            cv2.imwrite(f'{left_save_root}/left-{str_id_image}.png', frame1)
            cv2.imwrite(f'{right_save_root}/right-{str_id_image}.png', frame2)
            id_image = id_image + 1
        else:
            print('Images not saved')
    if cv2.waitKey(1) & 0xFF == 27:  # Esc ends the capture loop
        break

# Release the camera and close all preview windows.
camera.release()
cv2.destroyAllWindows()
新建文件 calibration.py
import cv2
import os
import numpy as np
import yaml
def load_images(folder, images):
    """Load the named images from *folder* as (BGR, grayscale) pairs.

    Files that cannot be read are reported and skipped.
    """
    loaded = []
    for name in images:
        path = os.path.join(folder, name)
        bgr = cv2.imread(path)
        if bgr is None:
            print(f"无法读取图像: {path}")
            continue
        loaded.append((bgr, cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)))
    return loaded
def get_corners(imgs, pattern_size):
    """Detect and sub-pixel-refine chessboard corners in each (BGR, gray) pair.

    Returns one corner array per image in which the full board was found;
    images without a detection are skipped with a message. Each successful
    detection is also shown briefly in a preview window.
    """
    found = []
    refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    for bgr, gray in imgs:
        ok, pts = cv2.findChessboardCorners(gray, pattern_size)
        if not ok:
            print("未能检测到棋盘格角点")
            continue
        # Refine the initial corner estimates to sub-pixel accuracy.
        pts = cv2.cornerSubPix(gray, pts, (5, 5), (-1, -1), refine_criteria)
        found.append(pts)
        # Visualise the detection on a copy of the colour image.
        canvas = bgr.copy()
        cv2.drawChessboardCorners(canvas, pattern_size, pts, ok)
        cv2.imshow('Corners', cv2.resize(canvas, (1280, 800)))
        cv2.waitKey(150)
    return found
def calibrate_camera(object_points, corners, imgsize):
    """Run cv2.calibrateCamera seeded with an identity intrinsic matrix.

    Returns the full result tuple:
    (rms, cameraMatrix, distCoeffs, rvecs, tvecs).
    """
    seed_matrix = np.eye(3, dtype=np.float32)
    return cv2.calibrateCamera(object_points, corners, imgsize, seed_matrix, None)
def save_calibration_to_yaml(file_path, cameraMatrix_l, distCoeffs_l, cameraMatrix_r, distCoeffs_r, R, T, E, F, imgsize):
    """Write all stereo-calibration matrices to *file_path* as YAML.

    Each matrix is stored as an OpenCV-style node: shape, element type 'd'
    (double), and the flattened data list.
    """
    def node(mat, rows, cols):
        # One YAML mapping per matrix.
        return {'rows': rows, 'cols': cols, 'dt': 'd', 'data': mat.flatten().tolist()}

    data = {
        'imgsize': node(imgsize, 1, 2),
        'camera_matrix_left': node(cameraMatrix_l, 3, 3),
        'dist_coeff_left': node(distCoeffs_l, 1, 5),
        'camera_matrix_right': node(cameraMatrix_r, 3, 3),
        'dist_coeff_right': node(distCoeffs_r, 1, 5),
        'R': node(R, 3, 3),
        'T': node(T, 3, 1),
        'E': node(E, 3, 3),
        'F': node(F, 3, 3),
    }
    with open(file_path, 'w') as file:
        yaml.dump(data, file, default_flow_style=False)
    print(f"Calibration parameters saved to {file_path}")
def compute_reprojection_errors(objpoints, imgpoints, rvecs, tvecs, mtx, dist):
    """Return the mean reprojection error over all calibration images.

    For each image the board's 3-D points are reprojected with the estimated
    pose and intrinsics and compared against the detected corners.

    Bugfix: the original accumulated *per-image mean* errors but then divided
    the sum by the *total number of points*, shrinking the result by roughly
    the point count per image. Following the OpenCV calibration tutorial, the
    per-image error (L2 norm / number of points) is now averaged over the
    number of images.
    """
    if not objpoints:
        # No images: avoid a ZeroDivisionError.
        return 0.0
    total_error = 0.0
    for i in range(len(objpoints)):
        projected, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
        total_error += cv2.norm(imgpoints[i], projected, cv2.NORM_L2) / len(projected)
    return total_error / len(objpoints)
def calibrate(left_folder = "./calibration/left",    # folder with left-view captures
              right_folder = "./calibration/right",  # folder with right-view captures (bugfix: comment wrongly said "left")
              pattern_size = (8, 6),                 # inner chessboard corners (cols, rows)
              pattern_width = 17,                    # chessboard square edge length (mm)
              config_yaml_root = 'camera_config.yaml'):
    """Calibrate each camera individually, then stereo-calibrate the pair.

    Saves intrinsics, distortion coefficients, R/T/E/F and the image size to
    *config_yaml_root* and prints the per-camera reprojection errors.
    """
    # Sorted listings so left/right image pairs line up by filename.
    left_images = sorted(os.listdir(left_folder))
    right_images = sorted(os.listdir(right_folder))
    assert len(left_images) == len(right_images), "左右相机图像数量不一致"

    img_left = load_images(left_folder, left_images)    # list of (BGR, gray) per left image
    img_right = load_images(right_folder, right_images)

    print('开始识别角点...')
    # corners_left[i] / corners_right[i]: 2-D corner coordinates of image i.
    corners_left = get_corners(img_left, pattern_size)
    corners_right = get_corners(img_right, pattern_size)
    cv2.destroyAllWindows()
    # Every image must have yielded a full corner set; otherwise the
    # left/right correspondence below would be misaligned.
    assert len(corners_left) == len(img_left), "有图像未检测到左相机的角点"
    assert len(corners_right) == len(img_right), "有图像未检测到右相机的角点"

    # 3-D board coordinates of the corners (Z = 0 plane), scaled to mm.
    points = np.zeros((pattern_size[0] * pattern_size[1], 3), dtype=np.float32)
    points[:, :2] = np.mgrid[0:pattern_size[0], 0:pattern_size[1]].T.reshape(-1, 2) * pattern_width
    # The same physical board is seen in every image.
    object_points = [points] * len(corners_left)

    # (width, height) of the grayscale images, as calibrateCamera expects.
    imgsize = img_left[0][1].shape[::-1]
    print(imgsize)

    print('开始左相机标定')
    # Bugfix: results were previously sliced with ret[:pattern_size[1]], which
    # only worked by accident while pattern_size[1] >= 5. cv2.calibrateCamera
    # always returns exactly these five values, so unpack them directly.
    retval_l, cameraMatrix_l, distCoeffs_l, rvecs_l, tvecs_l = calibrate_camera(object_points, corners_left, imgsize)
    print('开始右相机标定')
    retval_r, cameraMatrix_r, distCoeffs_r, rvecs_r, tvecs_r = calibrate_camera(object_points, corners_right, imgsize)

    # Stereo calibration: extrinsics between the cameras (rotation R and
    # translation T) plus essential E and fundamental F matrices.
    # CALIB_FIX_INTRINSIC keeps the per-camera intrinsics fixed.
    print('开始立体标定')
    criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-5)
    ret_stereo = cv2.stereoCalibrate(object_points, corners_left, corners_right,
                                     cameraMatrix_l, distCoeffs_l,
                                     cameraMatrix_r, distCoeffs_r,
                                     imgsize, criteria=criteria_stereo,
                                     flags=cv2.CALIB_FIX_INTRINSIC)
    _, _, _, _, _, R, T, E, F = ret_stereo

    # Report the results.
    print("左相机内参:\n", cameraMatrix_l)
    print("左相机畸变系数:\n", distCoeffs_l)
    print("右相机内参:\n", cameraMatrix_r)
    print("右相机畸变系数:\n", distCoeffs_r)
    print("旋转矩阵 R:\n", R)
    print("平移向量 T:\n", T)
    print("本质矩阵 E:\n", E)
    print("基本矩阵 F:\n", F)
    print("标定完成")

    # Persist the calibration.
    save_calibration_to_yaml(config_yaml_root, cameraMatrix_l, distCoeffs_l, cameraMatrix_r, distCoeffs_r, R, T, E, F, np.array(list(imgsize)))
    print(f'保存标定结果至{config_yaml_root}')

    # Print the reprojection error of each camera as a quality check.
    print("左相机重投影误差: ", compute_reprojection_errors(object_points, corners_left, rvecs_l, tvecs_l, cameraMatrix_l, distCoeffs_l))
    print("右相机重投影误差: ", compute_reprojection_errors(object_points, corners_right, rvecs_r, tvecs_r, cameraMatrix_r, distCoeffs_r))
if __name__ == '__main__':
    # left_folder / right_folder: capture directories for each view
    # pattern_size:  inner-corner grid of the board (cols, rows)
    # pattern_width: board square edge length in mm
    # config_yaml_root: where the calibration YAML is written
    calibrate(
        left_folder="./calibration_2/left",
        right_folder="./calibration_2/right",
        pattern_size=(11, 8),
        pattern_width=25,
        config_yaml_root='./model_data/camera_config.yaml',
    )
新建 CameraConfig.py
import numpy as np
import yaml
import cv2
class CameraConfig:
    """Stereo-camera parameters loaded from a calibration YAML file.

    Construction also precomputes the rectification transforms and the
    undistort/rectify remap tables for both views.
    """

    def __init__(self, yaml_file_path):
        with open(yaml_file_path, 'r') as fh:
            cfg = yaml.safe_load(fh)

        def matrix(key, shape):
            # Each YAML node stores its matrix as a flat 'data' list.
            return np.array(cfg[key]['data']).reshape(shape)

        self.E = matrix('E', (3, 3))
        self.F = matrix('F', (3, 3))
        self.R = matrix('R', (3, 3))
        self.T = matrix('T', (3, 1))
        self.left_camera_matrix = matrix('camera_matrix_left', (3, 3))
        self.right_camera_matrix = matrix('camera_matrix_right', (3, 3))
        self.left_distortion = matrix('dist_coeff_left', (1, 5))
        self.right_distortion = matrix('dist_coeff_right', (1, 5))
        self.size = tuple(matrix('imgsize', (2,)))  # (width, height) as saved by calibration

        # Rotation-vector form of R (kept for convenience/debugging).
        self.om, _ = cv2.Rodrigues(self.R)
        # Rectification: rotations R1/R2, projections P1/P2, reprojection Q.
        (self.R1, self.R2, self.P1, self.P2, self.Q,
         self.validPixROI1, self.validPixROI2) = cv2.stereoRectify(
            self.left_camera_matrix, self.left_distortion,
            self.right_camera_matrix, self.right_distortion,
            self.size, self.R, self.T)
        # Remap tables consumed by cv2.remap to rectify incoming frames.
        self.left_map1, self.left_map2 = cv2.initUndistortRectifyMap(
            self.left_camera_matrix, self.left_distortion,
            self.R1, self.P1, self.size, cv2.CV_16SC2)
        self.right_map1, self.right_map2 = cv2.initUndistortRectifyMap(
            self.right_camera_matrix, self.right_distortion,
            self.R2, self.P2, self.size, cv2.CV_16SC2)
class RGBDProcess():
    """Stereo depth estimation via SGBM, using a saved CameraConfig calibration.

    Keeps the most recent frames, rectified images, disparity and 3-D point
    cloud as instance attributes after each getdepth() call.
    """
    def __init__(self, yaml_file_path='./model_data/camera_config.yaml'):
        # Rectification maps and the Q matrix come from the calibration YAML.
        self.camera_config = CameraConfig(yaml_file_path)
        modes = [cv2.STEREO_SGBM_MODE_SGBM, cv2.STEREO_SGBM_MODE_HH,
                 cv2.STEREO_SGBM_MODE_SGBM_3WAY, cv2.STEREO_SGBM_MODE_HH4]
        self.defaults = {
            'minDisparity':2, # minimum disparity value
            'numDisparities':192, # disparity search range
            'blockSize':2, # matched block size (NOTE(review): OpenCV docs recommend an odd value >= 3 — confirm this was intentional)
            'P1':8*3**2, # SGBM smoothness penalty 1
            'P2':32*3**2, # SGBM smoothness penalty 2
            'disp12MaxDiff':0, # max allowed left-right disparity difference
            'uniquenessRatio':20, # uniqueness ratio
            'speckleWindowSize':3, # speckle window size
            'speckleRange':3, # speckle range
            'preFilterCap':60, # pre-filter truncation value
            'mode':modes[3], # selected SGBM mode
        }
        self.sgbm = cv2.StereoSGBM_create(**self.defaults)
        # self.defaults['minDisparity'] *= -1
        # self.sgbm_right = cv2.StereoSGBM_create(**self.defaults)
        # self.sgbm_right = cv2.ximgproc.createRightMatcher(self.sgbm)
        # Results of the most recent getdepth() call (None until first use).
        self.frame1 = None
        self.frame2 = None
        self.img1_rectified = None
        self.img2_rectified = None
        self.depth = None
        self.disp = None
        self.threeD = None
        # self.wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=self.sgbm)
        # self.wls_filter.setLambda(8000.)
        # self.wls_filter.setSigmaColor(1.3)
        # self.wls_filter.setLRCthresh(24)
        # self.wls_filter.setDepthDiscontinuityRadius(3)
    def getdepth(self, frame1, frame2, model=None):
        """Rectify a stereo pair, compute SGBM disparity and 3-D coordinates.

        Returns the disparity normalised to an 8-bit image; the raw
        disparity, rectified images and point cloud are stored on self.
        `model` is accepted but unused.
        """
        # Accept either BGR or already-grayscale inputs for each view.
        imgL = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY) if len(frame1.shape) == 3 else frame1
        imgR = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY) if len(frame2.shape) == 3 else frame2
        # Rectify both views with the precomputed undistort/rectify maps.
        img1_rectified = cv2.remap(imgL, self.camera_config.left_map1, self.camera_config.left_map2, cv2.INTER_LINEAR)
        img2_rectified = cv2.remap(imgR, self.camera_config.right_map1, self.camera_config.right_map2, cv2.INTER_LINEAR)
        disparity = self.sgbm.compute(img1_rectified, img2_rectified)
        # Reproject to 3-D coordinates via Q. NOTE(review): SGBM disparities
        # are fixed-point (scaled by 16); the raw disparity is used here and
        # threeD is multiplied by 16 afterwards to compensate — X/Y/Z all
        # scale with 1/d so this looks consistent, but confirm the units.
        threeD = cv2.reprojectImageTo3D(disparity, self.camera_config.Q, handleMissingValues=True)
        threeD = threeD * 16
        # Clamp depth at 10000 (presumably mm) to tame missing-value outliers.
        threeD[:, :, 2] = np.where(threeD[:, :, 2] >= 10000, 10000, threeD[:, :, 2])
        # 8-bit normalised view of the disparity, suitable for display.
        disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        self.frame1 = frame1
        self.frame2 = frame2
        self.img1_rectified = img1_rectified
        self.img2_rectified = img2_rectified
        self.disparity = disparity
        # self.disparity_cut = disparity_cut
        self.disp = disp
        self.threeD = threeD
        return disp
if __name__ == '__main__':
    # Usage example: load a calibration and print its reprojection matrix Q.
    config = CameraConfig('./model_data/camera_config.yaml')
    print(config.Q)
使用流程
得到标定数据
本例需要一个双目摄像头并能获取图像,打开 getdata.py
选择合适的摄像头参数,括号中选择1
还是0
camera = cv2.VideoCapture(1)
准备好标定板,并得到棋盘格的内角点个数,以 $6\times6$ 为例,对应的是 $7\times7$ 的网格,修改
chessid = (6, 6)
修改左右相机视角标定数据保存路径
left_save_root = './calibration/left'
right_save_root = './calibration/right'
运行 getdata.py
当相机检测到角点后会弹出绘制角点的新窗口,保存按 S
,不保存按 C
,得到大约十几张标定数据后按 Esc
结束运行。
得到标定结果
打开 calibration.py
,修改最后的函数参数
if __name__ == '__main__':
'''
left_folder: 左视角存储路径
right_folder: 右视角存储路径
pattern_width: 标定板正方形边长
pattern_size: 网格数
config_yaml_root: 标注结果保存地址
'''
calibrate(left_folder = "./calibration_2/left",
right_folder = "./calibration_2/right",
pattern_size = (11, 8),
pattern_width = 25,
config_yaml_root = './model_data/camera_config.yaml')
运行即可得到标定结果文件 ./model_data/camera_config.yaml
在 CameraConfig.py
中调整 yaml 文件路径即可获得所有标定结果的矩阵格式