Computer Vision Cookbook Notes 14 (Computing the Homography Between Two Images)


Computing the homography between two images

The homography matrix H describes the projective mapping between two planes. If the feature points in the scene all lie on the same plane (for example, a wall or the ground), the camera motion can be estimated through this homography. This situation is common with the downward-looking camera carried by a drone or the upward-looking camera carried by a robot vacuum.
From the projection relation given in the reference, we can derive the following:
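(The equation images from the original post are not reproduced here; the derivation below reconstructs the standard planar case, under the assumption that the 3D points $P$ lie on a plane with unit normal $n$ at distance $d$ from the first camera, i.e. $n^T P + d = 0$, that $K$ is the camera intrinsic matrix, and that $(R, t)$ is the relative motion between the two views.)

With $p_1 \simeq K P$ and $p_2 \simeq K(RP + t)$, where $\simeq$ denotes equality up to a nonzero scale factor, the plane constraint gives $-\frac{n^T P}{d} = 1$, so

$$
p_2 \simeq K\left(RP + t\cdot\left(-\frac{n^T P}{d}\right)\right)
    = K\left(R - \frac{t\,n^T}{d}\right)P
    \simeq K\left(R - \frac{t\,n^T}{d}\right)K^{-1}\,p_1
    \triangleq H\,p_1 .
$$

For a purely rotating camera (the "pure rotation case" labelled in the program below), setting $t = 0$ reduces this to $H = K R K^{-1}$.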
Here H is the homography matrix.
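As a quick illustration of the motion-estimation use mentioned above, here is a small sketch (not part of the original post) that decomposes an already-computed homography into the candidate $(R, t, n)$ solutions of $H = K(R - t\,n^T/d)K^{-1}$ using cv::decomposeHomographyMat from the calib3d module. The intrinsic matrix K and the homography values below are made-up placeholders.

#include <iostream>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

int main()
{
	// hypothetical camera intrinsics (focal lengths and principal point are placeholders)
	cv::Mat K = (cv::Mat_<double>(3, 3) << 800,   0, 320,
	                                         0, 800, 240,
	                                         0,   0,   1);

	// a homography between the two views; in practice this would be the matrix
	// returned by cv::findHomography (placeholder values here)
	cv::Mat H = (cv::Mat_<double>(3, 3) <<  1.00, 0.02, 15.0,
	                                       -0.01, 1.00,  5.0,
	                                        0.00, 0.00,  1.0);

	// recover up to four candidate motions (R, t) and plane normals n
	std::vector<cv::Mat> rotations, translations, normals;
	int solutions = cv::decomposeHomographyMat(H, K, rotations, translations, normals);

	std::cout << "Candidate decompositions: " << solutions << std::endl;
	for (int i = 0; i < solutions; i++) {
		std::cout << "R" << i << " =\n" << rotations[i] << "\n"
		          << "t" << i << " = " << translations[i].t() << "\n"
		          << "n" << i << " = " << normals[i].t() << std::endl;
	}
	return 0;
}

cv::decomposeHomographyMat returns up to four candidate solutions; the physically valid one is normally selected by requiring that the plane normal faces the camera and that the matched points have positive depth in both views.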

Example program

/*------------------------------------------------------------------------------------------*\
This file contains material supporting chapter 10 of the book:
OpenCV3 Computer Vision Application Programming Cookbook
Third Edition
by Robert Laganiere, Packt Publishing, 2016.

This program is free software; permission is hereby granted to use, copy, modify,
and distribute this source code, or portions thereof, for any purpose, without fee,
subject to the restriction that the copyright notice may not be removed
or altered from any source or altered source distribution.
The software is released on an as-is basis and without any warranties of any kind.
In particular, the software is not guaranteed to be fault-tolerant or free from failure.
The author disclaims all warranties with regard to this software, any use,
and any consequent failure, is purely the responsibility of the user.

Copyright (C) 2016 Robert Laganiere, www.laganiere.name
\*------------------------------------------------------------------------------------------*/

#include <iostream>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <opencv2/stitching.hpp>

int main()
{
    
    
	// Read input images
	cv::Mat image1= cv::imread("parliament1.jpg",0);
	cv::Mat image2= cv::imread("parliament2.jpg",0);
	if (!image1.data || !image2.data)
		return 0; 

    // Display the images
	cv::namedWindow("Image 1");
	cv::imshow("Image 1",image1);
	cv::namedWindow("Image 2");
	cv::imshow("Image 2",image2);
	
	// vector of keypoints and descriptors
	std::vector<cv::KeyPoint> keypoints1;
	std::vector<cv::KeyPoint> keypoints2;
	cv::Mat descriptors1, descriptors2;

	// 1. Construction of the SIFT feature detector 
	cv::Ptr<cv::Feature2D> ptrFeature2D = cv::xfeatures2d::SIFT::create(74); // retain only the 74 strongest keypoints

	// 2. Detection of the SIFT features and associated descriptors
	ptrFeature2D->detectAndCompute(image1, cv::noArray(), keypoints1, descriptors1);
	ptrFeature2D->detectAndCompute(image2, cv::noArray(), keypoints2, descriptors2);

	std::cout << "Number of feature points (1): " << keypoints1.size() << std::endl;
	std::cout << "Number of feature points (2): " << keypoints2.size() << std::endl;

	// 3. Match the two image descriptors
   
	// Construction of the matcher with crosscheck 
	cv::BFMatcher matcher(cv::NORM_L2, true);                            
	// matching
	std::vector<cv::DMatch> matches;
	matcher.match(descriptors1,descriptors2,matches);

	// draw the matches
	cv::Mat imageMatches;
	cv::drawMatches(image1,keypoints1,  // 1st image and its keypoints
		            image2,keypoints2,  // 2nd image and its keypoints
					matches,			// the matches
					imageMatches,		// the image produced
					cv::Scalar(255,255,255),  // color of the lines
					cv::Scalar(255,255,255),  // color of the keypoints
					std::vector<char>(),
					2); 
	cv::namedWindow("Matches (pure rotation case)");
	cv::imshow("Matches (pure rotation case)",imageMatches);
	
	// Convert keypoints into Point2f
	std::vector<cv::Point2f> points1, points2;
	for (std::vector<cv::DMatch>::const_iterator it= matches.begin();
		 it!= matches.end(); ++it) {
    
    

			 // Get the position of left keypoints
			 float x= keypoints1[it->queryIdx].pt.x;
			 float y= keypoints1[it->queryIdx].pt.y;
			 points1.push_back(cv::Point2f(x,y));
			 // Get the position of right keypoints
			 x= keypoints2[it->trainIdx].pt.x;
			 y= keypoints2[it->trainIdx].pt.y;
			 points2.push_back(cv::Point2f(x,y));
	}

	std::cout << points1.size() << " " << points2.size() << std::endl; 

	// Find the homography between image 1 and image 2
	std::vector<char> inliers;
	cv::Mat homography= cv::findHomography(
		points1,points2, // corresponding points
		inliers,	     // output inlier matches
		cv::RANSAC,	     // RANSAC method
		1.);	         // max reprojection error (in pixels) for a match to be counted as an inlier
	
    // Draw the inlier points
	cv::drawMatches(image1, keypoints1,  // 1st image and its keypoints
		image2, keypoints2,  // 2nd image and its keypoints
		matches,			// the matches
		imageMatches,		// the image produced
		cv::Scalar(255, 255, 255),  // color of the lines
		cv::Scalar(255, 255, 255),  // color of the keypoints
		inliers,
		2);
	cv::namedWindow("Homography inlier points");
	cv::imshow("Homography inlier points", imageMatches);


	// Once the homography has been computed, the points of one image can be transferred
	// to the other image. In fact, every pixel can be transferred, so the whole image can
	// be re-rendered from the viewpoint of the other one. This process is called image
	// stitching (mosaicing) and is commonly used to build a large panorama from several
	// images. (A minimal per-point version of this transfer, using cv::perspectiveTransform,
	// is sketched after the full listing.)
	// Warp image 1 onto the viewpoint of image 2
	cv::Mat result;
	cv::warpPerspective(image1, // input image
		result,			// output image
		homography,		// homography
		cv::Size(2*image1.cols,image1.rows)); // size of output image

	// Copy image 2 onto the first half of the mosaic
	// (image 2 defines the reference frame of the warp)
	cv::Mat half(result,cv::Rect(0,0,image2.cols,image2.rows));
	image2.copyTo(half); // copy image2 into the ROI of the mosaic image

    // Display the warp image
	cv::namedWindow("Image mosaic");
	cv::imshow("Image mosaic",result);

	// Read the input images again, this time in color, for the high-level Stitcher
	std::vector<cv::Mat> images;
	images.push_back(cv::imread("parliament1.jpg"));
	images.push_back(cv::imread("parliament2.jpg"));

	cv::Mat panorama; // output panorama
	// create the stitcher
	cv::Stitcher stitcher = cv::Stitcher::createDefault();
	// stitch the images
	cv::Stitcher::Status status = stitcher.stitch(images, panorama);

	if (status == cv::Stitcher::OK) // success?
	{
    
    
		// Display the panorama
		cv::namedWindow("Panorama");
		cv::imshow("Panorama", panorama);
	}

	cv::waitKey();
	return 0;
}
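As noted in the comment before the warping step, once the homography is known any point of image 1 can be mapped into image 2, and warping the whole image is just this mapping applied to every pixel. Below is a minimal per-point sketch (not part of the original program) using cv::perspectiveTransform; the homography values and the point coordinates are placeholders.

#include <iostream>
#include <vector>
#include <opencv2/core.hpp>

int main()
{
	// a homography mapping image-1 coordinates to image-2 coordinates
	// (placeholder values; in the program above it would come from cv::findHomography)
	cv::Mat H = (cv::Mat_<double>(3, 3) << 1.0, 0.0, 120.0,
	                                       0.0, 1.0,  -8.0,
	                                       0.0, 0.0,   1.0);

	// a few points expressed in image-1 coordinates
	std::vector<cv::Point2f> pointsInImage1 = { {50.f, 60.f}, {200.f, 150.f}, {320.f, 240.f} };
	std::vector<cv::Point2f> pointsInImage2;

	// apply the projective mapping p2 ~ H p1 (including the homogeneous division) to each point
	cv::perspectiveTransform(pointsInImage1, pointsInImage2, H);

	for (size_t i = 0; i < pointsInImage1.size(); i++)
		std::cout << pointsInImage1[i] << " -> " << pointsInImage2[i] << std::endl;

	return 0;
}

This is the per-point counterpart of the dense cv::warpPerspective call used in the program above.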

Reposted from blog.csdn.net/jlm7689235/article/details/108180075