// 现在正在做的一个项目,现将代码备份一下,循环可以设置任意幅图像拼接。
#ifndef debug
#include <opencv2/opencv.hpp>
#include<opencv2/nonfree/nonfree.hpp>
#include<opencv2/legacy/legacy.hpp>
#include<vector>
#include<iostream>
#include<sstream>
#include<string>
#include<time.h>
using namespace std;
using namespace cv;
#include "SurfStitch.h"
// Demo driver: incrementally stitches a numbered image sequence
// ("1 (14).jpg", "1 (15).jpg", ... under a hard-coded folder) into a
// single panorama with SurfStich(), timing the whole run with clock().
int main() {
    //Mat img1 = imread("1.jpg");
    Mat img1 = imread("E:\\DATA\\银行票据\\1\\1 (14).jpg");
    // Bail out early instead of crashing inside transpose() on an empty Mat.
    if (img1.empty()) {
        cout << "Failed to load the first image." << endl;
        return -1;
    }
    Mat stitchedImage;
    int n;
    cout << "Dataset2" << endl;
    cout << "请输入想拼接的图片数量(大于1小于18)" << endl;
    // NOTE(review): n is used as the LAST file index below (loop runs
    // k = 15..n), not as a count as the prompt suggests — confirm intent.
    if (!(cin >> n)) {
        cout << "Invalid input." << endl;
        return -1;
    }
    cout << "输入成功,开始计时" << endl;
    clock_t start, finish;
    double totaltime;
    start = clock();
    // Rotate the first image 90 degrees (transpose + vertical flip).
    transpose(img1, img1);
    flip(img1, img1, 0);
    for (int k = 15; k <= n; k++) {
        // Build the file name "1 (k).jpg" in the same hard-coded folder.
        stringstream stream;
        string str;
        stream << k;
        stream >> str;
        string filename = "1 ";
        filename = "E:\\DATA\\银行票据\\1\\" + filename + "(" + str + ")" + ".jpg";
        cout << "正在拼接......." << filename << endl;
        Mat img = imread(filename);
        if (img.empty())
        {
            // A missing file ends the sequence.
            break;
        }
        // Same 90-degree rotation as applied to the first image.
        transpose(img, img);
        flip(img, img, 0);
        imwrite("1.bmp", img);
        // Stitch the new image onto the running panorama and carry the
        // result forward as the new left image.
        stitchedImage = SurfStich(img1, img);
        imwrite("ResultImage1.jpg", stitchedImage);
        img1 = stitchedImage;
    }
    finish = clock();
    totaltime = (double)(finish - start) / CLOCKS_PER_SEC;
    cout << "拼接成功" << endl;
    cout << "拼接花费总时间为:" << totaltime << "秒!" << endl;
    // Guard: if the loop never produced a result (e.g. n < 15),
    // imshow()/imwrite() on an empty Mat would throw.
    if (!stitchedImage.empty()) {
        imshow("ResultImage", stitchedImage);
        imwrite("ResultImage.jpg", stitchedImage);
        waitKey(0);
    }
    return 0;
}
#endif
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);
// Positions of the four corners of the warped (right) image after the
// homography has been applied, in the left image's coordinate frame.
struct four_corners_t
{
    Point2f left_top;      // image point (0, 0) after the transform
    Point2f left_bottom;   // image point (0, rows) after the transform
    Point2f right_top;     // image point (cols, 0) after the transform
    Point2f right_bottom;  // image point (cols, rows) after the transform
};
four_corners_t corners;    // global result, written by CalcCorners()
void CalcCorners(const Mat& H, const Mat& src)
{
double v2[] = { 0, 0, 1 };//左上角
double v1[3];//变换后的坐标值
Mat V2 = Mat(3, 1, CV_64FC1, v2); //列向量
Mat V1 = Mat(3, 1, CV_64FC1, v1); //列向量
V1 = H * V2;
//左上角(0,0,1)
cout << "V2: " << V2 << endl;
cout << "V1: " << V1 << endl;
corners.left_top.x = v1[0] / v1[2];
corners.left_top.y = v1[1] / v1[2];
//左下角(0,src.rows,1)
v2[0] = 0;
v2[1] = src.rows;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2); //列向量
V1 = Mat(3, 1, CV_64FC1, v1); //列向量
V1 = H * V2;
corners.left_bottom.x = v1[0] / v1[2];
corners.left_bottom.y = v1[1] / v1[2];
//右上角(src.cols,0,1)
v2[0] = src.cols;
v2[1] = 0;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2); //列向量
V1 = Mat(3, 1, CV_64FC1, v1); //列向量
V1 = H * V2;
corners.right_top.x = v1[0] / v1[2];
corners.right_top.y = v1[1] / v1[2];
//右下角(src.cols,src.rows,1)
v2[0] = src.cols;
v2[1] = src.rows;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2); //列向量
V1 = Mat(3, 1, CV_64FC1, v1); //列向量
V1 = H * V2;
corners.right_bottom.x = v1[0] / v1[2];
corners.right_bottom.y = v1[1] / v1[2];
}
// Blends the overlap between the left image `img1` and the warped right
// image `trans` into `dst` with a linear feathering weight, so the seam
// between the two images looks natural.
// All three Mats are expected to be 3-channel 8-bit with equal row counts
// (as constructed by the callers in this file).
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
    // Left boundary of the overlap: the smaller x of the warped image's two
    // left corners. Clamp at 0 — a corner projected left of the image would
    // otherwise drive the column index negative (out-of-bounds read).
    int start = MIN(corners.left_top.x, corners.left_bottom.x);
    if (start < 0) start = 0;
    double processWidth = img1.cols - start; // width of the overlap band
    if (processWidth <= 0) return;           // no overlap: nothing to blend
    int rows = dst.rows;
    int cols = img1.cols;                    // pixel columns (x3 channels when indexing)
    double alpha = 1;                        // weight of img1's pixel
    for (int i = 0; i < rows; i++)
    {
        uchar* p = img1.ptr<uchar>(i);       // row i of the left image
        uchar* t = trans.ptr<uchar>(i);      // row i of the warped image
        uchar* d = dst.ptr<uchar>(i);        // row i of the output
        for (int j = start; j < cols; j++)
        {
            // A pure black pixel in trans means "no warped data here":
            // keep img1's pixel unchanged.
            if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
            {
                alpha = 1;
            }
            else
            {
                // img1's weight falls off linearly with the distance from
                // the overlap's left edge, feathering img1 into trans.
                alpha = (processWidth - (j - start)) / processWidth;
            }
            // saturate_cast guards against rounding pushing the blend
            // outside the 0..255 range.
            d[j * 3]     = saturate_cast<uchar>(p[j * 3] * alpha + t[j * 3] * (1 - alpha));
            d[j * 3 + 1] = saturate_cast<uchar>(p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha));
            d[j * 3 + 2] = saturate_cast<uchar>(p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha));
        }
    }
}
// dwhui: top-level stitching entry point.
// Detects SURF features in both images, matches them with a FLANN matcher
// plus Lowe's ratio test, decides which image sits on the left, warps the
// right image onto the left one with the estimated homography, and blends
// the seam. Returns the stitched image; returns img1 unchanged when fewer
// than 4 good matches are found or the warped canvas would be narrower
// than the left image.
Mat SurfStich(Mat img1, Mat img2)
{
Mat image1, image2;
// NOTE(review): imread yields BGR, so CV_RGB2GRAY swaps the channel
// weights — harmless for feature detection, but confirm if exact
// luminance matters.
cvtColor(img1, image1, CV_RGB2GRAY);
cvtColor(img2, image2, CV_RGB2GRAY);
// Detect SURF keypoints (Hessian threshold 2000).
SurfFeatureDetector Detector(2000);
vector<KeyPoint> keyPoint1, keyPoint2;
Detector.detect(image1, keyPoint1);
Detector.detect(image2, keyPoint2);
// Compute SURF descriptors for the matching step below.
SurfDescriptorExtractor Descriptor;
Mat imageDesc1, imageDesc2;
Descriptor.compute(image1, keyPoint1, imageDesc1);
Descriptor.compute(image2, keyPoint2, imageDesc2);
FlannBasedMatcher matcher;
vector<vector<DMatch> > matchePoints;
vector<DMatch> GoodMatchePoints;
// img1's descriptors form the train set; img2's are the query set.
vector<Mat> train_desc(1, imageDesc1);
matcher.add(train_desc);
matcher.train();
matcher.knnMatch(imageDesc2, matchePoints, 2);
cout << "total match points: " << matchePoints.size() << endl;
// Lowe's ratio test: keep a match only when the best neighbour is much
// closer than the second best (ratio 0.4).
for (int i = 0; i < matchePoints.size(); i++)
{
if (matchePoints[i][0].distance < 0.4 * matchePoints[i][1].distance)
{
GoodMatchePoints.push_back(matchePoints[i][0]);
}
}
Mat first_match;
drawMatches(img2, keyPoint2, img1, keyPoint1, GoodMatchePoints, first_match);
//imwrite("first_match.bmp", first_match);
// Decide which input is the left image: count, per image, how many good
// matches fall in its right half; the image with the larger right-half
// fraction is taken to be the left one.
int propimg1 = 0, propimg2 = 0;
for (int i = 0; i < GoodMatchePoints.size(); i++) {
if (keyPoint2[GoodMatchePoints[i].queryIdx].pt.x > img2.cols/2 ) {
propimg2++;
}
if (keyPoint1[GoodMatchePoints[i].trainIdx].pt.x > img1.cols/2 ) {
propimg1++;
}
}
bool flag = false;
Mat imgright;
Mat imgleft;
if ((propimg1 / (GoodMatchePoints.size() + 0.0)) > (propimg2 / (GoodMatchePoints.size() + 0.0))) {
imgleft = img1.clone();
flag = true;
}
else {
imgleft = img2.clone();
flag = false;
}
if (flag) {
imgright = img2.clone();
flag = false;
}
else {
imgright = img1.clone();
flag = true; //dwhui: added marker — no swap is needed in this case
}
// Invariant after the two blocks above: flag == true means img1 is the
// RIGHT image (so the homography must map points1 -> points2);
// flag == false means img2 is the right image.
vector<Point2f> imagePoints1, imagePoints2;
for (int i = 0; i<GoodMatchePoints.size(); i++)
{
imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
}
if (GoodMatchePoints.size()<4)
{
return img1;
}
// Estimate the 3x3 homography mapping the right image onto the left one.
Mat homo;
if (flag)
{
homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);// needs at least 4 point pairs, otherwise it crashes
}
else
{
homo = findHomography(imagePoints2, imagePoints1, CV_RANSAC);
}
////getPerspectiveTransform could be used instead, but it takes exactly 4 points and gives worse results
//Mat homo=getPerspectiveTransform(imagePoints1,imagePoints2);
cout << "变换矩阵为:\n" << homo << endl << endl; // print the homography
// Compute where the right image's four corners land after warping
// (fills the global `corners`).
CalcCorners(homo, imgright);
cout << "left_top:" << corners.left_top << endl;
cout << "left_bottom:" << corners.left_bottom << endl;
cout << "right_top:" << corners.right_top << endl;
cout << "right_bottom:" << corners.right_bottom << endl;
// Warp the right image into the left image's frame. Canvas width is the
// rightmost warped corner; canvas height is the left image's height.
Mat imageTransform1, imageTransform2;
warpPerspective(imgright, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), imgleft.rows));
//warpPerspective(image01, imageTransform2, adjustMat*homo, Size(image02.cols*1.3, image02.rows*1.8));
//imshow("直接经过透视矩阵变换", imageTransform1);
//imwrite("trans1.jpg", imageTransform1);
// Build the final canvas: warped right image first, then the left image
// pasted over its region, then the seam blended.
int dst_width = imageTransform1.cols; // rightmost warped point = panorama width
int dst_height = imgleft.rows;
int leftw = imgleft.cols;
Mat dst(dst_height, dst_width, CV_8UC3);
dst.setTo(0);
if (dst_width >= leftw)
{
imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
imgleft.copyTo(dst(Rect(0, 0, imgleft.cols, imgleft.rows)));
OptimizeSeam(imgleft, imageTransform1, dst);
return dst;
}
else
{
// Warp collapsed to narrower than the left image — give up and keep img1.
return img1;
}
//imshow("dst", dst);
//imwrite("dst.jpg", dst);
//return dst;
}
// dwhui: older/debug variant of the stitching entry point. Same SURF
// detect/match pipeline as SurfStich(), but it shows intermediate windows,
// always writes debug files, and always estimates the homography as
// points1 -> points2 regardless of the left/right decision below.
// NOTE(review): unlike SurfStich(), the homography direction here does not
// follow the left/right flag, and the second flag block's else branch
// leaves flag == false; this function looks superseded by SurfStich() —
// confirm before using it.
Mat SurfStich2(Mat img1, Mat img2)
{
Mat image1, image2;
cvtColor(img1, image1, CV_RGB2GRAY);
cvtColor(img2, image2, CV_RGB2GRAY);
// Detect SURF keypoints (Hessian threshold 2000).
SurfFeatureDetector Detector(2000);
vector<KeyPoint> keyPoint1, keyPoint2;
Detector.detect(image1, keyPoint1);
Detector.detect(image2, keyPoint2);
// Compute SURF descriptors for the matching step below.
SurfDescriptorExtractor Descriptor;
Mat imageDesc1, imageDesc2;
Descriptor.compute(image1, keyPoint1, imageDesc1);
Descriptor.compute(image2, keyPoint2, imageDesc2);
FlannBasedMatcher matcher;
vector<vector<DMatch> > matchePoints;
vector<DMatch> GoodMatchePoints;
// img1's descriptors form the train set; img2's are the query set.
vector<Mat> train_desc(1, imageDesc1);
matcher.add(train_desc);
matcher.train();
matcher.knnMatch(imageDesc2, matchePoints, 2);
cout << "total match points: " << matchePoints.size() << endl;
// Lowe's ratio test: keep a match only when the best neighbour is much
// closer than the second best (ratio 0.4).
for (int i = 0; i < matchePoints.size(); i++)
{
if (matchePoints[i][0].distance < 0.4 * matchePoints[i][1].distance)
{
GoodMatchePoints.push_back(matchePoints[i][0]);
}
}
Mat first_match;
drawMatches(img2, keyPoint2, img1, keyPoint1, GoodMatchePoints, first_match);
imshow("first_match ", first_match);
imwrite("first_match.bmp", first_match);
vector<Point2f> imagePoints1, imagePoints2;
for (int i = 0; i<GoodMatchePoints.size(); i++)
{
imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
}
// Estimate the 3x3 homography mapping img1's points onto img2's.
// NOTE(review): no minimum-match-count guard here (SurfStich() requires
// >= 4); findHomography will fail on too few points.
Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
////getPerspectiveTransform could be used instead, but it takes exactly 4 points and gives worse results
//Mat homo=getPerspectiveTransform(imagePoints1,imagePoints2);
cout << "变换矩阵为:\n" << homo << endl << endl; // print the homography
// Compute where img1's four corners land after warping
// (fills the global `corners`).
CalcCorners(homo, img1);
cout << "left_top:" << corners.left_top << endl;
cout << "left_bottom:" << corners.left_bottom << endl;
cout << "right_top:" << corners.right_top << endl;
cout << "right_bottom:" << corners.right_bottom << endl;
// Decide which input is the left image: count, per image, how many good
// matches fall in its right half; the image with the larger right-half
// fraction is taken to be the left one.
int propimg1 = 0, propimg2 = 0;
for (int i = 0; i < GoodMatchePoints.size(); i++) {
if (keyPoint2[GoodMatchePoints[i].queryIdx].pt.x > img2.cols/2 ) {
propimg2++;
}
if (keyPoint1[GoodMatchePoints[i].trainIdx].pt.x > img1.cols/2 ) {
propimg1++;
}
}
bool flag = false;
Mat imgright;
Mat imgleft;
if ((propimg1 / (GoodMatchePoints.size() + 0.0)) > (propimg2 / (GoodMatchePoints.size() + 0.0))) {
imgleft = img1.clone();
flag = true;
}
else {
imgleft = img2.clone();
flag = false;
}
if (flag) {
imgright = img2.clone();
flag = false;
}
else {
imgright = img1.clone();
}
// Warp the chosen right image into the left image's frame. Canvas width
// is the rightmost warped corner; height is the left image's height.
Mat imageTransform1, imageTransform2;
warpPerspective(imgright, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), imgleft.rows));
//warpPerspective(image01, imageTransform2, adjustMat*homo, Size(image02.cols*1.3, image02.rows*1.8));
imshow("直接经过透视矩阵变换", imageTransform1);
imwrite("trans1.jpg", imageTransform1);
// Build the final canvas: warped right image first, then the left image
// pasted over its region, then the seam blended.
int dst_width = imageTransform1.cols; // rightmost warped point = panorama width
int dst_height = imgleft.rows;
Mat dst(dst_height, dst_width, CV_8UC3);
dst.setTo(0);
imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
imgleft.copyTo(dst(Rect(0, 0, imgleft.cols, imgleft.rows)));
imshow("b_dst", dst);
OptimizeSeam(imgleft, imageTransform1, dst);
imshow("dst", dst);
imwrite("dst.jpg", dst);
return dst;
}