Help: matching two images with SIFT+RANSAC, the program errors out at runtime

qq_35890021 2017-03-26 10:36:19
I want to use the SIFT algorithm to find a particular object in a picture that contains several objects. SIFT alone produced too many wrong matches, and after searching online I learned that RANSAC can be used to further reject the outlier matches. I combined code I found online with my own code; my own part runs fine, but the RANSAC part always reports an error at runtime.
I have to hand in this code next week, so it is fairly urgent. I would really appreciate some guidance. This is my first time working on this topic and there is a lot I do not understand, so please explain in some detail.
Environment: OpenCV 2.4.10 + VS2013
#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include<opencv2/nonfree/nonfree.hpp>
#include<opencv2/legacy/legacy.hpp>
#include<vector>

#include <iostream>//

using namespace std;
using namespace cv;

int _tmain(int argc, _TCHAR* argv[])
{
const char* imagename = "F:/vs2013/images/1.jpg";

cv::Mat img1, img2, img1_e, img1_b, img2_e, img2_b;
// read the images from file
Mat img1_m = imread(imagename);
Mat img2_m = imread("F:/vs2013/images/2.jpg");

// if loading an image failed
if (img1_m.empty())
{
fprintf(stderr, "Can not load image %s\n", imagename);
return -1;
}
if (img2_m.empty())
{
fprintf(stderr, "Can not load image F:/vs2013/images/2.jpg\n");
return -1;
}
// display the input images
imshow("image before", img1_m);
imshow("image2 before", img2_m);



cv::medianBlur(img1_m, img1, 9);//
cv::medianBlur(img2_m, img2, 9);//





cv::imshow("Img1_e", img1);
cv::imshow("Img2_e", img2);


cv::waitKey(0);//






// SIFT feature detection
SiftFeatureDetector siftdtc;
vector<KeyPoint>kp1, kp2;

siftdtc.detect(img1, kp1);
Mat outimg1;
drawKeypoints(img1, kp1, outimg1);
imshow("image1 keypoints", outimg1);
KeyPoint kp;

vector<KeyPoint>::iterator itvc;
for (itvc = kp1.begin(); itvc != kp1.end(); itvc++)
{
cout << "angle:" << itvc->angle << "\t" << itvc->class_id << "\t" << itvc->octave << "\t" << itvc->pt << "\t" << itvc->response << endl;
}

siftdtc.detect(img2, kp2);
Mat outimg2;
drawKeypoints(img2, kp2, outimg2);
imshow("image2 keypoints", outimg2);


SiftDescriptorExtractor extractor;
Mat descriptor1, descriptor2;
BruteForceMatcher<L2<float>> matcher;
vector<DMatch> matches;
Mat img_matches;
extractor.compute(img1, kp1, descriptor1);
extractor.compute(img2, kp2, descriptor2);


imshow("desc", descriptor1);
cout << endl << descriptor1 << endl;
matcher.match(descriptor1, descriptor2, matches);

drawMatches(img1, kp1, img2, kp2, matches, img_matches);
imshow("matches", img_matches);

Point2f m1(93.0, 158.0), m2(394.0, 150.0), m3(62.0, 16.0), m4(388.0, 62.0);
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point2f(93.0f, 158.0f);
obj_corners[1] = Point2f(394.0f, 150.0f);
obj_corners[2] = Point2f(62.0f, 16.0f);
obj_corners[3] = Point2f(388.0f, 62.0f);

Ptr<DescriptorMatcher> descriptor_matcher = DescriptorMatcher::create("BruteForce");// create a descriptor matcher
int count = kp1.size();

Mat p1(kp1.size(), 2, CV_32F);
Mat p2(kp2.size(), 2, CV_32F);
for (int i = 0; i < (int)kp1.size(); i++)
{
p1.at<float>(i, 0) = kp1[i].pt.x;
p1.at<float>(i, 1) = kp1[i].pt.y;
}
for (int j = 0; j < (int)kp2.size(); j++)
{
p2.at<float>(j, 0) = kp2[j].pt.x;
p2.at<float>(j, 1) = kp2[j].pt.y;
}
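// NOTE: p1 and p2 are filled with the raw keypoint coordinates in detection
// order rather than with corresponding pairs taken from 'matches', and their
// row counts differ whenever kp1.size() != kp2.size(); findFundamentalMat and
// findHomography expect two equally sized lists of corresponding points, so
// this is a likely source of the error.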
// estimate the fundamental matrix F with RANSAC
Mat m_Fundamental, out;
// the variable above will hold the fundamental matrix
vector<uchar> m_RANSACStatus;
// this vector is meant to store each point's inlier/outlier status after RANSAC

// I first used findFundamentalMat and found that it removes the wrong matches
// quite well, but its return value is the fundamental matrix rather than the
// transformation matrix I actually need
m_Fundamental = findFundamentalMat(p1, p2, out, CV_FM_RANSAC);

// findHomography is used here because its return value is the real transformation matrix
Mat m_homography;
vector<uchar> m;
m_homography = findHomography(p1, p2, CV_RANSAC, 3, m);
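// NOTE: with the calls above the RANSAC inlier mask is written into 'out'
// (for F) and 'm' (for the homography), while m_RANSACStatus is never filled;
// indexing m_RANSACStatus[i] further down therefore reads from an empty
// vector, which is another likely cause of the runtime error.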

// use the homography to project the four corner points of the object into the scene
std::vector<Point2f> scene_corners(4);
perspectiveTransform(obj_corners, scene_corners, m_homography);
line(p2, scene_corners[0], scene_corners[1], Scalar(0, 0, 255), 2);
line(p2, scene_corners[1], scene_corners[2], Scalar(0, 0, 255), 2);
line(p2, scene_corners[2], scene_corners[3], Scalar(0, 0, 255), 2);
line(p2, scene_corners[3], scene_corners[0], Scalar(0, 0, 255), 2);
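// NOTE: p2 is an N x 2 matrix of float coordinates, not an image, so the
// line() calls above draw into the coordinate data; the projected box would
// normally be drawn on the scene image (e.g. img2) instead.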



int nr = m_Fundamental.rows; // number of rows
int nc = m_Fundamental.cols *m_Fundamental.channels(); // total number of elements per line

// count the outliers
int OutlinerCount = 0;
for (int i = 0; i<count; i++)
{
if (m_RANSACStatus[i] == 0) // status 0 marks an outlier
{
OutlinerCount++;
}
}

// collect the inliers
vector<Point2f> m_LeftInlier;
vector<Point2f> m_RightInlier;
vector<DMatch> m_InlierMatches;
// the three vectors above store the inlier points and their match relations
int ptCount = (int)matches.size();
int InlinerCount = ptCount - OutlinerCount;
m_InlierMatches.resize(InlinerCount);
m_LeftInlier.resize(InlinerCount);
m_RightInlier.resize(InlinerCount);
InlinerCount = 0;
for (int i = 0; i<ptCount; i++)
{
if (m_RANSACStatus[i] != 0)
{
m_LeftInlier[InlinerCount].x = p1.at<float>(i, 0);
m_LeftInlier[InlinerCount].y = p1.at<float>(i, 1);
m_RightInlier[InlinerCount].x = p2.at<float>(i, 0);
m_RightInlier[InlinerCount].y = p2.at<float>(i, 1);
m_InlierMatches[InlinerCount].queryIdx = InlinerCount;
m_InlierMatches[InlinerCount].trainIdx = InlinerCount;
InlinerCount++;
}
}
//printf("final number of matched points: %d\n", InlinerCount);
// convert the inliers into a format that drawMatches can use
vector<KeyPoint> key1(InlinerCount);
vector<KeyPoint> key2(InlinerCount);
KeyPoint::convert(m_LeftInlier, key1);
KeyPoint::convert(m_RightInlier, key2);

// show the inlier matches after the RANSAC step
Mat m_matLeftImage;
Mat m_matRightImage;
// the two Mats above are meant to hold the left and right images
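// NOTE: m_matLeftImage and m_matRightImage are never assigned, so the
// drawMatches call below receives empty images and will fail; the original
// images (e.g. img1 and img2) would need to be copied into them first.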

/*line(src_1, m1, m2, Scalar(0, 255, 0), 2);
line(src_1, m2, m3, Scalar(0, 255, 0), 2);
line(src_1, m3, m4, Scalar(0, 255, 0), 2);
line(src_1, m4, m1, Scalar(0, 255, 0), 2);
*/
Mat OutImage;
drawMatches(m_matLeftImage, key1, m_matRightImage, key2, m_InlierMatches, OutImage);
imshow("SIFT_RANSAC", OutImage);
//imwrite("F:\\ylab\\CSDN_image\\5.jpg",OutImage);
cvWaitKey(0);
return 0;
}
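For reference, here is a minimal sketch of how the RANSAC step is usually wired up with OpenCV 2.4.x: the point lists are built from the matched pairs (queryIdx/trainIdx), findHomography fills an inlier mask, and only the masked matches are kept and drawn. It reuses the image paths assumed above and, unlike the hand-picked coordinates above, simply projects the four corners of the whole object image; treat it as a sketch under those assumptions, not a drop-in replacement.

#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <vector>
using namespace cv;
using namespace std;

int main()
{
    Mat img1 = imread("F:/vs2013/images/1.jpg");   // object image (path assumed as above)
    Mat img2 = imread("F:/vs2013/images/2.jpg");   // scene image (path assumed as above)
    if (img1.empty() || img2.empty()) return -1;

    // detect SIFT keypoints and compute descriptors for both images
    SiftFeatureDetector detector;
    SiftDescriptorExtractor extractor;
    vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    detector.detect(img1, kp1);
    detector.detect(img2, kp2);
    extractor.compute(img1, kp1, desc1);
    extractor.compute(img2, kp2, desc2);

    // brute-force matching: one match per descriptor of the object image
    BruteForceMatcher< L2<float> > matcher;
    vector<DMatch> matches;
    matcher.match(desc1, desc2, matches);

    // build two equally sized lists of corresponding points from the matches
    vector<Point2f> obj, scene;
    for (size_t i = 0; i < matches.size(); i++)
    {
        obj.push_back(kp1[matches[i].queryIdx].pt);
        scene.push_back(kp2[matches[i].trainIdx].pt);
    }

    // RANSAC homography; 'mask' gets one entry per pair: 1 = inlier, 0 = outlier
    vector<uchar> mask;
    Mat H = findHomography(obj, scene, CV_RANSAC, 3, mask);

    // keep only the matches that RANSAC marked as inliers
    vector<DMatch> inliers;
    for (size_t i = 0; i < mask.size(); i++)
        if (mask[i]) inliers.push_back(matches[i]);

    Mat out;
    drawMatches(img1, kp1, img2, kp2, inliers, out);

    // project the corners of the object image into the scene and draw the box
    // (the scene half of 'out' starts at x = img1.cols)
    vector<Point2f> obj_corners(4), scene_corners(4);
    obj_corners[0] = Point2f(0, 0);
    obj_corners[1] = Point2f((float)img1.cols, 0);
    obj_corners[2] = Point2f((float)img1.cols, (float)img1.rows);
    obj_corners[3] = Point2f(0, (float)img1.rows);
    perspectiveTransform(obj_corners, scene_corners, H);
    Point2f offset((float)img1.cols, 0);
    for (int k = 0; k < 4; k++)
        line(out, scene_corners[k] + offset, scene_corners[(k + 1) % 4] + offset, Scalar(0, 0, 255), 2);

    imshow("SIFT_RANSAC inliers", out);
    waitKey(0);
    return 0;
}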
3 replies
赵4老师 2017-04-07
When it crashes, press the appropriate button in the dialog that pops up to enter the debugger, then press Alt+7 to open the Call Stack window. The entries listed from top to bottom correspond to the call history from the innermost to the outermost function. Double-click a line to jump to the source code or assembly instruction of that call; if you cannot make sense of it, double-click the next line down, until you reach something you can understand.
angel6709 2017-04-07
The following code is something I wrote myself; please use it as a reference.

#include <iostream>
//#include "opencv2/core/core.hpp"
//#include "opencv2/features2d/features2d.hpp"
//#include "opencv2/highgui/highgui.hpp"

#include "opencv2/xfeatures2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"

#include <opencv2/imgproc.hpp>  // Gaussian Blur
#include <opencv2/core.hpp>        // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>  // OpenCV window I/O
#include <opencv2/features2d.hpp>
#include <opencv2/objdetect.hpp>


#include <stdio.h>
#include <string>
#include <vector>



using namespace std;
using namespace cv;

vector<string> paths =
{ "d:\\images\\fbl.jpg",
"d:\\images\\fpl.jpg" ,
"d:\\images\\lsa.jpg",
"d:\\images\\mjh.jpg",
"d:\\images\\mls.jpg",
"d:\\images\\xsy.jpg",
"d:\\images\\ysy.jpg",
"d:\\images\\ysw.jpg",
"d:\\images\\bbf.jpg",
"d:\\images\\gsn.jpg",
"d:\\images\\ysk.jpg",
"d:\\images\\yss.jpg",
"d:\\images\\ysq.jpg"
};

const char *windowvideoName = "videoCap window";
const char *windowmatchName = "match window";

const int CLIP_WIDTH = 360;
const int CLIP_HEIGHT = 200;
const int TOP_MATCH_COUNT = 25;
const float NOT_SURE_RATE_THRESHOD = 0.5f; //0---1 
const float SURE_RATE_THRESHOD = 0.8f; //0---1 
const float DISTANCE_THRESHOD = 0.16f; 
const bool BREAK_ON_SUCCESS = false;
const bool USE_SIFT = false;

int IMGS = 0;


vector<vector<KeyPoint>> keypoints;
vector<Mat> descriptors;
vector<Mat> srcs;
vector<double> rates;

Rect CLIP_EREA;

Ptr<BFMatcher>  matcher = BFMatcher::create(NORM_L2, false);


Ptr<FeatureDetector> detector;
Ptr<FeatureDetector> extractor;


void init()
{
	if (USE_SIFT) {

		detector = xfeatures2d::SiftFeatureDetector::create();
		extractor = xfeatures2d::SiftFeatureDetector::create();
	}
	else
	{
		detector = xfeatures2d::SurfFeatureDetector::create(400);
		extractor = xfeatures2d::SurfDescriptorExtractor::create(400);

	}


	IMGS = paths.size();
	keypoints.resize(IMGS);
	descriptors.resize(IMGS);
	srcs.resize(IMGS);
	rates.resize(IMGS);
	for (int i = 0; i < IMGS; i++)
	{
		srcs[i] = imread(paths[i]);
		detector->detect(srcs[i], keypoints[i]);
		extractor->compute(srcs[i], keypoints[i], descriptors[i]);
	}
}

CvScalar rankCoror(float rate)
{
	if (rate < 0.1) { return CV_RGB(150, 0, 0); }
	if (rate < 0.25&&rate >= 0.1) { return CV_RGB(0, 0, 255); }
	if (rate < NOT_SURE_RATE_THRESHOD&&rate >= 0.25) { return CV_RGB(0, 100, 255); }
	if (rate < SURE_RATE_THRESHOD&&rate >= NOT_SURE_RATE_THRESHOD) { return CV_RGB(0, 255, 100); }
	if (rate >= NOT_SURE_RATE_THRESHOD) { return CV_RGB(0, 255, 0); }
	return CV_RGB(150, 0, 0);
}


void main2()
{

	init();

	namedWindow(windowvideoName);
	namedWindow(windowmatchName);
	VideoCapture VideoStream(0);
	Mat videocapture;
	Mat clip;

	do
	{
		VideoStream >> videocapture;
		CLIP_EREA.x = 0;
		CLIP_EREA.y = 0;
		CLIP_EREA.width = videocapture.cols;
		CLIP_EREA.height = videocapture.rows;

		clip = Mat(videocapture);
		int rows = clip.rows;
		int cols = clip.cols;
		if (cols > CLIP_WIDTH)
		{
			clip = clip.colRange((cols - CLIP_WIDTH) / 2, (cols + CLIP_WIDTH) / 2);
			CLIP_EREA.x = (cols - CLIP_WIDTH) / 2;
			CLIP_EREA.width = CLIP_WIDTH;

		}
		if (rows > CLIP_HEIGHT)
		{
			clip = clip.rowRange(0, CLIP_HEIGHT);
			CLIP_EREA.height = CLIP_HEIGHT;

		}

		vector<KeyPoint> keypoint2;
		Mat   descriptor2;
		vector<DMatch>matches;

		detector->detect(clip, keypoint2);
		if (keypoint2.size() == 0) { continue; }
		extractor->compute(clip, keypoint2, descriptor2);

		for (int i = 0; i < IMGS; i++)
		{
			matcher->match(descriptors[i], descriptor2, matches);
			//std::sort(matches.begin(), matches.end()); 
			nth_element(matches.begin(), matches.begin() + TOP_MATCH_COUNT, matches.end());
			matches.erase(matches.begin() + TOP_MATCH_COUNT, matches.end()); 
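			// nth_element + erase keep only the TOP_MATCH_COUNT matches with the
			// smallest distance (DMatch::operator< compares by distance); this
			// assumes matches.size() >= TOP_MATCH_COUNT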

			double dist = 0;
			int count = 0;
			for (const DMatch& m : matches)
			{
				if (m.distance <= DISTANCE_THRESHOD)
				{
					count++;
				}
				dist += m.distance;
			}
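			// rate1 = fraction of the kept matches whose distance is below
			// DISTANCE_THRESHOD, used as the confidence score for image i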
			float rate1 = count*1.0f / matches.size();
			rates[i] = rate1;
			if (rate1 >= NOT_SURE_RATE_THRESHOD)
			{
				Mat output;
				drawMatches(srcs[i], keypoints[i], clip, keypoint2, matches, output);
				imshow(windowmatchName, output);
				cout << "MATCH:  " << paths[i] << "rate1:   " << rate1 << "  rate2:  " << dist / matches.size() << endl;
				if (rate1 >= SURE_RATE_THRESHOD&&BREAK_ON_SUCCESS)
				{
					break;
				}
			}
		}

		for (int x = 0; x < IMGS; x++)
		{
			char temp[150];
			sprintf(temp, "%s : RATE: %.2f", paths[x].c_str(), (float)rates[x]);
			putText(videocapture, temp, Point(10 + (x % 2) * 300, 10 + x * 10), FONT_HERSHEY_SIMPLEX, 0.5, rankCoror(rates[x]), 2);

		}

		rectangle(videocapture, CLIP_EREA, CV_RGB(0, 0, 255));
		imshow(windowvideoName, videocapture);


	} while (waitKey(30) < 0);


}

qq_35890021 2017-03-26
