// Simple center-point and rotation-angle detection program; processing is extremely slow:
// measured ~5000 ms per frame; parallelizing with OpenMP only recovered ~400 ms.
#include<opencv2/nonfree/features2d.hpp> // required for SiftFeatureDetector / SurfFeatureDetector
#include<opencv2\opencv.hpp>
#include<iostream>
#include<fstream>
#include "iostream"
#include "highgui.h"
#include "cv.h"
#include "cxcore.h"
#include <opencv/cxcore.h>
using namespace cv;
using namespace std;
// NOTE(review): appears unused in this chunk, and the name ERROR collides with
// the ERROR macro defined by <wingdi.h> on Windows — confirm it is needed.
#define ERROR 1234
// Global: most recent min-area rectangle found by getCenter() (read via .center in drawCross call).
CvBox2D Box2D;
// Draw a cross marker of total width/height `size` centered at `point`.
void drawCross(CvArr* img, CvPoint2D32f point, CvScalar color, int size, int thickness)
{
	const int half = size / 2;
	// Horizontal arm.
	cvLine(img, cvPoint(point.x - half, point.y), cvPoint(point.x + half, point.y), color, thickness, 8, 0);
	// Vertical arm.
	cvLine(img, cvPoint(point.x, point.y - half), cvPoint(point.x, point.y + half), color, thickness, 8, 0);
}
// Walk the sibling chain starting at `contour` and return the contour whose
// absolute area is largest. Returns NULL when the chain is empty.
CvSeq *GetAreaMaxContour(CvSeq *contour)
{
	CvSeq *best = 0;          // contour with the largest area seen so far
	double best_area = 0;
	for (CvSeq *cur = contour; cur != NULL; cur = cur->h_next)
	{
		const double area = fabs(cvContourArea(cur, CV_WHOLE_SEQ));
		if (area > best_area)
		{
			best_area = area;
			best = cur;
		}
	}
	return best;
}
// Find the largest bright blob in `img`, fit a min-area rectangle and an
// ellipse to it, draw the result and display it in the "ZeroImage" window.
// Side effect: updates the global Box2D with the fitted rectangle.
// NOTE(review): the median blur and erosion run IN PLACE on the caller's
// frame, so the camera image is already modified when getSift() sees it —
// confirm this is intentional.
void getCenter(IplImage *img){
	IplImage *src = img;
	int data[16] =
	{
		0, 0, 0, 0,
		0, 0, 1, 0,   // custom 4x4 erosion kernel
		0, 1, 1, 0,
		0, 0, 0, 0
	};
	IplConvKernel *Element = cvCreateStructuringElementEx(4, 4, 2, 2, CV_SHAPE_RECT, data);
	cvSmooth(src, src, CV_MEDIAN, 5, 5, 0, 0);
	cvErode(src, src, Element, 2);
	// Fixed leak: the original allocated src_RGB with cvCreateImage and then
	// immediately overwrote the pointer with cvCloneImage, leaking one full
	// image per frame. Clone directly instead.
	IplImage *src_RGB = cvCloneImage(src);
	IplImage *ZeroImage = cvCreateImage(cvGetSize(src), 8, 3);
	cvZero(ZeroImage);
	IplImage *src_gray = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage *Canny_Image = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage *Threshold = cvCreateImage(cvGetSize(src), 8, 1);
	cvCvtColor(src, src_gray, CV_RGB2GRAY);
	// Binarize: the 150 threshold controls which pixels contribute contours.
	cvThreshold(src_gray, Threshold, 150, 255, CV_THRESH_BINARY);
	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *Image_Seq;
	// Edge extraction followed by contour extraction.
	cvCanny(Threshold, Canny_Image, 125, 255, 3);
	cvFindContours(Canny_Image, storage, &Image_Seq, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);
	cvDrawContours(src_RGB, Image_Seq, CV_RGB(200, 65, 45), CV_RGB(100, 165, 45), 2, 2, 8);
	// Fixed: the green component was 2555, outside the 0-255 channel range (typo for 255).
	cvDrawContours(ZeroImage, Image_Seq, CV_RGB(200, 65, 45), CV_RGB(255, 255, 255), 2, 2, 8);
	CvSeq *Cal_Contours = GetAreaMaxContour(Image_Seq);
	CvPoint2D32f Array[4];
	for (; Cal_Contours != 0; Cal_Contours = Cal_Contours->h_next)
	{
		// NOTE(review): the inner loop iterates over child contours (v_next)
		// but the body keeps operating on Cal_Contours, so the same rectangle
		// is recomputed once per child. Looks unintended; behavior preserved.
		CvSeq *InterCon = Cal_Contours->v_next;
		for (; InterCon != 0; InterCon = InterCon->h_next)
		{
			Box2D = cvMinAreaRect2(Cal_Contours);
			CvBox2D FitBox = cvMinAreaRect2(Cal_Contours);
			// cvFitEllipse2 requires >= 5 contour points — presumably
			// guaranteed by the blob size here; confirm.
			CvBox2D ellipse = cvFitEllipse2(Cal_Contours);
			cvBoxPoints(FitBox, Array);
			cvEllipseBox(src_RGB, ellipse, CV_RGB(255, 255, 0)); // draw fitted ellipse
			printf("%f %f ", Array[0].x, Array[0].y);
			// Rectangle center = mean of its four corners.
			float a = (Array[0].x + Array[1].x + Array[2].x + Array[3].x) / 4;
			float b = (Array[0].y + Array[1].y + Array[2].y + Array[3].y) / 4;
			printf("%f %f ", a, b);
			printf("\n");
			cvLine(ZeroImage, cvPoint(Array[0].x, Array[0].y), cvPoint(Array[1].x, Array[1].y), CV_RGB(114, 45, 255), 2, 8);
			cvLine(ZeroImage, cvPoint(Array[1].x, Array[1].y), cvPoint(Array[2].x, Array[2].y), CV_RGB(114, 45, 255), 2, 8);
			cvLine(ZeroImage, cvPoint(Array[2].x, Array[2].y), cvPoint(Array[3].x, Array[3].y), CV_RGB(114, 45, 255), 2, 8);
			cvLine(ZeroImage, cvPoint(Array[3].x, Array[3].y), cvPoint(Array[0].x, Array[0].y), CV_RGB(114, 45, 255), 2, 8);
			drawCross(ZeroImage, Box2D.center, cvScalar(0, 0, 255), 200, 10); // cross at rect center
		}
	}
	// Display result (cvShowImage copies the buffer, so releasing below is safe).
	cvNamedWindow("ZeroImage", 0);
	cvResizeWindow("ZeroImage", 500, 500);
	cvShowImage("ZeroImage", ZeroImage);
	// Fixed leaks: the original released nothing, leaking five images, the
	// contour storage and the kernel on EVERY frame — a major contributor to
	// the per-frame slowdown as memory pressure grows.
	cvReleaseImage(&src_RGB);
	cvReleaseImage(&ZeroImage);
	cvReleaseImage(&src_gray);
	cvReleaseImage(&Canny_Image);
	cvReleaseImage(&Threshold);
	cvReleaseMemStorage(&storage);
	cvReleaseStructuringElement(&Element);
}
// Estimate the rotation angle of the live frame `img` relative to the
// reference template "s2.jpg" by matching SURF features and voting on the
// per-match keypoint-angle differences. Returns 0 on success, -1 if either
// image is unavailable. Displays the matches in the "matches" window.
int getSift(IplImage *img){
	// Perf fix: the template is static, so load it and compute its SURF
	// keypoints/descriptors ONCE instead of on every frame — re-describing
	// the reference each call was pure repeated work.
	// Assumes "s2.jpg" does not change at runtime — TODO confirm.
	static Mat c_src1 = imread("s2.jpg");
	static Mat src1 = imread("s2.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	static SurfFeatureDetector detector;
	static SurfDescriptorExtractor extractor;
	static vector<KeyPoint> kp1;
	static Mat des1;
	static bool ref_ready = false;
	Mat c_src2(img, true);
	Mat src2;
	cvtColor(c_src2, src2, CV_RGB2GRAY);
	if (!src1.data || !src2.data)
	{
		std::cout << " --(!) Error reading images " << std::endl; return -1;
	}
	if (!ref_ready)
	{
		detector.detect(src1, kp1);
		extractor.compute(src1, kp1, des1);
		ref_ready = true;
	}
	vector<KeyPoint> kp2;
	detector.detect(src2, kp2);
	Mat des2;
	extractor.compute(src2, kp2, des2);
	// (Removed: drawKeypoints into res1/res2 — the results were never shown,
	// so it was wasted per-frame work.)
	cout << "size of description of Img1: " << kp1.size() << endl;
	cout << "size of description of Img2: " << kp2.size() << endl;
	BFMatcher matcher(NORM_L2);
	vector<DMatch> matches;
	matcher.match(des1, des2, matches);
	Mat img_match;
	drawMatches(src1, kp1, src2, kp2, matches, img_match);
	// Vote on the rounded angle difference of every match.
	// Bug fix: the original put "#pragma omp parallel for" on this loop and
	// the two below — angle_count[index]++, maxindex, count and angle were
	// all mutated by multiple threads without synchronization (data races,
	// nondeterministic results). The loops are far too cheap to benefit from
	// threading, so they now run serially and deterministically.
	int angle_count[361] = { 0 };
	for (size_t i = 0; i < matches.size(); i++)
	{
		// Truncation of (diff + 0.5) matches the original rounding behavior.
		int index = static_cast<int>(kp2.at(matches.at(i).trainIdx).angle - kp1.at(matches.at(i).queryIdx).angle + 0.5);
		index = index > 0 ? index : index + 360;  // map non-positive diffs into [1,360]
		angle_count[index]++;
	}
	// Pick the histogram bin with the most votes.
	int maxindex = 0;
	for (int i = 0; i < 361; i++)
	{
		if (angle_count[i] >= angle_count[maxindex])
		{
			maxindex = i;
		}
	}
	maxindex = (maxindex - 360) < 0 ? maxindex : (maxindex - 360);
	cout << "旋转角度:" << maxindex - 1 << endl;
	// Refine: average the raw differences that fall within one degree of the
	// winning bin. NOTE(review): wrap-around at 0/360 is still not handled.
	int down = maxindex - 1 < 0 ? maxindex : maxindex - 1;
	int up = maxindex + 1 > 360 ? maxindex : maxindex + 1;
	double angle = 0;
	int count = 0;
	for (size_t i = 0; i < matches.size(); i++)
	{
		double temp = kp2.at(matches.at(i).trainIdx).angle - kp1.at(matches.at(i).queryIdx).angle;
		if (temp >= down && temp <= up)
		{
			count++;
			angle += temp;
		}
	}
	// Bug fix: guard against division by zero when no difference landed in
	// [down, up] (e.g. when there are no matches at all).
	if (count > 0)
	{
		angle /= count;
	}
	cout << "精确角度:" << angle - 0.2 << endl;
	cout << "number of matched points: " << matches.size() << endl;
	cvNamedWindow("matches", 0);
	cvResizeWindow("matches", 500, 500);
	imshow("matches", img_match);
	return 0;
}
// Capture frames from the default camera, run center detection and SURF
// rotation estimation on each frame, and print the per-frame runtime.
// Press 'q' in an OpenCV window to quit.
int main(int argc, char* argv[])
{
	IplImage* pFrame = NULL;
	// Open the default camera (index -1 = first available).
	CvCapture* pCapture = cvCreateCameraCapture(-1);
	// Bug fix: the original never checked pCapture; cvQueryFrame(NULL)
	// crashes when no camera is present.
	if (!pCapture)
	{
		fprintf(stderr, "Failed to open camera\n");
		return -1;
	}
	cvNamedWindow("video", 0);
	cvResizeWindow("video", 640, 480);
	while (true)
	{
		// Tick counts bracket the whole per-frame pipeline for timing.
		double start = (double)cvGetTickCount();
		pFrame = cvQueryFrame(pCapture); // frame buffer owned by the capture; do not release
		if (!pFrame) break;
		cvShowImage("video", pFrame);
		getCenter(pFrame); // NOTE: modifies pFrame in place before getSift sees it
		getSift(pFrame);
		if (waitKey(33) == 'q')
			break;
		double end = (double)cvGetTickCount();
		// Convert tick delta to milliseconds.
		double t1 = (end - start) / ((double)cvGetTickFrequency()*1000.);
		printf("Run time without OpenMP = %g ms\n", t1);
	}
	cvReleaseCapture(&pCapture);
	cvDestroyWindow("video");
	cvWaitKey();
	return 0;
}