I'm teaching myself image processing. Could anyone give me some pointers? I don't quite understand the part of my code that distinguishes the two kinds of interpolation. My code is below.

南极最酷小企鹅 2019-05-09 03:26:45
#include <iostream>
#include <string>
#include <cstdlib>    // atof
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>   // CV_INTER_NN / CV_INTER_LINEAR constants
using namespace cv;
using namespace std;

void findNearPoint(int height, int width, int cHeight, int cWidth, int vec[], int x, int y);
void resize(IplImage* src, IplImage* dst, int mode);
void generateHistogramEqualizationImage(IplImage* input, int rgb);

int main(int argc, char *argv[])
{
    if (argc == 3)
    {
        string imgPath = string(argv[1]);
        double zoom = atof(argv[2]);
        cout << "放大系数:" << zoom << endl;   // "放大系数" = zoom factor
        Mat originalMatImg = imread(imgPath.c_str());
        IplImage iplImage = IplImage(originalMatImg);   // wrap the Mat as an IplImage for the C-style functions below
        int dstWidth = cvRound(iplImage.width * zoom);
        int dstHeight = cvRound(iplImage.height * zoom);
        IplImage *dst = cvCreateImage(cvSize(dstWidth, dstHeight), iplImage.depth, iplImage.nChannels);
        resize(&iplImage, dst, CV_INTER_LINEAR);
        // equalize every channel of the resized image
        for (int i = 0; i < dst->nChannels; ++i)
            generateHistogramEqualizationImage(dst, i);
        Mat finalMatImg = cvarrToMat(dst);
        namedWindow("原图");                 // "原图" = original image
        imshow("原图", originalMatImg);
        namedWindow("结果图片");             // "结果图片" = result image
        imshow("结果图片", finalMatImg);
        imwrite("result.png", finalMatImg);
        waitKey(6000);
        return 0;
    }
    else
    {
        cout << "Command: \n main.exe img.png zoomOut" << endl;
        return 1;
    }
}

// Clamp the neighbor (cHeight + x, cWidth + y) to the valid source range and
// store the resulting row/column in vec[0]/vec[1].
void findNearPoint(int height, int width, int cHeight, int cWidth, int vec[], int x, int y)
{
    if (cHeight + x < height && cWidth + y < width && cHeight + x >= 0 && cWidth + y >= 0)
    {
        vec[0] = cHeight + x;
        vec[1] = cWidth + y;
    }
    if (cHeight + x >= height)
    {
        vec[0] = height - 1;   // last valid row (height itself would be out of bounds)
    }
    if (cHeight + x < 0)
    {
        vec[0] = 0;
    }
    if (cWidth + y >= width)
    {
        vec[1] = width - 1;    // last valid column
    }
    if (cWidth + y < 0)
    {
        vec[1] = 0;
    }
}

void resize(IplImage* src, IplImage* dst, int mode)
{
    int height_src = src->height;
    int width_src = src->width;
    int step_src = src->widthStep / sizeof(uchar);
    int channels = src->nChannels;
    uchar* data_src = (uchar *)src->imageData;
    int height_dst = dst->height;
    int width_dst = dst->width;
    int step_dst = dst->widthStep / sizeof(uchar);
    uchar* data_dst = (uchar *)dst->imageData;
    switch (mode)
    {
    case CV_INTER_NN:       // nearest neighbor: copy the value of the closest source pixel
    {
        for (int i = 0; i < height_dst; ++i)
        {
            for (int j = 0; j < width_dst; ++j)
            {
                int mapHeight = i * ((float)height_src / (float)height_dst);
                int mapWidth = j * ((float)width_src / (float)width_dst);
                for (int k = 0; k < channels; ++k)
                {
                    data_dst[i*step_dst + j * channels + k] = data_src[mapHeight*step_src + mapWidth * channels + k];
                }
            }
        }
        break;
    }
    case CV_INTER_LINEAR:   // bilinear: weighted average of the four surrounding source pixels
    {
        for (int i = 0; i < height_dst; ++i)
        {
            for (int j = 0; j < width_dst; ++j)
            {
                float mapHeight = i * ((float)height_src / (float)height_dst);
                float mapWidth = j * ((float)width_src / (float)width_dst);
                int mapIHeight = (int)mapHeight;
                int mapIWidth = (int)mapWidth;
                float u = mapHeight - mapIHeight;   // fractional part along the rows
                float v = mapWidth - mapIWidth;     // fractional part along the columns
                int vec_0[2] = { 0,0 };
                int vec_1[2] = { 0,0 };
                int vec_2[2] = { 0,0 };
                int vec_3[2] = { 0,0 };
                findNearPoint(height_src, width_src, mapIHeight, mapIWidth, vec_0, 0, 0); // f(x,   y)
                findNearPoint(height_src, width_src, mapIHeight, mapIWidth, vec_1, 1, 0); // f(x+1, y)
                findNearPoint(height_src, width_src, mapIHeight, mapIWidth, vec_2, 0, 1); // f(x,   y+1)
                findNearPoint(height_src, width_src, mapIHeight, mapIWidth, vec_3, 1, 1); // f(x+1, y+1)
                for (int k = 0; k < channels; ++k)
                {
                    // f(x+u, y+v) = (1-u)(1-v)f(x,y) + u(1-v)f(x+1,y) + (1-u)v f(x,y+1) + uv f(x+1,y+1)
                    float value =
                        data_src[vec_0[0] * step_src + vec_0[1] * channels + k] * (1 - u) * (1 - v) +
                        data_src[vec_1[0] * step_src + vec_1[1] * channels + k] * u * (1 - v) +
                        data_src[vec_2[0] * step_src + vec_2[1] * channels + k] * (1 - u) * v +
                        data_src[vec_3[0] * step_src + vec_3[1] * channels + k] * u * v;
                    data_dst[i*step_dst + j * channels + k] = saturate_cast<uchar>(value);
                }
            }
        }
        break;
    }
    }
}

// Histogram equalization of one channel: new value = CDF(old value) * 255.
void generateHistogramEqualizationImage(IplImage* input, int rgb)
{
    int height = input->height;
    int width = input->width;
    int step = input->widthStep / sizeof(uchar);
    int channels = input->nChannels;
    uchar* data1 = (uchar *)input->imageData;
    vector<int> countPix(256);
    vector<float> percentPix(256);
    int wholePixel = height * width;
    // count how many pixels take each gray value
    for (int i = 0; i < height; ++i)
    {
        for (int j = 0; j < width; ++j)
        {
            countPix[data1[i*step + j * channels + rgb]]++;
        }
    }
    // cumulative distribution: fraction of pixels with value <= i
    for (int i = 0; i < (int)countPix.size(); ++i)
    {
        int wholeForwardPixel = 0;
        for (int j = 0; j <= i; ++j)
        {
            wholeForwardPixel += countPix[j];
        }
        percentPix[i] = (float)wholeForwardPixel / (float)wholePixel;
    }
    // remap every pixel of this channel through the CDF
    for (int i = 0; i < height; ++i)
    {
        for (int j = 0; j < width; ++j)
        {
            data1[i*step + j * channels + rgb] = percentPix[data1[i*step + j * channels + rgb]] * 255;
        }
    }
}
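For example, the program could be run as follows (the image file name and zoom factor here are only placeholders):

main.exe lena.png 2.0

which would enlarge lena.png to twice its width and height with bilinear interpolation, equalize each channel, and save the output as result.png.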
绿色盒子 2019-05-10
1. Nearest-neighbor interpolation

This is the simplest interpolation method and needs almost no computation: among the four source pixels neighboring the target location, the gray value of the one closest to it is assigned to the target pixel. Let (i+u, j+v) be the coordinates of the pixel to be computed, where i and j are integers and u and v are fractions between 0 and 1 (the same notation is used below). The gray value f(i+u, j+v) is then chosen as follows:



Think of the square formed by the four neighbors as split into four regions A, B, C and D (top-left, top-right, bottom-left, bottom-right). If (i+u, j+v) falls in region A, i.e. u < 0.5 and v < 0.5, the target pixel receives the gray value of the top-left pixel; likewise, region B takes the top-right pixel, region C the bottom-left pixel, and region D the bottom-right pixel.

The nearest-neighbor method is cheap to compute, but it can introduce gray-level discontinuities in the interpolated image, and obvious jagged edges may appear where the intensity changes.
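To make the rule concrete, here is a minimal sketch of nearest-neighbor sampling for a single-channel 8-bit image (illustrative code written for this explanation, not taken from the code above; all names are made up):

#include <algorithm>   // std::min

// Nearest-neighbor sample at the real-valued source position (row i+u, column j+v).
// src points to a grayscale image stored row by row, one byte per pixel.
unsigned char sampleNearest(const unsigned char* src, int height, int width,
                            float srcRow, float srcCol)
{
    // Adding 0.5 and truncating is exactly the A/B/C/D region rule:
    // u < 0.5 keeps row i, u >= 0.5 moves to row i+1 (and likewise for v and the column).
    int r = std::min((int)(srcRow + 0.5f), height - 1);
    int c = std::min((int)(srcCol + 0.5f), width - 1);
    return src[r * width + c];
}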



2. Bilinear interpolation

Bilinear interpolation uses the gray values of the four pixels neighboring the target location and interpolates linearly in both directions:



For the point (i, j+v), the gray value is assumed to change linearly from f(i, j) to f(i, j+1), so:

f(i, j+v) = [f(i, j+1) - f(i, j)] * v + f(i, j)

Similarly, for (i+1, j+v):

f(i+1, j+v) = [f(i+1, j+1) - f(i+1, j)] * v + f(i+1, j)

The change from f(i, j+v) to f(i+1, j+v) is also linear, which gives the formula for the gray value of the target pixel:

f(i+u, j+v) = (1-u) * (1-v) * f(i, j) + (1-u) * v * f(i, j+1) + u * (1-v) * f(i+1, j) + u * v * f(i+1, j+1)
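As a quick check with made-up numbers: take f(i, j) = 10, f(i, j+1) = 20, f(i+1, j) = 30, f(i+1, j+1) = 40 and u = 0.25, v = 0.5. Then

f(i+0.25, j+0.5) = 0.75*0.5*10 + 0.75*0.5*20 + 0.25*0.5*30 + 0.25*0.5*40
                 = 3.75 + 7.5 + 3.75 + 5
                 = 20

and the four weights 0.375, 0.375, 0.125, 0.125 sum to 1, as they must.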

Bilinear interpolation is more expensive to compute than the nearest-neighbor method, but it does not suffer from gray-level discontinuities. It behaves like a low-pass filter, so high-frequency components are attenuated and edges may become slightly blurred; the result looks smoother overall.
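Translated into code, a minimal sketch of bilinear sampling for a single-channel 8-bit image might look like the following (again illustrative only, with made-up names; it is not the resize function from the question):

#include <algorithm>   // std::min

// Bilinear sample at the real-valued source position (row, col) of a grayscale image
// stored row by row, one byte per pixel.
unsigned char sampleBilinear(const unsigned char* src, int height, int width,
                             float row, float col)
{
    int i = (int)row;                      // integer parts: the top-left neighbor f(i, j)
    int j = (int)col;
    float u = row - i;                     // fractional part along the rows
    float v = col - j;                     // fractional part along the columns
    int i1 = std::min(i + 1, height - 1);  // clamp the "+1" neighbors at the image border
    int j1 = std::min(j + 1, width - 1);
    float value = (1 - u) * (1 - v) * src[i  * width + j ]
                + (1 - u) * v       * src[i  * width + j1]
                + u       * (1 - v) * src[i1 * width + j ]
                + u       * v       * src[i1 * width + j1];
    return (unsigned char)(value + 0.5f);  // round to the nearest gray level
}

Note how the weight on each neighbor is largest when the target point is closest to it, which is what removes the blocky look of the nearest-neighbor result.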
绿色盒子 2019-05-10
These days the one asking for help acts like the boss, and the one answering gets treated like the servant.
weixin_44418697 2019-05-09
Reading this makes my head spin.
