4.1.1 Nearest-Neighbor Interpolation
This is the simplest image scaling method. The principle: for each point in the target image, take the value of the pixel in the source image that lies nearest to its back-projected position.
Gray value of each target pixel --> gray value of its nearest pixel in the source image.
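Concretely, for scale factors kx (rows) and ky (columns), the sample code below maps each target pixel (i, j) back to a source pixel as

dst(i, j) = src( round((i + 1) / kx) - 1, round((j + 1) / ky) - 1 )

where round() is implemented in the code as static_cast<int>(v + 0.5).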
OpenCV provides three functions for converting a floating-point number to an integer: cvRound / cvFloor / cvCeil.
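A minimal snippet illustrating the three conversions (values chosen away from .5 ties, so the results are unambiguous):

#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
    std::cout << cvRound(2.8) << std::endl;   // 3 -- nearest integer
    std::cout << cvRound(2.3) << std::endl;   // 2
    std::cout << cvFloor(2.8) << std::endl;   // 2 -- round down
    std::cout << cvCeil(2.3) << std::endl;    // 3 -- round up
    return 0;
}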

// https://blog.csdn.net/linqianbi/article/details/78593724
// https://blog.csdn.net/qq_22424571/article/details/80918549
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <algorithm>
#include <iostream>
using namespace cv;

// Nearest-neighbor interpolation:
// scaling based on sampling the source image at equal intervals
Mat imageReduction1(Mat &srcImage, float kx, float ky)   // source image and scale factors
{
    // resolution of the output image
    int nRows = cvRound(srcImage.rows * kx);   // cvRound returns the integer closest to its argument
    int nCols = cvRound(srcImage.cols * ky);
    Mat resultImage(nRows, nCols, srcImage.type());   // create the output image
    for (int i = 0; i < nRows; i++)
    {
        // row coordinate in the source image, derived from the vertical scale factor
        int x = static_cast<int>((i + 1) / kx + 0.5) - 1;
        x = std::min(x, srcImage.rows - 1);   // guard against rounding past the last row
        for (int j = 0; j < nCols; j++)
        {
            // column coordinate in the source image, derived from the horizontal scale factor
            int y = static_cast<int>((j + 1) / ky + 0.5) - 1;
            y = std::min(y, srcImage.cols - 1);   // guard against rounding past the last column
            resultImage.at<Vec3b>(i, j) = srcImage.at<Vec3b>(x, y);
        }
    }
    return resultImage;
}

// Scaling based on averaging sub-blocks of the source image:
// compute the per-channel mean of all pixels inside a sub-block
Vec3b areaAverage(Mat &srcImage, Point_<int> leftPoint, Point_<int> rightPoint)
{
    int temp1 = 0, temp2 = 0, temp3 = 0;   // per-channel sums over the block
    // number of pixels in the block
    int nPix = (rightPoint.x - leftPoint.x + 1) * (rightPoint.y - leftPoint.y + 1);
    // accumulate the per-channel sums over the block
    for (int i = leftPoint.x; i <= rightPoint.x; i++)
    {
        for (int j = leftPoint.y; j <= rightPoint.y; j++)
        {
            temp1 += srcImage.at<Vec3b>(i, j)[0];   // blue
            temp2 += srcImage.at<Vec3b>(i, j)[1];   // green
            temp3 += srcImage.at<Vec3b>(i, j)[2];   // red
        }
    }
    // per-channel means of the block
    Vec3b vecTemp;
    vecTemp[0] = temp1 / nPix;
    vecTemp[1] = temp2 / nPix;
    vecTemp[2] = temp3 / nPix;
    return vecTemp;
}

Mat imageReduction2(Mat &srcImage, float kx, float ky)
{
    // resolution of the output image
    int nRows = cvRound(srcImage.rows * kx);
    int nCols = cvRound(srcImage.cols * ky);
    Mat resultImage(nRows, nCols, srcImage.type());   // create the output image
    // top-left row/column coordinates of the current sub-block
    int leftRowcoordinate = 0;
    int leftColcoordinate = 0;

    for (int i = 0; i < nRows; i++)
    {
        // bottom row of the sub-block, derived from the vertical scale factor
        int x = static_cast<int>((i + 1) / kx + 0.5) - 1;
        x = std::min(x, srcImage.rows - 1);
        for (int j = 0; j < nCols; j++)
        {
            // rightmost column of the sub-block, derived from the horizontal scale factor
            int y = static_cast<int>((j + 1) / ky + 0.5) - 1;
            y = std::min(y, srcImage.cols - 1);
            // average the sub-block
            resultImage.at<Vec3b>(i, j) = areaAverage(srcImage,
                Point_<int>(leftRowcoordinate, leftColcoordinate), Point_<int>(x, y));
            // advance the block's left column; the top row stays the same
            leftColcoordinate = y + 1;
        }
        // a row of blocks is done: reset the column and advance the top row
        leftColcoordinate = 0;
        leftRowcoordinate = x + 1;
    }
    return resultImage;
}

int main()
{
    Mat srcImage = imread("D:\\大海.jpg");
    if (!srcImage.data)
    {
        printf("image could not load...\n");
        return -1;
    }
    imshow("srcImage", srcImage);
    Mat resultImage1 = imageReduction1(srcImage, 0.5, 0.5);
    imshow("res1", resultImage1);

    Mat resultImage2 = imageReduction2(srcImage, 0.5, 0.5);
    imshow("res2", resultImage2);
    waitKey(0);
    return 0;
}
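For comparison, OpenCV's built-in cv::resize already covers both strategies: INTER_NEAREST performs nearest-neighbor scaling, and INTER_AREA resamples using the pixel-area relation, which for shrinking behaves much like the block-averaging version above. A minimal sketch using the same srcImage:

cv::Mat nearest, area;
cv::resize(srcImage, nearest, cv::Size(), 0.5, 0.5, cv::INTER_NEAREST);   // nearest-neighbor
cv::resize(srcImage, area, cv::Size(), 0.5, 0.5, cv::INTER_AREA);         // area-based resampling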
References:
https://blog.csdn.net/linqianbi/article/details/78593724
https://blog.csdn.net/qq_22424571/article/details/80918549
https://blog.csdn.net/gbyy42299/article/details/80406509
This is the most basic and simplest image scaling algorithm, and its results are also the worst: an enlarged image shows severe blockiness (mosaic), and a reduced image shows severe distortion. The root cause is that the naive nearest-neighbor rule introduces heavy distortion. When the source coordinate obtained by back-projecting a target coordinate is fractional, the method simply rounds it and takes the value of the closest pixel, which is rather crude. If the computed coordinate is, say, 0.75, it should not simply be treated as 1: it lies 0.25 away from 1 and 0.75 away from 0, so the target pixel value really ought to be computed, according to some rule, from the four real pixels surrounding this virtual point in the source image. Only then can a better scaling result be achieved.
Bilinear interpolation is such an algorithm: it uses all four real pixels surrounding the virtual source point to jointly determine one pixel in the target image, so its scaling quality is much better than simple nearest-neighbor interpolation.
4.1.2 Bilinear Interpolation
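Before the code, a brief statement of the formula it implements. Let (x, y) be the back-projected source position, x0 = floor(x), y0 = floor(y), dx = x - x0, dy = y - y0. Then, per channel,

f(x, y) ≈ (1 - dx)(1 - dy) * f(x0, y0) + dx(1 - dy) * f(x0 + 1, y0) + (1 - dx)dy * f(x0, y0 + 1) + dx * dy * f(x0 + 1, y0 + 1)

The code first interpolates horizontally within two neighboring rows (the t1 and t2 values) and then interpolates vertically between them, which expands to exactly this expression.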

// https://blog.csdn.net/Gone_HuiLin/article/details/53223222
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <algorithm>
#include <iostream>

// Image scaling with bilinear interpolation
cv::Mat BilinearInterpolation(cv::Mat srcImage)
{
    CV_Assert(srcImage.data != NULL);
    CV_Assert(srcImage.type() == CV_8UC3);   // the pixel access below assumes an 8-bit 3-channel image
    int srcRows = srcImage.rows;
    int srcCols = srcImage.cols;
    int srcStep = (int)srcImage.step;
    // build the target image, fixed here at 640 x 480
    cv::Mat dstImage = cv::Mat(cv::Size(640, 480), srcImage.type(), cv::Scalar::all(0));
    int dstRows = dstImage.rows;
    int dstCols = dstImage.cols;
    int dstStep = (int)dstImage.step;
    std::cout << "srcCols:" << srcCols << " srcRows:" << srcRows << " srcStep:" << srcStep << std::endl;
    std::cout << "dstCols:" << dstCols << " dstRows:" << dstRows << " dstStep:" << dstStep << std::endl;
    // coordinates and intermediate weighted values
    float srcX = 0, srcY = 0;
    float t1X = 0, t1Y = 0, t1Z = 0;
    float t2X = 0, t2Y = 0, t2Z = 0;
    for (int j = 0; j < dstRows - 1; j++)
    {
        uchar* dstRow = dstImage.ptr<uchar>(j);
        for (int i = 0; i < dstCols - 1; i++)
        {
            // map the target pixel back into the source image (center-aligned)
            srcX = (i + 0.5f) * srcCols / dstCols - 0.5f;
            srcY = (j + 0.5f) * srcRows / dstRows - 0.5f;
            // clamp so that both neighbors stay inside the source image
            srcX = std::min(std::max(srcX, 0.0f), srcCols - 2.0f);
            srcY = std::min(std::max(srcY, 0.0f), srcRows - 2.0f);
            int iSrcX = (int)srcX;
            int iSrcY = (int)srcY;
            float dx = srcX - iSrcX;   // horizontal weight
            float dy = srcY - iSrcY;   // vertical weight
            const uchar* srcRow0 = srcImage.ptr<uchar>(iSrcY);
            const uchar* srcRow1 = srcImage.ptr<uchar>(iSrcY + 1);
            // interpolate horizontally along the upper row (three channels)
            t1X = srcRow0[iSrcX * 3 + 0] * (1 - dx) + srcRow0[(iSrcX + 1) * 3 + 0] * dx;
            t1Y = srcRow0[iSrcX * 3 + 1] * (1 - dx) + srcRow0[(iSrcX + 1) * 3 + 1] * dx;
            t1Z = srcRow0[iSrcX * 3 + 2] * (1 - dx) + srcRow0[(iSrcX + 1) * 3 + 2] * dx;
            // interpolate horizontally along the lower row (three channels)
            t2X = srcRow1[iSrcX * 3 + 0] * (1 - dx) + srcRow1[(iSrcX + 1) * 3 + 0] * dx;
            t2Y = srcRow1[iSrcX * 3 + 1] * (1 - dx) + srcRow1[(iSrcX + 1) * 3 + 1] * dx;
            t2Z = srcRow1[iSrcX * 3 + 2] * (1 - dx) + srcRow1[(iSrcX + 1) * 3 + 2] * dx;
            // interpolate vertically between the two rows to get the target pixel
            dstRow[i * 3 + 0] = cv::saturate_cast<uchar>(t1X * (1 - dy) + t2X * dy);
            dstRow[i * 3 + 1] = cv::saturate_cast<uchar>(t1Y * (1 - dy) + t2Y * dy);
            dstRow[i * 3 + 2] = cv::saturate_cast<uchar>(t1Z * (1 - dy) + t2Z * dy);
        }
        // fill the last column by copying its neighbor
        dstRow[(dstCols - 1) * 3 + 0] = dstRow[(dstCols - 2) * 3 + 0];
        dstRow[(dstCols - 1) * 3 + 1] = dstRow[(dstCols - 2) * 3 + 1];
        dstRow[(dstCols - 1) * 3 + 2] = dstRow[(dstCols - 2) * 3 + 2];
    }
    // fill the last row by copying the row above it
    for (int i = 0; i < dstCols * 3; i++)
    {
        dstImage.ptr<uchar>(dstRows - 1)[i] = dstImage.ptr<uchar>(dstRows - 2)[i];
    }
    return dstImage;
}

int main()
{
    cv::Mat srcImage = cv::imread("D:\\大海.jpg");
    if (!srcImage.data)
        return -1;
    cv::Mat dstImage = BilinearInterpolation(srcImage);
    cv::imshow("srcImage", srcImage);
    cv::imshow("dstImage", dstImage);
    cv::waitKey(0);
    return 0;
}
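The same result (up to border handling) can be obtained from the library call itself, since INTER_LINEAR, the default interpolation of cv::resize, is bilinear:

cv::Mat dstImage2;
cv::resize(srcImage, dstImage2, cv::Size(640, 480), 0, 0, cv::INTER_LINEAR);   // built-in bilinear scaling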
References:
https://www.cnblogs.com/yssongest/p/5303151.html
4.1.3 Performance Comparison of Interpolation Methods

// https://blog.csdn.net/spw_1201/article/details/53544014
// nearest-neighbor, bilinear, pixel-area, bicubic, and Lanczos interpolation
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;

void ResizeExample(Mat srcImage)
{
    // validate the input
    CV_Assert(srcImage.data != NULL);
    imshow("srcImage", srcImage);
    Mat dstImage(256, 256, CV_8UC3);
    // test 1: default parameters, i.e. bilinear interpolation
    double tTime;
    tTime = (double)getTickCount();
    const int nTimes = 100;
    for (int i = 0; i < nTimes; i++)
    {
        resize(srcImage, dstImage, dstImage.size(), 0, 0);
    }
    tTime = 1000 * ((double)getTickCount() - tTime) / getTickFrequency();
    tTime /= nTimes;
    cout << "test1: " << tTime << endl;
    imshow("1 default parameters:dstImage", dstImage);
    // test 2: nearest-neighbor interpolation
    tTime = (double)getTickCount();
    for (int i = 0; i < nTimes; i++)
    {
        resize(srcImage, dstImage, Size(256, 256), 0, 0, INTER_NEAREST);
    }
    tTime = 1000 * ((double)getTickCount() - tTime) / getTickFrequency();
    tTime /= nTimes;
    cout << "test2: " << tTime << endl;
    imshow("2 INTER_NEAREST:dstImage", dstImage);
    // test 3: pixel-area-relation interpolation
    tTime = (double)getTickCount();
    for (int i = 0; i < nTimes; i++)
    {
        resize(srcImage, dstImage, Size(256, 256), 0.5, 0.5, INTER_AREA);
    }
    tTime = 1000 * ((double)getTickCount() - tTime) / getTickFrequency();
    tTime /= nTimes;
    cout << "test3: " << tTime << endl;
    imshow("3 INTER_AREA : dstImage", dstImage);
    // test 4: bicubic interpolation
    tTime = (double)getTickCount();
    for (int i = 0; i < nTimes; i++)
    {
        resize(srcImage, dstImage, Size(), 0.5, 0.5, INTER_CUBIC);
    }
    tTime = 1000 * ((double)getTickCount() - tTime) / getTickFrequency();
    tTime /= nTimes;
    cout << "test4: " << tTime << endl;
    imshow("4 INTER_CUBIC : dstImage", dstImage);
    // test 5: Lanczos interpolation over an 8x8 neighborhood
    tTime = (double)getTickCount();
    for (int i = 0; i < nTimes; i++)
    {
        resize(srcImage, dstImage, Size(), 0.5, 0.5, INTER_LANCZOS4);
    }
    tTime = 1000 * ((double)getTickCount() - tTime) / getTickFrequency();
    tTime /= nTimes;
    cout << "test5: " << tTime << endl;
    imshow("5 INTER_LANCZOS4 : dstImage", dstImage);
}

int main()
{
    Mat srcImage = imread("D:\\大海.jpg");
    if (!srcImage.data)
        return -1;
    ResizeExample(srcImage);
    waitKey(0);
    return 0;
}
4.1.4 Image Pyramids
The idea is easy to grasp: picture a stack of images as a pyramid, where the higher the level, the smaller the image and the lower its resolution.
There are two types of pyramids:
- Gaussian pyramid: used for downsampling; this is the main kind of image pyramid.
- Laplacian pyramid: used to reconstruct an image, i.e. it stores the prediction residual (my understanding: when a small image is enlarged, new pixel values have to be inserted, and reasonable values can only be predicted from the surrounding pixels), so that the image can be restored as faithfully as possible, for example when rebuilding a large image from a small one. The residual is written out right below.
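Concretely, if the Gaussian-pyramid levels are G_i with G_{i+1} = pyrDown(G_i), each Laplacian level is usually defined as the residual

L_i = G_i - pyrUp(G_{i+1})

so keeping the smallest Gaussian level plus all the residuals is enough to rebuild the original by repeatedly applying G_i = pyrUp(G_{i+1}) + L_i.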
Two terms come up constantly with image pyramids: upsampling and downsampling. Here is what they mean:
- Upsampling: enlarging the image ("up" as in getting bigger), done with the pyrUp function
- Downsampling: shrinking the image ("down" as in getting smaller), done with the pyrDown function
Downsampling steps:
- Convolve the image with a Gaussian kernel
- Remove every other row and column
Downsampling compresses the image, so image information is lost; a sketch of these two steps follows.
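A minimal sketch of those two steps, assuming an 8-bit 3-channel input; the 5x5 Gaussian used here only approximates the kernel that pyrDown applies internally:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

cv::Mat manualPyrDown(const cv::Mat &src)
{
    // step 1: smooth with a Gaussian kernel
    cv::Mat blurred;
    cv::GaussianBlur(src, blurred, cv::Size(5, 5), 0);
    // step 2: keep every other row and column
    cv::Mat down(src.rows / 2, src.cols / 2, src.type());
    for (int r = 0; r < down.rows; r++)
        for (int c = 0; c < down.cols; c++)
            down.at<cv::Vec3b>(r, c) = blurred.at<cv::Vec3b>(2 * r, 2 * c);
    return down;
}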
Upsampling steps:
- Enlarge the image to twice its size in each direction, filling the newly added rows and columns with zeros;
- Convolve the enlarged image with the same kernel as before (multiplied by 4) to obtain approximate values for the new pixels (a matching sketch follows).
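And a matching sketch of upsampling under the same assumptions (zero-insertion followed by smoothing; scaling by 4 compensates for the three quarters of the samples that are zero):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

cv::Mat manualPyrUp(const cv::Mat &src)
{
    // step 1: double the size in each direction, with the new rows/columns filled with zeros
    cv::Mat up = cv::Mat::zeros(src.rows * 2, src.cols * 2, src.type());
    for (int r = 0; r < src.rows; r++)
        for (int c = 0; c < src.cols; c++)
            up.at<cv::Vec3b>(2 * r, 2 * c) = src.at<cv::Vec3b>(r, c);
    // step 2: smooth, then scale by 4 (equivalent to a kernel whose weights are multiplied by 4)
    cv::Mat smoothed;
    cv::GaussianBlur(up, smoothed, cv::Size(5, 5), 0);
    smoothed.convertTo(smoothed, -1, 4.0);
    return smoothed;
}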
The function cv2.pyrDown() builds the pyramid upward from a large, high-resolution image (each level gets smaller and lower in resolution).
The function cv2.pyrUp() walks back down the pyramid from a small, low-resolution image (the size doubles, but the detail lost earlier is not recovered, so the effective resolution does not improve).
One application of image pyramids is image blending. In image stitching, for example, two images have to be laid over each other, but because pixel values are discontinuous across the seam, the combined image looks poor. This is where image pyramids come in handy: they make a seamless join possible. A sketch of the classic pyramid blend follows.
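A compact sketch of the classic Laplacian-pyramid blend that joins the left half of one image with the right half of another. The file names are placeholders, the two inputs are assumed to be the same size, and the pyramids are built in floating point so the residuals are not clipped:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    cv::Mat imgA = cv::imread("left.jpg");    // placeholder file names
    cv::Mat imgB = cv::imread("right.jpg");
    if (imgA.empty() || imgB.empty() || imgA.size() != imgB.size())
        return -1;

    const int levels = 4;

    // Gaussian pyramids of both images, in floating point
    std::vector<cv::Mat> gpA, gpB;
    cv::Mat a32, b32;
    imgA.convertTo(a32, CV_32FC3);
    imgB.convertTo(b32, CV_32FC3);
    gpA.push_back(a32);
    gpB.push_back(b32);
    for (int i = 0; i < levels; i++)
    {
        cv::Mat downA, downB;
        cv::pyrDown(gpA.back(), downA);
        cv::pyrDown(gpB.back(), downB);
        gpA.push_back(downA);
        gpB.push_back(downB);
    }

    // Laplacian pyramids: L_i = G_i - pyrUp(G_{i+1}); the smallest Gaussian level sits on top
    std::vector<cv::Mat> lpA, lpB;
    for (int i = 0; i < levels; i++)
    {
        cv::Mat upA, upB;
        cv::pyrUp(gpA[i + 1], upA, gpA[i].size());
        cv::pyrUp(gpB[i + 1], upB, gpB[i].size());
        lpA.push_back(gpA[i] - upA);
        lpB.push_back(gpB[i] - upB);
    }
    lpA.push_back(gpA[levels]);
    lpB.push_back(gpB[levels]);

    // join the left half of A with the right half of B at every pyramid level
    std::vector<cv::Mat> blended;
    for (size_t i = 0; i < lpA.size(); i++)
    {
        cv::Mat left  = lpA[i](cv::Range::all(), cv::Range(0, lpA[i].cols / 2));
        cv::Mat right = lpB[i](cv::Range::all(), cv::Range(lpB[i].cols / 2, lpB[i].cols));
        cv::Mat joined;
        cv::hconcat(left, right, joined);
        blended.push_back(joined);
    }

    // collapse the pyramid: repeatedly upsample and add back the residual
    cv::Mat result = blended.back();
    for (int i = (int)blended.size() - 2; i >= 0; i--)
    {
        cv::Mat up;
        cv::pyrUp(result, up, blended[i].size());
        result = up + blended[i];
    }
    result.convertTo(result, CV_8UC3);

    cv::imshow("blended", result);
    cv::waitKey(0);
    return 0;
}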

#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

using namespace std;
using namespace cv;

int main()
{
    Mat srcImage = imread("D:\\大海.jpg");

    // check that the image loaded successfully
    if (srcImage.empty())
    {
        cout << "Failed to load the image!" << endl;
        return -1;
    }
    else
        cout << "Image loaded successfully!" << endl << endl;

    namedWindow("Original", WINDOW_AUTOSIZE);
    imshow("Original", srcImage);

    // two successive downsampling steps, each shown in its own window
    Mat pyrDownImage_1, pyrDownImage_2;

    pyrDown(srcImage, pyrDownImage_1);
    namedWindow("pyrDown-1", WINDOW_AUTOSIZE);
    imshow("pyrDown-1", pyrDownImage_1);

    pyrDown(pyrDownImage_1, pyrDownImage_2);
    namedWindow("pyrDown-2", WINDOW_AUTOSIZE);
    imshow("pyrDown-2", pyrDownImage_2);

    // upsample the downsampled result twice
    Mat pyrUpImage_1, pyrUpImage_2;

    pyrUp(pyrDownImage_2, pyrUpImage_1);
    namedWindow("pyrUp-1", WINDOW_AUTOSIZE);
    imshow("pyrUp-1", pyrUpImage_1);

    pyrUp(pyrUpImage_1, pyrUpImage_2);
    namedWindow("pyrUp-2", WINDOW_AUTOSIZE);
    imshow("pyrUp-2", pyrUpImage_2);

    waitKey(0);

    return 0;
}
References:
https://blog.csdn.net/u010682375/article/details/70147508
https://www.cnblogs.com/skyfsm/p/6876732.html