OpenCV Basics in Python - 1

Artificial Intelligence · 2025-04-16 19:42:03
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

############################ Basics ############################
# Load a color image in grayscale
# img = cv.imread('66.png',0)
# cv.imshow('pkq',img)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Load a color image in grayscale, show it in a resizable window, and save it
# img = cv.imread('66.png',0)
# cv.namedWindow('image',cv.WINDOW_NORMAL)
# cv.imshow('image',img)
# cv.imwrite('pkq.png',img)
# cv.waitKey(0)
# cv.destroyAllWindows()


# Display with matplotlib
# img = cv.imread('66.png',0)
# plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
# plt.xticks([]), plt.yticks([])  # hide tick values on the x and y axes
# plt.show()

# Capture video from the camera
# cap = cv.VideoCapture(0)
# if not cap.isOpened():
#     print("Cannot open camera")
#     exit()
# while True:
#     # Capture frame-by-frame
#     ret, frame = cap.read()
#     # If the frame is read correctly, ret is True
#     if not ret:
#         print("Can't receive frame (stream end?). Exiting ...")
#         break
#     # Our operations on the frame come here
#     gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
#     # Display the resulting frame
#     cv.imshow('frame', gray)
#     if cv.waitKey(1) == ord('q'):
#         break
# # When everything is done, release the capture
# cap.release()
# cv.destroyAllWindows()

# Play video from a file
# cap = cv.VideoCapture('Record-rec20250411165905.mp4')
# while cap.isOpened():
#     ret, frame = cap.read()
#     # If the frame is read correctly, ret is True
#     if not ret:
#         print("Can't receive frame (stream end?). Exiting ...")
#         break
#     gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
#     cv.imshow('frame', gray)
#     if cv.waitKey(1) == ord('q'):
#         break
# cap.release()
# cv.destroyAllWindows()

# Save a video
# #cap = cv.VideoCapture(0)
# cap = cv.VideoCapture('Record-rec20250411165905.mp4')
# # Define the codec and create a VideoWriter object
# fourcc = cv.VideoWriter_fourcc(*'XVID')
# out = cv.VideoWriter('output.avi', fourcc, 20.0, (640,  480))  # the size must match the frames actually written
# while cap.isOpened():
#     ret, frame = cap.read()
#     if not ret:
#         print("Can't receive frame (stream end?). Exiting ...")
#         break
#     frame = cv.flip(frame, 0)
#     # Write the flipped frame
#     out.write(frame)
#     cv.imshow('frame', frame)
#     if cv.waitKey(1) == ord('q'):
#         break
# # Release everything when the job is finished
# cap.release()
# out.release()
# cv.destroyAllWindows()

# Drawing functions
# # Create a black image
# img = np.zeros((520,520,3), np.uint8)
# # Draw a blue diagonal line with thickness 5
# cv.line(img,(0,0),(512,512),(255,0,0),5)
# # Rectangle
# cv.rectangle(img,(275,0),(510,128),(0,255,0),3)
# # Circle
# cv.circle(img,(280,75), 70, (0,0,255), -1)
# # Ellipse
# cv.ellipse(img,(361,361),(100,50),0,0,180,255,-1)
# # Polygon
# pts = np.array([[10,5],[20,25],[70,100],[50,85]], np.int32)
# pts = pts.reshape((-1,1,2))
# cv.polylines(img,[pts],True,(0,255,255))
# # Add text to the image
# font = cv.FONT_HERSHEY_SIMPLEX
# cv.putText(img,'Candy',(10,500), font, 4,(255,255,255),2,cv.LINE_AA)
# cv.imshow('draw',img)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Mouse as a paint brush -- draw a circle wherever the user double-clicks
# Mouse callback function
# def draw_circle(event,x,y,flags,param):
#     if event == cv.EVENT_LBUTTONDBLCLK:
#         cv.circle(img,(x,y),100,(255,0,0),-1)
# # Create a black image, a window, and bind the callback function to the window
# img = np.zeros((512,512,3), np.uint8)
# cv.namedWindow('image')
# cv.setMouseCallback('image',draw_circle)
# while(1):
#     cv.imshow('image',img)
#     if cv.waitKey(20) & 0xFF == 27:
#         break
# cv.destroyAllWindows()

# Mouse as a paint brush (drag to draw)
# Mouse callback function
# drawing = False # true while the mouse button is pressed
# mode = True # if True, draw rectangles; press 'm' to toggle to circles
# ix,iy = -1,-1
# def draw_circle(event,x,y,flags,param):
#     global ix,iy,drawing,mode
#     if event == cv.EVENT_LBUTTONDOWN:
#         drawing = True
#         ix,iy = x,y
#     elif event == cv.EVENT_MOUSEMOVE:
#         if drawing == True:
#             if mode == True:
#                 cv.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
#             else:
#                 cv.circle(img,(x,y),5,(0,0,255),-1)
#     elif event == cv.EVENT_LBUTTONUP:
#         drawing = False
#         if mode == True:
#             cv.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
#         else:
#             cv.circle(img,(x,y),5,(0,0,255),-1)
# img = np.zeros((512,512,3), np.uint8)
# cv.namedWindow('image')
# cv.setMouseCallback('image',draw_circle)
# while(1):
#     cv.imshow('image',img)
#     k = cv.waitKey(20) & 0xFF
#     if k == ord('m'):
#         mode = not mode
#     elif k == 27:
#         break
# cv.destroyAllWindows()

# Trackbar as a color palette
# def nothing(x):
#     pass
# # Create a black image and a window
# img = np.zeros((300,512,3), np.uint8)
# cv.namedWindow('image')
# # Create trackbars for color change
# cv.createTrackbar('R','image',0,255,nothing)
# cv.createTrackbar('G','image',0,255,nothing)
# cv.createTrackbar('B','image',0,255,nothing)
# # Create a switch for ON/OFF functionality
# switch = '0 : OFF \n1 : ON'
# cv.createTrackbar(switch, 'image',0,1,nothing)
# while(1):
#     cv.imshow('image',img)
#     k = cv.waitKey(1) & 0xFF
#     if k == 27:
#         break
#     # Get the current positions of the four trackbars
#     r = cv.getTrackbarPos('R','image')
#     g = cv.getTrackbarPos('G','image')
#     b = cv.getTrackbarPos('B','image')
#     s = cv.getTrackbarPos(switch,'image')
#     if s == 0:
#         img[:] = 0
#     else:
#         img[:] = [b,g,r]
# cv.destroyAllWindows()


############################ Image Operations ############################
# img = cv.imread('66.png')
# # Access and modify pixel values
# px = img[100,100]
# print( px )
# blue = img[100,100,0]
# print( blue )
# img[100,100] = [255,255,255]
# print( img[100,100] )
# # img.item(10,10,2)
# # img.itemset((10,10,2),100)
# # img.item(10,10,2)
# # Access image properties: rows, columns and channels, data type, number of pixels, etc.
# print( img.shape )
# print( img.size )
# print( img.dtype )
# # Image region of interest (ROI)
# ball = img[280:340, 330:390]
# img[273:333, 100:160] = ball
# # Split and merge image channels
# # b = img [:, :, 0]
# # img [:, :, 2] = 0
#
# cv.imshow('pkq.png',img)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Make borders for images (padding)
# BLUE = [255,0,0]
# img1 = cv.imread('66.png')
# replicate = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REPLICATE)
# reflect = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT)
# reflect101 = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT_101)
# wrap = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_WRAP)
# constant= cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_CONSTANT,value=BLUE)
# plt.subplot(231),plt.imshow(img1,'gray'),plt.title('ORIGINAL')
# plt.subplot(232),plt.imshow(replicate,'gray'),plt.title('REPLICATE')
# plt.subplot(233),plt.imshow(reflect,'gray'),plt.title('REFLECT')
# plt.subplot(234),plt.imshow(reflect101,'gray'),plt.title('REFLECT_101')
# plt.subplot(235),plt.imshow(wrap,'gray'),plt.title('WRAP')
# plt.subplot(236),plt.imshow(constant,'gray'),plt.title('CONSTANT')
# plt.show()

# Arithmetic operations on images
# Image blending
# img1 = cv.imread('1.jpg')
# img2 = cv.imread('0.jpg')
# # dst = cv.add(img1, img2)   # simple saturating addition (images must have the same size)
# dst = cv.addWeighted(img1,0.7,img2,0.3,0)
# cv.imshow('dst',dst)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Bitwise operations
# Load two images
# img1 = cv.imread('1.jpg')
# img2 = cv.imread('0.jpg')
# rows,cols,channels = img2.shape
# roi = img1[0:rows, 0:cols ]
# img2gray = cv.cvtColor(img2,cv.COLOR_BGR2GRAY)
# ret, mask = cv.threshold(img2gray, 10, 255, cv.THRESH_BINARY)
# mask_inv = cv.bitwise_not(mask)
# img1_bg = cv.bitwise_and(roi,roi,mask = mask_inv)
# img2_fg = cv.bitwise_and(img2,img2,mask = mask)
# dst = cv.add(img1_bg,img2_fg)
# img1[0:rows, 0:cols ] = dst
# cv.imshow('res',img1)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Measuring performance
# e1 = cv.getTickCount()
# # your code execution goes here
# e2 = cv.getTickCount()
# time = (e2 - e1)/ cv.getTickFrequency()
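# A minimal sketch of timing one call with the tick counter (the medianBlur example and
# the '66.png' filename are illustrative choices, not from the original post):
# img = cv.imread('66.png')
# e1 = cv.getTickCount()
# blurred = cv.medianBlur(img, 5)
# e2 = cv.getTickCount()
# print("medianBlur took {:.4f} s".format((e2 - e1) / cv.getTickFrequency()))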

############################ Image Processing ############################

# Changing color spaces
# flags = [i for i in dir(cv) if i.startswith('COLOR_')]
# print( flags )

# Object tracking
# cap = cv.VideoCapture(0)
# while(1):
#     # Read each frame
#     _, frame = cap.read()
#     # Convert the color space from BGR to HSV
#     hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
#     # Define the range of blue in HSV
#     lower_blue = np.array([110,50,50])
#     upper_blue = np.array([130,255,255])
#     # Threshold the HSV image to get only blue colors
#     mask = cv.inRange(hsv, lower_blue, upper_blue)
#     # Bitwise-AND the mask with the original frame
#     res = cv.bitwise_and(frame,frame, mask= mask)
#     cv.imshow('frame',frame)
#     cv.imshow('mask',mask)
#     cv.imshow('res',res)
#     k = cv.waitKey(5) & 0xFF
#     if k == 27:
#         break
# cv.destroyAllWindows()

# Find the HSV value to track
# green = np.uint8([[[0,255,0 ]]])
# hsv_green = cv.cvtColor(green,cv.COLOR_BGR2HSV)
# print( hsv_green )
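# Following the common rule of thumb, a tracking range can then be built around the
# printed hue; the bounds below are illustrative values, not from the original post:
# hue = int(hsv_green[0][0][0])
# lower_bound = np.array([hue - 10, 100, 100])
# upper_bound = np.array([hue + 10, 255, 255])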

# Geometric transformations of images
# Scaling
# img = cv.imread('66.png')
# res = cv.resize(img,None,fx=2, fy=2, interpolation = cv.INTER_CUBIC)
# # or
# # height, width = img.shape[:2]
# # res = cv.resize(img,(2*width, 2*height), interpolation = cv.INTER_CUBIC)
# cv.imshow('img',res)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Translation
# import numpy as np
# import cv2 as cv
# img = cv.imread('66.png',0)
# rows,cols = img.shape
# M = np.float32([[1,0,100],[0,1,50]])
# dst = cv.warpAffine(img,M,(cols,rows))
# cv.imshow('img',dst)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Rotation
# img = cv.imread('66.png',0)
# rows,cols = img.shape
# # cols-1 and rows-1 are the coordinate limits
# M = cv.getRotationMatrix2D(((cols-1)/2.0,(rows-1)/2.0),90,1)
# dst = cv.warpAffine(img,M,(cols,rows))
# cv.imshow('img',dst)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Affine transformation
# img = cv.imread('66.png')
# rows,cols,ch = img.shape
# pts1 = np.float32([[50,50],[200,50],[50,200]])
# pts2 = np.float32([[10,100],[200,50],[100,250]])
# M = cv.getAffineTransform(pts1,pts2)
# dst = cv.warpAffine(img,M,(cols,rows))
# plt.subplot(121),plt.imshow(img),plt.title('Input')
# plt.subplot(122),plt.imshow(dst),plt.title('Output')
# plt.show()

# Perspective transformation
# img = cv.imread('66.png')
# rows,cols,ch = img.shape
# pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
# pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
# M = cv.getPerspectiveTransform(pts1,pts2)
# dst = cv.warpPerspective(img,M,(300,300))
# plt.subplot(121),plt.imshow(img),plt.title('Input')
# plt.subplot(122),plt.imshow(dst),plt.title('Output')
# plt.show()

# Image thresholding
# Simple thresholding
# img = cv.imread('66.png',0)
# ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
# ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV)
# ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC)
# ret,thresh4 = cv.threshold(img,127,255,cv.THRESH_TOZERO)
# ret,thresh5 = cv.threshold(img,127,255,cv.THRESH_TOZERO_INV)
# titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
# images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
# for i in range(6):
#     plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
#     plt.title(titles[i])
#     plt.xticks([]),plt.yticks([])
# plt.show()

# Otsu's binarization
# img = cv.imread('66.png',0)
# # Global thresholding
# ret1,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
# # Otsu's thresholding
# ret2,th2 = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# # Otsu's thresholding after Gaussian filtering
# blur = cv.GaussianBlur(img,(5,5),0)
# ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# # Plot all the images and their histograms
# images = [img, 0, th1,
#           img, 0, th2,
#           blur, 0, th3]
# titles = ['Original Noisy Image','Histogram','Global Thresholding (v=127)',
#           'Original Noisy Image','Histogram',"Otsu's Thresholding",
#           'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
# for i in range(3):
#     plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
#     plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])
#     plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
#     plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])
#     plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
#     plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])
# plt.show()

# Otsu's binarization: manual implementation
# img = cv.imread('66.png',0)
# blur = cv.GaussianBlur(img,(5,5),0)
# # Find the normalized histogram and the corresponding cumulative distribution function
# hist = cv.calcHist([blur],[0],None,[256],[0,256])
# hist_norm = hist.ravel()/hist.max()
# Q = hist_norm.cumsum()
# bins = np.arange(256)
# fn_min = np.inf
# thresh = -1
# for i in range(1,256):
#     p1,p2 = np.hsplit(hist_norm,[i]) # probabilities
#     q1,q2 = Q[i],Q[255]-Q[i] # cumulative sum of the classes
#     if q1 < 1.e-6 or q2 < 1.e-6:
#         continue  # skip splits that leave one class empty (avoids division by zero)
#     b1,b2 = np.hsplit(bins,[i]) # weights
#     # Find the means and variances
#     m1,m2 = np.sum(p1*b1)/q1, np.sum(p2*b2)/q2
#     v1,v2 = np.sum(((b1-m1)**2)*p1)/q1,np.sum(((b2-m2)**2)*p2)/q2
#     # Compute the function to minimize (within-class variance)
#     fn = v1*q1 + v2*q2
#     if fn < fn_min:
#         fn_min = fn
#         thresh = i
# # Find Otsu's threshold with the OpenCV function for comparison
# ret, otsu = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# print( "{} {}".format(thresh,ret) )

# Smoothing images
# 2D convolution (image filtering)
# img = cv.imread('66.png')
# kernel = np.ones((5,5),np.float32)/25
# dst = cv.filter2D(img,-1,kernel)
# plt.subplot(121),plt.imshow(img),plt.title('Original')
# plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(dst),plt.title('Averaging')
# plt.xticks([]), plt.yticks([])
# plt.show()

# Image blurring (smoothing)
# img = cv.imread('66.png')
# # Averaging
# # blur = cv.blur(img,(5,5))
# # # Gaussian blurring
# # blur = cv.GaussianBlur(img,(5,5),0)
# # Median blurring
# # blur = cv.medianBlur(img,5)
# # Bilateral filtering
# blur = cv.bilateralFilter(img,9,75,75)
# plt.subplot(121),plt.imshow(img),plt.title('Original')
# plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(blur),plt.title('Blurred')
# plt.xticks([]), plt.yticks([])
# plt.show()

# Morphological transformations
#
# img = cv.imread('66.png',0)
# kernel = np.ones((5,5),np.uint8)
# # Erosion
# erosion = cv.erode(img,kernel,iterations = 1)
# # Dilation
# dilation = cv.dilate(img,kernel,iterations = 1)
# # Opening
# opening = cv.morphologyEx(img, cv.MORPH_OPEN, kernel)
# # Closing
# closing = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel)
# # Morphological gradient
# gradient = cv.morphologyEx(img, cv.MORPH_GRADIENT, kernel)
# # Top hat
# tophat = cv.morphologyEx(img, cv.MORPH_TOPHAT, kernel)
# # Black hat
# blackhat = cv.morphologyEx(img, cv.MORPH_BLACKHAT, kernel)
#
# cv.imshow('img',blackhat)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Image gradients
# Sobel, Scharr and Laplacian operators
# img = cv.imread('66.png',0)
# laplacian = cv.Laplacian(img,cv.CV_64F)
# sobelx = cv.Sobel(img,cv.CV_64F,1,0,ksize=5)
# sobely = cv.Sobel(img,cv.CV_64F,0,1,ksize=5)
# plt.subplot(2,2,1),plt.imshow(img,cmap = 'gray')
# plt.title('Original'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray')
# plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray')
# plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')
# plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
# plt.show()

# Output dtype matters: with CV_8U, negative (white-to-black) gradients are clipped to zero
# img = cv.imread('66.png',0)
# # Output dtype = cv.CV_8U
# sobelx8u = cv.Sobel(img,cv.CV_8U,1,0,ksize=5)
# # Output dtype = cv.CV_64F. Then take its absolute and convert to cv.CV_8U
# sobelx64f = cv.Sobel(img,cv.CV_64F,1,0,ksize=5)
# abs_sobel64f = np.absolute(sobelx64f)
# sobel_8u = np.uint8(abs_sobel64f)
# plt.subplot(1,3,1),plt.imshow(img,cmap = 'gray')
# plt.title('Original'), plt.xticks([]), plt.yticks([])
# plt.subplot(1,3,2),plt.imshow(sobelx8u,cmap = 'gray')
# plt.title('Sobel CV_8U'), plt.xticks([]), plt.yticks([])
# plt.subplot(1,3,3),plt.imshow(sobel_8u,cmap = 'gray')
# plt.title('Sobel abs(CV_64F)'), plt.xticks([]), plt.yticks([])
# plt.show()

# Canny edge detection
# img = cv.imread('66.png',0)
# edges = cv.Canny(img,100,200)
# plt.subplot(121),plt.imshow(img,cmap = 'gray')
# plt.title('Original Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(edges,cmap = 'gray')
# plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
# plt.show()

# Image pyramids: Gaussian pyramid -- Laplacian pyramid (image blending)
# A = cv.imread('1.jpg')
# B = cv.imread('0.jpg')
# # Generate the Gaussian pyramid for A
# G = A.copy()
# gpA = [G]
# for i in range(6):
#     G = cv.pyrDown(G)
#     gpA.append(G)
# # Generate the Gaussian pyramid for B
# G = B.copy()
# gpB = [G]
# for i in range(6):
#     G = cv.pyrDown(G)
#     gpB.append(G)
# # Generate the Laplacian pyramid for A
# lpA = [gpA[5]]
# for i in range(5,0,-1):
#     GE = cv.pyrUp(gpA[i])
#     L = cv.subtract(gpA[i-1],GE)
#     lpA.append(L)
# # Generate the Laplacian pyramid for B
# lpB = [gpB[5]]
# for i in range(5,0,-1):
#     GE = cv.pyrUp(gpB[i])
#     L = cv.subtract(gpB[i-1],GE)
#     lpB.append(L)
# # Now join the left and right halves of the images at each level
# LS = []
# for la,lb in zip(lpA,lpB):
#     rows,cols,dpt = la.shape
#     ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))
#     LS.append(ls)
# # Now reconstruct
# ls_ = LS[0]
# for i in range(1,6):
#     ls_ = cv.pyrUp(ls_)
#     ls_ = cv.add(ls_, LS[i])
# # For comparison: image with a direct joining of each half
# real = np.hstack((A[:,:cols//2],B[:,cols//2:]))
# cv.imwrite('Pyramid_blending2.jpg',ls_)
# cv.imwrite('Direct_blending.jpg',real)

# Contours
# img = cv.imread('66.png')
# imgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# ret, thresh = cv.threshold(imgray, 127, 255, 0)
# contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# cv.drawContours(img, contours, -1, (0,255,0), 3)
# cv.drawContours(img, contours, 3, (0,255,0), 3)
# cnt = contours[4]
# cv.drawContours(img, [cnt], 0, (0,255,0), 3)

# Contour features

# img = cv.imread('66.png',0)
# ret,thresh = cv.threshold(img,127,255,0)
# contours,hierarchy = cv.findContours(thresh, 1, 2)
# cnt = contours[0]
# # Moments
# M = cv.moments(cnt)
# # print( M )
# # # Centroid
# # cx = int(M['m10']/M['m00'])
# # cy = int(M['m01']/M['m00'])
# # Contour area
# area = cv.contourArea(cnt)
# # Contour perimeter
# perimeter = cv.arcLength(cnt,True)
# # Contour approximation
# epsilon = 0.1*cv.arcLength(cnt,True)
# approx = cv.approxPolyDP(cnt,epsilon,True)
# # Convex hull
# hull = cv.convexHull(cnt)
# # Check convexity
# k = cv.isContourConvex(cnt)
# # Bounding rectangles
# # Straight (upright) bounding rectangle
# x,y,w,h = cv.boundingRect(cnt)
# cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
# # Rotated rectangle
# # rect = cv.minAreaRect(cnt)
# # box = cv.boxPoints(rect)
# # box = np.int0(box)
# # cv.drawContours(img,[box],0,(0,0,255),2)
# # Minimum enclosing circle
# (x,y),radius = cv.minEnclosingCircle(cnt)
# center = (int(x),int(y))
# radius = int(radius)
# cv.circle(img,center,radius,(0,255,0),2)
# # Fit an ellipse
# ellipse = cv.fitEllipse(cnt)
# cv.ellipse(img,ellipse,(0,255,0),2)
# # Fit a line
# # rows,cols = img.shape[:2]
# # [vx,vy,x,y] = cv.fitLine(cnt, cv.DIST_L2,0,0.01,0.01)
# # lefty = int((-x*vy/vx) + y)
# # righty = int(((cols-x)*vy/vx)+y)
# # cv.line(img,(cols-1,righty),(0,lefty),(0,255,0),2)

# Contour properties
# Aspect ratio
# x,y,w,h = cv.boundingRect(cnt)
# aspect_ratio = float(w)/h
# # Extent
# area = cv.contourArea(cnt)
# x,y,w,h = cv.boundingRect(cnt)
# rect_area = w*h
# extent = float(area)/rect_area
# # Solidity
# area = cv.contourArea(cnt)
# hull = cv.convexHull(cnt)
# hull_area = cv.contourArea(hull)
# solidity = float(area)/hull_area
# # Equivalent diameter
# area = cv.contourArea(cnt)
# equi_diameter = np.sqrt(4*area/np.pi)
# # Orientation
# (x,y),(MA,ma),angle = cv.fitEllipse(cnt)
# # Mask and pixel points
# # mask = np.zeros(imgray.shape,np.uint8)
# # cv.drawContours(mask,[cnt],0,255,-1)
# # pixelpoints = np.transpose(np.nonzero(mask))
# # #pixelpoints = cv.findNonZero(mask)
# # Maximum value, minimum value and their locations
# # min_val, max_val, min_loc, max_loc = cv.minMaxLoc(imgray,mask = mask)
# # Mean color or mean intensity
# # mean_val = cv.mean(img,mask = mask)
# # Extreme points
# leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
# rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
# topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
# bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])

# Contours: more functions
# Convexity defects
# hull = cv.convexHull(cnt,returnPoints = False)
# defects = cv.convexityDefects(cnt,hull)

# img = cv.imread('66.png')
# img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# ret,thresh = cv.threshold(img_gray, 127, 255,0)
# contours,hierarchy = cv.findContours(thresh,2,1)
# cnt = contours[0]
# hull = cv.convexHull(cnt,returnPoints = False)
# defects = cv.convexityDefects(cnt,hull)
# for i in range(defects.shape[0]):
#     s,e,f,d = defects[i,0]
#     start = tuple(cnt[s][0])
#     end = tuple(cnt[e][0])
#     far = tuple(cnt[f][0])
#     cv.line(img,start,end,[0,255,0],2)
#     cv.circle(img,far,5,[0,0,255],-1)
# cv.imshow('img',img)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Point polygon test
# dist = cv.pointPolygonTest(cnt,(50,50),True)
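# A small usage sketch (cnt would come from a findContours call as in the sections above;
# the point (50,50) is arbitrary): measureDist=True returns the signed distance to the
# contour, while False returns only +1, 0 or -1 for inside / on the edge / outside.
# dist_signed = cv.pointPolygonTest(cnt, (50,50), True)
# inside_flag = cv.pointPolygonTest(cnt, (50,50), False)
# print(dist_signed, inside_flag)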

# Shape matching
# img1 = cv.imread('0.jpg',0)
# img2 = cv.imread('1.jpg',0)
# ret, thresh = cv.threshold(img1, 127, 255,0)
# ret, thresh2 = cv.threshold(img2, 127, 255,0)
# contours,hierarchy = cv.findContours(thresh,2,1)
# cnt1 = contours[0]
# contours,hierarchy = cv.findContours(thresh2,2,1)
# cnt2 = contours[0]
# ret = cv.matchShapes(cnt1,cnt2,1,0.0)
# print( ret )

# Contour hierarchy
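# A minimal sketch of retrieving the hierarchy (reusing the '66.png' filename assumed
# throughout this file): each row of hierarchy[0] is [Next, Previous, First_Child, Parent]
# for the contour at the same index.
# img = cv.imread('66.png')
# imgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# ret, thresh = cv.threshold(imgray, 127, 255, 0)
# # RETR_TREE keeps the full nesting; RETR_EXTERNAL would keep only the outermost contours
# contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# print(hierarchy)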

# Histograms - 1: find, plot, analyze
# img = cv.imread('66.png',0)
# hist = cv.calcHist([img],[0],None,[256],[0,256])

# hist,bins = np.histogram(img.ravel(),256,[0,256])

# Plotting histograms
# img = cv.imread('66.png',0)
# plt.hist(img.ravel(),256,[0,256])
# plt.show()
# Using matplotlib
# img = cv.imread('66.png')
# color = ('b','g','r')
# for i,col in enumerate(color):
#     histr = cv.calcHist([img],[i],None,[256],[0,256])
#     plt.plot(histr,color = col)
#     plt.xlim([0,256])
# plt.show()

# Using OpenCV (application of a mask)
# img = cv.imread('66.png',0)
# # create a mask
# mask = np.zeros(img.shape[:2], np.uint8)
# mask[100:300, 100:400] = 255
# masked_img = cv.bitwise_and(img,img,mask = mask)
# # Calculate histograms for the whole image and for the masked region
# # note the mask passed as the third argument
# hist_full = cv.calcHist([img],[0],None,[256],[0,256])
# hist_mask = cv.calcHist([img],[0],mask,[256],[0,256])
# plt.subplot(221), plt.imshow(img, 'gray')
# plt.subplot(222), plt.imshow(mask,'gray')
# plt.subplot(223), plt.imshow(masked_img, 'gray')
# plt.subplot(224), plt.plot(hist_full), plt.plot(hist_mask)
# plt.xlim([0,256])
# plt.show()

# Histograms - 2: histogram equalization
# img = cv.imread('66.png',0)
# hist,bins = np.histogram(img.flatten(),256,[0,256])
# cdf = hist.cumsum()
# cdf_normalized = cdf * float(hist.max()) / cdf.max()
# plt.plot(cdf_normalized, color = 'b')
# plt.hist(img.flatten(),256,[0,256], color = 'r')
# plt.xlim([0,256])
# plt.legend(('cdf','histogram'), loc = 'upper left')
# plt.show()
#
# cdf_m = np.ma.masked_equal(cdf,0)
# cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
# cdf = np.ma.filled(cdf_m,0).astype('uint8')
# img2 = cdf[img]

# Histogram equalization with OpenCV
# img = cv.imread('66.png',0)
# equ = cv.equalizeHist(img)
# res = np.hstack((img,equ)) #stacking images side-by-side
# cv.imwrite('res.png',res)

# CLAHE (Contrast Limited Adaptive Histogram Equalization)
# img = cv.imread('66.png',0)
# clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
# cl1 = clahe.apply(img)
# cv.imwrite('clahe_2.jpg',cl1)

# Histograms - 3: 2D histograms
# img = cv.imread('66.png')
# hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
# hist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

# 2D histogram in Numpy
# img = cv.imread('66.png')
# hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
# h, s, v = cv.split(hsv)
# hist, xbins, ybins = np.histogram2d(h.ravel(),s.ravel(),[180,256],[[0,180],[0,256]])

# Plotting a 2D histogram with Matplotlib
# from matplotlib import pyplot as plt
# img = cv.imread('66.png')
# hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
# hist = cv.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
# plt.imshow(hist,interpolation = 'nearest')
# plt.show()

# Histograms - 4: histogram backprojection (Numpy algorithm)
# roi = cv.imread('66.png')
# hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV)
# # target is the image we search in
# target = cv.imread('66.png')
# hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV)
# # Find the histograms using calcHist; np.histogram2d also works
# M = cv.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
# I = cv.calcHist([hsvt],[0, 1], None, [180, 256], [0, 180, 0, 256] )
#
# # Ratio histogram R = M/I, backprojected onto the target image
# R = M/I
# h,s,v = cv.split(hsvt)
# B = R[h.ravel(),s.ravel()]
# B = np.minimum(B,1)
# B = B.reshape(hsvt.shape[:2])
# disc = cv.getStructuringElement(cv.MORPH_ELLIPSE,(5,5))
# cv.filter2D(B,-1,disc,B)
# B = np.uint8(B)
# cv.normalize(B,B,0,255,cv.NORM_MINMAX)
# ret,thresh = cv.threshold(B,50,255,0)

# Backprojection with OpenCV
roi = cv.imread('66.png')
hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV)
target = cv.imread('66.png')
hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV)
# Calculate the object histogram
roihist = cv.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
# Normalize the histogram and apply backprojection
cv.normalize(roihist,roihist,0,255,cv.NORM_MINMAX)
dst = cv.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
# Convolve with a circular disc
disc = cv.getStructuringElement(cv.MORPH_ELLIPSE,(5,5))
cv.filter2D(dst,-1,disc,dst)
# Threshold and bitwise AND
ret,thresh = cv.threshold(dst,50,255,0)
thresh = cv.merge((thresh,thresh,thresh))
res = cv.bitwise_and(target,thresh)
res = np.vstack((target,thresh,res))
cv.imwrite('res.jpg',res)

# Fourier transform in Numpy
# img = cv.imread('66.png',0)
# f = np.fft.fft2(img)
# fshift = np.fft.fftshift(f)
# magnitude_spectrum = 20*np.log(np.abs(fshift))
# plt.subplot(121),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
# plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
# plt.show()
#
# rows, cols = img.shape
# crow,ccol = rows//2 , cols//2
# fshift[crow-30:crow+31, ccol-30:ccol+31] = 0
# f_ishift = np.fft.ifftshift(fshift)
# img_back = np.fft.ifft2(f_ishift)
# img_back = np.real(img_back)
# plt.subplot(131),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(132),plt.imshow(img_back, cmap = 'gray')
# plt.title('Image after HPF'), plt.xticks([]), plt.yticks([])
# plt.subplot(133),plt.imshow(img_back)
# plt.title('Result in JET'), plt.xticks([]), plt.yticks([])
# plt.show()

# Fourier transform in OpenCV
# img = cv.imread('66.png',0)
# dft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT)
# dft_shift = np.fft.fftshift(dft)
# magnitude_spectrum = 20*np.log(cv.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
# plt.subplot(121),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
# plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
# plt.show()
#
# rows, cols = img.shape
# crow,ccol = rows//2 , cols//2
# # First create a mask with a square of ones at the center and zeros elsewhere
# mask = np.zeros((rows,cols,2),np.uint8)
# mask[crow-30:crow+30, ccol-30:ccol+30] = 1
# # Apply the mask and the inverse DFT
# fshift = dft_shift*mask
# f_ishift = np.fft.ifftshift(fshift)
# img_back = cv.idft(f_ishift)
# img_back = cv.magnitude(img_back[:,:,0],img_back[:,:,1])
# plt.subplot(121),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(img_back, cmap = 'gray')
# plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
# plt.show()

# Why the Laplacian is a high-pass filter: view common kernels in the frequency domain
# Simple averaging filter without a scaling parameter
# mean_filter = np.ones((3,3))
# # Create a Gaussian filter
# x = cv.getGaussianKernel(5,10)
# gaussian = x*x.T
# # Different edge-detecting filters
# # Scharr in the x direction
# scharr = np.array([[-3, 0, 3],
#                    [-10,0,10],
#                    [-3, 0, 3]])
# # Sobel in the x direction
# sobel_x= np.array([[-1, 0, 1],
#                    [-2, 0, 2],
#                    [-1, 0, 1]])
# # Sobel in the y direction
# sobel_y= np.array([[-1,-2,-1],
#                    [0, 0, 0],
#                    [1, 2, 1]])
# # Laplacian
# laplacian=np.array([[0, 1, 0],
#                     [1,-4, 1],
#                     [0, 1, 0]])
# filters = [mean_filter, gaussian, laplacian, sobel_x, sobel_y, scharr]
# filter_name = ['mean_filter', 'gaussian','laplacian', 'sobel_x', \
#                 'sobel_y', 'scharr_x']
# fft_filters = [np.fft.fft2(x) for x in filters]
# fft_shift = [np.fft.fftshift(y) for y in fft_filters]
# mag_spectrum = [np.log(np.abs(z)+1) for z in fft_shift]
# for i in range(6):
#     plt.subplot(2,3,i+1),plt.imshow(mag_spectrum[i],cmap = 'gray')
#     plt.title(filter_name[i]), plt.xticks([]), plt.yticks([])
# plt.show()

# Template matching
# img = cv.imread('66.png',0)
# img2 = img.copy()
# template = cv.imread('1.jpg',0)
# w, h = template.shape[::-1]
# # All 6 comparison methods in a list
# methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',
#             'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']
# for meth in methods:
#     img = img2.copy()
#     method = eval(meth)
#     # Apply template matching
#     res = cv.matchTemplate(img,template,method)
#     min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
#     # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
#     if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
#         top_left = min_loc
#     else:
#         top_left = max_loc
#     bottom_right = (top_left[0] + w, top_left[1] + h)
#     cv.rectangle(img,top_left, bottom_right, 255, 2)
#     plt.subplot(121),plt.imshow(res,cmap = 'gray')
#     plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
#     plt.subplot(122),plt.imshow(img,cmap = 'gray')
#     plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
#     plt.suptitle(meth)
#     plt.show()

# Template matching with multiple objects
# img_rgb = cv.imread('66.png')
# img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY)
# template = cv.imread('0.png',0)
# w, h = template.shape[::-1]
# res = cv.matchTemplate(img_gray,template,cv.TM_CCOEFF_NORMED)
# threshold = 0.8
# loc = np.where( res >= threshold)
# for pt in zip(*loc[::-1]):
#     cv.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
# cv.imwrite('res.png',img_rgb)

# Hough line transform
# img = cv.imread(cv.samples.findFile('66.png'))
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# edges = cv.Canny(gray,50,150,apertureSize = 3)
# lines = cv.HoughLines(edges,1,np.pi/180,200)
# for line in lines:
#     rho,theta = line[0]
#     a = np.cos(theta)
#     b = np.sin(theta)
#     x0 = a*rho
#     y0 = b*rho
#     x1 = int(x0 + 1000*(-b))
#     y1 = int(y0 + 1000*(a))
#     x2 = int(x0 - 1000*(-b))
#     y2 = int(y0 - 1000*(a))
#     cv.line(img,(x1,y1),(x2,y2),(0,0,255),2)
# cv.imwrite('houghlines3.jpg',img)

# Probabilistic Hough transform
# img = cv.imread(cv.samples.findFile('66.png'))
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# edges = cv.Canny(gray,50,150,apertureSize = 3)
# lines = cv.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)
# for line in lines:
#     x1,y1,x2,y2 = line[0]
#     cv.line(img,(x1,y1),(x2,y2),(0,255,0),2)
# cv.imwrite('houghlines5.jpg',img)

# Hough circle transform
# img = cv.imread('66.png',0)
# img = cv.medianBlur(img,5)
# cimg = cv.cvtColor(img,cv.COLOR_GRAY2BGR)
# circles = cv.HoughCircles(img,cv.HOUGH_GRADIENT,1,20,
#                             param1=50,param2=30,minRadius=0,maxRadius=0)
# circles = np.uint16(np.around(circles))
# for i in circles[0,:]:
#     # Draw the outer circle
#     cv.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
#     # Draw the center of the circle
#     cv.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
# cv.imshow('detected circles',cimg)
# cv.waitKey(0)
# cv.destroyAllWindows()

# Image segmentation with the watershed algorithm
# img = cv.imread('66.png')
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
#
# # Noise removal
# kernel = np.ones((3,3),np.uint8)
# opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)
# # Sure background area
# sure_bg = cv.dilate(opening,kernel,iterations=3)
# # Finding the sure foreground area
# dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
# ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# # Finding the unknown region
# sure_fg = np.uint8(sure_fg)
# unknown = cv.subtract(sure_bg,sure_fg)
#
# # Marker labelling
# ret, markers = cv.connectedComponents(sure_fg)
# # Add one to all labels so that the sure background is 1 instead of 0
# markers = markers+1
# # Now mark the unknown region with zero
# markers[unknown==255] = 0
#
# markers = cv.watershed(img,markers)
# img[markers == -1] = [255,0,0]

# Interactive foreground extraction using the GrabCut algorithm
# img = cv.imread('66.png')
# mask = np.zeros(img.shape[:2],np.uint8)
# bgdModel = np.zeros((1,65),np.float64)
# fgdModel = np.zeros((1,65),np.float64)
# rect = (50,50,450,290)
# cv.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv.GC_INIT_WITH_RECT)
# mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
# img = img*mask2[:,:,np.newaxis]
# plt.imshow(img),plt.colorbar(),plt.show()

# newmask = cv.imread('66.png',0)
# # Wherever the new mask is marked white (sure foreground), set mask = 1
# # Wherever it is marked black (sure background), set mask = 0
# mask[newmask == 0] = 0
# mask[newmask == 255] = 1
# mask, bgdModel, fgdModel = cv.grabCut(img,mask,None,bgdModel,fgdModel,5,cv.GC_INIT_WITH_MASK)
# mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
# img = img*mask[:,:,np.newaxis]
# plt.imshow(img),plt.colorbar(),plt.show()

####################################### Feature Detection and Description #######################################
# Harris corner detection
# filename = '66.png'
# img = cv.imread(filename)
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# gray = np.float32(gray)
# dst = cv.cornerHarris(gray,2,3,0.04)
# # result is dilated for marking the corners; not important
# dst = cv.dilate(dst,None)
# # Threshold for an optimal value; it may vary depending on the image.
# img[dst>0.01*dst.max()]=[0,0,255]
# cv.imshow('dst',img)
# if cv.waitKey(0) & 0xff == 27:
#     cv.destroyAllWindows()

# Corners with sub-pixel accuracy
# filename = '66.png'
# img = cv.imread(filename)
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# # Find Harris corners
# gray = np.float32(gray)
# dst = cv.cornerHarris(gray,2,3,0.04)
# dst = cv.dilate(dst,None)
# ret, dst = cv.threshold(dst,0.01*dst.max(),255,0)
# dst = np.uint8(dst)
# # Find centroids
# ret, labels, stats, centroids = cv.connectedComponentsWithStats(dst)
# # Define the criteria to stop and refine the corners
# criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.001)
# corners = cv.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria)
# # Draw the results
# res = np.hstack((centroids,corners))
# res = np.int0(res)
# img[res[:,1],res[:,0]]=[0,0,255]
# img[res[:,3],res[:,2]] = [0,255,0]
# cv.imwrite('subpixel5.png',img)

# Shi-Tomasi corner detector and good features to track
# from matplotlib import pyplot as plt
# img = cv.imread('66.png')
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# corners = cv.goodFeaturesToTrack(gray,25,0.01,10)
# corners = np.int0(corners)
# for i in corners:
#     x,y = i.ravel()
#     cv.circle(img,(x,y),3,255,-1)
# plt.imshow(img),plt.show()

# SIFT (Scale-Invariant Feature Transform)
# img = cv.imread('66.png')
# gray= cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# sift = cv.xfeatures2d.SIFT_create()  # cv.SIFT_create() in OpenCV >= 4.4
# kp = sift.detect(gray,None)
# img=cv.drawKeypoints(gray,kp,img)
# cv.imwrite('sift_keypoints.jpg',img)

# img=cv.drawKeypoints(gray,kp,img,flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# cv.imwrite('sift_keypoints.jpg',img)
# sift = cv.xfeatures2d.SIFT_create()  # cv.SIFT_create() in OpenCV >= 4.4
# kp, des = sift.detectAndCompute(gray,None)
