import numpy as np
import cv2 as cv
from PIL import Image
from matplotlib import pyplot as plt
def sift_kp(image):
    """Detect SIFT keypoints and compute their descriptors for a BGR image.

    Parameters
    ----------
    image : numpy.ndarray
        BGR image as returned by ``cv.imread``.

    Returns
    -------
    kp : list of cv.KeyPoint
        Detected keypoints.
    des : numpy.ndarray or None
        Descriptor matrix, one row per keypoint (None if nothing detected).
    """
    gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    # Fix: the original converted to grayscale but then detected on the
    # colour `image`, leaving `gray_image` unused — detect on the gray image,
    # which is what SIFT operates on.
    kp, des = cv.xfeatures2d.SIFT_create().detectAndCompute(gray_image, None)
    # kp_image = cv.drawKeypoints(gray_image, kp, None)  # visualize keypoints
    return kp, des
def get_good_match(des1, des2, ratio=0.75):
    """Brute-force kNN-match two descriptor sets and apply Lowe's ratio test.

    Parameters
    ----------
    des1, des2 : numpy.ndarray
        Query and train descriptor matrices (e.g. from SIFT).
    ratio : float, optional
        Lowe's ratio-test threshold (default 0.75): the best match is kept
        only when it is at least this much closer than the second-best.

    Returns
    -------
    list of cv.DMatch
        Matches that pass the ratio test.
    """
    # knnMatch returns, per query descriptor, up to k=2 candidate DMatch
    # objects sorted by ascending distance.
    matches = cv.BFMatcher().knnMatch(des1, des2, k=2)
    good = []
    for pair in matches:
        # Robustness: with very small train sets a pair can hold fewer than
        # two candidates; the original `for m, n in matches` unpacking would
        # raise ValueError in that case.
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])
    return good
'''
H is the estimated homography matrix;
status is a mask flagging which point pairs were accepted as RANSAC inliers;
ptsA and ptsB are the matched keypoint coordinates;
cv2.RANSAC and ransacReprojThreshold are the RANSAC-related parameters.
'''
def get_H(good_matches, img, kps_query=None, kps_train=None, out_size=None):
    """Estimate a RANSAC homography from matches and warp ``img`` with it.

    Parameters
    ----------
    good_matches : list of cv.DMatch
        Ratio-test-filtered matches (query -> train).
    img : numpy.ndarray
        Image to warp with the inverse of the estimated homography.
    kps_query, kps_train : list of cv.KeyPoint, optional
        Keypoints indexed by ``queryIdx`` / ``trainIdx``. Default to the
        module-level ``kp1`` / ``kp2`` (kept for backward compatibility with
        the original, which read those globals implicitly).
    out_size : tuple of int, optional
        (width, height) of the warped output; defaults to ``img1``'s size.

    Returns
    -------
    (H, imgOut) : tuple
        The 3x3 homography and the warped image, or ``(None, None)`` when
        there are too few matches to estimate a homography. (The original
        returned the integers ``(0, 0)`` in that case.)
    """
    if kps_query is None:
        kps_query = kp1  # module-level global from the __main__ section
    if kps_train is None:
        kps_train = kp2  # module-level global from the __main__ section
    if out_size is None:
        out_size = (img1.shape[1], img1.shape[0])  # (width, height)
    # findHomography needs at least 4 correspondences; require more than 4
    # as in the original to leave slack for RANSAC outlier rejection.
    if len(good_matches) <= 4:
        return None, None
    ptsA = np.float32([kps_query[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    ptsB = np.float32([kps_train[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    ransacReprojThreshold = 4  # max reprojection error (px) for an inlier
    H, status = cv.findHomography(ptsA, ptsB, cv.RANSAC, ransacReprojThreshold)
    # WARP_INVERSE_MAP: H maps query -> train, so warp `img` (the train-side
    # image) back into the query image's frame.
    imgOut = cv.warpPerspective(img, H, out_size,
                                flags=cv.INTER_LINEAR + cv.WARP_INVERSE_MAP)
    return H, imgOut
if __name__ == "__main__":
    # Image paths: SAR and optical ("R_") variants at several pooling scales.
    img1_dir = 'F:/Image_Registration/SAR_pool8.png'
    img2_dir = 'F:/Image_Registration/SAR_pool16_mf.png'
    img3_dir = 'F:/Image_Registration/SAR_pool2.png'
    img4_dir = 'F:/Image_Registration/R_pool8.png'
    img5_dir = 'F:/Image_Registration/R_pool16_mf.png'
    img6_dir = 'F:/Image_Registration/R_pool2.png'
    # Read the two images to register. NOTE(review): img1 comes from img6_dir
    # (optical) and img2 from img3_dir (SAR) — presumably an optical/SAR
    # pair at the same pooling scale; confirm the intended selection.
    img1 = cv.imread(img6_dir)
    img2 = cv.imread(img3_dir)
    # Optional halving of oversized inputs (kept disabled):
    # while img1.shape[0] > 1000 or img1.shape[1] > 1000:
    #     img1 = cv.resize(img1, None, fx=0.5, fy=0.5, interpolation=cv.INTER_AREA)
    # while img2.shape[0] > 1000 or img2.shape[1] > 1000:
    #     img2 = cv.resize(img2, None, fx=0.5, fy=0.5, interpolation=cv.INTER_AREA)
    gray_image1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)  # BGR -> grayscale
    # The second image is intensity-inverted after conversion — presumably to
    # compensate for SAR/optical contrast reversal; TODO confirm.
    gray_image2 = 255 - cv.cvtColor(img2, cv.COLOR_BGR2GRAY)
    # gray_image2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)
    # Sobel gradients to extract edges: computed in 16-bit signed to avoid
    # clipping negative responses, converted back to uint8, blended 50/50.
    x = cv.Sobel(gray_image1, cv.CV_16S, 1, 0)
    y = cv.Sobel(gray_image1, cv.CV_16S, 0, 1)
    absX = cv.convertScaleAbs(x)  # back to uint8
    absY = cv.convertScaleAbs(y)
    dst1 = cv.addWeighted(absX, 0.5, absY, 0.5, 0)
    x = cv.Sobel(gray_image2, cv.CV_16S, 1, 0)
    y = cv.Sobel(gray_image2, cv.CV_16S, 0, 1)
    absX = cv.convertScaleAbs(x)  # back to uint8
    absY = cv.convertScaleAbs(y)
    dst2 = cv.addWeighted(absX, 0.5, absY, 0.5, 0)
    # Keypoints and descriptors. NOTE: SIFT runs on the gray images; the edge
    # images dst1/dst2 are only displayed below, not used for detection.
    # kp1/kp2/des1/des2 are also read as globals by get_H — do not rename.
    kp1, des1 = cv.xfeatures2d.SIFT_create().detectAndCompute(gray_image1, None)
    kp2, des2 = cv.xfeatures2d.SIFT_create().detectAndCompute(gray_image2, None)
    # Alternative detectors (kept disabled):
    # kp1, des1 = cv.xfeatures2d.SURF_create().detectAndCompute(dst1, None)
    # kp2, des2 = cv.xfeatures2d.SURF_create().detectAndCompute(dst2, None)
    # kp1, des1 = cv.AKAZE_create().detectAndCompute(dst1, None)
    # kp2, des2 = cv.AKAZE_create().detectAndCompute(dst2, None)
    # kp1, des1 = cv.ORB_create().detectAndCompute(gray_image1, None)
    # kp2, des2 = cv.ORB_create().detectAndCompute(gray_image2, None)
    # Good match pairs via Lowe's ratio test on the descriptors.
    good_matches = get_good_match(des1, des2)
    # Homography H and the warped second image (get_H also reads the
    # module-level kp1/kp2/img1 defined above).
    H, img_out = get_H(good_matches, gray_image2)
    # drawMatchesKnn expects one match-list per row, so wrap each DMatch.
    good_matches_1 = np.expand_dims(good_matches, 1)
    match_img = cv.drawMatchesKnn(gray_image1, kp1, gray_image2, kp2, good_matches_1, None, flags=2)
    # Fusion: keep gray_image1 wherever the warp produced no data (zeros),
    # and the warped image everywhere else.
    img_bianyuan = (img_out == 0) * 1
    img_fusion = gray_image1 * img_bianyuan + img_out
    # Display: the two edge images, the match visualisation, the warped
    # image, and the fused result.
    plt.figure()
    plt.imshow(dst1, 'gray')
    plt.show()
    plt.figure()
    plt.imshow(dst2, 'gray')
    plt.show()
    plt.figure()
    plt.imshow(match_img)
    plt.title('Joint matching figure')
    plt.show()
    plt.figure()
    plt.imshow(img_out, 'gray')
    plt.title('Conversion figure')
    plt.show()
    plt.figure()
    plt.imshow(img_fusion, 'gray')
    plt.title('SURF Fusion figure')
    plt.show()