# -*- coding: utf-8 -*-
"""
@file: register_main.py
@time: 2018/4/26 14:23
"""
import cv2
import register_match
import register_display
import register_ransac
import register_image_fusion
import numpy as np
error_threshold = 1
sift_ratio = 0.7 # knn1/lnn2
# After extracting SIFT feature vectors from both images, keypoint similarity
# is measured by the Euclidean distance between the feature vectors.  For each
# keypoint in image 1, find its two nearest neighbours in image 2 and accept
# the match only if (nearest distance / second-nearest distance) is below a
# ratio threshold.  Lowering the threshold yields fewer but more stable matches.
# ratio = 0.4: for high-accuracy matching;
# ratio = 0.6: when many match points are needed;
# ratio = 0.5: general case.
# Alternative rule: use ratio = 0.6 when the nearest distance < 200, else 0.4.
# This ratio strategy helps reject false matches.
# Registration is performed with image2 as the reference image.
def register(gray1, gray2, img1, img2, register_flag=0):
    """Register img1 against the reference image img2 using SIFT features.

    Args:
        gray1: single-channel grayscale version of img1 (used for SIFT).
        gray2: single-channel grayscale version of img2 (used for SIFT).
        img1: BGR image to be registered.
        img2: BGR reference image.
        register_flag: 0 = skip registration and pass the images through;
            1 = run the SIFT + RANSAC registration pipeline.

    Returns:
        Tuple ``(im1_common, im2_1_common, im1_2_common, im2_common,
        sift_fusion, matchflag)`` — the overlapping regions of each image,
        the overlaps warped into the other image's frame, the fused
        (stitched) result, and a flag that is 0 on success and 1 when
        fewer than 10 feature matches were found (failure: the input
        images are returned unchanged, mirroring the register_flag == 0
        pass-through).

    Raises:
        ValueError: if register_flag is neither 0 nor 1.  (The original
            code left the result variables unbound in that case and died
            with a NameError at the return statement.)
    """
    # Image sizes: rows (height) and columns (width).
    M1, N1 = img1.shape[0], img1.shape[1]
    M2, N2 = img2.shape[0], img2.shape[1]

    if register_flag == 0:
        print('\n图片不需配准。')
        # Pass-through: no registration requested.
        return img1, img2, img1, img2, img1, 0

    if register_flag != 1:
        raise ValueError('register_flag must be 0 or 1, got %r' % (register_flag,))

    print("\n选择图片配准功能,开始配准...")
    # Detect SIFT keypoints and descriptors on the grayscale images.
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)
    # Keypoint coordinates only.  (The keypoint angles were previously
    # collected as well but never used, so they are no longer gathered.)
    kp1_location = [kp.pt for kp in kp1]
    kp2_location = [kp.pt for kp in kp2]
    # Initial matching by descriptor distance with the ratio test.
    good_kp1, good_kp2 = register_match.match(kp1_location, kp2_location, des1, des2, sift_ratio)
    print('特征点对数为: ', len(good_kp1))
    # Fewer than 10 pairs -> registration is considered failed.  Bail out
    # early instead of running RANSAC on too little data: the original
    # code only set matchflag = 1 and then fell through into the
    # estimator, which can crash or fit nonsense on so few points.
    if len(good_kp1) < 10:
        print('特征点对数小于10,配准失败!请重新选择图片对!')
        return img1, img2, img1, img2, img1, 1

    # RANSAC removes outlier matches; least squares then fits the 2x3
    # affine transform (solution) and reports its RMSE.
    better_kp1, better_kp2 = register_ransac.ransac(good_kp1, good_kp2, error_threshold)
    solution, rmse = register_ransac.least_square(better_kp1, better_kp2)
    # Side-by-side visualization with the inlier matches drawn.
    img_better = register_display.display1(img1, img2, better_kp1, better_kp2)
    # Fuse (stitch) the two images using the estimated transform.
    sift_fusion = register_image_fusion.image_fusion(img1, img2, solution)
    # Masks of the overlapping region in each image; multiplying a mask
    # with its image keeps only the overlap pixels.
    mask1, mask2 = register_image_fusion.common_region(gray1, gray2, solution)
    im1_common = np.multiply(np.dstack((mask1, mask1, mask1)), img1).astype(np.uint8)
    im2_common = np.multiply(np.dstack((mask2, mask2, mask2)), img2).astype(np.uint8)
    # Lift the 2x3 affine to a full 3x3 matrix [a,b,c; d,e,f; 0,0,1] so it
    # can be inverted and fed to warpPerspective.  np.linalg.inv replaces
    # the deprecated np.matrix(...).I of the original.
    solution_stack = np.vstack((solution, [0, 0, 1]))
    # Warp im2's overlap into img1's frame (output sized like img1) …
    im2_1_common = cv2.warpPerspective(im2_common, np.linalg.inv(solution_stack), (N1, M1))
    # … and im1's overlap into img2's frame (output sized like img2).
    im1_2_common = cv2.warpPerspective(im1_common, solution_stack, (N2, M2))
    # Show the match visualization and the fused result; waitKey blocks
    # until a key is pressed (intentional, as in the original).
    cv2.imshow("image match", img_better)
    cv2.imshow("image fusion", sift_fusion)
    cv2.waitKey(0)
    # Return the three-channel overlap images.
    return im1_common, im2_1_common, im1_2_common, im2_common, sift_fusion, 0
if __name__ == "__main__":
    # np.fromfile + cv2.imdecode reads paths that plain cv2.imread may not
    # handle (e.g. non-ASCII filenames on Windows).
    image_to_register = cv2.imdecode(np.fromfile('51.jpg', dtype=np.uint8), 1)
    reference_image = cv2.imdecode(np.fromfile('53.jpg', dtype=np.uint8), 1)
    # Single-channel grayscale copies for feature detection.
    gray_to_register = cv2.cvtColor(image_to_register, cv2.COLOR_BGR2GRAY)
    gray_reference = cv2.cvtColor(reference_image, cv2.COLOR_BGR2GRAY)
    # Run the registration pipeline (register_flag=1 enables matching).
    (im1_common, im2_1_common, im1_2_common, im2_common,
     sift_fusion, matchflag) = register(gray_to_register, gray_reference,
                                        image_to_register, reference_image,
                                        register_flag=1)