Medical Image Stitching with OpenCV + Python (Part 1)


This content is for reference only.

First, the images to be stitched (I cropped these myself because of constraints in my setup):

[Figures: the four cropped input images, img1 through img4]
The original image:
[Figure: the original full image]
My approach is to stitch images 1 and 2 first, then images 3 and 4, and finally stitch the two intermediate results together. The stitched result is shown below:
[Figure: the final stitched result]
Compared with the original image there are still fairly obvious flaws, such as visible stitching seams, blurred content at the bottom, and slightly skewed lines along the edges.

Below is my program. Let me first describe my environment:
Windows 10, Python 3.7, with opencv-python and opencv-contrib-python version 4.5.5.62.
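
As a quick sanity check (this snippet is mine, not part of the original program), something like the following can confirm that the installed OpenCV build is recent enough to expose SIFT in the main module:

# Hypothetical environment check, assuming opencv-contrib-python 4.5.5.62 is installed
import cv2
print(cv2.__version__)              # expected: 4.5.5
print(hasattr(cv2, 'SIFT_create'))  # SIFT has lived in the main module since OpenCV 4.4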

Here I defined a number of helper functions up front so they are convenient to call later.

# coding: utf-8
# Blend and stitch multiple images vertically and horizontally
# Environment: Python 3.7
import os
import cv2
import numpy as np
import pydicom as pd
from matplotlib import pyplot as plt

'''
    Stitch and blend multiple images.
    Images waiting to be stitched are stored under `path`:
        E:/Study/Opencv&Image/waiting_stitch_img
    Stitched images are written to the `save` path:
        E:/Study/Opencv&Image/waiting_stitch_img/waiting_stitch_image
'''

path = r'E:/Study/Opencv&Image/waiting_stitch_img'
save = r'E:/Study/Opencv&Image/waiting_stitch_img/waiting_stitch_image'

image1 = cv2.imread(path + '/' + 'img1.jpg')
image2 = cv2.imread(path + '/' + 'img2.jpg')
image3 = cv2.imread(path + '/' + 'img3.jpg')
image4 = cv2.imread(path + '/' + 'img4.jpg')

gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
gray3 = cv2.cvtColor(image3, cv2.COLOR_BGR2GRAY)
gray4 = cv2.cvtColor(image4, cv2.COLOR_BGR2GRAY)


'''
    Function: read_Dcm
    Purpose:  read DICOM (.dcm) files
'''


def read_Dcm(path):
    # path = r"G:/DiCom/"
    # savePath = r"G:/save_image/save_dicom/"
    fileList = os.listdir(path)  # list the files / sub-folders under this path
    # count the number of entries in the folder
    totalNum = len(fileList)
    print('totalNum', totalNum)

    # note: `path` is assumed to end with '/' (as in the commented example above)
    for filename in os.listdir(path):
        print(filename)
        for file in os.listdir(path + filename):
            # print(path + filename + '/' + file)
            dcm = pd.dcmread(path + filename + '/' + file)  # dcmread replaces the deprecated read_file
            plt.imshow(dcm.pixel_array, 'gray')
            plt.title("dcm")
            plt.show()
    return dcm  # only the last DICOM that was read is returned


'''
    Function: dcm_loadFileInformation
    Purpose:  read the tag information inside a DICOM file
    Parameter: path of the DICOM file
    Details:  collects the patient ID, name, birth date, sex, and so on.
    Returns:
            the DICOM dataset
'''


def dcm_loadFileInformation(path1):
    information = {}
    dcm = pd.dcmread(path1)
    information['PatientID'] = dcm.PatientID
    information['PatientName'] = dcm.PatientName
    information['PatientBirthDate'] = dcm.PatientBirthDate
    information['PatientSex'] = dcm.PatientSex
    information['StudyID'] = dcm.StudyID
    information['StudyDate'] = dcm.StudyDate
    information['StudyTime'] = dcm.StudyTime
    information['InstitutionName'] = dcm.InstitutionName
    information['Manufacturer'] = dcm.Manufacturer
    print("The DCM file information:", dcm)
    return dcm


'''
    Function: dcm2JPG
    Purpose:  convert a DICOM file to a JPG-style 8-bit image
    Parameter: path of the DICOM file
    Details:  reads the DICOM file from the given path and converts its pixel data
    Returns:  the converted 8-bit image
'''


def dcm2JPG(path):
    dcm = pd.dcmread(path)
    # rescale the DICOM pixel data to 8-bit so it can be saved as a JPG (a minimal conversion)
    img = cv2.normalize(dcm.pixel_array.astype(np.float32), None, 0, 255, cv2.NORM_MINMAX)
    return img.astype(np.uint8)


'''
    Function: JPG2dcm
    Purpose:  convert a JPG file to DICOM format
    Parameter: path of the JPG file
    Details:  reads a JPG from the given path, converts it to a DICOM image,
              and writes the relevant metadata into the DICOM file
    Returns:  the DICOM image
'''


def JPG2dcm(newpath):
    # not implemented yet; see the sketch right below
    return
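
JPG2dcm above is still only a placeholder. Below is a rough sketch of what it could look like, assuming pydicom 2.x; the function name jpg_to_dcm_sketch, the Secondary Capture SOP class, and the 'Anonymous' placeholder tags are my own illustrative choices, not something from the original program:

import datetime
from pydicom.dataset import FileDataset, FileMetaDataset
from pydicom.uid import ExplicitVRLittleEndian, generate_uid

def jpg_to_dcm_sketch(jpg_path, dcm_path):
    # read the JPG as an 8-bit grayscale image (cv2 is already imported at the top of the script)
    img = cv2.imread(jpg_path, cv2.IMREAD_GRAYSCALE)

    # minimal file meta information (Secondary Capture Image Storage)
    meta = FileMetaDataset()
    meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.7'
    meta.MediaStorageSOPInstanceUID = generate_uid()
    meta.TransferSyntaxUID = ExplicitVRLittleEndian

    ds = FileDataset(dcm_path, {}, file_meta=meta, preamble=b'\x00' * 128)
    ds.SOPClassUID = meta.MediaStorageSOPClassUID
    ds.SOPInstanceUID = meta.MediaStorageSOPInstanceUID
    ds.Modality = 'OT'
    ds.PatientName = 'Anonymous'   # placeholder tags; real metadata would go here
    ds.PatientID = '000000'
    ds.ContentDate = datetime.date.today().strftime('%Y%m%d')

    # describe the uncompressed 8-bit grayscale pixel data
    ds.SamplesPerPixel = 1
    ds.PhotometricInterpretation = 'MONOCHROME2'
    ds.Rows, ds.Columns = img.shape
    ds.BitsAllocated = 8
    ds.BitsStored = 8
    ds.HighBit = 7
    ds.PixelRepresentation = 0
    ds.PixelData = img.tobytes()

    ds.is_little_endian = True
    ds.is_implicit_VR = False
    ds.save_as(dcm_path)
    return ds

In a real converter the patient and study tags would be copied from the matching source DICOM rather than hard-coded.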


'''
    Function: SIFT
    Purpose:  extract image features
    Parameters: image1, image2 are the input images
    Details:
        1. call detectAndCompute to find keypoints and descriptors;
        2. call drawKeypoints to draw the keypoints
    Returns:
        Image_1, Image_2: the images with the keypoints drawn on them
        kp1, des_1, kp2, des_2: the keypoints and descriptors of the two images
'''


def SIFT(image1, image2):
    # SIFT has lived in the main module since OpenCV 4.4, so use cv2.SIFT_create()
    sift = cv2.SIFT_create()
    kp1, des_1 = sift.detectAndCompute(image1, None)
    kp2, des_2 = sift.detectAndCompute(image2, None)
    Image_1 = cv2.drawKeypoints(image1, kp1, None)
    Image_2 = cv2.drawKeypoints(image2, kp2, None)
    return Image_1, Image_2, kp1, des_1, kp2, des_2


'''
    Function: flann (FLANN, Fast Library for Approximate Nearest Neighbors)
    Purpose:  match image features
    Parameters: the descriptors des_1, des_2 of the two images
    Details:
        1. index_params selects the matching algorithm
        2. search_params sets how many times the index trees are traversed
    Returns:
         matches: the list of candidate matches
         matchesMask: a mask so that only the good matches are drawn later
'''


def flann(des_1, des_2):
    FLANN_INDEX_KDTREE = 1  # kd-tree index (0 would be linear search)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE,
                        trees=5)
    search_params = dict(checks=200)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_1, des_2, k=2)
    matchesMask = [[0, 0] for i in range(len(matches))]
    return matches, matchesMask


'''
    Function: ration_test (Lowe's ratio test)
    Purpose:  discard matches whose ratio between the two nearest-neighbour
              distances is above the chosen threshold.
    Parameters:
         matches: candidate matches, kp1: keypoints, matchesMask
    Details:
          1. when the ratio of the two smallest distances is too large,
             the match is judged to be a bad one;
          2. good[] stores the surviving matches; matchesMask is updated so that
             only those matches are drawn
    Returns:
          good: the surviving matches
          pts1: the matched keypoint coordinates in the first image
          matchesMask: the mask of the best matches
'''


def ration_test(matches, kp1, matchesMask):
    good = []  # surviving matches
    pts1 = []
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts1.append(kp1[m.queryIdx].pt)
            matchesMask[i] = [1, 0]  # draw only the best of the two neighbours
    return good, pts1, matchesMask


'''
    Function: MakeBorder
    Purpose:  create padded versions of the images to use for matching and stitching
    Parameters:
          BorderImg1, BorderImg2: the input images
    Details:
          srcImg is BorderImg1 with its border extended;
          testImg is BorderImg2 with its border extended
    Returns:
          srcImg and testImg
'''


def MakeBorder(BorderImg1, BorderImg2):
    # pad 180 black pixels at the bottom so the warped image has room to land
    top, bot, left, right = 0, 180, 0, 0
    srcImg = cv2.copyMakeBorder(BorderImg1,
                                top, bot, left, right,
                                cv2.BORDER_CONSTANT,
                                value=(0, 0, 0))
    testImg = cv2.copyMakeBorder(BorderImg2,
                                 top, bot, left, right,
                                 cv2.BORDER_CONSTANT,
                                 value=(0, 0, 0))
    return srcImg, testImg


'''
    Function: Draw_Tool
    Purpose:  draw the feature matches between two images
    Details:  draw_params defines the colours of the keypoints and the match lines,
              draw_Img connects the matched points
    Parameters:
          matchesMask: the mask of the best matches
          gray1, gray2: the images to be matched
          kp1, kp2: the keypoints of each image
          matches: the candidate matches
    Returns:
          draw_Img: the image with the matches drawn on it
'''


def Draw_Tool(matchesMask, gray1, kp1, gray2, kp2, matches):
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    draw_Img = cv2.drawMatchesKnn(gray1, kp1, gray2,
                                  kp2, matches, None,
                                  **draw_params)
    return draw_Img


'''
    Function: StitchImg
    Purpose:  stitch two images vertically/horizontally and blend the overlap
    Parameters:
        srcImg, testImg: the padded images returned by MakeBorder
        good: the surviving matches
        kp1, kp2: the keypoints of the two images
    Returns:
        the stitched image
'''


def StitchImg(srcImg, testImg, good, kp1, kp2):
    print("===========即将进行拼接!==============\n")
    rows, cols = srcImg.shape[:2]
    print("srcImg.shape[:2]", srcImg.shape[:2])
    rows1, cols1 = testImg.shape[:2]
    print("testImg.shape[:2]", testImg.shape[:2])
    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        print("len(good)", len(good))
        # keypoint coordinates in the query image
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        # keypoint coordinates in the train (template) image
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # M is the homography that maps img1 onto img2, estimated with RANSAC
        M, mask = cv2.findHomography(src_pts, dst_pts,
                                     cv2.RANSAC, 5.0)
        # the WARP_INVERSE_MAP flag applies the inverse of M,
        # i.e. it warps testImg back into srcImg's frame;
        # the output image is large enough to hold both images
        warpImg = cv2.warpPerspective(testImg, np.array(M), (
            testImg.shape[1],
            testImg.shape[0]),
                                      flags=cv2.WARP_INVERSE_MAP
                                      )
        plt.imshow(warpImg, )
        plt.title("warpImg")
        plt.show()
        warpImg1 = cv2.warpPerspective(srcImg, np.array(M), (
            srcImg.shape[1] + 100,
            srcImg.shape[0] + 300),
                                       flags=cv2.WARP_INVERSE_MAP
                                       )
        plt.imshow(warpImg1)
        plt.title("warpImg1")
        plt.show()
        # leftmost column of the overlap region
        for col in range(0, cols):
            if srcImg[:, col].any() and warpImg[:, col].any():
                left = col
                break
        print("the overlap starts at column " + str(left) + "!")
        # scan from column cols-1 backwards, down to (but not including) column 0
        for col in range(cols - 1, 0, -1):
            # rightmost column of the overlap region
            if srcImg[:, col].any() and warpImg[:, col].any():
                right = col
                print("right", right)
                break
        print("the overlap ends at column " + str(right) + "!")
        # res starts as an all-black canvas of the same size, used as the output template
        res = np.zeros([rows, cols, 3], np.uint8)
        plt.imshow(res)
        plt.title("res")
        plt.show()
        print("res_size", res.shape[:2])
        print("rows", rows)
        print("cols", cols)
        # fill res with pixels from srcImg and/or the warped testImg
        for row in range(0, rows):
            # print("row", row)
            for col in range(0, cols):
                # where the source image is empty, take the warped pixel instead
                if not srcImg[row, col].any():
                    res[row, col] = warpImg[row, col]
                elif not warpImg[row, col].any():
                    res[row, col] = srcImg[row, col]
                else:
                    # blend the overlap: the weight shifts linearly from srcImg (left) to warpImg (right)
                    srcImgLen = float(abs(col - left))
                    testImgLen = float(abs(col - right))
                    alpha = srcImgLen / (srcImgLen + testImgLen)
                    res[row, col] = np.clip(srcImg[row, col]
                                            * (1 - alpha)
                                            + warpImg[row, col]
                                            * alpha
                                            , 0, 255)
        # matplotlib expects RGB, so convert only for display
        res_RGB = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        plt.figure()
        plt.imshow(res_RGB)
        plt.title("res_RGB")
        plt.show()
        # cv2.imwrite expects BGR, so save the BGR result
        # (note: every call writes to the same output file name)
        cv2.imwrite(save + '/' + 'img5.jpg', res)
    else:
        print("Not enough matches are found - {}/{}".format(len(good),
                                                            MIN_MATCH_COUNT))
        return None
    print("===================== stitching done! ===========================\n")
    return res

# main program
# def main():


if __name__ == '__main__':
    print("=============主函数执行!!===========\n")
    # path = r"G:/DiCom/"
    # dcm = read_Dcm(path)
    print("第一次拼接!!!!!")
    img1, img2, k1, des1, k2, des2 = SIFT(image1, image2)
    matches, matchesMask1 = flann(des1, des2)
    good, pts, matchesMask2 = ration_test(matches, k1, matchesMask1)
    srcImg, testImg = MakeBorder(image1, image2)
    draw_Img = Draw_Tool(matchesMask2, gray1, k1, gray2, k2, matches)
    res1 = StitchImg(srcImg, testImg, good, k1, k2)
    print("第二次拼接!!!!!")
    img3, img4, k3, des3, k4, des4 = SIFT(image3, image4)
    matches34, matchesMask34 = flann(des3, des4)
    good1, pts1, matchesMask5 = ration_test(matches34, k3, matchesMask34)
    srcImg1, testImg1 = MakeBorder(image3, image4)
    draw_Img1 = Draw_Tool(matchesMask5, gray3, k3, gray4, k4, matches34)
    res2 = StitchImg(srcImg1, testImg1, good1, k3, k4)
    print("第三次拼接!!!!!")
    img5, img6, k5, des5, k6, des6 = SIFT(res1, res2)
    matches56, matchesMask56 = flann(des5, des6)
    good2, pts2, matchesMask6 = ration_test(matches56, k5, matchesMask56)
    srcImg2, testImg2 = MakeBorder(res1, res2)
    draw_Img2 = Draw_Tool(matchesMask6, res1, k5, res2, k6, matches56)
    res3 = StitchImg(srcImg2, testImg2, good2, k5, k6)

A rough flow chart is below (sorry I did not have time to tidy it up properly):

[Figure: rough flow chart of the stitching pipeline]
Next time I plan to work on fixing those flaws T_T. Another head-scratching day!
