A Summary of 3D Reconstruction Methods for Medical Images (Python + VTK + ITK + Mayavi)

3D Reconstruction of Medical Images in Python

    • Environment
    • Methods
      • Method 1: Poly3DCollection + matplotlib
      • Method 2: VTK + ITK
      • Method 3: Mayavi contour3d
      • Final method: Mayavi + TVTK

Environment

The language is Python. The main libraries that may be used are listed below:

  • Scipy
  • ITK
  • VTK
  • Mayavi
  • TVTK
  • Matplotlib
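
All of these are generally installable with pip (the PyPI packages are scipy, itk, vtk, mayavi and matplotlib; TVTK ships as part of Mayavi), and Mayavi additionally needs a GUI toolkit such as PyQt5 to open its windows.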

Methods

While trying to reconstruct a 3D model, I looked up several different approaches, and I record them here.

Method 1: Poly3DCollection + matplotlib

Use Poly3DCollection from mpl_toolkits; the surface is extracted with the marching_cubes algorithm and displayed with matplotlib.

import numpy as np
import pandas as pd
import os
import scipy.ndimage
import matplotlib.pyplot as plt
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def plot_3d(image, threshold=-300):
    
    # Position the scan upright, 
    # so the head of the patient would be at the top facing the camera
    p = image.transpose(2,1,0)
    p = p[:,:,::-1]
    
    # Newer scikit-image versions return (verts, faces, normals, values);
    # very old versions returned only (verts, faces).
    verts, faces, _, _ = measure.marching_cubes(p, threshold)

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')

    # Fancy indexing: `verts[faces]` to generate a collection of triangles
    mesh = Poly3DCollection(verts[faces], alpha=0.1)
    face_color = [0.5, 0.5, 1]
    mesh.set_facecolor(face_color)
    ax.add_collection3d(mesh)

    ax.set_xlabel("x-axis")
    ax.set_ylabel("y-axis")
    ax.set_zlabel("z-axis")

    ax.set_xlim(0, p.shape[0])
    ax.set_ylim(0, p.shape[1])
    ax.set_zlim(0, p.shape[2])

    plt.show()
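
A minimal usage sketch (the spherical volume below is just a made-up stand-in; in practice the argument would be the 3D HU array loaded from a real scan):

# A synthetic "bone" sphere (400 HU) inside "air" (-1000 HU)
x, y, z = np.ogrid[-50:50, -50:50, -50:50]
fake_scan = np.where(x**2 + y**2 + z**2 < 40**2, 400.0, -1000.0)
plot_3d(fake_scan, threshold=-300)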

Pros: lightweight, and it can be embedded in IPython.
Cons:

  • The view cannot be rotated interactively; plotly can be used for interactive display instead (see the sketch after this list).
  • The rendering quality is poor and the surface is not smooth. You can roll your own interpolation of the source data, but since that interpolation is not isosurface-based, the result still will not look great.
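
For interactive rotation, the same marching-cubes mesh can be handed to plotly instead of matplotlib. A minimal sketch, assuming plotly is installed and image is a 3D scan array like the one passed to plot_3d:

import plotly.graph_objects as go
from skimage import measure

# `image` is assumed to be the same 3D scan array used above
p = image.transpose(2, 1, 0)[:, :, ::-1]
verts, faces, _, _ = measure.marching_cubes(p, -300)

# verts are the triangle vertices; faces index into them
mesh = go.Mesh3d(x=verts[:, 0], y=verts[:, 1], z=verts[:, 2],
                 i=faces[:, 0], j=faces[:, 1], k=faces[:, 2],
                 color='lightblue', opacity=0.5)
go.Figure(data=[mesh]).show()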

Method 2: VTK + ITK

Roll your own wheel: reconstruct and display the volume with VTK.

import vtk


def main():
    colors = vtk.vtkNamedColors()

    fileName = get_program_parameters()

    colors.SetColor("SkinColor", [255, 125, 64, 255])
    colors.SetColor("BkgColor", [51, 77, 102, 255])

    # Create the renderer, the render window, and the interactor. The renderer
    # draws into the render window, the interactor enables mouse- and
    # keyboard-based interaction with the data within the render window.
    #
    aRenderer = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(aRenderer)

    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    # The following reader is used to read the volume composed of a series
    # of 2D slices. A MetaImage (.mhd) file is read here; its header already
    # stores the slice dimensions, the pixel spacing and the data endianness,
    # so they do not need to be set on the reader.
    reader = vtk.vtkMetaImageReader()
    reader.SetFileName(fileName)

    # An isosurface, or contour value of 500 is known to correspond to the
    # skin of the patient.
    # The triangle stripper is used to create triangle strips from the
    # isosurface; these render much faster on many systems.
    skinExtractor = vtk.vtkMarchingCubes()
    skinExtractor.SetInputConnection(reader.GetOutputPort())
    skinExtractor.SetValue(0, 500)

    skinStripper = vtk.vtkStripper()
    skinStripper.SetInputConnection(skinExtractor.GetOutputPort())

    skinMapper = vtk.vtkPolyDataMapper()
    skinMapper.SetInputConnection(skinStripper.GetOutputPort())
    skinMapper.ScalarVisibilityOff()

    skin = vtk.vtkActor()
    skin.SetMapper(skinMapper)
    skin.GetProperty().SetDiffuseColor(colors.GetColor3d("SkinColor"))
    skin.GetProperty().SetSpecular(.3)
    skin.GetProperty().SetSpecularPower(20)
    skin.GetProperty().SetOpacity(.5)

    # An isosurface, or contour value of 1150 is known to correspond to the
    # bone of the patient.
    # The triangle stripper is used to create triangle strips from the
    # isosurface; these render much faster on many systems.
    boneExtractor = vtk.vtkMarchingCubes()
    boneExtractor.SetInputConnection(reader.GetOutputPort())
    boneExtractor.SetValue(0, 1150)

    boneStripper = vtk.vtkStripper()
    boneStripper.SetInputConnection(boneExtractor.GetOutputPort())

    boneMapper = vtk.vtkPolyDataMapper()
    boneMapper.SetInputConnection(boneStripper.GetOutputPort())
    boneMapper.ScalarVisibilityOff()

    bone = vtk.vtkActor()
    bone.SetMapper(boneMapper)
    bone.GetProperty().SetDiffuseColor(colors.GetColor3d("Ivory"))

    # An outline provides context around the data.
    #
    outlineData = vtk.vtkOutlineFilter()
    outlineData.SetInputConnection(reader.GetOutputPort())

    mapOutline = vtk.vtkPolyDataMapper()
    mapOutline.SetInputConnection(outlineData.GetOutputPort())

    outline = vtk.vtkActor()
    outline.SetMapper(mapOutline)
    outline.GetProperty().SetColor(colors.GetColor3d("Black"))

    # It is convenient to create an initial view of the data. The FocalPoint
    # and Position form a vector direction. Later on (ResetCamera() method)
    # this vector is used to position the camera to look at the data in
    # this direction.
    aCamera = vtk.vtkCamera()
    aCamera.SetViewUp(0, 0, -1)
    aCamera.SetPosition(0, -1, 0)
    aCamera.SetFocalPoint(0, 0, 0)
    aCamera.ComputeViewPlaneNormal()
    aCamera.Azimuth(30.0)
    aCamera.Elevation(30.0)

    # Actors are added to the renderer. An initial camera view is created.
    # The Dolly() method moves the camera towards the FocalPoint,
    # thereby enlarging the image.
    aRenderer.AddActor(outline)
    aRenderer.AddActor(skin)
    aRenderer.AddActor(bone)
    aRenderer.SetActiveCamera(aCamera)
    aRenderer.ResetCamera()
    aCamera.Dolly(1.5)

    # Set a background color for the renderer and set the size of the
    # render window (expressed in pixels).
    aRenderer.SetBackground(colors.GetColor3d("BkgColor"))
    renWin.SetSize(640, 480)

    # Note that when camera movement occurs (as it does in the Dolly()
    # method), the clipping planes often need adjusting. Clipping planes
    # consist of two planes: near and far along the view direction. The
    # near plane clips out objects in front of the plane; the far plane
    # clips out objects behind the plane. This way only what is drawn
    # between the planes is actually rendered.
    aRenderer.ResetCameraClippingRange()

    # Initialize the event loop and then start it.
    iren.Initialize()
    iren.Start()
    
def get_program_parameters():
    import argparse
    description = 'The skin and bone are extracted from a CT dataset of the head.'
    epilogue = '''
    Derived from VTK/Examples/Cxx/Medical2.cxx
    This example reads a volume dataset, extracts two isosurfaces that
     represent the skin and bone, and then displays them.
    '''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('filename', help='FullHead.mhd.')
    args = parser.parse_args()
    return args.filename

The 3D data first needs to be converted to .mhd format. I have not yet figured out how to reconstruct directly from a 3D numpy.array; I will add that once I do.
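
One possible workaround (a sketch only, assuming SimpleITK is installed; volume.mhd is an arbitrary file name) is to write the array out as .mhd first and then point the vtkMetaImageReader above at that file:

import numpy as np
import SimpleITK as sitk

# `volume` is assumed to be a 3D numpy array indexed (z, y, x); placeholder data here
volume = np.zeros((128, 256, 256), dtype=np.int16)
image = sitk.GetImageFromArray(volume)
image.SetSpacing((1.0, 1.0, 1.5))        # (x, y, z) voxel spacing in mm
sitk.WriteImage(image, 'volume.mhd')     # writes the .mhd header plus the raw data file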

Method 3: Mayavi contour3d

The mayavi module must be installed for display.

import numpy as np
from mayavi.mlab import *

def test_contour3d():
    x, y, z = np.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]

    scalars = x * x * 0.5 + y * y + z * z * 2.0

    obj = contour3d(scalars, contours=4, transparent=True)
    return obj

This involves no preprocessing at all; it only renders the 3D scalar field, so interpolation, smoothing, and so on have to be done yourself.
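
As a rough sketch of what that preprocessing could look like (the volume below is a placeholder; in practice it would be your 3D scan), the data can be resampled and smoothed with scipy.ndimage before being passed to contour3d:

import numpy as np
import scipy.ndimage
from mayavi import mlab

volume = np.random.rand(64, 64, 32)      # placeholder for a real scan

# Resample the z axis towards isotropic voxels, then smooth the field a little
resampled = scipy.ndimage.zoom(volume, (1, 1, 2), order=1)
smoothed = scipy.ndimage.gaussian_filter(resampled, sigma=1)

mlab.contour3d(smoothed, contours=4, transparent=True)
mlab.show()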

Final method: Mayavi + TVTK

But what if you do not know how to do the preprocessing and are pressed for time? Just grab a ready-made wheel and use it.1

import numpy as np

def isosurfacing(data):
	"""data should be a 3D numpy array of scalar intensities."""
	# Heuristic for finding the threshold for the brain
	
	# Extract the 20th and 80th percentiles (without using
	# scipy.stats.scoreatpercentile)
	sorted_data = np.sort(data.ravel())
	l = len(sorted_data)
	lower_thr = sorted_data[int(0.2 * l)]
	upper_thr = sorted_data[int(0.8 * l)]
	
	# The white matter boundary: find the densest part of the upper half
	# of histogram, and take a value 10% higher, to cut _in_ the white matter
	hist, bins = np.histogram(data[data > np.mean(data)], bins=50)
	brain_thr_idx = np.argmax(hist)
	brain_thr =  bins[brain_thr_idx + 4]
	
	del hist, bins, brain_thr_idx
	
	# Display the data #############################################################
	from mayavi import mlab
	from tvtk.api import tvtk
	
	fig = mlab.figure(bgcolor=(0, 0, 0), size=(400, 500))
	# to speed things up
	fig.scene.disable_render = True
	
	src = mlab.pipeline.scalar_field(data)
	# Our data is not equally spaced in all directions:
	src.spacing = [1, 1, 1.5]
	src.update_image_data = True
	
	#----------------------------------------------------------------------
	# Brain extraction pipeline
	
	# In the following, we create a Mayavi pipeline that strongly
	# relies on VTK filters. For this, we make heavy use of the
	# mlab.pipeline.user_defined function, to include VTK filters in
	# the Mayavi pipeline.
	
	# Apply image-based filters to clean up noise
	thresh_filter = tvtk.ImageThreshold()
	thresh_filter.threshold_between(lower_thr, upper_thr)
	thresh = mlab.pipeline.user_defined(src, filter=thresh_filter)
	
	median_filter = tvtk.ImageMedian3D()
	median_filter.set_kernel_size(3, 3, 3)
	median = mlab.pipeline.user_defined(thresh, filter=median_filter)
	
	diffuse_filter = tvtk.ImageAnisotropicDiffusion3D(
	                                    diffusion_factor=1.0,
	                                    diffusion_threshold=100.0,
	                                    number_of_iterations=5, )
	
	diffuse = mlab.pipeline.user_defined(median, filter=diffuse_filter)
	
	# Extract brain surface
	contour = mlab.pipeline.contour(diffuse, )
	contour.filter.contours = [brain_thr, ]
	
	# Apply mesh filter to clean up the mesh (decimation and smoothing)
	dec = mlab.pipeline.decimate_pro(contour)
	dec.filter.feature_angle = 60.
	dec.filter.target_reduction = 0.7
	
	smooth_ = tvtk.SmoothPolyDataFilter(
	                    number_of_iterations=10,
	                    relaxation_factor=0.1,
	                    feature_angle=60,
	                    feature_edge_smoothing=False,
	                    boundary_smoothing=False,
	                    convergence=0.,
	                )
	smooth = mlab.pipeline.user_defined(dec, filter=smooth_)
	
	# Get the largest connected region
	connect_ = tvtk.PolyDataConnectivityFilter(extraction_mode=4)
	connect = mlab.pipeline.user_defined(smooth, filter=connect_)
	
	# Compute normals for shading the surface
	compute_normals = mlab.pipeline.poly_data_normals(connect)
	compute_normals.filter.feature_angle = 80
	
	surf = mlab.pipeline.surface(compute_normals,
	                                        color=(0.9, 0.72, 0.62))
	
	#----------------------------------------------------------------------
	# Display a cut plane of the raw data
	ipw = mlab.pipeline.image_plane_widget(src, colormap='bone',
	                plane_orientation='z_axes',
	                slice_index=55)
	
	mlab.view(-165, 32, 350, [143, 133, 73])
	mlab.roll(180)
	
	fig.scene.disable_render = False
	
	#----------------------------------------------------------------------
	# To make the link between the Mayavi pipeline and the much more
	# complex VTK pipeline, we display both:
	mlab.show_pipeline(rich_view=False)
	from tvtk.pipeline.browser import PipelineBrowser
	browser = PipelineBrowser(fig.scene)
	browser.show()
	
	mlab.show()
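
Calling it is then just a matter of loading the scan into a 3D numpy array, for example (a sketch, reusing SimpleITK and the hypothetical volume.mhd file from Method 2):

import SimpleITK as sitk

# Hypothetical input file; any 3D numpy array of scalar intensities works
data = sitk.GetArrayFromImage(sitk.ReadImage('volume.mhd'))
isosurfacing(data)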

P.S.
It took me several days of digging to find all of this and finally get the result I wanted, so I am writing it down before I forget.


  1. Ramachandran, P. and Varoquaux, G., "Mayavi: 3D Visualization of Scientific Data", IEEE Computing in Science & Engineering, 13(2), pp. 40-51 (2011). ↩︎
