# This project classifies cell images with a GoogLeNet architecture implemented in Keras.
# To run it on your own data, place the images of each class in a separate folder named
# after that class; the directory iterators below read the data straight from those paths.
# -*- coding: utf-8 -*-
# @Time : 2020/7/6 13:51
# @Author : song
# @File : model.py
# @Software: PyCharm
#-*- coding: UTF-8 -*-
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D
from keras.layers import Flatten, Dense, Dropout,BatchNormalization
from keras.layers import Input, concatenate
from keras.models import Model,load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model,np_utils
from keras import regularizers
import keras.metrics as metric
import os
# Global constants: model hyper-parameters and training configuration.
NB_CLASS=4                  # number of cell classes to predict
LEARNING_RATE=0.01
MOMENTUM=0.9
ALPHA=0.0001
BETA=0.75
GAMMA=0.1
DROPOUT=0.4                 # dropout rate before the classification head
WEIGHT_DECAY=0.0005         # L2 regularization strength for conv layers
LRN2D_NORM=True             # apply BatchNormalization after convolutions by default
DATA_FORMAT='channels_last' # Theano:'channels_first' Tensorflow:'channels_last'
USE_BN=True
# BUG FIX: IM_WIDTH/IM_HEIGHT were defined twice (again after the data
# paths below); keep a single definition.
IM_WIDTH=224                # input image width expected by the network
IM_HEIGHT=224               # input image height expected by the network
EPOCH=50
# Training data: images of each class live in a sub-folder named after that class.
train_root='./date/traindata'
# Validation data, organized the same way as the training data.
vaildation_root='./date/vaildationdata'
# Test data, organized the same way as the training data.
test_root='./date/testdata'
batch_size=32
'''
featurewise_center:布尔值,使输入数据集去中心化(均值为0), 按feature执行
samplewise_center:布尔值,使输入数据的每个样本均值为0
featurewise_std_normalization:布尔值,将输入除以数据集的标准差以完成标准化, 按feature执行
samplewise_std_normalization:布尔值,将输入的每个样本除以其自身的标准差
zca_whitening:布尔值,对输入数据施加ZCA白化
zca_epsilon: ZCA使用的eposilon,默认1e-6
rotation_range:整数,数据提升时图片随机转动的角度
width_shift_range:浮点数,图片宽度的某个比例,数据提升时图片水平偏移的幅度
height_shift_range:浮点数,图片高度的某个比例,数据提升时图片竖直偏移的幅度
shear_range:浮点数,剪切强度(逆时针方向的剪切变换角度)
zoom_range:浮点数或形如[lower,upper]的列表,随机缩放的幅度,若为浮点数,则相当于[lower,upper] = [1 - zoom_range, 1+zoom_range]
channel_shift_range:浮点数,随机通道偏移的幅度
fill_mode:;‘constant’,‘nearest’,‘reflect’或‘wrap’之一,当进行变换时超出边界的点将根据本参数给定的方法进行处理
cval:浮点数或整数,当fill_mode=constant时,指定要向超出边界的点填充的值
horizontal_flip:布尔值,进行随机水平翻转
vertical_flip:布尔值,进行随机竖直翻转
rescale: 重放缩因子,默认为None. 如果为None或0则不进行放缩,否则会将该数值乘到数据上(在应用其他变换之前)
preprocessing_function: 将被应用于每个输入的函数。该函数将在图片缩放和数据提升之后运行。该函数接受一个参数,为一张图片(秩为3的numpy array),并且输出一个具有相同shape的numpy array
data_format:字符串,“channel_first”或“channel_last”之一,代表图像的通道维的位置。
该参数是Keras 1.x中的image_dim_ordering,“channel_last”对应原本的“tf”,“channel_first”对应原本的“th”。
以128x128的RGB图像为例,“channel_first”应将数据组织为(3,128,128),
而“channel_last”应将数据组织为(128,128,3)。该参数的默认值是~/.keras/keras.json中设置的值,若从未设置过,则为“channel_last”
'''
# Training data pipeline: random geometric augmentation to enlarge the
# effective dataset (see parameter reference in the string above).
# NOTE(review): featurewise_center=True requires train_datagen.fit(samples)
# to compute the dataset mean; no fit() call is visible in this file, so
# Keras will warn and skip the centering — confirm whether it is wanted.
train_datagen = ImageDataGenerator(
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
featurewise_center=True
)
# Class labels are inferred automatically from the sub-folder names
# under train_root.
train_generator = train_datagen.flow_from_directory(
train_root,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
)
# Validation data pipeline: same augmentation settings as training.
vaild_datagen = ImageDataGenerator(
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
featurewise_center=True
)
# BUG FIX: this previously flowed from train_datagen, leaving the
# vaild_datagen built above completely unused.
vaild_generator = vaild_datagen.flow_from_directory(
vaildation_root,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
)
# Test data pipeline.
# BUG FIX: previously the test generator (a) flowed from train_datagen
# instead of test_datagen, and (b) applied random augmentation, so every
# evaluation ran on randomly distorted images and was non-deterministic.
# Evaluation should see the original images unmodified.
test_datagen = ImageDataGenerator()
test_generator = test_datagen.flow_from_directory(
test_root,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
)
# Convolution block with optional normalization.
def conv2D_lrn2d(x,filters,kernel_size,strides=(1,1),padding='same',data_format=DATA_FORMAT,dilation_rate=(1,1),activation='relu',use_bias=True,kernel_initializer='glorot_uniform',bias_initializer='zeros',kernel_regularizer=None,bias_regularizer=None,activity_regularizer=None,kernel_constraint=None,bias_constraint=None,lrn2d_norm=LRN2D_NORM,weight_decay=WEIGHT_DECAY):
    """Apply a Conv2D layer with optional L2 weight decay, then (when
    lrn2d_norm is truthy) a BatchNormalization layer on its output."""
    # A truthy weight_decay replaces any caller-supplied regularizers
    # with L2 penalties of that strength; otherwise no regularization.
    reg_kernel = regularizers.l2(weight_decay) if weight_decay else None
    reg_bias = regularizers.l2(weight_decay) if weight_decay else None
    out = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                 padding=padding, data_format=data_format,
                 dilation_rate=dilation_rate, activation=activation,
                 use_bias=use_bias,
                 kernel_initializer=kernel_initializer,
                 bias_initializer=bias_initializer,
                 kernel_regularizer=reg_kernel,
                 bias_regularizer=reg_bias,
                 activity_regularizer=activity_regularizer,
                 kernel_constraint=kernel_constraint,
                 bias_constraint=bias_constraint)(x)
    if lrn2d_norm:
        # Despite the historical "LRN" name, batch normalization is applied.
        out = BatchNormalization()(out)
    return out
# Inception module (GoogLeNet building block).
def inception_module(x,params,concat_axis,padding='same',data_format=DATA_FORMAT,dilation_rate=(1,1),activation='relu',use_bias=True,kernel_initializer='glorot_uniform',bias_initializer='zeros',kernel_regularizer=None,bias_regularizer=None,activity_regularizer=None,kernel_constraint=None,bias_constraint=None,lrn2d_norm=LRN2D_NORM,weight_decay=None):
    """Build one Inception block: four parallel pathways (1x1; 1x1->3x3;
    1x1->5x5; 3x3 maxpool->1x1) concatenated along concat_axis.

    params is a 4-tuple of filter-count tuples, one per pathway.
    """
    (branch1, branch2, branch3, branch4) = params
    # A truthy weight_decay overrides caller-supplied regularizers with L2.
    reg_kernel = regularizers.l2(weight_decay) if weight_decay else None
    reg_bias = regularizers.l2(weight_decay) if weight_decay else None

    def conv(tensor, n_filters, size):
        # Shared Conv2D configuration used by every pathway.
        return Conv2D(filters=n_filters, kernel_size=size, strides=1,
                      padding=padding, data_format=data_format,
                      dilation_rate=dilation_rate, activation=activation,
                      use_bias=use_bias,
                      kernel_initializer=kernel_initializer,
                      bias_initializer=bias_initializer,
                      kernel_regularizer=reg_kernel,
                      bias_regularizer=reg_bias,
                      activity_regularizer=activity_regularizer,
                      kernel_constraint=kernel_constraint,
                      bias_constraint=bias_constraint)(tensor)

    pathway1 = conv(x, branch1[0], (1, 1))                          # 1x1
    pathway2 = conv(conv(x, branch2[0], (1, 1)), branch2[1], (3, 3))  # 1x1 -> 3x3
    pathway3 = conv(conv(x, branch3[0], (1, 1)), branch3[1], (5, 5))  # 1x1 -> 5x5
    pooled = MaxPooling2D(pool_size=(3, 3), strides=1, padding=padding,
                          data_format=DATA_FORMAT)(x)
    pathway4 = conv(pooled, branch4[0], (1, 1))                     # 3x3 pool -> 1x1
    return concatenate([pathway1, pathway2, pathway3, pathway4], axis=concat_axis)
# Build the GoogLeNet (Inception v1) graph.
def create_model():
    """Assemble the GoogLeNet body and classification head.

    Returns:
        (x, img_input, CONCAT_AXIS, INP_SHAPE, DATA_FORMAT): output tensor,
        input tensor, concatenation axis for the inception modules, input
        shape tuple, and the data-format string.

    Raises:
        Exception: if DATA_FORMAT is neither 'channels_first' nor
        'channels_last'.
    """
    # Data format: tensorflow uses channels_last; theano uses channels_first.
    if DATA_FORMAT=='channels_first':
        INP_SHAPE=(3,224,224)
        img_input=Input(shape=INP_SHAPE)
        CONCAT_AXIS=1
    elif DATA_FORMAT=='channels_last':
        INP_SHAPE=(224,224,3)
        img_input=Input(shape=INP_SHAPE)
        CONCAT_AXIS=3
    else:
        raise Exception('Invalid Dim Ordering')
    # Stem: 7x7/2 conv -> 3x3/2 pool -> BN -> 1x1 conv -> 3x3 conv -> 3x3/2 pool.
    x=conv2D_lrn2d(img_input,64,(7,7),2,padding='same',lrn2d_norm=False)
    x=MaxPooling2D(pool_size=(3,3),strides=2,padding='same',data_format=DATA_FORMAT)(x)
    x=BatchNormalization()(x)
    x=conv2D_lrn2d(x,64,(1,1),1,padding='same',lrn2d_norm=False)
    x=conv2D_lrn2d(x,192,(3,3),1,padding='same',lrn2d_norm=True)
    x=MaxPooling2D(pool_size=(3,3),strides=2,padding='same',data_format=DATA_FORMAT)(x)
    # Inception stages; params = (1x1), (1x1,3x3), (1x1,5x5), (pool-proj).
    x=inception_module(x,params=[(64,),(96,128),(16,32),(32,)],concat_axis=CONCAT_AXIS) #3a
    x=inception_module(x,params=[(128,),(128,192),(32,96),(64,)],concat_axis=CONCAT_AXIS) #3b
    x=MaxPooling2D(pool_size=(3,3),strides=2,padding='same',data_format=DATA_FORMAT)(x)
    x=inception_module(x,params=[(192,),(96,208),(16,48),(64,)],concat_axis=CONCAT_AXIS) #4a
    x=inception_module(x,params=[(160,),(112,224),(24,64),(64,)],concat_axis=CONCAT_AXIS) #4b
    x=inception_module(x,params=[(128,),(128,256),(24,64),(64,)],concat_axis=CONCAT_AXIS) #4c
    x=inception_module(x,params=[(112,),(144,288),(32,64),(64,)],concat_axis=CONCAT_AXIS) #4d
    x=inception_module(x,params=[(256,),(160,320),(32,128),(128,)],concat_axis=CONCAT_AXIS) #4e
    x=MaxPooling2D(pool_size=(3,3),strides=2,padding='same',data_format=DATA_FORMAT)(x)
    x=inception_module(x,params=[(256,),(160,320),(32,128),(128,)],concat_axis=CONCAT_AXIS) #5a
    x=inception_module(x,params=[(384,),(192,384),(48,128),(128,)],concat_axis=CONCAT_AXIS) #5b
    # Head: global 7x7 average pool -> flatten -> dropout -> dense softmax.
    x=AveragePooling2D(pool_size=(7,7),strides=1,padding='valid',data_format=DATA_FORMAT)(x)
    x=Flatten()(x)
    x=Dropout(DROPOUT)(x)
    # BUG FIX: Keras 2's Dense takes `units`, not the Keras-1 `output_dim` kwarg.
    # NOTE(review): two stacked NB_CLASS Dense layers (linear then softmax) are
    # unusual for GoogLeNet — a single softmax head is typical; confirm intent.
    x=Dense(units=NB_CLASS,activation='linear')(x)
    x=Dense(units=NB_CLASS,activation='softmax')(x)
    return x,img_input,CONCAT_AXIS,INP_SHAPE,DATA_FORMAT
def check_print():
    """Build, summarize, optionally plot, and compile the GoogLeNet model.

    Returns:
        The compiled Keras Model.
    """
    # Create the model graph.
    x,img_input,CONCAT_AXIS,INP_SHAPE,DATA_FORMAT=create_model()
    # BUG FIX: Keras 2's Model takes `inputs`/`outputs`, not `input`/`output`.
    model=Model(inputs=img_input,outputs=[x])
    model.summary()
    # Graphviz (e.g. graphviz-2.38) must be installed and on PATH for
    # plot_model to render a PNG of the architecture.
    os.environ["PATH"] += os.pathsep + 'E:/Anconda5.3.0/keras/bin/'
    # Plotting is optional diagnostics: a missing pydot/graphviz install
    # should not abort training, so treat failures as best-effort.
    try:
        plot_model(model,to_file='GoogLeNet.png')
    except Exception as e:
        print('Skipping model plot (pydot/graphviz unavailable): %s' % e)
    model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc',metric.top_k_categorical_accuracy])
    print('Model Compiled')
    return model
if __name__=='__main__':
    # Resume from a previously saved model when available; otherwise build,
    # train, and save a fresh one.
    if os.path.exists('inception_1.h5'):
        model=load_model('inception_1.h5')
    else:
        model=check_print()
        # BUG FIX: generator step counts must be integers; n/batch_size is a
        # float under true division. Ceil-divide so the final partial batch
        # of each epoch is still consumed.
        steps_per_epoch=(train_generator.n+batch_size-1)//batch_size
        validation_steps=(vaild_generator.n+batch_size-1)//batch_size
        model.fit_generator(train_generator,validation_data=vaild_generator,epochs=EPOCH,
                            steps_per_epoch=steps_per_epoch,validation_steps=validation_steps)
        model.save('inception_1.h5')
    # BUG FIX: the previous `model.metrics = [...]` assignment clobbered a
    # Keras-internal attribute and did not configure evaluation — the metrics
    # given to compile() (and restored by load_model) are what evaluate
    # reports, so the assignment is removed.
    test_steps=(test_generator.n+batch_size-1)//batch_size
    loss,acc,top_acc=model.evaluate_generator(test_generator,steps=test_steps)
    print('Test result:loss:%f,acc:%f,top_acc:%f'%(loss,acc,top_acc))