# --- Model construction and training (模型构建) ---
import os
from PIL import Image
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from keras import regularizers
from keras.layers.core import Dropout
def read_image(paths):
    """Recursively collect the paths of all JPEG files under *paths*.

    Parameters: paths - root directory to scan.
    Returns: list of full paths to files whose extension is .jpg,
    matched case-insensitively (so .JPG files are included too).
    Raises: FileNotFoundError if *paths* does not exist.
    """
    # Fail fast on a missing directory; os.walk on a nonexistent path
    # would silently yield nothing.
    os.listdir(paths)
    filelist = []
    for root, dirs, files in os.walk(paths):
        for file in files:
            # splitext keeps the leading dot: ('photo', '.jpg')
            if os.path.splitext(file)[1].lower() == ".jpg":
                filelist.append(os.path.join(root, file))
    return filelist
# Directories holding the labelled training images.
path_1, path_2 = 'CNN/positive_copy/', 'CNN/negative_copy/'
# Gather every .jpg path in each class directory.
filelist_1 = read_image(path_1)
filelist_2 = read_image(path_2)
def im_xiangsu(paths):
    """Open each image file and resize it to 128x128.

    Parameters: paths - iterable of image file paths.
    Returns: list of PIL.Image objects resized to (128, 128).
    Unreadable/corrupt files are skipped with a printed warning,
    so the result may be shorter than the input.
    """
    filelist_temp = []
    for filename in paths:
        try:
            # Context manager closes the underlying file handle promptly
            # (the original leaked one handle per image); resize() loads
            # the pixel data and returns a new, detached image.
            with Image.open(filename) as im:
                filelist_temp.append(im.resize((128, 128)))
        except OSError as e:
            # Best-effort: report and skip files PIL cannot read.
            print(e.args)
    return filelist_temp
# All resized training images: positives first, then negatives.
filelist_all = [*im_xiangsu(filelist_1), *im_xiangsu(filelist_2)]
def im_array(paths):
    """Flatten images to normalized grayscale pixel values.

    Parameters: paths - iterable of PIL.Image objects (despite the name,
    these are already-opened images, not paths).
    Returns: one flat Python list of floats in [0, 1], all images
    concatenated in input order (each contributes width*height values).
    """
    M = []
    for im in paths:
        im_L = im.convert("L")  # grayscale, one byte per pixel
        # np.asarray reads the pixel buffer directly - cheaper than the
        # original getdata() + np.array round-trip, same values.
        arr1 = np.asarray(im_L, dtype='float32') / 255.0
        M.extend(arr1.ravel().tolist())
    return M
# Flatten all images into one long pixel vector, then shape it into
# the (num_images, 128, 128, 1) tensor the Conv2D input layer expects.
M = im_array(filelist_all)
dict_label = {0: 'positive', 1: 'negative'}
train_images = np.array(M).reshape(len(filelist_all), 128, 128, 1)
# Positives were concatenated before negatives, so labels follow suit.
# NOTE(review): if im_xiangsu skipped any unreadable file, these counts
# no longer match len(filelist_all) and the labels misalign - confirm.
label = [0] * len(filelist_1) + [1] * len(filelist_2)
train_lables = np.array(label)
print('_______________________')
print(train_images.shape)
print(train_lables.shape)
# CNN: three conv/pool blocks -> dense head, L2-regularized throughout
# to fight overfitting on a small dataset.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 1),
                        kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
# layers.Dropout for consistency with the rest of the stack (the
# original pulled Dropout from the deprecated keras.layers.core path).
model.add(layers.Dropout(0.75))
model.add(layers.Dense(2, activation='softmax', kernel_regularizer=regularizers.l2(0.01)))
model.compile(optimizer='adam',
              # sparse_categorical_crossentropy: labels are integers 0/1
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_lables, epochs=2, validation_split=0.2)
model.save('my_model.h5')
# summary() prints itself; wrapping it in print() emitted a stray "None".
model.summary()
tf.keras.models.save_model(model, 'CNN/models')
print("save model success!")
# --- Classification with the trained model (用模型进行分类) ---
import os
from PIL import Image
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Print full arrays (no "..." truncation) when dumping predictions below.
np.set_printoptions(threshold=np.inf)
# Reload the model trained and saved by the training script above.
model = tf.keras.models.load_model('my_model.h5')
model.summary()
print("finish loading model!")
# Class index -> human-readable label (index 0 = positive, 1 = negative).
dict_label = {
    0: 'positive', 1: 'negative'}
def read_image(paths):
    """Recursively collect the paths of all JPEG files under *paths*.

    Parameters: paths - root directory to scan.
    Returns: list of full paths to files whose extension is .jpg,
    matched case-insensitively (so .JPG files are included too).
    Raises: FileNotFoundError if *paths* does not exist.
    """
    # Fail fast on a missing directory; os.walk on a nonexistent path
    # would silently yield nothing.
    os.listdir(paths)
    filelist = []
    for root, dirs, files in os.walk(paths):
        for file in files:
            # splitext keeps the leading dot: ('photo', '.jpg')
            if os.path.splitext(file)[1].lower() == ".jpg":
                filelist.append(os.path.join(root, file))
    return filelist
def im_xiangsu(paths):
    """Open each image file and resize it to 128x128.

    Parameters: paths - iterable of image file paths.
    Returns: list of PIL.Image objects resized to (128, 128).
    Unreadable/corrupt files are skipped with a printed warning,
    so the result may be shorter than the input.
    """
    filelist_temp = []
    for filename in paths:
        try:
            # Context manager closes the underlying file handle promptly
            # (the original leaked one handle per image); resize() loads
            # the pixel data and returns a new, detached image.
            with Image.open(filename) as im:
                filelist_temp.append(im.resize((128, 128)))
        except OSError as e:
            # Best-effort: report and skip files PIL cannot read.
            print(e.args)
    return filelist_temp
def im_array(paths):
    """Flatten images to normalized grayscale pixel values.

    Parameters: paths - iterable of PIL.Image objects (despite the name,
    these are already-opened images, not paths).
    Returns: one flat Python list of floats in [0, 1], all images
    concatenated in input order (each contributes width*height values).
    """
    M = []
    for im in paths:
        im_L = im.convert("L")  # grayscale, one byte per pixel
        # np.asarray reads the pixel buffer directly - cheaper than the
        # original getdata() + np.array round-trip, same values.
        arr1 = np.asarray(im_L, dtype='float32') / 255.0
        M.extend(arr1.ravel().tolist())
    return M
# Already-classified images (positive/negative) plus the pool that is
# still awaiting classification.
path_1, path_2, path_3 = 'CNN/positive/', 'CNN/negative/', 'CNN/unverified/'
filelist_1 = read_image(path_1)
filelist_2 = read_image(path_2)
filelist_3 = read_image(path_3)
def getresult(paths):
    """Classify every .jpg under *paths* and sort the files by class.

    Each image is predicted by the loaded global `model`; the file is
    then copied into CNN/positive/ or CNN/negative/ by its basename.

    Parameters: paths - directory of unclassified .jpg images.
    Returns: (positive_count, negative_count, mean_margin) where
    mean_margin is the average |p_pos - p_neg| over all predictions
    (a rough confidence measure). Returns (0, 0, 0.0) for an empty or
    fully-unreadable directory.
    """
    # Pair each path with its resized image HERE rather than via
    # im_xiangsu: the original indexed predictions by position in the
    # raw path list, which misaligned (or crashed) whenever im_xiangsu
    # silently skipped an unreadable file.
    pairs = []
    for filename in read_image(paths):
        try:
            with Image.open(filename) as im:
                pairs.append((filename, im.resize((128, 128))))
        except OSError as e:
            print(e.args)
    if not pairs:
        # Guard: reshape(0, ...) and the final division would fail.
        return 0, 0, 0.0
    img = im_array([im for _, im in pairs])
    test_images = np.array(img).reshape(len(pairs), 128, 128, 1)
    predictions_single = model.predict(test_images)
    print(predictions_single)
    positive_number = 0
    negative_number = 0
    rate_sure = 0.0  # accumulated |p_pos - p_neg| margins
    for (filename, _), probs in zip(pairs, predictions_single):
        # basename works for any source directory; the original sliced
        # filename[15:], which only fit the 15-char 'CNN/unverified/'.
        ss = os.path.basename(filename)
        if probs[0] >= probs[1]:
            positive_number += 1
            rate_sure += probs[0] - probs[1]
            dest = os.path.join('CNN/positive/', ss)
        else:
            negative_number += 1
            rate_sure += probs[1] - probs[0]
            dest = os.path.join('CNN/negative/', ss)
        with Image.open(filename) as im:
            im.save(dest)
    return positive_number, negative_number, rate_sure / len(pairs)
# Summarize the classification run: counts of already-classified images
# plus the newly classified ones, and the model's mean confidence margin.
exist_positive = len(filelist_1)
exist_negative = len(filelist_2)
predict_positive_number, predict_negative_number, predict_rate = getresult('CNN/unverified/')
all_negative = predict_negative_number + exist_negative
all_number = predict_positive_number + predict_negative_number + exist_positive + exist_negative
# Fraction of ALL ids (pre-existing + newly classified) that are negative.
# NOTE(review): the message below calls this the "mistaken classification"
# rate, but it is simply the overall negative share - confirm intent.
rate_final = all_negative / all_number if all_number else 0.0
# Mean confidence margin of the new predictions. (The original multiplied
# predict_rate by the prediction count and divided it straight back out,
# and computed an unused (exist_positive + exist_negative) * 1.0 term.)
rate_accuracy = predict_rate
print(predict_positive_number)
print(predict_negative_number)
print(exist_positive)
print(exist_negative)
print('The total numbers of negative id is :' + str(all_negative))
print('The rate of the mistaken classification is : ' + str(rate_final * 100) + '%')
print('The accuracy of the model is : ' + str(rate_accuracy * 100) + '%')
# --- Approach notes (思路分析) ---