# When doing regression with Keras, the built-in `acc` metric is not suitable,
# so a custom accuracy (`myacc`) is defined below. The last statement of this
# script is the equivalent NumPy computation of that custom metric.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.datasets import fashion_mnist
from keras.layers import Input, Conv2D, MaxPooling2D, Dense, Flatten, Dropout,ZeroPadding2D, Activation, BatchNormalization
from keras.optimizers import Adam
from keras.models import Model
from keras import backend as K
import cv2
import utils_paths # 主要用于图像路径处理操作,具体代码参考最后的附录
import random
import os
from sklearn.preprocessing import LabelBinarizer
print("------开始读取训练集数据------")
data = []
labels = []
# Collect input/label image paths; both directories are sorted so that
# imagePaths1[i] and imagePaths2[i] form a training pair.
imagePaths1 = sorted(list(utils_paths.list_images('./input')))
imagePaths2 = sorted(list(utils_paths.list_images('./label')))
# BUG FIX: the original shuffled only imagePaths1, which destroyed the
# input/label pairing. Shuffle a shared index permutation and apply it
# to both lists so pairs stay aligned.
random.seed(42)
order = list(range(len(imagePaths1)))
random.shuffle(order)
imagePaths1 = [imagePaths1[i] for i in order]
imagePaths2 = [imagePaths2[i] for i in order]
# Read the input images (grayscale, expected 64x64).
for imagePath in imagePaths1:
    image = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
    # BUG FIX: casting 0..255 pixels to int8 overflows for values > 127
    # (e.g. 255 becomes -1). Use a wider signed type so the subtraction
    # really maps 0..255 to -100..155 before scaling.
    image = image.astype(np.int16) - 100
    data.append(image)
# Read the label images the same way.
for imagePath in imagePaths2:
    vector = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
    vector = vector.astype(np.int16) - 100
    labels.append(vector)
# Scale to roughly [-1.0, 1.55].
data = np.array(data, dtype="float") / 100
labels = np.array(labels) / 100
# Inputs: (N, 64, 64, 1) images; targets: (N, 64) vectors.
data = data.reshape(data.shape[0], 64, 64, 1)
labels = labels.reshape(labels.shape[0], 64)
# 50/50 train/validation split with a fixed seed for reproducibility.
(x_train, x_validate, y_train, y_validate) = train_test_split(data, labels, test_size=0.5, random_state=42)
print("x_train.shape:", x_train.shape)
print("x_validate.shape:", x_validate.shape)
print("y_train.shape:", y_train.shape)
print("y_validate.shape:", y_validate.shape)
print("------训练集数据读取完毕------")
def simpleModel(input_shape):
    """Build a small three-stage CNN mapping a (64, 64, 1) image to a
    64-dimensional sigmoid output vector.

    Each stage applies: zero-padding of 1, a 3x3 convolution (stride 1),
    batch normalization over the channel axis, ReLU, and a 2x2 max-pool
    with stride 2. The stages use 8, 16 and 32 filters respectively.
    """
    inputs = Input(shape=input_shape)
    x = inputs
    # Three structurally identical conv stages, doubling the filter count.
    for n_filters in (8, 16, 32):
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(n_filters, kernel_size=(3, 3), strides=(1, 1))(x)
        x = BatchNormalization(axis=3)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # Fully-connected head.
    x = Flatten()(x)
    outputs = Dense(64, activation='sigmoid')(x)
    return Model(inputs=inputs, outputs=outputs, name='simpleModel')
def myacc(y_true, y_pred):
    """Custom regression 'accuracy' metric for Keras.

    A sample counts as correct when its mean absolute error over the
    output vector is at most 0.01; averaged by Keras, this yields the
    fraction of in-tolerance samples per batch.
    """
    mae = K.mean(K.abs(y_pred - y_true), axis=-1)
    # BUG FIX: the comparison produces a boolean tensor; cast it explicitly
    # so the backend's metric averaging yields a well-defined float fraction.
    return K.cast(mae <= 0.01, K.floatx())
# Build, train, save and evaluate the model.
model = simpleModel(input_shape=(64, 64, 1))
model.compile(optimizer=Adam(lr=0.001), loss='mse', metrics=[myacc])
model.fit(x_train, y_train, epochs=20, batch_size=256)
model.save('./model.h5')
# NOTE(review): './weigths.h5' looks like a typo for 'weights.h5' — kept as-is
# since downstream scripts may load this exact path.
model.save_weights('./weigths.h5')
preds = model.evaluate(x_validate, y_validate)  # returns [loss, myacc]
print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))
print(model.metrics_names)
predict = model.predict(x_validate)
# print(predict)
# print(y_validate)
# MSE loss recomputed with NumPy; matches the loss printed by evaluate() above.
print(np.mean(np.mean(np.square(predict-y_validate),axis=-1),axis = 0))
# Mean absolute error recomputed with NumPy. NOTE(review): this equals the
# reported metric only when myacc returns the raw MAE (the commented-out
# `return SSR`); with the thresholded `return SSR <= 0.01` the metric is a
# fraction of in-tolerance samples, not this value — confirm which was intended.
print(np.mean(np.mean(np.abs(predict-y_validate),axis=-1),axis = 0))