1 Building the dataset
1.1 Type a row of digits in Word. I used the Calibri font, which is already fairly close to the digits printed on invoices. Readers can pick whatever font they like.
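If you would rather not go through Word at all, a minimal sketch like the one below can render the digit line directly with PIL instead. This is an alternative I'm suggesting, not the original author's method; the font file name calibri.ttf and the canvas size are assumptions that depend on your system.
# Hypothetical alternative to Word: render "0 1 2 ... 9" with PIL.
# The font path and canvas size below are assumptions; adjust for your system.
from PIL import Image, ImageDraw, ImageFont
font = ImageFont.truetype("calibri.ttf", 48)
canvas = Image.new("L", (600, 80), color=255)   # white background
draw = ImageDraw.Draw(canvas)
draw.text((10, 10), "0 1 2 3 4 5 6 7 8 9", fill=0, font=font)  # black digits
canvas.save("number.jpg")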
1.2 Read the image as grayscale and threshold it, then invert the colors so the digits are white on a black background. Dilate the image with several kernel shapes to diversify the data.
# encoding: utf-8
import cv2
import numpy as np
import os
img0 = cv2.imread("./number.jpg",0)
_, img0 = cv2.threshold(img0, 100, 255, cv2.THRESH_BINARY)
img0 = 255 - img0  # invert: digits become white, background black
# dilate with three different kernel shapes
element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))
element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3))
element3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
img1 = cv2.dilate(img0, element1, iterations = 1)
img2 = cv2.dilate(img0, element2, iterations = 1)
img3 = cv2.dilate(img0, element3, iterations = 1)
# keep the original image plus the three dilated variants, four in total
img = [img0, img1, img2, img3]
1.3 Compute the vertical histogram and segment the individual digits by projection.
# Helper: projection histogram; flag = 0 sums columns, flag = 1 sums rows.
def cal_hist(img_, flag=0):
    return np.sum(img_, axis=flag)

class Find_num_region:
    def __init__(self, img, hist):
        self.cursor = -1
        self.hist = hist
        self.img = img
    def next(self):
        # advance the cursor one step and return the histogram value there
        self.cursor = self.cursor + 1
        return self.hist[self.cursor]
    def hasNext(self):
        # check whether any histogram bins remain
        return len(self.hist) > self.cursor + 1
    def find_start(self):
        while self.hasNext():
            hist_num = self.next()
            if hist_num > 0:
                return self.cursor
    def find_end(self):
        while self.hasNext():
            hist_num = self.next()
            if hist_num == 0:
                return self.cursor
    def get_num_region(self, flag=0):
        start = self.find_start()
        end = self.find_end()
        if flag == 0:
            return self.img.copy()[:, start:end]   # crop columns
        else:
            return self.img.copy()[start:end, :]   # crop rows

def seg_num(img_):
    # first crop the text line by its row projection ...
    hist_1 = cal_hist(img_, 1)
    find_num_ = Find_num_region(img_, hist_1)
    img = find_num_.get_num_region(1)
    # ... then split the cropped line into digits by its column projection
    hist_2 = cal_hist(img)
    find_num = Find_num_region(img, hist_2)
    img_number = []
    for i in range(10):
        print(i)
        img_number.append(find_num.get_num_region())
    return img_number

# segment img0-img3 and keep the per-digit crops
img_seg = []
for img_ in img:
    img_seg.append(seg_num(img_))
After this step, img_seg is a 4×10 nested list: four dilation variants, each split into crops of the ten digits.
1.4 Set the image size to (28, 28), apply scaling and rotation (an affine transform), then add random noise.
1.5 Save the data in .npy format: 10,000 samples in total, with each digit varied in stroke thickness, rotation angle, scale, and noise distribution. The dataset is now complete. (For larger datasets, the TFRecord format is recommended; see the sketch after the helper functions below.)
img_arr = np.zeros((10000, 28, 28))
label_arr = np.zeros((10000, 10))
for num in range(250):            # 250 rounds of random angle / scale / noise
    for i in range(10):           # the ten digits 0-9
        for j in range(4):        # the four dilation variants
            angle_ = np.random.uniform(-20, 20)
            scale_ = np.random.uniform(0.9, 1.2)
            img_c = change_(img_seg[j][i], angle_, scale_)
            noise_num = np.random.randint(8, 38)  # number of noise pixels
            img_c = add_noise(img_c, noise_num)
            img_arr[40*num + 4*i + j] = img_c
            label_arr[40*num + 4*i + j][i] = 1
np.save("img.npy", np.array(img_arr))
np.save("label.npy", np.array(label_arr))
The helper functions used above are as follows:
# Place the image on a 28×28 canvas (zero-padded, no stretching),
# then rotate and scale it about the center.
def change_(img, angle_, scale_):
    length = 28
    h, w = img.shape
    H = np.float32([[1, 0, (length - w) / 2], [0, 1, (length - h) / 2]])
    img = cv2.warpAffine(img, H, (length, length))
    M = cv2.getRotationMatrix2D((length / 2, length / 2), angle_, scale_)
    return cv2.warpAffine(img, M, (length, length))

# Flip `amount` randomly chosen pixels (salt-and-pepper noise).
def add_noise(img, amount):
    length = 28
    for i in range(amount):
        rand_ = int(np.random.rand() * length * length)
        row = int(rand_ / length)
        col = int(rand_ % length)
        if img[row, col] == 0:
            img[row, col] = 255
        else:
            img[row, col] = 0
    return img
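As mentioned in 1.5, TFRecord is a better container for larger datasets. A minimal sketch of writing the same arrays to a TFRecord file with the TensorFlow 1.x API might look like the following; the file name train.tfrecords is an arbitrary choice of mine, not from the original post.
# Hedged sketch: serialize (image, label) pairs into a TFRecord file.
# "train.tfrecords" is an arbitrary file name.
import tensorflow as tf
import numpy as np

def write_tfrecord(img_arr, label_arr, path="train.tfrecords"):
    writer = tf.python_io.TFRecordWriter(path)
    for img, label in zip(img_arr, label_arr):
        example = tf.train.Example(features=tf.train.Features(feature={
            "img": tf.train.Feature(bytes_list=tf.train.BytesList(
                value=[img.astype(np.float32).tobytes()])),
            "label": tf.train.Feature(bytes_list=tf.train.BytesList(
                value=[label.astype(np.float32).tobytes()])),
        }))
        writer.write(example.SerializeToString())
    writer.close()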
2 CNN training
2.1 Use a digit-recognition network structure commonly found online: two convolutional layers followed by two fully connected layers.
# encoding: utf-8
import tensorflow as tf
import numpy as np

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def pool(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def inference(x_, keep_prob):
    with tf.variable_scope("layer_conv1"):    # convolution
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        layer_conv1 = tf.nn.relu(conv2d(x_, W_conv1) + b_conv1)
        layer_pool1 = pool(layer_conv1)
    with tf.variable_scope("layer_conv2"):    # convolution
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        layer_conv2 = tf.nn.relu(conv2d(layer_pool1, W_conv2) + b_conv2)
        layer_pool2 = pool(layer_conv2)
    with tf.variable_scope("layer_fc3"):      # fully connected
        W_fc3 = weight_variable([7 * 7 * 64, 1024])
        b_fc3 = bias_variable([1024])
        reshape_pool3 = tf.reshape(layer_pool2, [-1, 7 * 7 * 64])
        layer_fc3 = tf.nn.relu(tf.matmul(reshape_pool3, W_fc3) + b_fc3)
    with tf.variable_scope("dropout4"):
        fc_drop4 = tf.nn.dropout(layer_fc3, keep_prob)
    with tf.variable_scope("layer_fc5"):      # fully connected
        W_fc5 = weight_variable([1024, 10])
        b_fc5 = bias_variable([10])
        predict_ = tf.nn.softmax(tf.matmul(fc_drop4, W_fc5) + b_fc5)
    return predict_
def train(x_train, y_train, x_test, y_test):
    batch_size = 230                 # samples per batch
    all_size = y_train.shape[0]      # size of the training set
    x_ = tf.placeholder("float", shape=[None, 28, 28, 1], name='x_input')
    y_ = tf.placeholder("float", shape=[None, 10], name='y_input')
    keep_prob = tf.placeholder("float")
    predict_ = inference(x_, keep_prob)
    # clip the softmax output so log(0) cannot produce NaN
    cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(predict_, 1e-10, 1.0)))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    # evaluate accuracy
    correct_prediction = tf.equal(tf.argmax(predict_, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(500):
            # cycle through the training set, wrapping around at the end
            start = (i * batch_size) % (all_size - 1)
            if start < (all_size - batch_size):
                end1 = start + batch_size
                end2 = 0
            else:
                end1 = all_size - 1
                end2 = start + batch_size - (all_size - 1)
            in_x = np.concatenate((x_train[start:end1], x_train[0:end2]), axis=0)
            in_y = np.concatenate((y_train[start:end1], y_train[0:end2]), axis=0)
            sess.run(train_step, feed_dict={x_: in_x, y_: in_y, keep_prob: 0.5})
            if i % 50 == 0:
                print("iteration", i, ":")
                print("test accuracy:", sess.run(accuracy, feed_dict={x_: x_test, y_: y_test, keep_prob: 1.0}))
                print("train accuracy:", sess.run(accuracy, feed_dict={x_: in_x, y_: in_y, keep_prob: 1.0}))
        saver.save(sess, "./Model/model.ckpt")
def main():
    x_input = np.load("img.npy")     # load images
    y_input = np.load("label.npy")   # load labels
    x_input = x_input / 255.0        # the binary images hold 0/255; map 255 to 1
    x_input = x_input.reshape(-1, 28, 28, 1)  # add the channel axis the placeholder expects
    x_train, y_train = x_input[0:8000], y_input[0:8000]
    x_test, y_test = x_input[8000:10000], y_input[8000:10000]
    train(x_train, y_train, x_test, y_test)

if __name__ == "__main__":
    main()
2.2 The model converges quickly, reaching 100% accuracy on this synthetic test set.
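To actually use the trained model, a minimal sketch of restoring the checkpoint and classifying one 28×28 crop might look like this, assuming the inference() function from 2.1 is in scope and the checkpoint path from train() is unchanged:
# Hedged sketch: rebuild the graph, restore ./Model/model.ckpt, classify one sample.
import tensorflow as tf
import numpy as np

x_ = tf.placeholder("float", shape=[None, 28, 28, 1], name='x_input')
keep_prob = tf.placeholder("float")
predict_ = inference(x_, keep_prob)   # the inference() defined in 2.1
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "./Model/model.ckpt")
    sample = np.load("img.npy")[0].reshape(1, 28, 28, 1) / 255.0
    digit = sess.run(tf.argmax(predict_, 1), feed_dict={x_: sample, keep_prob: 1.0})
    print("predicted digit:", digit[0])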
---------------------
Author: 远上寒山
Source: CSDN
Original post: https://blog.csdn.net/m0_38097087/article/details/80312994
Copyright notice: this is the blogger's original article; please include a link to the original when reposting.