An earlier article covered generating this CAPTCHA: https://blog.csdn.net/huoyingchong64/article/details/89788676
This article deals with processing that CAPTCHA so it can be recognized by a machine-learning / OCR pipeline.
The main steps are grayscale conversion, binarization, denoising, and character splitting. The tested code is given below. Note the line
rate = 0.01  # process only within a band around the threshold; with rate = 0.1 one letter's gray level fell inside the band and was binarized together with the background
This rate value still needs tuning. Setting it to 0 also works, which reduces the check to i == threshold.
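To make the effect of rate concrete, here is a minimal sketch (threshold = 204 is a made-up background gray level, not one measured from this CAPTCHA). It prints, for several rate values, which gray levels fall inside the band and would therefore be binarized to 1 (background):

# Sketch only: threshold = 204 is a hypothetical background gray level.
threshold = 204
for rate in (0.0, 0.01, 0.1):
    lo, hi = threshold * (1 - rate), threshold * (1 + rate)
    band = [i for i in range(256) if lo <= i <= hi]
    print(rate, band[0], band[-1], len(band))
# rate = 0.0  -> only gray level 204 is treated as background
# rate = 0.01 -> roughly 202..206 are treated as background
# rate = 0.1  -> roughly 184..224 are treated as background; a letter whose gray level
#                falls in this wide band is binarized together with the background and lost

The full code follows.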
#coding:utf-8
import numpy as np
from PIL import Image,ImageDraw,ImageFile
import cv2
import imagehash
import time
import os
import pytesseract
from collections import defaultdict
rootdir = "D:/CapCha/Test"
# Path to tesseract.exe
pytesseract.pytesseract.tesseract_cmd = 'C://Program Files (x86)/Tesseract-OCR/tesseract.exe'
def ReadFileList():
    # Try recognizing the raw CAPTCHA images directly, without any preprocessing
    for name in os.listdir(rootdir)[:10]:   # only look at the first 10 files
        path = os.path.join(rootdir, name)
        print(path)
        if os.path.isfile(path):
            print(os.path.basename(path))
            image = Image.open(path)
            code = pytesseract.image_to_string(image)
            print(code)
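# Note: direct OCR on the raw, noisy CAPTCHA usually gives poor results, which is why
# the pipeline below (grayscale -> binarize -> denoise -> split) is applied first.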
def splitimage(rownum, colnum, dstpath, img_name, outpath):
    '''
    Split an image into a grid of sub-images.
    :param rownum: number of rows to cut
    :param colnum: number of columns to cut
    :param dstpath: directory of the source image
    :param img_name: path of the image to cut
    :param outpath: output directory
    :return:
    '''
    img = Image.open(img_name)
    w, h = img.size
    if rownum <= h and colnum <= w:
        print('Original image info: %sx%s, %s, %s' % (w, h, img.format, img.mode))
        print('Start splitting the image, please wait...')
        s = os.path.split(img_name)
        if dstpath == '':
            dstpath = s[0]
        fn = s[1].split('.')
        basename = fn[0]
        ext = fn[-1]
        num = 1
        rowheight = h // rownum
        colwidth = w // colnum
        file_list = []
        for r in range(rownum):
            index = 0
            for c in range(colnum):
                # (left, upper, right, lower)
                # box = (c * colwidth, r * rowheight, (c + 1) * colwidth, (r + 1) * rowheight)
                # Column widths are hand-tuned for this particular CAPTCHA layout:
                # the first columns get a few extra pixels so characters are not clipped.
                if index < 1:
                    colwid = colwidth + 6
                elif index < 2:
                    colwid = colwidth + 1
                elif index < 3:
                    colwid = colwidth
                box = (c * colwid, r * rowheight, (c + 1) * colwid, (r + 1) * rowheight)
                newfile = os.path.join(outpath, basename[num - 1] + '_' + basename + '_' + str(num - 1) + '.' + ext)
                file_list.append(newfile)
                img.crop(box).save(newfile, ext)
                num = num + 1
                index += 1
        for f in file_list:
            print(f)
        print('Splitting done, %s sub-images generated.' % (num - 1))
def get_threshold(image):
    # Find the gray level that occurs most often in the image;
    # this most frequent value is taken to be the background.
    pixel_dict = defaultdict(int)   # gray level -> number of occurrences
    rows, cols = image.size         # image.size is (width, height)
    for i in range(rows):
        for j in range(cols):
            pixel = image.getpixel((i, j))
            pixel_dict[pixel] += 1
    count_max = max(pixel_dict.values())    # highest occurrence count
    pixel_dict_reverse = {v: k for k, v in pixel_dict.items()}
    threshold = pixel_dict_reverse[count_max]   # the most frequent gray level
    return threshold
def get_bin_table(threshold):
    # Build the 256-entry lookup table used for binarization.
    # threshold: the background gray level returned by get_threshold()
    # Gray levels inside a small band around the threshold map to 1 (white/background);
    # everything else maps to 0 (black/foreground).
    table = []
    for i in range(256):
        # Width of the band around the threshold. With rate = 0.1 one letter's gray level
        # was so close to the threshold that it fell inside the band and was binarized
        # together with the background; rate = 0 reduces the check to i == threshold.
        rate = 0.01
        if threshold * (1 - rate) <= i <= threshold * (1 + rate):
            table.append(1)
        else:
            table.append(0)
    return table
def cut_noise(image):
    # Remove isolated noise points from the binarized image.
    rows, cols = image.size   # image.size is (width, height)
    change_pos = []           # positions of detected noise points
    # Visit every pixel except the image border
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            # pixel_set collects the black pixels in the neighbourhood of this point
            pixel_set = []
            # the neighbourhood is the 3x3 block centred on the point
            for m in range(i - 1, i + 2):
                for n in range(j - 1, j + 2):
                    if image.getpixel((m, n)) == 0:   # 0 is black; white is stored as 255 in mode '1'
                        pixel_set.append(image.getpixel((m, n)))
            # if the 3x3 block contains at most 4 black pixels, treat the point as noise
            if len(pixel_set) <= 4:
                change_pos.append((i, j))
    # set every noise position to 1 (white)
    for pos in change_pos:
        image.putpixel(pos, 1)
    return image  # return the cleaned image
def OCR_lmj(img_path):
    # Recognize the digits and letters in one image.
    # Takes an image path and returns the recognized text.
    image = Image.open(img_path)   # open the image file
    imgry = image.convert('L')     # convert to grayscale
    # The most frequent gray level is the image background
    max_pixel = get_threshold(imgry)
    # Binarize the image
    table = get_bin_table(threshold=max_pixel)
    out = imgry.point(table, '1')
    # Remove noise (isolated points)
    out = cut_noise(out)
    # Save the intermediate image if needed
    # out.save('E://figures/img_gray.jpg')
    # Recognize digits only
    # text = pytesseract.image_to_string(out, config='digits')
    # Recognize digits and letters
    text = pytesseract.image_to_string(out)
    # Strip special characters from the result
    exclude_char_list = ' .:\\|\'\"?![],()~@#$%^&*_+-={};<>/¥'
    text = ''.join([x for x in text if x not in exclude_char_list])
    return text
def main():
    # Run pytesseract over every image in a directory
    # Images live in the figures directory
    dir = 'E://figures'
    correct_count = 0   # number of correctly recognized images
    total_count = 0     # total number of images
    # Iterate over the png/jpg files under figures
    for file in os.listdir(dir):
        if file.endswith('.png') or file.endswith('.jpg'):
            # print(file)
            image_path = '%s/%s' % (dir, file)   # image path
            answer = file.split('.')[0]          # file name, i.e. the correct text in the image
            recognizition = OCR_lmj(image_path)  # recognized text
            print((answer, recognizition))
            if recognizition == answer:          # count a hit when the recognition is correct
                correct_count += 1
            total_count += 1
    print('Total count: %d, correct: %d.' % (total_count, correct_count))
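# Note: main() is not invoked in the __main__ block below, which only demonstrates the
# preprocessing and splitting of a single image; to batch-test recognition on a whole
# directory, it could be called directly, e.g.:
#
# if __name__ == '__main__':
#     main()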
if __name__ == '__main__':
    rownum = 1
    colnum = 5
    dstpath = "D:/CapCha/Test/"
    img_name = "0b1F1.png"
    outpath = "D:/CapCha/outdir"
    grypath = "D:/CapCha/grydir/"
    fileName = dstpath + img_name
    image = Image.open(fileName)   # open the image file
    imgry = image.convert('L')     # convert to grayscale
    imgry.save(os.path.join(outpath, img_name))
    # The most frequent gray level is the image background
    max_pixel = get_threshold(imgry)
    # Binarize the image
    table = get_bin_table(threshold=max_pixel)
    out = imgry.point(table, '1')
    # Remove noise (isolated points)
    out = cut_noise(out)
    out.save(grypath + img_name)
    splitimage(rownum, colnum, dstpath, grypath + img_name, outpath)
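As a possible follow-up (a sketch, not part of the tested code above, assuming the split character images were written to D:/CapCha/outdir by the call above and that tesseract_cmd is configured as in the script), each single-character image could be fed to tesseract with --psm 10, which tells it to treat the input as one character:

import os
import pytesseract
from PIL import Image

# pytesseract.pytesseract.tesseract_cmd may need to be set, as in the script above.
outpath = "D:/CapCha/outdir"   # directory that splitimage() wrote the character images to
for f in sorted(os.listdir(outpath)):
    if f.endswith('.png'):
        # --psm 10: treat the image as a single character
        ch = pytesseract.image_to_string(Image.open(os.path.join(outpath, f)),
                                         config='--psm 10')
        print(f, ch.strip())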