参考:https://blog.csdn.net/qq_39246466/article/details/123740015
package com.acts.opencv.base;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang3.StringUtils;
import org.opencv.core.Core;
import org.opencv.core.Core.MinMaxLocResult;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Point;
import org.opencv.core.Range;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import org.opencv.photo.Photo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import com.acts.opencv.common.utils.Constants;
import com.acts.opencv.common.utils.OpenCVUtil;
import com.acts.opencv.common.web.BaseController;
@Controller
@RequestMapping(value = "cardPlus")
public class CardPlusController extends BaseController {
private static final Logger logger = LoggerFactory.getLogger(CardPlusController.class);
/**
* 答题卡识别优化
* 创建者 Songer
* 创建时间 2018年3月23日
*/
@RequestMapping(value = "answerSheet")
public void answerSheet(HttpServletResponse response, String imagefile, Integer binary_thresh,
String blue_red_thresh) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
logger.info("\n 完整答题卡识别");
String sourcePath = Constants.PATH + imagefile;
logger.info("url==============" + sourcePath);
Mat sourceMat = Highgui.imread(sourcePath, Highgui.CV_LOAD_IMAGE_COLOR);
long t1 = new Date().getTime();
String destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk0.png";
Highgui.imwrite(destPath, sourceMat);
logger.info("原答题卡图片======" + destPath);
// 初始图片灰度图
Mat sourceMat1 = Highgui.imread(sourcePath, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk1.png";
Highgui.imwrite(destPath, sourceMat1);
logger.info("生成灰度图======" + destPath);
// 开运算(先腐蚀后膨胀),消除细小杂点
Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2 * 1 + 1, 2 * 1 + 1));
Imgproc.morphologyEx(sourceMat1, sourceMat1, Imgproc.MORPH_OPEN, element);
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk2.png";
Highgui.imwrite(destPath, sourceMat1);
logger.info("生成膨胀腐蚀后的图======" + destPath);
// 切割右侧和底部标记位图片
Mat rightMark = new Mat(sourceMat1, new Rect(sourceMat1.cols() - 100, 0, 100, sourceMat1.rows()));
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk3.png";
Highgui.imwrite(destPath, rightMark);
logger.info("截取右侧定位点图======" + destPath);
// 平滑处理消除噪点毛刺等等
Imgproc.GaussianBlur(rightMark, rightMark, new Size(3, 3), 0);
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk4.png";
Highgui.imwrite(destPath, rightMark);
logger.info("平滑处理后的右侧定位点图======" + destPath);
// 根据右侧定位获取水平投影,并获取纵向坐标
Mat matright = horizontalProjection(rightMark);
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk5.png";
Highgui.imwrite(destPath, matright);
logger.info("右侧水平投影图======" + destPath);
// 获取y坐标点,返回的是横向条状图集合
List<Rect> listy = getBlockRect(matright, 1, 0);
Mat footMark = new Mat(sourceMat1, new Rect(0, sourceMat1.rows() - 150, sourceMat1.cols(), 50));
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk6.png";
Highgui.imwrite(destPath, footMark);
logger.info("截取底部定位点图======" + destPath);
Imgproc.GaussianBlur(footMark, footMark, new Size(3, 3), 0);
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk7.png";
Highgui.imwrite(destPath, footMark);
logger.info("平滑处理后的底部定位点图======" + destPath);
// 根据底部定位获取垂直投影,并获取横向坐标
Mat matbootom = verticalProjection(footMark);
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk8.png";
Highgui.imwrite(destPath, matbootom);
logger.info("底部垂直投影图======" + destPath);
// 获取x坐标点,返回的是竖向的柱状图集合
List<Rect> listx = getBlockRect(matbootom, 0, 0);
// 高阶处理:增加HSV颜色查找,查找红色像素点
Mat matRed = findColorbyHSV(sourceMat, 156, 180);
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk9.png";
Highgui.imwrite(destPath, matRed);
logger.info("HSV找出红色像素点======" + destPath);
Mat dstNoRed = new Mat(sourceMat1.rows(), sourceMat1.cols(), sourceMat1.type());
dstNoRed = OpenCVUtil.dilation(sourceMat1);
// Imgproc.threshold(sourceMat1, dstNoRed, 190, 255, Imgproc.THRESH_BINARY);
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk10.png";
Highgui.imwrite(destPath, dstNoRed);
logger.info("原灰度图的图片======" + destPath);
Photo.inpaint(dstNoRed, matRed, dstNoRed, 1, Photo.INPAINT_NS);
// findBlackColorbyHSV(sourceMat);
// 解析填涂判定阈值:bluevalue 为判定"已填涂"的占比下限,redvalue 为"临界"占比下限
// (此处假设 blue_red_thresh 形如 "25_10"、以下划线分隔,原文未给出确切格式,属推测写法)
String bluevalue = blue_red_thresh.split("_")[0];
String redvalue = blue_red_thresh.split("_")[1];
Map<Integer, String> resultMap = new TreeMap<Integer, String>();
StringBuffer resultValue = new StringBuffer();
for (int no = 0; no < listx.size(); no++) {
Rect rectx = listx.get(no);
for (int an = 0; an < listy.size(); an++) {
Rect recty = listy.get(an);
Mat selectdst = new Mat(dstNoRed, new Range(recty.y, recty.y + recty.height), new Range(rectx.x, rectx.x + rectx.width));
// 本来是在每个区域内进行二值化,后来挪至了14步,整体进行二值化,因此注释掉此处2行
// Mat selectdst = new Mat(select.rows(), select.cols(), select.type());
// Imgproc.threshold(select, selectdst, 170, 255, Imgproc.THRESH_BINARY);
// System.out.println("rectx.x, recty.y=="+rectx.x+","+recty.y+"rectx.width,recty.height=="+rectx.width+","+recty.height);
double p100 = Core.countNonZero(selectdst) * 100 / (selectdst.size().area());
String que_answer = getQA(no, an);
Integer que = Integer.valueOf(que_answer.split("_")[0]);
String answer = que_answer.split("_")[1];
// System.out.println(Core.countNonZero(selectdst) + "/" + selectdst.size().area());
System.out.println(que_answer + ": " + p100);
if (p100 >= Integer.valueOf(bluevalue)) {// 蓝色
Core.rectangle(sourceMat, new Point(rectx.x, recty.y), new Point(rectx.x + rectx.width, recty.y
+ recty.height), new Scalar(255, 0, 0), 2);
// logger.info(que_answer + ":填涂");
if (StringUtils.isNotEmpty(resultMap.get(que))) {
resultMap.put(que, resultMap.get(que) + "," + answer);
} else {
resultMap.put(que, answer);
}
} else if (p100 > Integer.valueOf(redvalue) && p100 < Integer.valueOf(bluevalue)) {// 红色
Core.rectangle(sourceMat, new Point(rectx.x, recty.y), new Point(rectx.x + rectx.width, recty.y
+ recty.height), new Scalar(0, 0, 255), 2);
// logger.info(que_answer + ":临界");
if (StringUtils.isNotEmpty(resultMap.get(que))) {
resultMap.put(que, resultMap.get(que) + ",(" + answer + ")");
} else {
resultMap.put(que, "(" + answer + ")");
}
} else {// 绿色
Core.rectangle(sourceMat, new Point(rectx.x, recty.y), new Point(rectx.x + rectx.width, recty.y
+ recty.height), new Scalar(0, 255, 0), 1);
// logger.info(que_answer + ":未涂");
}
}
}
// for (Object result : resultMap.keySet()) {
for (int i = 1; i <= 100; i++) {
// logger.info("key=" + result + " value=" + resultMap.get(result));
resultValue.append(" " + i + "=" + (StringUtils.isEmpty(resultMap.get(i)) ? "未填写" : resultMap.get(i)));
if (i % 5 == 0) {
resultValue.append("
");
}
}
destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk15.png";
Highgui.imwrite(destPath, sourceMat);
logger.info("框选填图区域,绿色为选项,蓝色为填图,红色为临界======" + destPath);
long t2 = new Date().getTime();
System.out.println(t2 - t1);
// logger.info("输出最终结果:" + resultValue.toString());
renderString(response, resultValue.toString());
}
/**
* 绘制灰度直方图用于调整识别区域阈值判断
* @Author 王嵩
* @param img 输入的灰度图Mat对象
* @return Mat
* @Date 2018年3月28日
* 更新日志
* 2018年3月28日 王嵩 首次创建
*
*/
public Mat getGrayHistogram(Mat img) {
List<Mat> images = new ArrayList<Mat>();
images.add(img);
MatOfInt channels = new MatOfInt(0); // 图像通道数,0表示只有一个通道
MatOfInt histSize = new MatOfInt(256); // CV_8U类型的图片范围是0~255,共有256个灰度级
Mat histogramOfGray = new Mat(); // 输出直方图结果,共有256行,行数的相当于对应灰度值,每一行的值相当于该灰度值所占比例
MatOfFloat histRange = new MatOfFloat(0, 255);
Imgproc.calcHist(images, channels, new Mat(), histogramOfGray, histSize, histRange, false); // 计算直方图
MinMaxLocResult minmaxLoc = Core.minMaxLoc(histogramOfGray);
// 按行归一化
// Core.normalize(histogramOfGray, histogramOfGray, 0, histogramOfGray.rows(), Core.NORM_MINMAX, -1, new Mat());
// 创建画布
int histImgRows = 600;
int histImgCols = 1300;
System.out.println("---------" + histSize.get(0, 0)[0]);
int colStep = (int) Math.floor(histImgCols / histSize.get(0, 0)[0]);// 舍去小数,不能四舍五入,有可能列宽不够
Mat histImg = new Mat(histImgRows, histImgCols, CvType.CV_8UC3, new Scalar(255, 255, 255)); // 重新建一张图片,绘制直方图
int max = (int) minmaxLoc.maxVal;
System.out.println("--------" + max);
double bin_u = (double) (histImgRows - 20) / max; // max: 最高条的像素个数,则 bin_u 为单个像素的高度,因为画直方图的时候上移了20像素,要减去
int kedu = 0;
for (int i = 1; kedu <= minmaxLoc.maxVal; i++) {
kedu = i * max / 10;
// 在图像中显示文本字符串
Core.putText(histImg, kedu + "", new Point(0, histImgRows - kedu * bin_u), 1, 1, new Scalar(0, 0, 0));
}
for (int i = 0; i < histSize.get(0, 0)[0]; i++) { // 画出每一个灰度级分量的比例,注意OpenCV将Mat最左上角的点作为坐标原点
// System.out.println(i + ":=====" + histogramOfGray.get(i, 0)[0]);
Core.rectangle(histImg, new Point(colStep * i, histImgRows - 20), new Point(colStep * (i + 1), histImgRows
- bin_u * Math.round(histogramOfGray.get(i, 0)[0]) - 20),
new Scalar(0, 0, 0), 1, 8, 0);
kedu = i * 10;
// 每隔10画一下刻度
Core.rectangle(histImg, new Point(colStep * kedu, histImgRows - 20), new Point(colStep * (kedu + 1),
histImgRows - 20), new Scalar(255, 0, 0), 2, 8, 0);
Core.putText(histImg, kedu + "", new Point(colStep * kedu, histImgRows - 5), 1, 1, new Scalar(255, 0, 0)); // 附上x轴刻度
}
return histImg;
}
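// 使用示例(示意写法,文件名仅为假设):读入灰度图后绘制灰度直方图,观察填涂区与空白区的灰度分布,
// 以便为上面答题卡识别中的二值化阈值选取提供参考。
// Mat gray = Highgui.imread(Constants.PATH + imagefile, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
// Mat hist = getGrayHistogram(gray);
// Highgui.imwrite(Constants.PATH + Constants.DEST_IMAGE_PATH + "gray_hist.png", hist);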
// 获取题号及选项填涂情况
public String getQA(int no, int an) {
// 返回 1_A、1_B、1_C ... 这样的 "题号_选项" 字符串
int first = no + 1 + an / 4 * 20;
String second = "";
if (an % 4 == 0) {
second = "A";
} else if (an % 4 == 1) {
second = "B";
} else if (an % 4 == 2) {
second = "C";
} else if (an % 4 == 3) {
second = "D";
}
return first + "_" + second;
}
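// 换算示例(由上面的公式推得):no 为列号(listx 的下标),an 为行号(listy 的下标),每 4 行选项对应一组 20 道题。
// getQA(0, 0) -> "1_A"  (第 1 题选项 A)
// getQA(5, 6) -> "26_C" (an/4=1 进入第二组,题号 = 5 + 1 + 20 = 26;an%4=2 对应 C)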
public static void main(String[] args) {
System.out.println(5 / 3);
}
/**
* 红色色系0-20,160-180
* 蓝色色系100-120
* 绿色色系60-80
* 黄色色系23-38
* 识别出的颜色会标记为白色,其他的为黑色
* @param min
* @param max
*/
public static Mat findColorbyHSV(Mat source, int min, int max) {
Mat hsv_image = new Mat();
Imgproc.GaussianBlur(source, source, new Size(3, 3), 0, 0);
Imgproc.cvtColor(source, hsv_image, Imgproc.COLOR_BGR2HSV);
// String imagenameb = "D:\\test\\testImge\\ttbefore.jpg";
// Highgui.imwrite(imagenameb, hsv_image);
Mat thresholded = new Mat();
Core.inRange(hsv_image, new Scalar(min, 90, 90), new Scalar(max, 255, 255), thresholded);
return thresholded;
}
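// 使用示例(示意写法):按上面注释的色系范围查找蓝色像素(H 约 100~120),返回的掩膜中目标颜色为白色:
// Mat blueMask = findColorbyHSV(sourceMat, 100, 120);
// answerSheet 中即以 findColorbyHSV(sourceMat, 156, 180) 找出红色像素,再交给 Photo.inpaint 去红。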
/**
* 查找黑色
* @param source
* @return
*/
public static Mat findBlackColorbyHSV(Mat source) {
Mat hsv_image = new Mat();
Imgproc.GaussianBlur(source, source, new Size(3, 3), 0, 0);
Imgproc.cvtColor(source, hsv_image, Imgproc.COLOR_BGR2HSV);
String imagenameb = "D:\\test\\testImge\\ttbefore.jpg";
Highgui.imwrite(imagenameb, hsv_image);
Mat thresholded = new Mat();
Core.inRange(hsv_image, new Scalar(0, 0, 0), new Scalar(180, 255, 46), thresholded);
String ttblack = "D:\\test\\testImge\\ttblack.jpg";
Highgui.imwrite(ttblack, thresholded);
return thresholded;
}
/**
* 水平投影
* @param source 传入灰度图片Mat
* @return
*/
public static Mat horizontalProjection(Mat source) {
Mat dst = new Mat(source.rows(), source.cols(), source.type());
// 先进行反转二值化
Imgproc.threshold(source, dst, 150, 255, Imgproc.THRESH_BINARY_INV);
// 水平积分投影
// 每一行的白色像素的个数
int[] rowswidth = new int[dst.rows()];
for (int i = 0; i < dst.rows(); i++) {
for (int j = 0; j < dst.cols(); j++) {
if (dst.get(i, j)[0] == 255) {
rowswidth[i]++;
}
}
}
// 定义一个白色跟原图一样大小的画布
Mat matResult = new Mat(dst.rows(), dst.cols(), CvType.CV_8UC1, new Scalar(255, 255, 255));
// 将每一行按照行像素值大小填充像素宽度
for (int i = 0; i < matResult.rows(); i++) {
for (int j = 0; j < rowswidth[i]; j++) {
matResult.put(i, j, 0);
}
}
return matResult;
}
/**
* 垂直投影
* @param source 传入灰度图片Mat
* @return
*/
public static Mat verticalProjection(Mat source) {
// 先进行反转二值化
Mat dst = new Mat(source.rows(), source.cols(), source.type());
Imgproc.threshold(source, dst, 150, 255, Imgproc.THRESH_BINARY_INV);
// 垂直积分投影
// 每一列的白色像素的个数
int[] colswidth = new int[dst.cols()];
for (int j = 0; j < dst.cols(); j++) {
for (int i = 0; i < dst.rows(); i++) {
if (dst.get(i, j)[0] == 255) {
colswidth[j]++;
}
}
}
Mat matResult = new Mat(dst.rows(), dst.cols(), CvType.CV_8UC1, new Scalar(255, 255, 255));
// 将每一列按照列像素值大小填充像素宽度
for (int j = 0; j < matResult.cols(); j++) {
for (int i = 0; i < colswidth[j]; i++) {
matResult.put(matResult.rows() - 1 - i, j, 0);
}
}
return matResult;
}
/**
* 图片切块
* @param srcImg 传入水平或垂直投影的图片对象Mat
* @param proType 传入投影Mat对象的 投影方式0:垂直投影图片,竖向切割;1:水平投影图片,横向切割
* @param rowXY 由于传来的可能是原始图片的部分切片,要计算切块的实际坐标位置需要给出切片时所在的坐标,所以需要传递横向切片的y坐标或者纵向切片的x坐标
* 如当proType==0时,传入的是切片的垂直投影,那么切成块后能得出x坐标及块宽高度,但是实际y坐标需要加上原切片的y坐标值,所以rowXY为切片的y坐标点,
* 同理当proType==1时,rowXY应该为x坐标
* @return
*/
public static List<Rect> getBlockRect(Mat srcImg, Integer proType, int rowXY) {
Imgproc.threshold(srcImg, srcImg, 150, 255, Imgproc.THRESH_BINARY_INV);
// 注意 countNonZero 方法是获取非0像素(白色像素)数量,所以一般要对图像进行二值化反转
List<Rect> rectList = new ArrayList<Rect>();
int size = proType == 0 ? srcImg.cols() : srcImg.rows();
int[] pixNum = new int[size];
if (proType == 0) {
for (int i = 0; i < srcImg.cols(); i++) {
Mat col = srcImg.col(i);
pixNum[i] = Core.countNonZero(col) > 1 ? Core.countNonZero(col) : 0;
}
} else {// 水平投影只关注行
for (int i = 0; i < srcImg.rows(); i++) {
Mat row = srcImg.row(i);
pixNum[i] = Core.countNonZero(row) > 1 ? Core.countNonZero(row) : 0;
}
}
int startIndex = 0;// 记录进入字符区的索引
int endIndex = 0;// 记录进入空白区域的索引
boolean inBlock = false;// 是否遍历到了字符区内
for (int i = 0; i < size; i++) {
if (!inBlock && pixNum[i] != 0) {// 进入字符区,上升跳变沿
inBlock = true;
startIndex = i;
} else if (pixNum[i] == 0 && inBlock) {// 进入空白区,下降跳变沿存储
endIndex = i;
inBlock = false;
Rect rect = null;
if (proType == 0) {
rect = new Rect(startIndex, rowXY, (endIndex - startIndex), srcImg.rows());
} else {
rect = new Rect(rowXY, startIndex, srcImg.cols(), (endIndex - startIndex));
}
rectList.add(rect);
}
}
return rectList;
}
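// 使用示例(对应上面 answerSheet 中的调用方式):
// 底部切片做垂直投影后按列切块,得到各列选项的 x 坐标与宽度:
//   List<Rect> listx = getBlockRect(verticalProjection(footMark), 0, 0);
// 右侧切片做水平投影后按行切块,得到各行选项的 y 坐标与高度:
//   List<Rect> listy = getBlockRect(horizontalProjection(rightMark), 1, 0);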
}
package com.acts.opencv.base;
import java.io.File;
import java.util.Vector;
import javax.servlet.http.HttpServletResponse;
import net.sourceforge.tess4j.Tesseract;
import net.sourceforge.tess4j.TesseractException;
import org.opencv.core.Core;
import org.opencv.core.Core.MinMaxLocResult;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import com.acts.opencv.common.utils.Constants;
import com.acts.opencv.common.utils.OpenCVUtil;
import com.acts.opencv.common.web.BaseController;
@Controller
@RequestMapping(value = "page")
public class PageController extends BaseController {
private static final Logger logger = LoggerFactory.getLogger(PageController.class);
/**
* 页码识别
* 创建者 Songer
* 创建时间 2018年3月23日
*/
@RequestMapping(value = "pageOCR")
public void pageOCR(HttpServletResponse response, String imagefile, Integer ocrType) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
logger.info("\n 页码识别");
String sourcePath = Constants.PATH + imagefile;
logger.info("url==============" + sourcePath);
// Mat sourceMat = Highgui.imread(sourcePath, Highgui.CV_LOAD_IMAGE_COLOR);
// String destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "dtk0.png";
// Highgui.imwrite(destPath, sourceMat);
String result = "";
if (ocrType == 1) {// tesseract ocr 识别方式
result = getPageNoByTesseract(sourcePath);
} else if (ocrType == 2) {// 轮廓识别
result = getPageNoByContours(sourcePath);
} else {
result = getPageNoByTemplate(sourcePath);
}
renderString(response, result);
}
// public static void main(String[] args) {
// long t1 = new Date().getTime();
// try {
// File imageFile = new File("D:\\test\\testImge\\t3.bmp");
// Tesseract instance = new Tesseract(); // JNA Interface Mapping
// instance.setLanguage("chi_sim");
// String result = instance.doOCR(imageFile);
// System.out.println("result=====" + result);
// long t2 = new Date().getTime();
// System.out.println((t2 - t1));
// } catch (TesseractException e) {
// e.printStackTrace();
// }
// }
/**
* 使用tesseract方式识别页码,注意tessdata放到tomcat的bin目录下
* @Author 王嵩
* @param filePath
* @return String
* @Date 2018年4月4日
* 更新日志
* 2018年4月4日 王嵩 首次创建
*
*/
public String getPageNoByTesseract(String filePath) {
String result = "";
try {
File file = new File(filePath);
Tesseract instance = new Tesseract(); // JNA Interface Mapping
instance.setLanguage("chi_sim");//
result = instance.doOCR(file);
logger.info("result====={}", result);
} catch (TesseractException e) {
e.printStackTrace();
}
return result;
}
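// 补充说明(可选配置,示例路径仅为假设):除将 tessdata 放到 tomcat 的 bin 目录下外,
// Tess4J 也可以通过 setDatapath 显式指定 tessdata 目录:
// Tesseract instance = new Tesseract();
// instance.setDatapath("D:\\tessdata");
// instance.setLanguage("chi_sim");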
/**
* 使用轮廓识别页码
* @Author 王嵩
* @param filePath
* @return String
* @Date 2018年4月8日
* 更新日志
* 2018年4月8日 王嵩 首次创建
*
*/
public String getPageNoByContours(String filePath) {
Mat source = Highgui.imread(filePath, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
System.out.println("row" + source.rows() + " col " + source.cols());
// 二值化反转
Mat bininv = new Mat(source.rows(), source.cols(), source.type());
Imgproc.threshold(source, bininv, 170, 255, Imgproc.THRESH_BINARY_INV);
// 腐蚀膨胀,用于消除噪点和干扰项
Mat destination = OpenCVUtil.eroding(bininv);
destination = OpenCVUtil.dilation(destination);
// 轮廓识别
Vector<MatOfPoint> contours = OpenCVUtil.findContours(destination);
int pageSize = 0;
// 原颜色图片加载,用于画出识别轮廓,实际开发不需要
Mat image = Highgui.imread(filePath, Highgui.CV_LOAD_IMAGE_COLOR);
for (int i = 0; i < contours.size(); i++) {
Mat result = new Mat(destination.size(), CvType.CV_8UC3, new Scalar(255, 255, 255));
Imgproc.drawContours(result, contours, i, new Scalar(0, 0, 255), 1);
MatOfPoint mop = contours.get(i);
// 获取轮廓面积
double contArea = Math.abs(Imgproc.contourArea(mop, false));
Rect r = Imgproc.boundingRect(mop);
System.out.println("轮廓面积:" + contArea);
if (contArea > 1200) {// 此处是根据轮廓面积
// 红线画出识别的轮廓
Core.rectangle(image, new Point(r.x, r.y), new Point(r.x + r.width, r.y
+ r.height), new Scalar(0, 0, 255), 2);
pageSize++;
}
}
String destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "page0.png";
Highgui.imwrite(destPath, image);
System.out.println("页码为:" + pageSize);
return pageSize + "";
}
/**
* 使用模板匹配识别页码
* @Author 王嵩
* @param filePath
* @return String
* @Date 2018年4月8日
* 更新日志
* 2018年4月8日 王嵩 首次创建
*/
public String getPageNoByTemplate(String filePath) {
String pageSize = "";
Mat source = Highgui.imread(filePath, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
// 二值化反转
Mat bininv = new Mat(source.rows(), source.cols(), source.type());
Imgproc.threshold(source, bininv, 170, 255, Imgproc.THRESH_BINARY_INV);
Vector<MatOfPoint> contours1 = OpenCVUtil.findContours(bininv);
MatOfPoint mop = contours1.get(0);
Rect rect = Imgproc.boundingRect(mop);
Mat matchtemp = source.submat(rect.y, rect.y + rect.height, rect.x, rect.x + rect.width);
String page_temp = Constants.PATH + Constants.DEST_IMAGE_PATH + "page_temp.png";
Highgui.imwrite(page_temp, matchtemp);
String pagePath = Constants.PATH + Constants.SOURCE_IMAGE_PATH + "shuzi.png";
Mat pageimage = Highgui.imread(pagePath, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
int result_cols = pageimage.cols() - matchtemp.cols() + 1;
int result_rows = pageimage.rows() - matchtemp.rows() + 1;
Mat destination = new Mat(result_rows, result_cols, CvType.CV_32FC1);
Imgproc.matchTemplate(pageimage, matchtemp, destination, Imgproc.TM_CCOEFF);
// 矩阵归一化处理
Core.normalize(destination, destination, 0, 255, Core.NORM_MINMAX, -1, new Mat());
MinMaxLocResult minmaxLoc = Core.minMaxLoc(destination);
Point matchLoc = minmaxLoc.maxLoc;
Core.rectangle(pageimage, matchLoc, new Point(matchLoc.x + matchtemp.cols(), matchLoc.y + matchtemp.rows()),
new Scalar(0), 2);
System.out.println(matchLoc.x + " " + matchLoc.y);
pageSize = getPage(matchLoc.x) + "";
String destPath = Constants.PATH + Constants.DEST_IMAGE_PATH + "page1.png";
Highgui.imwrite(destPath, pageimage);
return pageSize;
}
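// 补充说明:matchTemplate 使用 TM_CCOEFF 时相关值越大匹配越好,因此取 minMaxLoc 的 maxLoc 作为最佳匹配位置;
// 若改用 TM_SQDIFF 系列,则应取 minLoc。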
/**
* 根据横坐标返回页码
* @Author 王嵩
* @param x
* @return int
* @Date 2018年4月8日
* 更新日志
* 2018年4月8日 王嵩 首次创建
*
*/
public int getPage(double x) {
// 减去2像素,是因为shuzi.png外边框是预留了2像素的,因此匹配结果坐标为:2,2;72,2;142,2
// Math.floor 返回不大于的最大整数
return (int) Math.floor((x - 2) / 70) + 1;
}
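// 换算示例:shuzi.png 中每个数字宽约 70 像素,外边框预留 2 像素,
// 故匹配坐标 x=2 -> 第 1 页,x=72 -> 第 2 页,x=142 -> 第 3 页。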
public static void main(String[] args) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
Mat markmat1 = Highgui.imread("D:\\test\\abc\\1.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
Mat markmat2 = Highgui.imread("D:\\test\\abc\\2.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
Vector<MatOfPoint> contours1 = OpenCVUtil.findContours(markmat1);
Vector<MatOfPoint> contours2 = OpenCVUtil.findContours(markmat2);
Mat mat1 = getSimMark("D:\\test\\abc\\1.png");
Mat mat2 = getSimMark("D:\\test\\abc\\2.png");
double result1 = Imgproc.matchShapes(contours1.get(0), contours2.get(0), Imgproc.CV_CONTOURS_MATCH_I1, 0);
double result2 = Imgproc.matchShapes(mat1, mat2, Imgproc.CV_CONTOURS_MATCH_I1, 0);
System.out.println(result1);
System.out.println(result2);
}
private static MatOfPoint getSimMark(String path) {
Mat markmat = Highgui.imread(path, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
// Imgproc.threshold(markmat, markmat, 190, 255, Imgproc.THRESH_BINARY_INV);
Vector<MatOfPoint> contours = new Vector<MatOfPoint>();
Mat rsmat = new Mat();
Imgproc.findContours(markmat, contours, rsmat, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE, new Point());
MatOfPoint markMop = new MatOfPoint();
Mat result = new Mat(markmat.size(), CvType.CV_8U, new Scalar(255));
Imgproc.drawContours(result, contours, 0, new Scalar(0), 1);
String image1 = "d:\\test\\abc\\t.jpg";
Highgui.imwrite(image1, result);
for (int i = 0; i < contours.size(); i++) {
markMop = contours.get(0);
// MatOfPoint2f mat2f = new MatOfPoint2f();
// MatOfPoint2f dstmat2f = new MatOfPoint2f();
// markMop.convertTo(mat2f, CvType.CV_32FC1);
// // 多边形逼近算法,减少轮廓的顶点,便于对比
// Imgproc.approxPolyDP(mat2f, dstmat2f, markMop.total() * 0.02, true);
// dstmat2f.convertTo(markMop, CvType.CV_32S);
}
return markMop;
}
}
静态资源类
package com.acts.opencv.common.utils;
import org.springframework.web.context.ContextLoader;
/**
* 常量 创建者 Songer 创建时间 2018年3月09日
*
*/
public class Constants {
public static final String CURRENT_USER = "UserInfo";
public static final String WECHAT_USER = "weChatUserInfo";
public static final String REFERENCE_CODE = "referenceCode";
public static final String SUCCESS = "success";
public static final String ERROR = "error";
public static final String SF_FILE_SEPARATOR = System.getProperty("file.separator");// 文件分隔符
public static final String SF_LINE_SEPARATOR = System.getProperty("line.separator");// 行分隔符
public static final String SF_PATH_SEPARATOR = System.getProperty("path.separator");// 路径分隔符
public static final String PATH = ContextLoader.getCurrentWebApplicationContext().getServletContext().getRealPath("/");
/**
* 文件
*/
public static final String SOURCE_IMAGE_PATH = Constants.SF_FILE_SEPARATOR + "statics"
+ Constants.SF_FILE_SEPARATOR + "sourceimage" + Constants.SF_FILE_SEPARATOR;// 图片原地址
public static final String DEST_IMAGE_PATH = Constants.SF_FILE_SEPARATOR + "statics" + Constants.SF_FILE_SEPARATOR
+ "destimage" + Constants.SF_FILE_SEPARATOR;// 图片生成地址
/**
* 返回参数规范
*/
/** 区分类型 1 -- 无错误,Code重复 */
public static final String CODE_DUPLICATE = "1";
/** 区分类型 2 -- 无错误,名称重复 */
public static final String NAME_DUPLICATE = "2";
/** 区分类型 3 -- 数量超出 */
public static final String NUMBER_OVER = "3";
/** 区分类型 0 -- 无错误,程序正常执行 */
public static final String NO_ERROR = "0";
/** 区分类型 -1 -- 无错误,返回结果为空 */
public static final String NULL_POINTER = "-1";
/** 区分类型 -2 -- 错误,参数不正确 */
public static final String INCORRECT_PARAMETER = "-2";
/** 区分类型 -3 -- 错误,程序执行错误 */
public static final String PROGRAM_EXECUTION_ERROR = "-3";
/** 区分类型 -5 -- 错误,数据已删除 */
public static final String DATA_DELETED = "-5";
/** 区分类型 -6 -- 错误,参数不一致(验证码) */
public static final String DATA_NOT_SAME = "-6";
/**json文件缺失 */
public static final String NO_JSON_FILE = "-7";
/**
* 分页中可能用到的常量
*/
public static final Integer PAGE_SIZE=10;//一页共有十条内容
}
OpenCV工具类
package com.acts.opencv.common.utils;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.io.IOException;
import java.util.Date;
import java.util.Vector;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
public class OpenCVUtil {
public static BufferedImage covertMat2Buffer(Mat mat) throws IOException {
long time1 = new Date().getTime();
// Mat 转byte数组
BufferedImage originalB = toBufferedImage(mat);
long time3 = new Date().getTime();
System.out.println("保存读取方法2转=" + (time3 - time1));
return originalB;
// ImageIO.write(originalB, "jpg", new File("D:\\test\\testImge\\ws2.jpg"));
}
public static byte[] covertMat2Byte(Mat mat) throws IOException {
long time1 = new Date().getTime();
// Mat 转byte数组
byte[] return_buff = new byte[(int) (mat.total() * mat.channels())];
mat.get(0, 0, return_buff);
long time3 = new Date().getTime();
System.out.println(mat.total() * mat.channels());
System.out.println("保存读取方法2转=" + (time3 - time1));
return return_buff;
}
public static byte[] covertMat2Byte1(Mat mat) throws IOException {
long time1 = new Date().getTime();
MatOfByte mob = new MatOfByte();
Highgui.imencode(".jpg", mat, mob);
long time3 = new Date().getTime();
// System.out.println(mat.total() * mat.channels());
System.out.println("Mat转byte[] 耗时=" + (time3 - time1));
return mob.toArray();
}
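// 补充说明:covertMat2Byte 得到的是未压缩的原始像素字节(长度为 total()*channels()),
// covertMat2Byte1 经 imencode 得到的是编码后的 jpg 字节流,更适合直接写回 HTTP 响应或落盘。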
public static BufferedImage toBufferedImage(Mat m) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if (m.channels() > 1) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
int bufferSize = m.channels() * m.cols() * m.rows();
byte[] b = new byte[bufferSize];
m.get(0, 0, b); // get all the pixels
BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
System.arraycopy(b, 0, targetPixels, 0, b.length);
return image;
}
/**
* 腐蚀膨胀是针对于白色区域来说的,腐蚀即腐蚀白色区域
* 腐蚀算法(黑色区域变大)
* @param source
* @return
*/
public static Mat eroding(Mat source) {
return eroding(source, 1);
}
public static Mat eroding(Mat source, double erosion_size) {
Mat resultMat = new Mat(source.rows(), source.cols(), source.type());
Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2 * erosion_size + 1,
2 * erosion_size + 1));
Imgproc.erode(source, resultMat, element);
return resultMat;
}
/**
* 腐蚀膨胀是针对于白色区域来说的,膨胀是膨胀白色区域
* 膨胀算法(白色区域变大)
* @param source
* @return
*/
public static Mat dilation(Mat source) {
return dilation(source, 1);
}
/**
* 腐蚀膨胀是针对于白色区域来说的,膨胀是膨胀白色区域
* @Author 王嵩
* @param source
* @param dilationSize 膨胀因子2*x+1 里的x
* @return Mat
* @Date 2018年2月5日
* 更新日志
* 2018年2月5日 王嵩 首次创建
*
*/
public static Mat dilation(Mat source, double dilation_size) {
Mat resultMat = new Mat(source.rows(), source.cols(), source.type());
Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2 * dilation_size + 1,
2 * dilation_size + 1));
Imgproc.dilate(source, resultMat, element);
return resultMat;
}
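// 使用示例(对应 PageController.getPageNoByContours 中的写法,binInv 为示例变量名):
// 先腐蚀再膨胀即开运算,可在保留整体形状的同时去除细小噪点:
// Mat opened = dilation(eroding(binInv));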
/**
* 轮廓识别,使用最外层轮廓抽取模式RETR_EXTERNAL,轮廓逼近方法为CHAIN_APPROX_SIMPLE
* @param source 传入进来的图片Mat对象
* @return 返回轮廓结果集
*/
public static Vector<MatOfPoint> findContours(Mat source) {
Mat rs = new Mat();
/**
* 定义轮廓抽取模式
*RETR_EXTERNAL:只检索最外面的轮廓;
*RETR_LIST:检索所有的轮廓,并将其放入list中;
*RETR_CCOMP:检索所有的轮廓,并将他们组织为两层:顶层是各部分的外部边界,第二层是空洞的边界;
*RETR_TREE:检索所有的轮廓,并重构嵌套轮廓的整个层次。
*/
int mode = Imgproc.RETR_EXTERNAL;
// int mode = Imgproc.RETR_TREE;
/**
* 定义轮廓识别方法
* 边缘近似方法(除了RETR_RUNS使用内置的近似,其他模式均使用此设定的近似算法)。可取值如下:
*CV_CHAIN_CODE:以Freeman链码的方式输出轮廓,所有其他方法输出多边形(顶点的序列)。
*CHAIN_APPROX_NONE:将所有的连码点,转换成点。
*CHAIN_APPROX_SIMPLE:压缩水平的、垂直的和斜的部分,也就是,函数只保留他们的终点部分。
*CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS:使用the flavors of Teh-Chin chain近似算法的一种。
*LINK_RUNS:通过连接水平段的1,使用完全不同的边缘提取算法。使用CV_RETR_LIST检索模式能使用此方法。
*/
int method = Imgproc.CHAIN_APPROX_SIMPLE;
Vector<MatOfPoint> contours = new Vector<MatOfPoint>();
Imgproc.findContours(source, contours, rs, mode, method, new Point());
return contours;
}
}