Android studio3.6.1+ opencv3.4.1进行摄像头颜色识别的调试过程
本文过程建立在成功导入opencv3.4.1library并且成功运行打开摄像头的基础上的。在完成了验证并且使用opencv打开摄像头程序之后,剩下的就是java文件中代码的具体编写了,本人刚刚上手android学习,现在将一些简单的理解与在调试中遇到的困难记录如下,如有不足和错误之处欢迎大家留言交流。
一.一些调试中常见的错误
1.在.xml文件中添加注释,删除注释快捷键
添加注释:选中你要注释的代码->ctrl+shift+/
去掉注释:选中你要去掉注释的代码->ctrl+shift+/
2.activity名字的修改,以Main2Activity为例,
如果要修改名字:1.文件名要改;2.manifest中注册的名字要改;3.Main2Activity中对应的类名也要改
3.进一步说,activity_main.xml的名字不必与MainActivity对应,但一般保持对应最好,方便查阅;布局的调用是在MainActivity中声明的
4.在AndroidManifest.xml里面可以设置横竖屏的方式
5.一个activity对应一个xml,但是在这个activity中可以调用别的activity
二.在进行颜色识别时的过程
1.在官方的示例代码color-blob-detection中的两个java文件复制粘贴在你的相应位置中,其中我们可以将ColorBlobDetectionActivity.java改名成自己的MainActivity.java,但要注意在上方提到的几个位置进行修改,并于显示界面的文件xml对应上,否则就会报错。ColorBlobDetection.java个人理解就相当于是写好的一个类,我们调用其中的一些功能。
2.官方的代码是打开APP之后,先选中一个色块,然后去找到对应颜色的轮廓,并默认用红色的细线进行框出。
而我想实现的功能是:预先设置好颜色的阈值范围,然后识别出自己颜色的轮廓,并使用在其中心位置画出小矩形,而不是官网的一些opencv的教程提到的画出外接矩形,下面将自己的代码贴在下面,上面有自己在学习的工程中记录的一些注释,可能有些地方不太准确,欢迎交流
MainActivity.java
在这里插入代码片
package com.example.testopencvad;
import java.util.List;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.imgproc.Imgproc;
import org.opencv.imgproc.Moments;
import android.app.Activity;
import android.graphics.drawable.shapes.OvalShape;
import android.os.Bundle;
import android.util.Log;
import android.view.MotionEvent;
import android.view.View;
import android.view.Window;
import android.view.WindowManager;
import android.view.View.OnTouchListener;
import android.view.SurfaceView;
import static org.opencv.imgproc.Imgproc.CONTOURS_MATCH_I1;
import static org.opencv.imgproc.Imgproc.boundingRect;
/**
 * Camera color-blob tracking activity.
 *
 * <p>Shows the OpenCV camera preview; when the user touches the preview, the
 * average HSV color of a small region around the touch point becomes the
 * tracking target. On every subsequent frame the largest matching contour is
 * located and its centroid is marked with a small rectangle. A swatch of the
 * selected color and a hue-spectrum strip are overlaid in the top-left corner.
 */
public class MainActivity extends Activity implements OnTouchListener, CvCameraViewListener2 {
    private static final String TAG = "colordetect";

    /** Set to true once the user has tapped the preview to pick a reference color. */
    private boolean mIsColorSelected = false;
    /** Current camera frame in RGBA; reassigned on every onCameraFrame call. */
    private Mat mRgba;
    /** Average color of the touched region, in RGBA and HSV respectively. */
    private Scalar mBlobColorRgba;
    private Scalar mBlobColorHsv;
    private ColorBlobDetector mDetector;
    /** Strip visualizing the hue range currently being tracked. */
    private Mat mSpectrum;
    private Size SPECTRUM_SIZE;
    // OpenCV's camera preview widget (plays the role of a SurfaceView). Its
    // concrete implementations are JavaCameraView and NativeCameraView; the
    // CvCameraViewListener2 interface delivers start/stop and per-frame callbacks.
    private CameraBridgeViewBase mOpenCvCameraView;

    /** Async-init callback: enables the camera view once OpenCV has loaded. */
    private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
        @Override
        public void onManagerConnected(int status) {
            switch (status) {
                case LoaderCallbackInterface.SUCCESS: {
                    Log.i(TAG, "OpenCV loaded successfully");
                    mOpenCvCameraView.enableView();
                    mOpenCvCameraView.setOnTouchListener(MainActivity.this);
                } break;
                default: {
                    super.onManagerConnected(status);
                } break;
            }
        }
    };

    public MainActivity() {
        Log.i(TAG, "Instantiated new " + this.getClass());
    }

    /** Called when the activity is first created. */
    @Override
    public void onCreate(Bundle savedInstanceState) {
        Log.i(TAG, "called onCreate");
        super.onCreate(savedInstanceState);
        requestWindowFeature(Window.FEATURE_NO_TITLE);
        getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
        setContentView(R.layout.activity_main);
        mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.cjv);
        mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
        mOpenCvCameraView.setCvCameraViewListener(this);
    }

    @Override
    public void onPause() {
        super.onPause();
        // Release the camera while in the background.
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.disableView();
    }

    @Override
    public void onResume() {
        super.onResume();
        if (!OpenCVLoader.initDebug()) {
            Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
            OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_3_4_0, this, mLoaderCallback);
        } else {
            Log.d(TAG, "OpenCV library found inside package. Using it!");
            mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
        }
    }

    @Override
    public void onDestroy() {
        super.onDestroy();
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.disableView();
    }

    @Override
    public void onCameraViewStarted(int width, int height) {
        mRgba = new Mat(height, width, CvType.CV_8UC4);
        mDetector = new ColorBlobDetector();
        mSpectrum = new Mat();
        mBlobColorRgba = new Scalar(255);
        mBlobColorHsv = new Scalar(255);
        SPECTRUM_SIZE = new Size(200, 64);
    }

    @Override
    public void onCameraViewStopped() {
        mRgba.release();
    }

    /**
     * Samples the color under the user's finger: extracts a small rectangle
     * around the touch point, averages its HSV value, and hands it to the
     * detector as the new tracking target.
     */
    @Override
    public boolean onTouch(View v, MotionEvent event) {
        int cols = mRgba.cols();   // frame width
        int rows = mRgba.rows();   // frame height
        // The preview view can be larger than the camera frame; the frame is
        // centered in the view, so shift view coordinates into image coordinates.
        int xOffset = (mOpenCvCameraView.getWidth() - cols) / 2;
        int yOffset = (mOpenCvCameraView.getHeight() - rows) / 2;
        int x = (int) event.getX() - xOffset;
        int y = (int) event.getY() - yOffset;
        Log.i(TAG, "Touch image coordinates: (" + x + ", " + y + ")");
        if ((x < 0) || (y < 0) || (x > cols) || (y > rows)) return false;

        // Sampling rectangle (up to 8x8) centered on the touch, clamped to the frame.
        Rect touchedRect = new Rect();
        touchedRect.x = (x > 4) ? x - 4 : 0;
        touchedRect.y = (y > 4) ? y - 4 : 0;
        touchedRect.width = (x + 4 < cols) ? x + 4 - touchedRect.x : cols - touchedRect.x;
        touchedRect.height = (y + 4 < rows) ? y + 4 - touchedRect.y : rows - touchedRect.y;

        Mat touchedRegionRgba = mRgba.submat(touchedRect);
        Mat touchedRegionHsv = new Mat();
        Imgproc.cvtColor(touchedRegionRgba, touchedRegionHsv, Imgproc.COLOR_RGB2HSV_FULL);

        // Average HSV over every pixel of the touched region.
        mBlobColorHsv = Core.sumElems(touchedRegionHsv);
        int pointCount = touchedRect.width * touchedRect.height;
        for (int i = 0; i < mBlobColorHsv.val.length; i++)
            mBlobColorHsv.val[i] /= pointCount;

        mBlobColorRgba = converScalarHsv2Rgba(mBlobColorHsv);
        Log.i(TAG, "Touched rgba color: (" + mBlobColorRgba.val[0] + ", " + mBlobColorRgba.val[1] +
                ", " + mBlobColorRgba.val[2] + ", " + mBlobColorRgba.val[3] + ")");

        mDetector.setHsvColor(mBlobColorHsv);
        Imgproc.resize(mDetector.getSpectrum(), mSpectrum, SPECTRUM_SIZE, 0, 0, Imgproc.INTER_LINEAR_EXACT);
        mIsColorSelected = true;

        touchedRegionRgba.release();
        touchedRegionHsv.release();
        return false; // don't need subsequent touch events
    }

    /**
     * Per-frame processing: finds the largest blob matching the target color
     * and marks its centroid with a small rectangle.
     */
    @Override
    public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
        mRgba = inputFrame.rgba();
        if (mIsColorSelected) {
            mDetector.process(mRgba);
            List<MatOfPoint> contours = mDetector.getContours();
            Log.e(TAG, "Contours count: " + contours.size());
            if (contours.size() > 0) {
                // Pick the contour with the largest area.
                double maxVal = 0;
                int maxValIdx = 0;
                for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
                    double contourArea = Imgproc.contourArea(contours.get(contourIdx));
                    if (maxVal < contourArea) {
                        maxVal = contourArea;
                        maxValIdx = contourIdx;
                    }
                }
                // Centroid from image moments: (m10/m00, m01/m00). Guard m00
                // so a degenerate (zero-area) contour cannot divide by zero.
                Moments m = Imgproc.moments(contours.get(maxValIdx));
                if (m.m00 != 0) {
                    int cx = (int) (m.m10 / m.m00);
                    int cy = (int) (m.m01 / m.m00);
                    // Small 10x10 marker rectangle centered on the blob.
                    Imgproc.rectangle(mRgba, new Point(cx - 5, cy - 5), new Point(cx + 5, cy + 5),
                            new Scalar(0, 0, 255));
                }
            }
            // Top-left overlays: swatch of the selected color and the spectrum strip.
            Mat colorLabel = mRgba.submat(4, 68, 4, 68);
            colorLabel.setTo(mBlobColorRgba);
            Mat spectrumLabel = mRgba.submat(4, 4 + mSpectrum.rows(), 70, 70 + mSpectrum.cols());
            mSpectrum.copyTo(spectrumLabel);
        }
        return mRgba;
    }

    /** Converts a single HSV scalar to its RGBA equivalent via a 1x1 Mat round trip. */
    private Scalar converScalarHsv2Rgba(Scalar hsvColor) {
        Mat pointMatRgba = new Mat();
        Mat pointMatHsv = new Mat(1, 1, CvType.CV_8UC3, hsvColor);
        Imgproc.cvtColor(pointMatHsv, pointMatRgba, Imgproc.COLOR_HSV2RGB_FULL, 4);
        return new Scalar(pointMatRgba.get(0, 0));
    }
}
ColorBlobDetection.java
在这里插入代码片
package com.example.testopencvad;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Scalar;
import org.opencv.imgproc.Imgproc;
/**
 * Detects color blobs: binarizes a frame against an HSV range, finds the
 * external contours, and keeps those whose area is a significant fraction of
 * the largest one. Contour coordinates are returned in the coordinate system
 * of the original (full-resolution) frame.
 */
public class ColorBlobDetector {
    // Lower and upper bounds for range checking in HSV color space.
    private Scalar mLowerBound = new Scalar(0);
    private Scalar mUpperBound = new Scalar(0);
    // Minimum contour area, as a fraction of the largest contour's area.
    // Instance field (was static): the instance setter below would otherwise
    // silently change the threshold of every detector instance at once.
    private double mMinContourArea = 0.1;
    // Half-width of the accepted range around the selected color, per HSV channel.
    private Scalar mColorRadius = new Scalar(25, 50, 50, 0);
    private Mat mSpectrum = new Mat();
    private List<MatOfPoint> mContours = new ArrayList<MatOfPoint>();

    // Cached intermediate Mats, reused across frames to avoid reallocation.
    Mat mPyrDownMat = new Mat();
    Mat mHsvMat = new Mat();
    Mat mMask = new Mat();
    Mat mDilatedMask = new Mat();
    Mat mHierarchy = new Mat();

    public void setColorRadius(Scalar radius) {
        mColorRadius = radius;
    }

    /**
     * Sets the target color (average HSV of the touched region) and derives
     * the lower/upper HSV bounds plus a spectrum strip for on-screen display.
     */
    public void setHsvColor(Scalar hsvColor) {
        // Clamp the hue window to [0, 255] (FULL-range hue).
        double minH = (hsvColor.val[0] >= mColorRadius.val[0]) ? hsvColor.val[0] - mColorRadius.val[0] : 0;
        double maxH = (hsvColor.val[0] + mColorRadius.val[0] <= 255) ? hsvColor.val[0] + mColorRadius.val[0] : 255;

        mLowerBound.val[0] = minH;
        mUpperBound.val[0] = maxH;
        mLowerBound.val[1] = hsvColor.val[1] - mColorRadius.val[1];
        mUpperBound.val[1] = hsvColor.val[1] + mColorRadius.val[1];
        mLowerBound.val[2] = hsvColor.val[2] - mColorRadius.val[2];
        mUpperBound.val[2] = hsvColor.val[2] + mColorRadius.val[2];
        mLowerBound.val[3] = 0;
        mUpperBound.val[3] = 255;

        // Build a 1-pixel-high hue gradient covering [minH, maxH). Guard the
        // width so a zero hue radius cannot produce an empty Mat for cvtColor.
        int spectrumWidth = Math.max(1, (int) (maxH - minH));
        Mat spectrumHsv = new Mat(1, spectrumWidth, CvType.CV_8UC3);
        for (int j = 0; j < maxH - minH; j++) {
            byte[] tmp = {(byte) (minH + j), (byte) 255, (byte) 255};
            spectrumHsv.put(0, j, tmp);
        }
        Imgproc.cvtColor(spectrumHsv, mSpectrum, Imgproc.COLOR_HSV2RGB_FULL, 4);
    }

    public Mat getSpectrum() {
        return mSpectrum;
    }

    public void setMinContourArea(double area) {
        mMinContourArea = area;
    }

    /**
     * Processes one RGBA frame: downsamples, thresholds, dilates, finds
     * contours, and stores the ones passing the relative-area filter
     * (rescaled to the full frame) for retrieval via {@link #getContours()}.
     */
    public void process(Mat rgbaImage) {
        // Downsample twice (quarter size) to cut processing cost; contour
        // coordinates are scaled back up by 4 at the end.
        Imgproc.pyrDown(rgbaImage, mPyrDownMat);
        Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);

        Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);

        // NOTE(review): a fixed blue HSV range is used here instead of the
        // bounds computed by setHsvColor() — deliberate, so the app tracks a
        // preset color; restore the commented inRange line below to go back
        // to touch-selected bounds.
        Scalar lowerbScalar = new Scalar(100, 50, 50);   // lower HSV threshold for blue
        Scalar highbScalar = new Scalar(130, 255, 255);  // upper HSV threshold for blue
        // Binarize: pixels inside the range become white in mMask.
        Core.inRange(mHsvMat, lowerbScalar, highbScalar, mMask);
        // Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask);

        Imgproc.dilate(mMask, mDilatedMask, new Mat());

        List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
        Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

        // Find the maximum contour area.
        double maxArea = 0;
        Iterator<MatOfPoint> each = contours.iterator();
        while (each.hasNext()) {
            MatOfPoint wrapper = each.next();
            double area = Imgproc.contourArea(wrapper);
            if (area > maxArea)
                maxArea = area;
        }

        // Keep contours above the relative area threshold and rescale their
        // coordinates x4 to undo the two pyrDown steps.
        mContours.clear();
        each = contours.iterator();
        while (each.hasNext()) {
            MatOfPoint contour = each.next();
            if (Imgproc.contourArea(contour) > mMinContourArea * maxArea) {
                Core.multiply(contour, new Scalar(4, 4), contour);
                mContours.add(contour);
            }
        }
    }

    /** Returns the contours found by the most recent {@link #process(Mat)} call. */
    public List<MatOfPoint> getContours() {
        return mContours;
    }
}