Android NDK Development: Gesture Recognition

Table of Contents

Demo

Related Articles

OpenGL Series Part 16: Implementing a Big-Eye Effect

Implementation Steps

1. Download the base code

The code used here is based on the article OpenGL Series Part 16: Implementing a Big-Eye Effect.

2. Add the recognition library

Because the CameraX version used inside the dependency conflicts with the one in my project, I excluded those modules:

implementation('com.google.mediapipe:solution-core:latest.release') {
    exclude group: 'androidx.camera', module: 'camera-camera2'
    exclude group: 'androidx.camera', module: 'camera-core'
}
implementation('com.google.mediapipe:hands:latest.release') {
    exclude group: 'androidx.camera', module: 'camera-camera2'
    exclude group: 'androidx.camera', module: 'camera-core'
}
3. Modify GLHelper

We modify the getTextureData method so that it also returns the texture's pixel buffer, which is later used in the renderer to build a Bitmap.

fun getTextureData(matrix: FloatArray): GLData {
        // camTextureId -> fboTexureId: draw the camera texture into the FBO texture
        GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, fbo[0])
        GLES20.glViewport(0, 0, textureWidth, textureHeight)
        GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT)
        GLES20.glUseProgram(progCam2FBO)
        GLES20.glVertexAttribPointer(
            vcCam2FBO,
            2,
            GLES20.GL_FLOAT,
            false,
            4 * 2,
            vertexCoordsBuffer
        )
        textureCoordsBuffer!!.clear()
        textureCoordsBuffer?.put(transformTextureCoordinates(textureCoords, matrix))
        textureCoordsBuffer!!.position(0)
        GLES20.glVertexAttribPointer(
            tcCam2FBO,
            2,
            GLES20.GL_FLOAT,
            false,
            4 * 2,
            textureCoordsBuffer
        )
        GLES20.glActiveTexture(GLES20.GL_TEXTURE0)
        GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, camTextureId[0])
        GLES20.glUniform1i(GLES20.glGetUniformLocation(progCam2FBO, "sTexture"), 0)
        GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4)
        GLES20.glFlush()

        var targetTexureId = fboTexureId[0]

        //======================== Generate the image data on the Kotlin side ============================
        val pixelBuffer = ByteBuffer.allocate(textureWidth * textureHeight * 4)
        GLES20.glReadPixels(
            0,
            0,
            textureWidth,
            textureHeight,
            GLES20.GL_RGBA,
            GLES20.GL_UNSIGNED_BYTE,
            pixelBuffer
        )

        targetTexureId = drawTexureId[0]
        return GLData(targetTexureId,pixelBuffer)
    }
data class GLData(var textureId: Int, var byteBuffer: ByteBuffer?)
4. Run recognition

For recognition we use the Hands class from the dependency:

private val hands: Hands

We need to instantiate Hands and register a result listener. The paintFlag set in the listener is used to notify the OpenGL thread that it can draw.

constructor(cameraView: CameraView){
        this.cameraView = cameraView
        glHelper = GLHelper()
        hands = Hands(
            cameraView.context,
            HandsOptions.builder()
                .setStaticImageMode(true)
                .setMaxNumHands(2)
                .setRunOnGpu(true)
                .build()
        )
        hands.setResultListener { handsResult: HandsResult? ->
            Log.e("result", "${handsResult?.multiHandLandmarks()?.size}")
            if (handsResult != null && handsResult.multiHandLandmarks().isNotEmpty()) {
                hasHand = 1
                arrayLandMarks = handsResult.multiHandLandmarks()[0].landmarkOrBuilderList.toTypedArray()
                Log.e("result", "${arrayLandMarks?.size}")
            }
            paintFlag = true
        }
        hands.setErrorListener { message: String, e: RuntimeException? ->
            Log.e("test", "MediaPipe Hands error: $message")
        }
        val lifecycleOwner = cameraView.context as LifecycleOwner
        cameraHelper = CameraHelper(lifecycleOwner,this)
    }
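The fields touched in the listener (paintFlag, hasHand, arrayLandMarks) are not shown in this excerpt. A minimal sketch of how they might be declared, assuming the Hands result callback and the GL render thread are different threads (the @Volatile markers and the exact types are my assumption, inferred from how the fields are used):

    // Assumed field declarations (not shown in the original post), shared between
    // the Hands result callback thread and the GL render thread.
    @Volatile private var paintFlag = false   // set by the result listener, polled in onDrawFrame
    @Volatile private var hasHand = 0         // 1 when at least one hand was detected in the current frame
    @Volatile private var arrayLandMarks: Array<LandmarkProto.NormalizedLandmarkOrBuilder>? = null
    private var startTime = 0L                // frame start time used for the FPS log
    private var glData: GLData? = null        // texture id + pixel buffer returned by GLHelper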

In onDrawFrame we fetch the image data for the texture, convert it to a Bitmap, and send it to Hands for recognition. We then loop until the result callback signals that recognition has finished, and finally pass the recognized data to the native layer for drawing.

override fun onDrawFrame(p0: GL10?) {
        startTime = System.currentTimeMillis()
        paintFlag = false
        hasHand = 0
        GLES30.glClearColor(0.0f, 0.0f, 0.0f, 1.0f)
        GLES30.glClear(GLES30.GL_COLOR_BUFFER_BIT or GLES30.GL_DEPTH_BUFFER_BIT)
        // Update the camera frame
        surfaceTexture.updateTexImage()

        val matrix = FloatArray(16)
        surfaceTexture.getTransformMatrix(matrix)

        glData = glHelper.getTextureData(matrix)

        var ARGB8888ImageBitmap =
            Bitmap.createBitmap(glHelper.textureWidth, glHelper.textureHeight, Bitmap.Config.ARGB_8888)
        ARGB8888ImageBitmap.copyPixelsFromBuffer(glData?.byteBuffer)
        // Scale the Bitmap down to speed up recognition
        val scaleWidth = (glHelper.textureWidth.toFloat() * 0.05f).toInt()
        val scaleHeight = (glHelper.textureHeight.toFloat() * 0.05f).toInt()
        ARGB8888ImageBitmap = Bitmap.createScaledBitmap(ARGB8888ImageBitmap,scaleWidth,scaleHeight,false)
        hands.send(ARGB8888ImageBitmap)
        ARGB8888ImageBitmap.recycle()
        try {
            while (!paintFlag){
                Thread.sleep(6)
            }
            Log.e("渲染","渲染")
            ndkPaintGL(glData!!.textureId,hasHand,arrayLandMarks)
        }catch (e:Exception){}finally {
            val endTime = System.currentTimeMillis()
            val offTime = endTime - startTime
            val fps = 1000 / offTime
            LogUtils.eTag("FPS",fps)
            requestRender()
        }
    }
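The ndkPaintGL call above is a native method whose Kotlin-side declaration is not shown in the post. A minimal sketch of what it presumably looks like, inferred from the JNI function name Java_com_itfitness_openglcheckhand_render_GLRender_ndkPaintGL in step 5 (the parameter types and the native library name are assumptions):

    // Assumed declaration inside GLRender, matching the JNI signature in step 5
    private external fun ndkPaintGL(
        textureId: Int,
        hasHand: Int,
        landmarks: Array<LandmarkProto.NormalizedLandmarkOrBuilder>?
    )

    companion object {
        init {
            System.loadLibrary("native-lib") // assumed library name; use the one declared in your CMakeLists.txt
        }
    }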
5. Draw the hand landmarks

Recognition yields 21 landmarks per hand, each with x, y, and z values; here we only need x and y. The coordinates come back normalized to the 0–1 range, so we have to compute the real pixel positions ourselves (for example, a landmark at (0.5, 0.25) on a 1280×720 image maps to pixel (640, 180)). The whole thing looks like this:

void* pixelBuffer = NULL;
extern "C"
JNIEXPORT void JNICALL
Java_com_itfitness_openglcheckhand_render_GLRender_ndkPaintGL(JNIEnv *env, jobject thiz,
                                                           jint texture_id,jint hasHand,jobjectArray landmarks) {
    int width = ccRender->textureWidth;
    int height = ccRender->textureHeight;
    if(pixelBuffer == NULL){
        pixelBuffer = malloc(width * height * 4);
    } else{
        memset(pixelBuffer,0,width * height  * 4);
    }
    glReadPixels(
            0,
            0,
            width,
            height,
            GL_RGBA,
            GL_UNSIGNED_BYTE,
            pixelBuffer
    );

    cv::Mat imageSrc(height, width, CV_8UC4, pixelBuffer);

    cv::cvtColor(imageSrc,imageSrc,cv::COLOR_RGBA2BGR);


    if(hasHand == 1){
        // The 21 hand landmark points (in pixel coordinates)
        CCFloat3 vertexPoints[21];
        for(int i = 0 ; i < 21 ; i++){
            jobject landMark = env->GetObjectArrayElement(landmarks,i);
            jclass landMarkClazz = env->GetObjectClass(landMark);
            jmethodID getXMethod = env->GetMethodID(landMarkClazz,"getX", "()F");
            jmethodID getYMethod = env->GetMethodID(landMarkClazz,"getY", "()F");
            jmethodID getZMethod = env->GetMethodID(landMarkClazz,"getZ", "()F");
            float x = env->CallFloatMethod(landMark,getXMethod);
            float y = env->CallFloatMethod(landMark,getYMethod);
            float z = env->CallFloatMethod(landMark,getZMethod);
            float centerX = x * imageSrc.cols;
            float centerY = y * imageSrc.rows;
            vertexPoints[i].x = centerX;
            vertexPoints[i].y = centerY;
            vertexPoints[i].z = 1.0;
            cv::circle(imageSrc,cv::Point(centerX,centerY),20,cv::Scalar(0,255,0),cv::FILLED);
            LOGE("点%d->%f,%f",i,centerX,centerY);
            LOGE("点%d->%f,%f",i,x,y);
            LOGE("点%d->%d,%d",i,imageSrc.cols,imageSrc.rows);
        }

        //cv::line(imageSrc,cv::Point(vertexPoints[0].x,vertexPoints[0].y),cv::Point(vertexPoints[1].x,vertexPoints[1].y),cv::Scalar(0,255,0),3);

        cv::putText(imageSrc,"itfitness",
                    cv::Point(vertexPoints[8].x,vertexPoints[8].y),
                    cv::FONT_HERSHEY_SIMPLEX,2,cv::Scalar(0,0,255),5);
    }

    cv::cvtColor(imageSrc,imageSrc,cv::COLOR_BGR2RGBA);
    glActiveTexture(GL_TEXTURE0); // texture_id is a texture object, not a texture unit, so bind it on unit 0
    glBindTexture(GL_TEXTURE_2D,texture_id);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageSrc.cols, imageSrc.rows, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageSrc.data);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    ccRender->paintGL(texture_id);
}

Sample Source Code

https://gitee.com/itfitness/opengl-check-hand
