Preface: I needed to provide a camera SDK to a third party. The SDK must not require the caller to pass in a view; it only has to deliver the camera preview callback data. So I wrote two camera manager classes, one based on Camera API 1 and one on Camera API 2, each of which can either show the preview image while delivering the callback data, or deliver the callback data only without showing a preview. The classes below are written in Kotlin.
1. Camera parameter constants: Const.kt
// package ...
/**
* @author :zzh
* time : 2021/4/23 15:21
*/
object Const {
const val previewWidth = 1280
const val previewHeight = 720
/** Preview callback data format after conversion: NV21 */
const val previewImageFormatNV21 = 100
/** Preview callback data format after conversion: RAW16, big-endian */
const val previewImageFormatRAW16 = 101
}
2. Image processing utility class: ImageUtils.kt
// package ...
import android.graphics.*
import android.media.Image
import android.os.Environment
import android.util.Log
import java.io.File
import java.io.FileOutputStream
import java.io.IOException
import kotlin.math.ceil
/**
* @author :zzh
* time : 2021/4/23 15:03
*/
object ImageUtils {
// add by zzh, optimize time consume
fun getBytesFromImageToNV21(image: Image?): ByteArray? {
try {
if (image == null || image.planes.isEmpty()) {
return null
}
val planes = image.planes.clone()
val pictureSize = image.width * image.height
val nv21 = ByteArray(pictureSize * 3 / 2)
var index: Int
// NV21 data layout is YYYY...VUVU...; the source Image is YV12-like (YYYY...VV...UU...) with Y on planes[0], U on planes[1], V on planes[2]
// the Image format is decided by DisplayFragment.mFormat
if (planes.size > 2) {
// y
val yBuffer = planes[0].buffer
yBuffer.get(nv21, 0, pictureSize)
// System.arraycopy(planes[0].buffer, 0, nv21, 0, pictureSize)
// the v pixel stride is equal to the u pixel stride
val uvPixelStride = planes[2].pixelStride
index = pictureSize
// u
val uBuffer = planes[1].buffer
// v
val vBuffer = planes[2].buffer
// the v byte length is equal to the u byte length
var uv = 0
while (uv < vBuffer.capacity()) {
nv21[index++] = vBuffer[uv]
nv21[index++] = uBuffer[uv]
uv += uvPixelStride
}
}
return nv21
} catch (e: Exception) {
Log.d("ImageUtils", "getBytesFromImageToNV21 error=" + e.message)
} finally {
image?.close()
}
return null
}
// YV12: YYYYYYYY VV UU
fun yv12ToNV21(byteArray: ByteArray, width: Int, height: Int): ByteArray {
val yuvBytes = ByteArray(width * height * 3 / 2)
try {
// copy y
val size = width * height
System.arraycopy(byteArray, 0, yuvBytes, 0, size)
var uvIndex = size
var uByteIndex = size * 5 / 4
for (vByteIndex in size until size * 5 / 4) {
// copy v
yuvBytes[uvIndex++] = byteArray[vByteIndex]
// copy u
yuvBytes[uvIndex++] = byteArray[uByteIndex++]
}
return yuvBytes
} catch (e: java.lang.Exception) {
e.printStackTrace()
}
return yuvBytes
}
/**
* Save a bitmap to a file as JPEG
*/
fun saveBitmap(file: File?, bitmap: Bitmap): Boolean {
var out: FileOutputStream? = null
try {
out = FileOutputStream(file)
bitmap.compress(Bitmap.CompressFormat.JPEG, 100, out)
return true
} catch (e: java.lang.Exception) {
e.printStackTrace()
} finally {
try {
out?.close()
} catch (e: java.lang.Exception) {
e.printStackTrace()
}
}
return false
}
/**
* Convert RAW10 in a non-Android-standard layout (the packed low-bit bytes are appended at the end) to RAW16
* @param src
* @param width
* @param height
* @return
*/
fun transformRAW10ToRAW16(src: ByteArray, width: Int, height: Int): ByteArray {
val dst = ByteArray(width * height * 2)
val rowStride = src.size / height
for (i in 0 until height) {
var k = 0
var n = 0
var dstStartIndex: Int
var srcStartIndex: Int
var fillByteStartIndex: Int
var j = 0
while (j < width) {
srcStartIndex = i * width + j
dstStartIndex = (i * width + k) * 2
//height * (rowStride - width)
fillByteStartIndex = i * (rowStride - width) + n
/** Big_Endian ① */
dst[dstStartIndex] = src[srcStartIndex]
dst[dstStartIndex + 1] = (src[fillByteStartIndex].toInt().shl(6) and 0xc0).toByte()
dst[dstStartIndex + 2] = src[srcStartIndex + 1]
dst[dstStartIndex + 3] = (src[fillByteStartIndex].toInt().shl(4) and 0xc0).toByte()
dst[dstStartIndex + 4] = src[srcStartIndex + 2]
dst[dstStartIndex + 5] = (src[fillByteStartIndex].toInt().shl(2) and 0xc0).toByte()
dst[dstStartIndex + 6] = src[srcStartIndex + 3]
dst[dstStartIndex + 7] = (src[fillByteStartIndex].toInt() and 0xc0).toByte()
/** Big_Endian ② */
// dst[dstStartIndex] = src[srcStartIndex]
// dst[dstStartIndex + 1] = (byte) (src[fillByteStartIndex] & 0xc0)
// dst[dstStartIndex + 2] = src[srcStartIndex + 1]
// dst[dstStartIndex + 3] = (byte) ((src[fillByteStartIndex] << 2) & 0xc0)
// dst[dstStartIndex + 4] = src[srcStartIndex + 2]
// dst[dstStartIndex + 5] = (byte) ((src[fillByteStartIndex] << 4) & 0xc0)
// dst[dstStartIndex + 6] = src[srcStartIndex + 3]
// dst[dstStartIndex + 7] = (byte) ((src[fillByteStartIndex] << 6) & 0xc0)
/** Little_Endian ① */
// dst[dstStartIndex] = (byte) (src[fillByteStartIndex] & 0x03)
// dst[dstStartIndex + 1] = src[srcStartIndex]
// dst[dstStartIndex + 2] = (byte) ((src[fillByteStartIndex] >> 2) & 0x03)
// dst[dstStartIndex + 3] = src[srcStartIndex + 1]
// dst[dstStartIndex + 4] = (byte) ((src[fillByteStartIndex] >> 4) & 0x03)
// dst[dstStartIndex + 5] = src[srcStartIndex + 2]
// dst[dstStartIndex + 6] = (byte) ((src[fillByteStartIndex] >> 6) & 0x03)
// dst[dstStartIndex + 7] = src[srcStartIndex + 3]
/** Little_Endian ② */
// dst[dstStartIndex] = (byte) (src[fillByteStartIndex >> 6] & 0x03)
// dst[dstStartIndex + 1] = src[srcStartIndex]
// dst[dstStartIndex + 2] = (byte) ((src[fillByteStartIndex] >> 4) & 0x03)
// dst[dstStartIndex + 3] = src[srcStartIndex + 1]
// dst[dstStartIndex + 4] = (byte) ((src[fillByteStartIndex] >> 2) & 0x03)
// dst[dstStartIndex + 5] = src[srcStartIndex + 2]
// dst[dstStartIndex + 6] = (byte) ((src[fillByteStartIndex]) & 0x03)
// dst[dstStartIndex + 7] = src[srcStartIndex + 3]
k += 4
n++
j += 4
}
}
return dst
}
var index = 0
fun saveNV21(data: ByteArray, width: Int, height: Int) {
if (index >= 5) {
return
}
//save one frame as a JPEG file
val fileName = "IMG_" + index++.toString() + ".jpg" //JPEG file name
val sdRoot = Environment.getExternalStorageDirectory() //external storage root
val dir = "/jpeg/" //folder name
val mkDir = File(sdRoot, dir)
if (!mkDir.exists()) {
//create the directory if it does not exist
mkDir.mkdirs()
}
val pictureFile = File(sdRoot, dir + fileName)
if (!pictureFile.exists()) {
try {
pictureFile.createNewFile()
val filecon = FileOutputStream(pictureFile)
// wrap the NV21 data in a YuvImage
val image = YuvImage(data, ImageFormat.NV21, width, height, null)
// compress the NV21 image to JPEG at quality 70 and write the JPEG stream to the file
image.compressToJpeg(Rect(0, 0, image.width, image.height), 70, filecon)
filecon.close()
} catch (e: IOException) {
e.printStackTrace()
}
}
}
fun getYV12ImagePixelSize(previewWidth: Int, previewHeight: Int): Int {
val yStride = (ceil(previewWidth / 16.0) * 16).toInt()
val uvStride = (ceil(yStride / 2 / 16.0) * 16).toInt()
val ySize = yStride * previewHeight
val uvSize = uvStride * previewHeight / 2
return ySize + uvSize * 2
}
}
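To show how these helpers fit together, here is a minimal sketch (not part of the SDK itself) that converts a single Camera API 1 preview frame to NV21 and saves it, assuming the camera was configured for ImageFormat.YV12 at the 1280x720 size from Const.kt:
// Hypothetical helper: convert one YV12 preview frame to NV21 and save it as JPEG.
fun handleYv12Frame(data: ByteArray, width: Int, height: Int) {
    // sanity check against the stride-aligned YV12 buffer size
    if (data.size < ImageUtils.getYV12ImagePixelSize(width, height)) {
        return
    }
    // YV12 (Y plane + V plane + U plane) -> NV21 (Y plane + interleaved VU)
    val nv21 = ImageUtils.yv12ToNV21(data, width, height)
    // writes at most 5 frames to <external storage>/jpeg/IMG_x.jpg
    ImageUtils.saveNV21(nv21, width, height)
}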
3. Manager interface for Camera API 1 and Camera API 2: ICameraManager.kt
// package ...
import android.view.TextureView
/**
* @author :zzh
* time : 2021/4/27 10:34
*/
interface ICameraManager {
/**
* Open the camera and start the preview; variant that shows the preview on a TextureView
* */
fun openCamera(textureView: TextureView, cameraId: Int, previewWidth: Int, previewHeight: Int, imageFormat: Int)
/**
* Open the camera and start the preview; variant without a preview view (callback data only)
* */
fun openCamera(cameraId: Int, previewWidth: Int, previewHeight: Int, imageFormat: Int)
/**
* Start the preview; only used by Camera API 1, no-op in the API 2 implementation
* */
fun startPreview()
/**
* Stop the preview; only used by Camera API 1, no-op in the API 2 implementation
* */
fun stopPreview()
/**
* Release the camera
* */
fun releaseCamera()
/**
* Set the listener that receives the processed preview callback data
* */
fun setPreviewCallback(camera12PreviewCallback: ICameraPreviewCallback)
}
4. Preview callback data listener interface: ICameraPreviewCallback.kt
// package ...
/**
* @author :zzh
* time : 2021/4/23 14:59
*/
interface ICameraPreviewCallback {
fun onPreviewFrame(data: ByteArray?, convertedImageFormat: Int, cameraManager: ICameraManager)
}
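A minimal sketch of an implementation (illustrative only) that shows how convertedImageFormat maps to the constants in Const.kt:
// Hypothetical listener: branch on the converted-format constants from Const.kt.
class SimplePreviewCallback : ICameraPreviewCallback {
    override fun onPreviewFrame(data: ByteArray?, convertedImageFormat: Int, cameraManager: ICameraManager) {
        if (data == null) return
        when (convertedImageFormat) {
            Const.previewImageFormatNV21 -> {
                // NV21 frame, width * height * 3 / 2 bytes; hand it to your processing pipeline here
            }
            Const.previewImageFormatRAW16 -> {
                // RAW16 frame, width * height * 2 bytes, big-endian
            }
        }
    }
}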
5. Camera API 1 manager: CameraApi1Manager.kt
// package ...
import android.content.Context
import android.graphics.*
import android.hardware.Camera
import android.hardware.Camera.CameraInfo
import android.hardware.Camera.PreviewCallback
import android.opengl.GLES11Ext
import android.os.Handler
import android.os.HandlerThread
import android.os.Message
import android.util.Log
import android.view.Surface
import android.view.TextureView
import android.view.TextureView.SurfaceTextureListener
import android.view.WindowManager
//import com.zzh.camera2.model.Const
//import com.zzh.camera2.utils.ImageUtils
import java.io.IOException
class CameraApi1Manager : ICameraManager {
val TAG: String = CameraApi1Manager::class.java.simpleName
// preview callback data listener
private var camera12PreviewCallback: ICameraPreviewCallback? = null
// whether to show the preview image; constant
private val isShowCallbackView = true
// default preview image format; a variable, it may be changed
private var previewFormat = ImageFormat.NV21
// preview width/height; variables, may be changed
private var previewWidth: Int = Const.previewWidth
private var previewHeight: Int = Const.previewHeight
private var mCameraId = 0
private var mCamera: Camera? = null
// view used to display the preview; if not set, no preview image is shown
private var textureView: TextureView? = null
// used when the preview is not displayed on a view @{
private val previewDataArray: ArrayList<ByteArray> = ArrayList()
// number of callback buffers; using more than one prevents the preview frames from flickering
private val previewDataSize = 2
var mSurfaceTexture: SurfaceTexture? = null
// @}
// used for processing callback data @{
private var mProcessImageHandler: Handler? = null
private var mProcessImageThread: HandlerThread? = null
private val processPreviewDataMsgId = 99
// @}
override fun setPreviewCallback(camera12PreviewCallback: ICameraPreviewCallback) {
this.camera12PreviewCallback = camera12PreviewCallback
}
// open the camera without a view; only the callback data is used
override fun openCamera(
cameraId: Int,
previewWidth: Int,
previewHeight: Int,
imageFormat: Int
) {
mCameraId = cameraId
this.previewWidth = previewWidth
this.previewHeight = previewHeight
this.previewFormat = imageFormat
// this must be a member field; otherwise the preview callback stops after a few frames
mSurfaceTexture = SurfaceTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES)
openCamera(mSurfaceTexture!!, cameraId)
}
/**
* Open the camera with a preview view
*/
override fun openCamera(
textureView: TextureView,
cameraId: Int,
previewWidth: Int,
previewHeight: Int, imageFormat: Int
) {
mCameraId = cameraId
this.textureView = textureView
this.previewWidth = previewWidth
this.previewHeight = previewHeight
this.previewFormat = imageFormat
if (textureView.isAvailable)
openCamera(textureView.surfaceTexture!!, cameraId)
else
textureView.surfaceTextureListener = surfaceTextureListener
}
override fun startPreview() {
if (mCamera != null) {
mCamera!!.startPreview()
}
}
override fun stopPreview() {
if (mCamera != null) {
mCamera!!.stopPreview()
}
}
override fun releaseCamera() {
if (mCamera != null) {
val camera: Camera = mCamera!!
camera.stopPreview()
if (textureView == null) {
camera.setPreviewCallbackWithBuffer(null)
} else {
camera.setPreviewCallback(null)
}
camera.release()
mCamera = null
}
stopBackgroundThread()
}
private val surfaceTextureListener = object : SurfaceTextureListener {
override fun onSurfaceTextureAvailable(
surfaceTexture: SurfaceTexture,
width: Int,
height: Int
) {
openCamera(surfaceTexture, mCameraId)
}
override fun onSurfaceTextureSizeChanged(
surfaceTexture: SurfaceTexture,
i: Int,
i1: Int
) {
Log.i(TAG, "onSurfaceTextureSizeChanged")
}
override fun onSurfaceTextureDestroyed(surfaceTexture: SurfaceTexture): Boolean {
Log.i(TAG, "onSurfaceTextureDestroyed")
return false
}
override fun onSurfaceTextureUpdated(surfaceTexture: SurfaceTexture) {
}
}
private fun openCamera(surfaceTexture: SurfaceTexture, cameraId: Int) {
if (cameraId < 0 /*|| cameraId > Camera.getNumberOfCameras() - 1*/) {
Log.w(
TAG,
"openCamera failed, cameraId=" + cameraId + ", Camera.getNumberOfCameras()=" + Camera.getNumberOfCameras()
)
return
}
startBackgroundThread()
try {
// Log.i(TAG,"surfaceCreated open camera cameraId=$cameraId start")
mCamera = Camera.open(cameraId)
if (textureView != null) {
mCamera!!.setDisplayOrientation(
getCameraDisplayOrientation(
textureView!!.context,
cameraId
)
)
}
mCamera!!.setPreviewTexture(surfaceTexture)
// set preview format @{
this.previewFormat = setCameraPreviewFormat(mCamera!!, this.previewFormat)
// @}
// set fps @{
val minFps: Int = 30000
val maxFps: Int = 30000
setCameraPreviewFpsRange(mCamera!!, minFps, maxFps)
// @}
// set preview size @{
val hasSetPreviewSize = setCameraPreviewSize(mCamera!!, previewWidth, previewHeight)
if (hasSetPreviewSize != null && hasSetPreviewSize.size > 1) {
previewWidth = hasSetPreviewSize[0]
previewHeight = hasSetPreviewSize[1]
}
// @}
// set picture size @{
setCameraPictureSize(mCamera!!, previewWidth, previewHeight)
// @}
// set the preview callback @{
if (textureView == null) {
mCamera!!.setPreviewCallbackWithBuffer(mCameraCallbacks)
Log.i(
TAG,
"ImageFormat: $previewFormat bits per pixel=" + ImageFormat.getBitsPerPixel(
previewFormat
)
)
// initialize the callback buffer array
for (index in 0 until previewDataSize) {
val previewData = if (previewFormat != ImageFormat.YV12) {
ByteArray(previewWidth * previewHeight * ImageFormat.getBitsPerPixel(previewFormat) / 8)
} else {
val size = ImageUtils.getYV12ImagePixelSize(previewWidth, previewHeight)
ByteArray(size)
}
previewDataArray.add(previewData)
}
addAllPreviewCallbackData()
} else {
mCamera!!.setPreviewCallback(mCameraCallbacks)
}
// @}
mCamera!!.startPreview()
} catch (localIOException: IOException) {
Log.e(
TAG,
"surfaceCreated open camera localIOException cameraId=" + cameraId + ", error=" + localIOException.message,
localIOException
)
} catch (e: Exception) {
Log.e(
TAG,
"surfaceCreated open camera cameraId=" + cameraId + ", error=" + e.message,
e
)
}
}
// add buffers that the preview callback will fill with frame data
private fun addAllPreviewCallbackData() {
for (data in previewDataArray) {
// the callback only fires after mCamera.addCallbackBuffer(mPreviewData) has been called; a buffer is returned once each frame is processed
mCamera!!.addCallbackBuffer(data)
}
}
// add by zzh
private val mCameraCallbacks: PreviewCallback = PreviewCallback { data, camera ->
if (!isShowCallbackView) {
return@PreviewCallback
}
// process on a handler thread to avoid blocking the camera callback
if (mProcessImageHandler != null) {
val msg = Message()
msg.what = processPreviewDataMsgId
msg.obj = data
mProcessImageHandler!!.removeMessages(processPreviewDataMsgId)
mProcessImageHandler!!.sendMessage(msg)
}
if (textureView == null) {
// the current frame has been handed off; return the buffers here so the next frame triggers a callback
addAllPreviewCallbackData()
}
}
// add by zzh
private fun setCameraPreviewFormat(camera: Camera, previewFormat: Int): Int {
var parameters = camera.parameters
// default preview format
val defaultPreviewFormat = parameters.previewFormat
// whether the requested preview image format is supported by the camera
var isSetFormatSuit = false
var isSupportYV12Format = false
var supportedFirstFormat = ImageFormat.NV21
val pfa = parameters.supportedPreviewFormats
// iterate over all supported formats
for (index in 0 until pfa.size) {
if (index == 0) {
supportedFirstFormat = pfa[index]
}
// Log.i(TAG, "supported format[" + index + "]=" + pfa[index])
if (previewFormat == pfa[index]) {
isSetFormatSuit = true
}
if (ImageFormat.YV12 == pfa[index]) {
isSupportYV12Format = true
}
}
if (!isSetFormatSuit) {
Log.w(TAG, "The image preview format you set is not supported")
}
if (isSetFormatSuit) {
parameters.previewFormat = previewFormat
} else if (isSupportYV12Format) {
parameters.previewFormat = ImageFormat.YV12
} else {
parameters.previewFormat = supportedFirstFormat
}
// apply the preview format
camera.parameters = parameters
Log.i(
TAG, "surfaceCreated defaultPreviewFormat=" + defaultPreviewFormat
+ ", after set preview format=" + camera.parameters.previewFormat + ", camera id=$mCameraId"
)
return camera.parameters.previewFormat
}
// add by zzh setCameraPreviewFpsRange
private fun setCameraPreviewFpsRange(camera: Camera, minFps: Int, maxFps: Int) {
var min: Int = 30000
var max: Int = 30000
val parameters = camera.parameters
val defaultPreviewFps = IntArray(2)
parameters.getPreviewFpsRange(defaultPreviewFps)
val supportedFpsRangeArray = parameters.supportedPreviewFpsRange
var isSupportSetFps = false
for (index in 0 until supportedFpsRangeArray.size) {
// Log.i(TAG, "supported fps=" + supportedFpsRangeArray[index][0] + ", 1=" + supportedFpsRangeArray[index][1])
if (index >= supportedFpsRangeArray.size - 1) {
min = supportedFpsRangeArray[index][0]
max = supportedFpsRangeArray[index][1]
}
if (supportedFpsRangeArray[index][0] == minFps && supportedFpsRangeArray[index][1] == maxFps) {
isSupportSetFps = true
}
}
if (isSupportSetFps) {
min = minFps
max = maxFps
}
parameters.setPreviewFpsRange(min, max)
mCamera!!.parameters = parameters
val afterFps = IntArray(2)
mCamera!!.parameters.getPreviewFpsRange(afterFps)
val isSupportZoom = parameters.isZoomSupported
Log.i(
TAG, "surfaceCreated defaultPreviewFps, min = "
+ defaultPreviewFps[0] + ", max = " + defaultPreviewFps[1] + ", zoom support=" + isSupportZoom
+ ", after set fps min=" + afterFps[0] + ", max=" + afterFps[1]
)
}
// add by zzh
private fun setCameraPreviewSize(camera: Camera, width: Int, height: Int): Array<Int> {
val parameters = camera.parameters
val defaultPreviewSize = parameters.previewSize
// set preview size
val sizes = camera.parameters.supportedPreviewSizes
var expected = sizes[sizes.size - 1]
var gotIt = false
// print all support preview sizes
// for (size in sizes) {
// Log.i(TAG, "all Preview size is w:" + size.width + " h:" + size.height)
// }
for (size in sizes) {
// Log.i(TAG, "Preview size is w:" + size.width + " h:" + size.height)
if (size.width == width && size.height == height) {
expected = size
gotIt = true
Log.i(TAG, "setCameraPreviewSize width,height is supported")
break
}
}
var resultWidth = expected.width
var resultHeight = expected.height
if (!gotIt) {
resultWidth = width
resultHeight = height
Log.i(TAG, "setCameraPreviewSize width,height is not supported")
}
parameters.setPreviewSize(resultWidth, resultHeight)
camera.parameters = parameters
Log.i(
TAG,
"setCameraPreviewSize defaultPreviewSize width=" + defaultPreviewSize.width + ", height=" + defaultPreviewSize.height
+ ", after set preview size width=" + camera.parameters.previewSize.width + ", height=" + camera.parameters.previewSize.height
)
return arrayOf(resultWidth, resultHeight)
}
// add by zzh
private fun setCameraPictureSize(camera: Camera, width: Int, height: Int): Array<Int> {
val parameters = camera.parameters
val defaultPictureSize = parameters.pictureSize
// set picture size
val sizes = camera.parameters.supportedPictureSizes
var expected = sizes[sizes.size - 1]
var gotIt = false
// print all support picture sizes
// for (size in sizes) {
// Log.i(TAG, "all picture size is w:" + size.width + " h:" + size.height)
// }
for (size in sizes) {
// Log.i(TAG, "Picture size is w:" + size.width + " h:" + size.height)
if (size.width == width && size.height == height) {
expected = size
gotIt = true
Log.i(TAG, "setCameraPictureSize width,height is supported")
break
}
}
var resultWidth = expected.width
var resultHeight = expected.height
if (!gotIt) {
resultWidth = width
resultHeight = height
Log.i(TAG, "setCameraPictureSize width,height is not supported")
}
parameters.setPictureSize(resultWidth, resultHeight)
// parameters.pictureFormat = previewFormat
// parameters.setZoom(0)
// parameters.setRotation(0)
camera.parameters = parameters
Log.i(
TAG,
"setCameraPictureSize defaultPictureSize width=" + defaultPictureSize.width + ", height=" + defaultPictureSize.height
+ ", after set picture size width=" + camera.parameters.pictureSize.width + ", height=" + camera.parameters.pictureSize.height
)
return arrayOf(resultWidth, resultHeight)
}
// add by zzh
private fun getCameraDisplayOrientation(context: Context?, cameraId: Int): Int {
if (context == null) {
Log.e(TAG, "getCameraDisplayOrientation failed, context is null")
return 0
}
val info = CameraInfo()
Camera.getCameraInfo(cameraId, info)
val windowManager = context.getSystemService(Context.WINDOW_SERVICE) as WindowManager
val rotation = windowManager.defaultDisplay.rotation
var degrees = 0
when (rotation) {
Surface.ROTATION_0 -> degrees = 0
Surface.ROTATION_90 -> degrees = 90
Surface.ROTATION_180 -> degrees = 180
Surface.ROTATION_270 -> degrees = 270
}
var result: Int
if (info.facing == CameraInfo.CAMERA_FACING_FRONT) {
result = (info.orientation + degrees) % 360
result = (360 - result) % 360 // compensate the mirror
} else { // back-facing
result = (info.orientation - degrees + 360) % 360
}
Log.i(TAG, "setCameraDisplayOrientation result=$result")
return result
}
private fun startBackgroundThread() {
if (mProcessImageThread == null || mProcessImageHandler == null) {
mProcessImageThread = HandlerThread("Preview data processor")
mProcessImageThread!!.start()
mProcessImageHandler = Handler(mProcessImageThread!!.looper, processPreviewCallback)
}
}
private fun stopBackgroundThread() {
if (mProcessImageThread != null && mProcessImageThread!!.isAlive()) {
mProcessImageThread!!.quit()
mProcessImageHandler!!.removeCallbacksAndMessages(null)
mProcessImageThread = null
mProcessImageHandler = null
}
}
private val processPreviewCallback = Handler.Callback {
try {
val what = it.what
val dataRsv = it.obj
if (what == processPreviewDataMsgId && dataRsv != null && dataRsv is ByteArray) {
val data: ByteArray = dataRsv
var byteArray: ByteArray? = data
var imageFormat = 0
// Camera API 1 generally supports only the YV12 and NV21 preview formats
when (previewFormat) {
ImageFormat.YV12, ImageFormat.YUV_420_888 -> {
imageFormat = Const.previewImageFormatNV21
byteArray = ImageUtils.yv12ToNV21(data, previewWidth, previewHeight)
}
ImageFormat.RAW10 -> {
imageFormat = Const.previewImageFormatRAW16
byteArray =
ImageUtils.transformRAW10ToRAW16(data, previewWidth, previewHeight)
}
ImageFormat.NV21 -> {
imageFormat = Const.previewImageFormatNV21
}
}
if (camera12PreviewCallback != null) {
camera12PreviewCallback!!.onPreviewFrame(byteArray!!, imageFormat, this)
}
}
} catch (e: Exception) {
Log.e(TAG, "processPreviewCallback error=" + e.message)
}
true
}
}
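Note that both managers assume android.permission.CAMERA has already been granted; on Android 6.0+ it has to be requested at runtime before openCamera() is called. A minimal check using androidx.core's ContextCompat/ActivityCompat (the helper name and request code are my own, not part of the SDK above):
// Check and, if needed, request the CAMERA runtime permission before opening the camera.
private fun ensureCameraPermission(activity: Activity): Boolean {
    if (ContextCompat.checkSelfPermission(activity, Manifest.permission.CAMERA)
        == PackageManager.PERMISSION_GRANTED
    ) {
        return true
    }
    // 100 is an arbitrary request code; handle the result in onRequestPermissionsResult()
    ActivityCompat.requestPermissions(activity, arrayOf(Manifest.permission.CAMERA), 100)
    return false
}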
6. Camera API 2 manager: CameraApi2Manager.kt
// package ...
import android.annotation.SuppressLint
import android.content.Context
import android.graphics.ImageFormat
import android.graphics.Matrix
import android.graphics.RectF
import android.graphics.SurfaceTexture
import android.hardware.camera2.*
import android.hardware.camera2.CameraCaptureSession.CaptureCallback
import android.hardware.camera2.params.OutputConfiguration
import android.hardware.camera2.params.SessionConfiguration
import android.media.ImageReader
import android.media.ImageReader.OnImageAvailableListener
import android.os.Build
import android.os.Handler
import android.os.HandlerThread
import android.util.Log
import android.util.Size
import android.view.Surface
import android.view.TextureView
import android.view.TextureView.SurfaceTextureListener
import android.view.WindowManager
import androidx.annotation.NonNull
import androidx.annotation.RequiresApi
//import com.zzh.camera2.model.Const
//import com.zzh.camera2.utils.ImageUtils
import kotlin.collections.ArrayList
class CameraApi2Manager : ICameraManager {
companion object {
val TAG: String = CameraApi2Manager::class.java.simpleName
}
/*** system camera manager */
var mCameraManager: CameraManager? = null
/*** [CameraDevice] instance for the camera with the given ID */
var mCameraDevice: CameraDevice? = null
/**
 * preview size
 */
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
private var mPreviewSize = Size(Const.previewWidth, Const.previewHeight)
/*** ID of the [CameraDevice] to open. */
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
private var mCameraId = CameraCharacteristics.LENS_FACING_FRONT
/*** [ImageReader] that receives the frame data. */
private var mImageReader: ImageReader? = null
/*** [CameraCaptureSession] used for the camera preview. */
private var mCaptureSession: CameraCaptureSession? = null
/*** [CaptureRequest.Builder] used to build the preview request */
private var mPreviewRequestBuilder: CaptureRequest.Builder? = null
/*** preview view; if not set, no preview image is shown */
private var textureView: TextureView? = null
/*** whether the flash is supported */
private var mFlashSupported = false
/*** extra thread for tasks that must not block the UI. */
private var mBackgroundThread: HandlerThread? = null
private val maxImages = 2
private var faceDetectModes: IntArray? = null
/*** [Handler] for running tasks in the background. */
private var mBackgroundHandler: Handler? = null
private var camera12PreviewCallback: ICameraPreviewCallback? = null
override fun setPreviewCallback(camera12PreviewCallback: ICameraPreviewCallback) {
this.camera12PreviewCallback = camera12PreviewCallback
}
var previewFormat = ImageFormat.YUV_420_888
private var isShowCallbackView = true
var context: Context
constructor(context: Context) {
this.context = context.applicationContext
// obtain the system CameraManager (camera device manager)
mCameraManager = this.context.getSystemService(Context.CAMERA_SERVICE) as CameraManager?
}
override fun openCamera(
textureView: TextureView,
cameraId: Int,
previewWidth: Int,
previewHeight: Int,
imageFormat: Int
) {
if (textureView == null) {
throw IllegalArgumentException("openCamera textureView is null")
}
this.textureView = textureView
mCameraId = cameraId
mPreviewSize = Size(previewWidth, previewHeight)
this.previewFormat = imageFormat
if (this.textureView!!.isAvailable)
openCamera(textureView.width, textureView.height)
else
this.textureView!!.surfaceTextureListener = textureListener
}
override fun openCamera(
cameraId: Int,
previewWidth: Int,
previewHeight: Int,
imageFormat: Int
) {
mCameraId = cameraId
mPreviewSize = Size(previewWidth, previewHeight)
this.previewFormat = imageFormat
openCamera(previewWidth, previewHeight)
}
// not implemented for Camera API 2
override fun startPreview() {
}
// not implemented for Camera API 2
override fun stopPreview() {
}
override fun releaseCamera() {
closeCamera()
}
/**
* This is the callback object for the [ImageReader]; "onImageAvailable" is invoked whenever a new frame is ready.
* The preview request built below is sent to the camera device through the capture pipeline.
* Once mPreviewRequestBuilder!!.addTarget(mImageReader!!.surface) is added, it is called continuously for every preview frame.
*/
private val mOnImageAvailableListener = OnImageAvailableListener { reader ->
// the image must be acquired and closed here, otherwise the preview will freeze
if (reader != null) {
val image = reader.acquireLatestImage()
if (!isShowCallbackView) {
image.close()
} else if (image != null) {
var byteArray: ByteArray? = null
var imageFormat = 0
if (previewFormat == ImageFormat.YV12 || previewFormat == ImageFormat.YUV_420_888) {
imageFormat = Const.previewImageFormatNV21
byteArray = ImageUtils.getBytesFromImageToNV21(image)
// byteArray = ImageUtils.NV21_mirror(byteArray!!, Const.previewWidth, Const.previewHeight)
} else {
if (image.planes.isNotEmpty()) {
val buffer = image.planes[0].buffer
val bytes = ByteArray(buffer.remaining())
buffer[bytes]
if (previewFormat != ImageFormat.RAW10) {
byteArray = bytes.clone()
} else {
imageFormat = Const.previewImageFormatRAW16
byteArray = ImageUtils.transformRAW10ToRAW16(
bytes.clone(),
mPreviewSize.width,
mPreviewSize.height
)
}
}
image.close()
}
if (camera12PreviewCallback != null && byteArray != null)
camera12PreviewCallback!!.onPreviewFrame(byteArray, imageFormat, this)
}
}
}
/*** [CameraDevice.StateCallback] invoked when the specified camera is opened [CameraDevice] */
private val mStateCallback: CameraDevice.StateCallback =
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
object : CameraDevice.StateCallback() {
override fun onOpened(@NonNull cameraDevice: CameraDevice) {
Log.d(
TAG,
"mStateCallback onOpened thread=${Thread.currentThread()} cameraid=$mCameraId"
)
mCameraDevice = cameraDevice
createCameraPreview()
}
override fun onDisconnected(@NonNull cameraDevice: CameraDevice) {
Log.d(TAG, "mStateCallback onDisconnected id =${cameraDevice.id}")
mCameraDevice = cameraDevice
releaseCamera()
}
override fun onError(@NonNull cameraDevice: CameraDevice, error: Int) {
Log.d(TAG, "mStateCallback onError id =${cameraDevice.id}, error=$error")
mCameraDevice = cameraDevice
releaseCamera()
}
}
/**
* TextureView lifecycle callbacks
*/
private val textureListener: SurfaceTextureListener = object : SurfaceTextureListener {
//created
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
override fun onSurfaceTextureAvailable(surface: SurfaceTexture, width: Int, height: Int) {
openCamera(width, height)
}
//size changed
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
override fun onSurfaceTextureSizeChanged(surface: SurfaceTexture, width: Int, height: Int) {
configureTransform(width, height)
}
//destroyed
override fun onSurfaceTextureDestroyed(surface: SurfaceTexture): Boolean {
return false
}
//updated
override fun onSurfaceTextureUpdated(surface: SurfaceTexture) {}
}
/**
* Open the camera with the specified camera ID
*/
@SuppressLint("MissingPermission")
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
private fun openCamera(viewWidth: Int, viewHeight: Int) {
try {
// start the background thread before calling openCamera(), because opening the camera needs its handler
startBackgroundThread()
// autoRatioTextureView()
configureTransform(viewWidth, viewHeight)
if (mImageReader == null) {
// create an ImageReader to receive the camera frame data; maxImages is the maximum number of images the reader can hold at once
mImageReader = ImageReader.newInstance(
mPreviewSize.width,
mPreviewSize.height,
previewFormat,
maxImages
)
mImageReader!!.setOnImageAvailableListener(
mOnImageAvailableListener,
mBackgroundHandler
)
}
mCameraManager!!.openCamera(mCameraId.toString(), mStateCallback, mBackgroundHandler)
// printCameraInfo()
} catch (e: CameraAccessException) {
e.printStackTrace()
} catch (e1: Exception) {
e1.printStackTrace()
}
}
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
private fun printCameraInfo() {
Log.d(TAG, "printCameraInfo openCamera current camera id=$mCameraId")
val ids = mCameraManager!!.cameraIdList
if (ids != null) {
for (id in ids.indices) {
Log.d(TAG, "openCamera support id[$id]=${ids[id]}")
}
}
val characteristics = mCameraManager!!.getCameraCharacteristics(mCameraId.toString())
//check whether flash is supported
val available = characteristics.get(CameraCharacteristics.FLASH_INFO_AVAILABLE)
mFlashSupported = available ?: false
Log.d(TAG, "openCamera mFlashSupported=$mFlashSupported")
val map = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)
if (map != null) {
val previewSizes = map.getOutputSizes(SurfaceTexture::class.java)//supported preview sizes
for (i in previewSizes.indices)
Log.d(TAG, "openCamera previewSizes[$i]=${previewSizes[i]}")
val captureSizes = map.getOutputSizes(ImageFormat.JPEG)//supported capture (JPEG) sizes
for (i in captureSizes.indices)
Log.d(TAG, "openCamera captureSizes[$i]=${captureSizes[i]}")
}
val cOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION)//sensor orientation
Log.d(TAG, "openCamera sensor orientation cOrientation=${cOrientation}")
val cRect = characteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE)//active array size
Log.d(TAG, "openCamera active array cRect=${cRect}")
val cPixelSize =
characteristics.get(CameraCharacteristics.SENSOR_INFO_PIXEL_ARRAY_SIZE)//pixel array size, similar to the above
Log.d(TAG, "openCamera pixel array size cPixelSize=${cPixelSize}")
//can be used to check whether face detection is supported and at which level
faceDetectModes =
characteristics[CameraCharacteristics.STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES] //supported face detection modes
if (faceDetectModes != null) {
for (i in 0 until faceDetectModes!!.size)
Log.d(TAG, "openCamera supported face detect mode faceDetectModes[$i]=${faceDetectModes!![i]}")
}
val maxFaceCount =
characteristics[CameraCharacteristics.STATISTICS_INFO_MAX_FACE_COUNT]!! //maximum number of detectable faces
Log.d(TAG, "openCamera max detectable face count maxFaceCount=${maxFaceCount}")
}
/**
* Configures the necessary [android.graphics.Matrix] transformation to `mTextureView`.
* This method should be called after the camera preview size is determined in
* setUpCameraOutputs and also the size of `mTextureView` is fixed.
* @param viewWidth The width of `mTextureView`
* @param viewHeight The height of `mTextureView`
*/
private fun configureTransform(viewWidth: Int, viewHeight: Int) {
if (this.textureView == null) {
return
}
val rotation =
(this.context.getSystemService(Context.WINDOW_SERVICE) as WindowManager?)!!.defaultDisplay.rotation
Log.i(TAG, "rotation=$rotation, camera id=$mCameraId")
val matrix = Matrix()
val viewRect = RectF(0f, 0f, viewWidth.toFloat(), viewHeight.toFloat())
val bufferRect = RectF(0f, 0f, mPreviewSize.height.toFloat(), mPreviewSize.width.toFloat())
val centerX = viewRect.centerX()
val centerY = viewRect.centerY()
if (Surface.ROTATION_90 == rotation || Surface.ROTATION_270 == rotation) {
bufferRect.offset(centerX - bufferRect.centerX(), centerY - bufferRect.centerY())
matrix.setRectToRect(viewRect, bufferRect, Matrix.ScaleToFit.FILL)
val scale = Math.max(
viewHeight.toFloat() / mPreviewSize.height,
viewWidth.toFloat() / mPreviewSize.width
)
matrix.postScale(scale, scale, centerX, centerY)
matrix.postRotate((90 * (rotation - 2)).toFloat(), centerX, centerY)
} else if (Surface.ROTATION_180 == rotation) {
matrix.postRotate(180f, centerX, centerY)
}
textureView!!.setTransform(matrix)
}
/**
* Create the camera preview session
*/
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
private fun createCameraPreview() {
if (this.context == null) {
return
}
try {
//create the preview request builder
mPreviewRequestBuilder =
mCameraDevice!!.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW)
//add the ImageReader surface as a target; without this, onImageAvailable only fires for still captures instead of every preview frame
mPreviewRequestBuilder!!.addTarget(mImageReader!!.surface)
var surface: Surface? = null
if (this.textureView != null) {
// get the SurfaceTexture instance
val surfaceTexture: SurfaceTexture = textureView!!.surfaceTexture!!
//configure the default buffer size to the desired camera preview size
surfaceTexture.setDefaultBufferSize(mPreviewSize.width, mPreviewSize.height)
// output surface used to start the preview
surface = Surface(surfaceTexture)
//use the TextureView's Surface as the camera's preview display output
mPreviewRequestBuilder!!.addTarget(surface)
}
//set the face detection mode; it must not be OFF for faces to be detected
mPreviewRequestBuilder!!.set(
CaptureRequest.STATISTICS_FACE_DETECT_MODE,
getFaceDetectMode()
)
//3A--->auto
mPreviewRequestBuilder!!.set(
CaptureRequest.CONTROL_MODE,
CameraMetadata.CONTROL_MODE_AUTO
)
//3A
mPreviewRequestBuilder!!.set(
CaptureRequest.CONTROL_AF_MODE,
CameraMetadata.CONTROL_AF_MODE_CONTINUOUS_VIDEO
)
mPreviewRequestBuilder!!.set(
CaptureRequest.CONTROL_AE_MODE,
CameraMetadata.CONTROL_AE_MODE_ON
)
mPreviewRequestBuilder!!.set(
CaptureRequest.CONTROL_AWB_MODE,
CameraMetadata.CONTROL_AWB_MODE_AUTO
)
// when two cameras are open and no view is used for the preview, this path can prevent one of the cameras from closing properly
if (Build.VERSION.SDK_INT >= 28) {
// use no deprecated function @{
val sessionType = SessionConfiguration.SESSION_REGULAR
val outputConfigArrays = ArrayList<OutputConfiguration>()
val outputConfiguration2 = OutputConfiguration(mImageReader!!.surface)
outputConfigArrays.add(outputConfiguration2)
if (surface != null) {
val outputConfiguration1 = OutputConfiguration(surface!!)
outputConfigArrays.add(outputConfiguration1)
}
val executor = this.context.mainExecutor
val captureStateCallback = object : CameraCaptureSession.StateCallback() {
override fun onConfigured(@NonNull cameraCaptureSession: CameraCaptureSession) {
// the camera has already been closed; return
if (null == mCameraDevice) {
return
}
//the session is ready; start showing the preview
// keep a reference to the configured session
mCaptureSession = cameraCaptureSession
// start the preview
mCaptureSession!!.setRepeatingRequest(
mPreviewRequestBuilder!!.build(),
mCaptureCallback,
mBackgroundHandler
)
//set the flash mode
// setFlashMode(CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH)
}
override fun onConfigureFailed(@NonNull cameraCaptureSession: CameraCaptureSession) {
Log.d(TAG, "onConfigureFailed cameraCaptureSession=${cameraCaptureSession.device.id}")
}
}
val sessionConfiguration = SessionConfiguration(
sessionType,
outputConfigArrays,
executor,
captureStateCallback
)
//create a CameraCaptureSession for the camera preview
mCameraDevice!!.createCaptureSession(sessionConfiguration)
// @}
} else {
val listSurface = ArrayList<Surface>()
listSurface.add(mImageReader!!.surface)
if (surface != null) {
listSurface.add(surface)
}
val stateCallback: CameraCaptureSession.StateCallback =
object : CameraCaptureSession.StateCallback() {
override fun onConfigured(@NonNull cameraCaptureSession: CameraCaptureSession) {
// the camera has already been closed; return
if (null == mCameraDevice) {
return
}
//the session is ready; start showing the preview
// keep a reference to the configured session
mCaptureSession = cameraCaptureSession
//the camera preview should auto-focus continuously
// mPreviewRequestBuilder!!.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_VIDEO)
// start the preview
mCaptureSession!!.setRepeatingRequest(
mPreviewRequestBuilder!!.build(),
mCaptureCallback,
mBackgroundHandler
)
//set the flash mode
// setFlashMode(CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH)
}
override fun onConfigureFailed(@NonNull cameraCaptureSession: CameraCaptureSession) {
Log.d(TAG, "onConfigureFailed")
}
}
//create a CameraCaptureSession for the camera preview
mCameraDevice!!.createCaptureSession(listSurface, stateCallback, mBackgroundHandler)
}
} catch (e: CameraAccessException) {
e.printStackTrace()
}
}
/**
* Get the highest supported face detection mode
* @return
*/
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
private fun getFaceDetectMode(): Int {
return if (faceDetectModes == null) {
CaptureRequest.STATISTICS_FACE_DETECT_MODE_FULL
} else {
faceDetectModes!!.get(faceDetectModes!!.size - 1)
}
}
/**
* Closes the current [CameraDevice].
* Closes the camera currently in use
*/
private fun closeCamera() {
// close the ImageReader
if (null != mImageReader) {
try {
mImageReader!!.setOnImageAvailableListener(null, null)
mImageReader!!.close()
mImageReader = null
} catch (e: Exception) {
Log.e(TAG, "closeCamera close imagereader error = " + e.message)
}
}
// close the capture session
if (null != mCaptureSession) {
try {
mCaptureSession!!.close()
mCaptureSession = null
} catch (cameraAccessException: CameraAccessException) {
Log.e(
TAG,
"closeCamera mCaptureSession close cameraAccessException = " + cameraAccessException.message
)
} catch (e1: Exception) {
Log.e(TAG, "closeCamera mCaptureSession close E1 = " + e1.message)
}
}
// close the current camera device
if (null != mCameraDevice) {
try {
mCameraDevice!!.close()
mCameraDevice = null
} catch (e: Exception) {
Log.e(TAG, "closeCamera mCameraDevice close E = " + e.message)
}
}
// stop the camera background thread
stopBackgroundThread()
}
/**
* Initialize and start the background thread used for opening the camera, capturing and previewing
*/
private fun startBackgroundThread() {
mBackgroundThread = HandlerThread("Camera Background")
mBackgroundThread!!.start()
mBackgroundHandler = Handler(mBackgroundThread!!.looper)
}
/**
* Stop the background thread used for opening the camera, capturing and previewing
* */
private fun stopBackgroundThread() {
if (mBackgroundThread != null && mBackgroundHandler != null) {
mBackgroundHandler!!.removeCallbacksAndMessages(null)
mBackgroundHandler = null
mBackgroundThread!!.quitSafely()
mBackgroundThread = null
}
}
/**
* A [CameraCaptureSession.CaptureCallback] that handles events related to JPEG capture.
*/
private val mCaptureCallback: CaptureCallback = @RequiresApi(Build.VERSION_CODES.LOLLIPOP)
object : CaptureCallback() {
override fun onCaptureProgressed(
@NonNull session: CameraCaptureSession,
@NonNull request: CaptureRequest,
@NonNull partialResult: CaptureResult
) {
}
override fun onCaptureCompleted(
@NonNull session: CameraCaptureSession,
@NonNull request: CaptureRequest,
@NonNull result: TotalCaptureResult
) {
/*val faces = result.get(CaptureResult.STATISTICS_FACES)
Log.d(TAG, "face count:" + faces?.size)
if (faces != null) {
for (f in faces.indices) {
Log.d(TAG, "face[$f] id:" + faces[f].id)
//face coordinates are based on the sensor's active array size and origin; scale them to the view here as needed
Log.d(TAG, "face[$f] bounds:" + faces[f].bounds)
Log.d(TAG, "face[$f] leftEyePosition:" + faces[f].leftEyePosition)
Log.d(TAG, "face[$f] rightEyePosition:" + faces[f].rightEyePosition)
Log.d(TAG, "face[$f] score:" + faces[f].score)
}
}*/
}
}
}
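One caveat with CameraApi2Manager: openCamera() passes mCameraId.toString() to CameraManager.openCamera(), which assumes the numeric ID (0, 1, ...) matches an entry in cameraIdList. That holds on most devices, but a more robust option is to resolve the ID from the lens facing. A sketch under that assumption (the helper is my own, not part of the class above):
// Hypothetical helper: resolve a Camera2 camera ID string by lens facing.
fun findCameraIdByFacing(manager: CameraManager, facing: Int): String? {
    for (id in manager.cameraIdList) {
        val characteristics = manager.getCameraCharacteristics(id)
        if (characteristics.get(CameraCharacteristics.LENS_FACING) == facing) {
            return id
        }
    }
    // no camera with the requested facing was found
    return null
}
// e.g. findCameraIdByFacing(mCameraManager!!, CameraCharacteristics.LENS_FACING_FRONT)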
7. Usage
// Example: calling from an Activity
// the Activity implements the callback interface ICameraPreviewCallback
// declare the manager field
lateinit var cameraManager: ICameraManager
// initialize the camera manager in the Activity's onCreate()
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP) {
cameraManager = CameraApi1Manager()
} else {
cameraManager = CameraApi2Manager(this)
}
cameraManager.setPreviewCallback(this)
// call in the Activity's onResume(); previewFormat is a value such as ImageFormat.YUV_420_888
// open the camera
if (/** show the preview image */) {
cameraManager.openCamera(
textureView,
cameraId,
previewWidth,
previewHeight,
previewFormat)
} else {
// no preview image needed, only the callback data
cameraManager.openCamera(cameraId, previewWidth, previewHeight, previewFormat)
}
// start the camera preview
cameraManager.startPreview()
// call in the Activity's onPause()
cameraManager.stopPreview()
cameraManager.releaseCamera()
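Putting the pieces together, a minimal Activity skeleton might look like the sketch below. The class name, layout and view IDs, and the cameraId value are illustrative only, and the runtime CAMERA permission is assumed to have been granted already:
import android.graphics.ImageFormat
import android.os.Build
import android.os.Bundle
import android.view.TextureView
import androidx.appcompat.app.AppCompatActivity

// Hypothetical wiring of the managers above; names and resource IDs are placeholders.
class PreviewActivity : AppCompatActivity(), ICameraPreviewCallback {
    private lateinit var cameraManager: ICameraManager
    private lateinit var textureView: TextureView
    private val cameraId = 0
    private val previewFormat = ImageFormat.YUV_420_888

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_preview) // assumed layout containing a TextureView
        textureView = findViewById(R.id.texture_view) // assumed view ID
        cameraManager = if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP) {
            CameraApi1Manager()
        } else {
            CameraApi2Manager(this)
        }
        cameraManager.setPreviewCallback(this)
    }

    override fun onResume() {
        super.onResume()
        cameraManager.openCamera(textureView, cameraId, Const.previewWidth, Const.previewHeight, previewFormat)
        cameraManager.startPreview()
    }

    override fun onPause() {
        cameraManager.stopPreview()
        cameraManager.releaseCamera()
        super.onPause()
    }

    override fun onPreviewFrame(data: ByteArray?, convertedImageFormat: Int, cameraManager: ICameraManager) {
        // data is NV21 or RAW16 depending on convertedImageFormat (see Const.kt)
    }
}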
8. The full Android Studio project is available in the Gitee repository below.
Android camera operation implemented with Camera API 1 or Camera API 2
Likes and bookmarks are welcome!