When capturing camera preview data with Camera2, the recommended output format is usually YUV, but most mainstream image-processing algorithms only accept RGB, so the YUV frame generally has to be converted to RGB first, which on Android means a Bitmap.
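All of the snippets below consume an NV21 byte array (nv21) that the post never constructs. As a point of reference, here is a minimal sketch of how such a buffer might be extracted from a Camera2 YUV_420_888 Image; the helper name, the tight row packing and the per-pixel chroma copy (clear but slow) are my own assumptions:
// Hypothetical helper (not from the original post): converts a Camera2 YUV_420_888
// Image into the NV21 byte[] consumed by the snippets below. Rows are packed tightly,
// so the frame width (not the row stride) describes the resulting buffer.
// Needs: import android.media.Image; import java.nio.ByteBuffer;
private static byte[] yuv420888ToNv21(Image image) {
    int width = image.getWidth();
    int height = image.getHeight();
    byte[] nv21 = new byte[width * height * 3 / 2];

    // Y plane: copy row by row so any row padding is skipped.
    Image.Plane yPlane = image.getPlanes()[0];
    ByteBuffer yBuffer = yPlane.getBuffer();
    int yRowStride = yPlane.getRowStride();
    int pos = 0;
    for (int row = 0; row < height; row++) {
        yBuffer.position(row * yRowStride);
        yBuffer.get(nv21, pos, width);
        pos += width;
    }

    // Chroma planes: NV21 expects interleaved VU at quarter resolution.
    Image.Plane uPlane = image.getPlanes()[1];
    Image.Plane vPlane = image.getPlanes()[2];
    ByteBuffer uBuffer = uPlane.getBuffer();
    ByteBuffer vBuffer = vPlane.getBuffer();
    int uvRowStride = uPlane.getRowStride();
    int uvPixelStride = uPlane.getPixelStride();
    for (int row = 0; row < height / 2; row++) {
        for (int col = 0; col < width / 2; col++) {
            int uvIndex = row * uvRowStride + col * uvPixelStride;
            nv21[pos++] = vBuffer.get(uvIndex);
            nv21[pos++] = uBuffer.get(uvIndex);
        }
    }
    return nv21;
}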
There are two ways to do the conversion itself. The first is to compress the frame to JPEG and then decode that JPEG into a Bitmap:
YuvImage yuvImage = new YuvImage(nv21, ImageFormat.NV21, stride, height, null);
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
yuvImage.compressToJpeg(new Rect(0, 0, stride, height), 100, byteArrayOutputStream);
Bitmap previewBitmap = null;
try {
    byte[] jpgBytes = byteArrayOutputStream.toByteArray();
    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inSampleSize = ImageUtil.getSampleSize(mPreferSize, stride, height);
    // Bitmap decoded from the raw preview data (already downsampled by inSampleSize)
    final Bitmap originalBitmap = BitmapFactory.decodeByteArray(jpgBytes, 0, jpgBytes.length, options);
    Matrix matrix = new Matrix();
    // The preview may be rotated relative to the raw sensor data
    matrix.postRotate(90);
    // Bitmap matching what is shown in the preview
    previewBitmap = Bitmap.createBitmap(originalBitmap, 0, 0, originalBitmap.getWidth(), originalBitmap.getHeight(), matrix, false);
    originalBitmap.recycle();
} catch (OutOfMemoryError e) {
    e.printStackTrace();
}
This path includes both a downscale and a rotation step; inSampleSize is the downscale factor used during decoding.
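ImageUtil.getSampleSize is a project-specific helper that the post does not show. Assuming mPreferSize is the longest edge the decoded bitmap should have, a minimal sketch of such a helper might look like this:
// Hypothetical stand-in for ImageUtil.getSampleSize (not from the original post):
// returns the largest power-of-two inSampleSize that keeps the decoded bitmap's
// longest edge at or above preferSize; BitmapFactory rounds other values down anyway.
static int getSampleSize(int preferSize, int width, int height) {
    int longestEdge = Math.max(width, height);
    int sampleSize = 1;
    while (longestEdge / (sampleSize * 2) >= preferSize) {
        sampleSize *= 2;
    }
    return sampleSize;
}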
The second way is to use ScriptIntrinsicYuvToRGB and an Allocation to convert the YUV data to RGB directly:
package *****;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Matrix;
import android.renderscript.Allocation;
import android.renderscript.Element;
import android.renderscript.RenderScript;
import android.renderscript.ScriptIntrinsicYuvToRGB;
import android.renderscript.Type;
public class YuvToRgb {
    private boolean mIsInitialized = false;
    private RenderScript renderScript;
    private ScriptIntrinsicYuvToRGB scriptIntrinsicYuvToRGB;
    private Type.Builder yuvType, rgbaType;
    private Allocation in, out;
    private int mWidth, mHeight;

    private YuvToRgb() {}

    private static class HOLDER {
        private static final YuvToRgb INSTANCE = new YuvToRgb();
    }

    public static YuvToRgb getInstance() {
        return HOLDER.INSTANCE;
    }
    public Bitmap convertYUVtoRGB(Context context, byte[] yuvData, int width, int height, int inSampleSize, int rotation) {
        if (width <= 0 || height <= 0) {
            return null;
        }
        init(context);
        if (mWidth != width || mHeight != height) {
            // (Re)create the allocations only when the frame size changes
            yuvType = new Type.Builder(renderScript, Element.U8(renderScript)).setX(yuvData.length);
            in = Allocation.createTyped(renderScript, yuvType.create(), Allocation.USAGE_SCRIPT);
            rgbaType = new Type.Builder(renderScript, Element.RGBA_8888(renderScript)).setX(width).setY(height);
            out = Allocation.createTyped(renderScript, rgbaType.create(), Allocation.USAGE_SCRIPT);
            mWidth = width;
            mHeight = height;
        }
        in.copyFrom(yuvData);
        scriptIntrinsicYuvToRGB.setInput(in);
        scriptIntrinsicYuvToRGB.forEach(out);
        Bitmap outBmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
        out.copyTo(outBmp);
        // The blank bitmap must have the size expected after the transform
        // (width and height are swapped here, assuming a 90°/270° rotation transposes the frame)
        int dstWidth = height / inSampleSize;
        int dstHeight = width / inSampleSize;
        Bitmap src = Bitmap.createBitmap(dstWidth, dstHeight, Bitmap.Config.ARGB_8888);
        final Canvas canvas = new Canvas(src);
        // The preview may be rotated relative to the raw data
        Matrix matrix = getTransformationMatrix(width, height, dstWidth, dstHeight, rotation, false, false, true);
        canvas.drawBitmap(outBmp, matrix, null);
        return src;
    }
    private void init(Context context) {
        if (!mIsInitialized) {
            renderScript = RenderScript.create(context);
            scriptIntrinsicYuvToRGB = ScriptIntrinsicYuvToRGB.create(renderScript, Element.U8_4(renderScript));
            mWidth = 0;
            mHeight = 0;
            mIsInitialized = true;
        }
    }
    /**
     * Returns a transformation matrix from one reference frame into another.
     * Handles cropping (if maintaining aspect ratio is desired) and rotation.
     *
     * @param srcWidth Width of source frame.
     * @param srcHeight Height of source frame.
     * @param dstWidth Width of destination frame.
     * @param dstHeight Height of destination frame.
     * @param applyRotation Amount of rotation to apply from one frame to another.
     *                      Must be a multiple of 90.
     * @param flipHorizontal should flip horizontally
     * @param flipVertical should flip vertically
     * @param maintainAspectRatio If true, will ensure that scaling in x and y remains constant,
     *                            cropping the image if necessary.
     * @return The transformation fulfilling the desired requirements.
     */
    private static Matrix getTransformationMatrix(
            final int srcWidth,
            final int srcHeight,
            final int dstWidth,
            final int dstHeight,
            final int applyRotation, boolean flipHorizontal, boolean flipVertical,
            final boolean maintainAspectRatio) {
        final Matrix matrix = new Matrix();
        if (applyRotation != 0) {
            if (applyRotation % 90 != 0) {
                // The percent sign must be escaped as %% so String.format does not misparse it
                throw new IllegalArgumentException(String.format("Rotation of %d %% 90 != 0", applyRotation));
            }
            // Translate so center of image is at origin.
            matrix.postTranslate(-srcWidth / 2.0f, -srcHeight / 2.0f);
            // Rotate around origin.
            matrix.postRotate(applyRotation);
        }
        // Account for the already applied rotation, if any, and then determine how
        // much scaling is needed for each axis.
        final boolean transpose = (Math.abs(applyRotation) + 90) % 180 == 0;
        final int inWidth = transpose ? srcHeight : srcWidth;
        final int inHeight = transpose ? srcWidth : srcHeight;
        int flipHorizontalFactor = flipHorizontal ? -1 : 1;
        int flipVerticalFactor = flipVertical ? -1 : 1;
        // Apply scaling if necessary.
        if (inWidth != dstWidth || inHeight != dstHeight) {
            final float scaleFactorX = flipHorizontalFactor * dstWidth / (float) inWidth;
            final float scaleFactorY = flipVerticalFactor * dstHeight / (float) inHeight;
            if (maintainAspectRatio) {
                // Scale by minimum factor so that dst is filled completely while
                // maintaining the aspect ratio. Some image may fall off the edge.
                final float scaleFactor = Math.max(Math.abs(scaleFactorX), Math.abs(scaleFactorY));
                matrix.postScale(scaleFactor, scaleFactor);
            } else {
                // Scale exactly to fill dst from src.
                matrix.postScale(scaleFactorX, scaleFactorY);
            }
        }
        if (applyRotation != 0) {
            // Translate back from origin centered reference to destination frame.
            float dx = dstWidth / 2.0f;
            float dy = dstHeight / 2.0f;
            matrix.postTranslate(dx, dy);
            // If the pivot point passed to postScale is wrong, the image will not be transformed correctly
            matrix.postScale(flipHorizontalFactor, flipVerticalFactor, dx, dy);
        }
        return matrix;
    }
}
It is called like this:
Bitmap previewBitmap = YuvToRgb.getInstance().convertYUVtoRGB(getActivity(), nv21, stride, height,
ImageUtil.getSampleSize(mPreferSize, stride, height), 90);
In theory the second method should be faster, since it skips the JPEG compression step. Measured in practice, however, it turned out to be roughly 50% slower than the first. The reason is that in the first method the very first bitmap is already downscaled during JPEG decoding (via inSampleSize), so every subsequent step touches far fewer pixels, while the RenderScript path converts and draws the full-resolution frame before scaling it down.
So why study the second method at all? Because this conversion is called frequently (once per preview frame), and the first method produces a certain amount of memory churn from the per-frame JPEG buffer and intermediate bitmap. How best to trade off and optimize between the two is still an open question.
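For reference, the timing comparison above could be reproduced with a rough harness like the one below (my own sketch, not from the post): yuvToJpegToBitmap is a hypothetical wrapper around the first snippet, and context, nv21, stride, height and sampleSize are assumed to be in scope.
// Hypothetical micro-benchmark comparing the two paths over 100 frames.
// Needs: import android.os.SystemClock; import android.util.Log;
long t0 = SystemClock.elapsedRealtime();
for (int i = 0; i < 100; i++) {
    // Path 1: YUV -> JPEG -> Bitmap
    yuvToJpegToBitmap(nv21, stride, height, sampleSize).recycle();
}
long jpegMillis = SystemClock.elapsedRealtime() - t0;

t0 = SystemClock.elapsedRealtime();
for (int i = 0; i < 100; i++) {
    // Path 2: RenderScript intrinsic
    YuvToRgb.getInstance().convertYUVtoRGB(context, nv21, stride, height, sampleSize, 90).recycle();
}
long rsMillis = SystemClock.elapsedRealtime() - t0;
Log.d("YuvBench", "jpeg=" + jpegMillis + "ms, renderscript=" + rsMillis + "ms");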