paint.setColor(resultBitmap != null ? resultColor : maskColor);
canvas.drawRect(0, 0, width, frame.top, paint);
canvas.drawRect(0, frame.top, frame.left, frame.bottom + 1, paint);
canvas.drawRect(frame.right + 1, frame.top, width, frame.bottom + 1, paint);
canvas.drawRect(0, frame.bottom + 1, width, height, paint);

drawRect takes five parameters: the first four are the left, top, right and bottom coordinates, i.e. two corner points that define a rectangle, and the fifth is the Paint to draw it with. The four calls above darken the area outside the scanning frame.
paint.setColor(frameColor);
canvas.drawRect(frame.left, frame.top, frame.right + 1, frame.top + 2, paint);
canvas.drawRect(frame.left, frame.top + 2, frame.left + 2, frame.bottom - 1, paint);
canvas.drawRect(frame.right - 1, frame.top, frame.right + 1, frame.bottom - 1, paint);
canvas.drawRect(frame.left, frame.bottom - 1, frame.right + 1, frame.bottom + 1, paint);

These four thin rectangles draw the border of the scanning frame; the red scan line in the middle is drawn the same way.
postInvalidateDelayed(ANIMATION_DELAY, frame.left, frame.top, frame.right, frame.bottom);

This line is the key. postInvalidateDelayed can be called from a non-UI thread; it schedules a refresh of the given region after ANIMATION_DELAY milliseconds. Since onDraw itself issues this call, onDraw keeps getting invoked, and that is what animates the green feature points on the screen. When I first read this code I could not figure out how the green marker points got added. On a second reading it became clear: after the camera focuses and delivers a frame, the core library decodes it and produces the coordinates of the possible result points, which are passed back through the ViewfinderResultPointCallback class and appended to an ArrayList container inside ViewfinderView.
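For context, the part of onDraw that actually paints those points looks roughly like the sketch below. It is simplified from ZXing's ViewfinderView; the names resultPointColor and possibleResultPoints follow that class, but treat this as an illustration rather than the exact source:

// Inside onDraw, after the frame has been painted (simplified sketch):
List<ResultPoint> currentPossible = possibleResultPoints;
if (!currentPossible.isEmpty()) {
    paint.setColor(resultPointColor); // the green marker color
    synchronized (currentPossible) {
        for (ResultPoint point : currentPossible) {
            // points are relative to the preview frame, so offset by its corner
            canvas.drawCircle(frame.left + point.getX(), frame.top + point.getY(), 6.0f, paint);
        }
    }
}
// schedule the next redraw of the frame region, which re-runs onDraw
postInvalidateDelayed(ANIMATION_DELAY, frame.left, frame.top, frame.right, frame.bottom);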
public void foundPossibleResultPoint(ResultPoint point) {
    viewfinderView.addPossibleResultPoint(point);
}

This function adds each feature point to possibleResultPoints. Being unfamiliar with Java, I did not realize that "=" applied to a List only copies the reference, so I kept wondering how the points could ever be retrieved when possibleResultPoints itself was never reassigned. Later I learned that the assignment simply makes both variables point to the same underlying list. If you actually want a separate copy of such a collection (still shallow with respect to the elements), you can use the copy constructor, e.g. new ArrayList<ResultPoint>(points).
public void addPossibleResultPoint(ResultPoint point) {
    List<ResultPoint> points = possibleResultPoints;
    synchronized (points) { // lock the shared list, not the point being added
        points.add(point);
        int size = points.size();
        if (size > MAX_RESULT_POINTS) {
            // trim it
            points.subList(0, size - MAX_RESULT_POINTS / 2).clear();
        }
    }
}
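A quick, self-contained illustration of the point about "=" above (plain Java; the names are made up for the demo):

import java.util.ArrayList;
import java.util.List;

public class CopyDemo {
    public static void main(String[] args) {
        List<String> original = new ArrayList<String>();
        List<String> alias = original;                       // "=" copies the reference only
        List<String> copy = new ArrayList<String>(original); // copy constructor: a new list

        original.add("point");
        System.out.println(alias.size()); // 1 -- alias sees the change, same list
        System.out.println(copy.size());  // 0 -- copy was taken before the add
    }
}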
If you want to dig deeper into how a view refresh is actually carried out, see the link below; that series of articles covers it in detail.

AndroidBluetooth blog: View编程(2): invalidate()再探
ViewfinderView is a custom view that implements a clean scanning UI. This post records my understanding of the Android Camera gained while reading the code. Since I have only just started writing technical posts, the previous two had plenty of shortcomings; I wrote them rather casually, and I suspect readers found it hard to get a clear picture from them. In this one I am trying to change the style and express my (admittedly shallow) understanding properly, so that others can actually follow it.
<uses-permission android:name="android.permission.CAMERA"/> <uses-feature android:name="android.hardware.camera"/>如下是一个很简单的camera示例,简单到只能取景,即打开相机,将景象显示在屏幕上,仅此而已。
import java.io.IOException;

import android.app.Activity;
import android.hardware.Camera;
import android.os.Bundle;
import android.view.SurfaceHolder;
import android.view.SurfaceView;

public class CameraTestActivity extends Activity implements SurfaceHolder.Callback {

    private SurfaceHolder surfaceHolder;
    private Camera camera;

    /** Called when the activity is first created. */
    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.main);
        SurfaceView surfaceView = (SurfaceView) findViewById(R.id.preview_view);
        surfaceHolder = surfaceView.getHolder();
        surfaceHolder.addCallback(this);
        surfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
    }

    @Override
    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
    }

    @Override
    public void surfaceCreated(SurfaceHolder holder) {
        camera = Camera.open();
        Camera.Parameters parameters = camera.getParameters();
        parameters.setPreviewSize(480, 320); // set the preview size
        camera.setParameters(parameters);
        try {
            camera.setPreviewDisplay(surfaceHolder);
        } catch (IOException e) {
            System.out.println(e.getMessage());
        }
        camera.startPreview();
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder holder) {
        if (camera != null) {
            camera.stopPreview();
            camera.release(); // keep release() inside the null check, otherwise it can NPE
            camera = null;
        }
    }
}

The R.id.preview_view referenced above is defined as:
<SurfaceView
    android:id="@+id/preview_view"
    android:layout_width="fill_parent"
    android:layout_height="fill_parent" />
android:screenOrientation="landscape"

Changing landscape to portrait here produced an unexpected result: the screen did display in portrait, but the preview content was rotated relative to the display; holding the phone upright, the scene was drawn sideways. So you cannot fix the orientation simply by changing this attribute. I later called the following method to reset the display orientation of the preview.
camera.setDisplayOrientation(90);

After adjusting the display orientation the viewfinder finally looked right. But when I later previewed the captured frame I discovered this is something of an illusion: the camera sensor still captures in landscape and only the preview is rotated, which also introduces a stretching problem in the displayed picture. I did not dig further into that.
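Hard-coding 90 only works for the common case. The Android documentation for Camera.setDisplayOrientation gives a more general routine that derives the angle from the current display rotation and the sensor's mounting orientation; a sketch of it (Camera.getCameraInfo requires API level 9) follows:

import android.app.Activity;
import android.hardware.Camera;
import android.view.Surface;

public final class CameraOrientation {
    // Adapted from the Camera.setDisplayOrientation documentation.
    public static void setCameraDisplayOrientation(Activity activity, int cameraId, Camera camera) {
        Camera.CameraInfo info = new Camera.CameraInfo();
        Camera.getCameraInfo(cameraId, info);
        int rotation = activity.getWindowManager().getDefaultDisplay().getRotation();
        int degrees = 0;
        switch (rotation) {
            case Surface.ROTATION_0:   degrees = 0;   break;
            case Surface.ROTATION_90:  degrees = 90;  break;
            case Surface.ROTATION_180: degrees = 180; break;
            case Surface.ROTATION_270: degrees = 270; break;
        }
        int result;
        if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
            result = (info.orientation + degrees) % 360;
            result = (360 - result) % 360; // compensate for the front camera's mirror
        } else { // back-facing camera
            result = (info.orientation - degrees + 360) % 360;
        }
        camera.setDisplayOrientation(result);
    }
}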
<uses-permission android:name="android.permission.CAMERA" /> <uses-feature android:name="android.hardware.camera" /> <uses-feature android:name="android.hardware.camera.autofocus" />整个代码:
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Timer;
import java.util.TimerTask;

import android.app.Activity;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.ImageFormat;
import android.graphics.Rect;
import android.graphics.YuvImage;
import android.hardware.Camera;
import android.os.Bundle;
import android.util.Log;
import android.view.Display;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.WindowManager;
import android.widget.ImageView;

public class CameraTestActivity extends Activity implements SurfaceHolder.Callback {

    private static String TAG = CameraTestActivity.class.getSimpleName();
    private SurfaceHolder surfaceHolder;
    private Camera camera;
    private ImageView imageView;
    private Timer mTimer;
    private TimerTask mTimerTask;
    private Camera.AutoFocusCallback mAutoFocusCallBack;
    private Camera.PreviewCallback previewCallback;

    /** Called when the activity is first created. */
    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.main);
        SurfaceView surfaceView = (SurfaceView) findViewById(R.id.preview_view);
        imageView = (ImageView) findViewById(R.id.image_view);
        surfaceHolder = surfaceView.getHolder();
        surfaceHolder.addCallback(this);
        surfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);

        mAutoFocusCallBack = new Camera.AutoFocusCallback() {
            @Override
            public void onAutoFocus(boolean success, Camera camera) {
                if (success) {
                    // grab exactly one preview frame once focus has been achieved
                    camera.setOneShotPreviewCallback(previewCallback);
                    Log.d(TAG, "onAutoFocus success");
                }
            }
        };

        previewCallback = new Camera.PreviewCallback() {
            @Override
            public void onPreviewFrame(byte[] data, Camera arg1) {
                if (data != null) {
                    Camera.Parameters parameters = camera.getParameters();
                    int imageFormat = parameters.getPreviewFormat();
                    Log.i("map", "Image Format: " + imageFormat);
                    Log.i("CameraPreviewCallback", "data length:" + data.length);
                    if (imageFormat == ImageFormat.NV21) {
                        // get the full picture
                        Bitmap image = null;
                        int w = parameters.getPreviewSize().width;
                        int h = parameters.getPreviewSize().height;
                        Rect rect = new Rect(0, 0, w, h);
                        YuvImage img = new YuvImage(data, ImageFormat.NV21, w, h, null);
                        ByteArrayOutputStream baos = new ByteArrayOutputStream();
                        if (img.compressToJpeg(rect, 100, baos)) {
                            image = BitmapFactory.decodeByteArray(baos.toByteArray(), 0, baos.size());
                            imageView.setImageBitmap(image);
                        }
                    }
                }
            }
        };

        mTimer = new Timer();
        mTimerTask = new CameraTimerTask();
        mTimer.schedule(mTimerTask, 0, 500);
    }

    @Override
    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
    }

    @Override
    public void surfaceCreated(SurfaceHolder holder) {
        initCamera();
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder holder) {
        if (camera != null) {
            camera.stopPreview();
            camera.release();
            camera = null;
        }
        previewCallback = null;
        mAutoFocusCallBack = null;
    }

    public void initCamera() {
        camera = Camera.open();
        Camera.Parameters parameters = camera.getParameters();
        WindowManager wm = (WindowManager) getSystemService(Context.WINDOW_SERVICE); // get the window manager
        Display display = wm.getDefaultDisplay(); // describes the screen
        parameters.setPreviewSize(display.getWidth(), display.getHeight());
        camera.setParameters(parameters);
        try {
            camera.setPreviewDisplay(surfaceHolder);
        } catch (IOException e) {
            System.out.println(e.getMessage());
        }
        camera.startPreview();
    }

    class CameraTimerTask extends TimerTask {
        @Override
        public void run() {
            if (camera != null) {
                camera.autoFocus(mAutoFocusCallBack);
            }
        }
    }
}

Compared with the bare preview in the previous post, this version adds two things: auto focus and grabbing a frame. The code looks simple, with not much to it, but you will not discover the pitfalls until you actually run it.
Bitmap bitmap = BitmapFactory.decodeByteArray(data, 0, data.length);

This line always returned null; no bitmap was ever produced. I had simply taken the code as-is, content that it "worked", with only a superficial understanding, so when this bug appeared I was stumped. After some searching I found the cause: BitmapFactory.decodeByteArray only supports certain compressed image formats, while the camera's preview format is NV21 raw YUV data. To obtain a bitmap the frame has to be converted first: wrap it in a YuvImage, compress it to JPEG, and then decode that. For the detailed discussion, see the linked thread.
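The conversion used in the example above, pulled out into a small helper for clarity (the method name nv21ToBitmap is mine, not from any library):

// Sketch: convert an NV21 preview frame to a Bitmap via a JPEG intermediate.
public static Bitmap nv21ToBitmap(byte[] data, int width, int height) {
    YuvImage yuv = new YuvImage(data, ImageFormat.NV21, width, height, null);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // quality 100; compressToJpeg returns false on failure
    if (!yuv.compressToJpeg(new Rect(0, 0, width, height), 100, baos)) {
        return null;
    }
    byte[] jpeg = baos.toByteArray();
    return BitmapFactory.decodeByteArray(jpeg, 0, jpeg.length); // JPEG is a supported format
}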
CameraManager.get().requestPreviewFrame(decodeThread.getHandler(), R.id.decode); // request a frame to decode
CameraManager.get().requestAutoFocus(this, R.id.auto_focus); // request focusing

ZXing requests the preview frame first and then requests focusing, and the overridden focus callback issues another focus request after an interval, simulating continuous autofocus.
public void onAutoFocus(boolean success, Camera camera) {
    if (autoFocusHandler != null) {
        Message message = autoFocusHandler.obtainMessage(autoFocusMessage, success);
        // Simulate continuous autofocus by sending a focus request every
        // AUTOFOCUS_INTERVAL_MS milliseconds.
        //Log.d(TAG, "Got auto-focus callback; requesting another");
        autoFocusHandler.sendMessageDelayed(message, AUTOFOCUS_INTERVAL_MS);
        autoFocusHandler = null;
    } else {
        Log.d(TAG, "Got auto-focus callback, but no handler for it");
    }
}

There is no strict ordering between focusing and grabbing a frame; focusing simply makes the captured frame sharper. That covers the simple viewfinder-focus-capture flow of the camera.
private final Button.OnClickListener addCardListener = new Button.OnClickListener() {
    @Override
    public void onClick(View v) {
        // handle the button click here
    }
};

The code above registers a click listener: when a click occurs, the framework calls back into onClick. This is a callback at its simplest.
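The same pattern is easy to build yourself. A minimal hypothetical example (the interface and all names are invented for illustration):

// A callback interface of our own.
interface OnDecodeListener {
    void onDecoded(String text);
}

class Decoder {
    private OnDecodeListener listener;

    void setOnDecodeListener(OnDecodeListener listener) {
        this.listener = listener;
    }

    void decode(byte[] data) {
        String result = "..."; // the actual decoding would happen here
        if (listener != null) {
            listener.onDecoded(result); // "call back" to whoever registered
        }
    }
}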
System.out.println(TAG + "The worker thread id = " + Thread.currentThread().getId()); // print the thread id

After sprinkling this line through the relevant classes, the output is:
01-12 02:41:12.594: I/System.out(655): CaptureActivity The main thread id = 1
01-12 02:41:14.605: I/System.out(655): CaptureActivityHandler The handler thread id = 1
01-12 02:41:12.946: I/System.out(655): DecodeThread The worker thread id = 13
01-12 02:41:13.094: I/System.out(655): DecodeHandler The handler thread id = 13
As the output shows, each handler belongs to the thread it was created on, but the two handlers are created quite differently. CaptureActivityHandler is created with a plain new and nothing else, because when the main activity starts, the system automatically creates a Looper for the main thread; that Looper manages the thread's message loop, fetching and dispatching messages, so no extra setup is needed. A thread you create yourself gets no Looper by default; you have to set up the message loop for it yourself.
First look at the DecodeThread code (with the parts irrelevant to understanding the threading removed):
final class DecodeThread extends Thread {

    public static String TAG = DecodeThread.class.getSimpleName();
    private final CaptureActivity activity;
    private Handler handler;
    private final CountDownLatch handlerInitLatch; // a countdown latch

    DecodeThread(CaptureActivity activity, Vector<BarcodeFormat> decodeFormats,
                 String characterSet, ResultPointCallback resultPointCallback) {
        this.activity = activity;
        handlerInitLatch = new CountDownLatch(1); // counts down from 1
        // hints is built from decodeFormats etc.; that construction is omitted here
    }

    Handler getHandler() {
        try {
            // Block until the handler has been initialized, i.e. until the
            // latch has counted down to 0.
            handlerInitLatch.await();
        } catch (InterruptedException ie) {
            // continue?
        }
        return handler;
    }

    @Override
    public void run() {
        Looper.prepare();
        handler = new DecodeHandler(activity, hints);
        handlerInitLatch.countDown(); // 1 - 1 = 0: getHandler() may now return
        System.out.println(TAG + "The worker thread id = " + Thread.currentThread().getId()); // print the thread id
        Looper.loop();
    }
}
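Incidentally, the standard library's HandlerThread encapsulates exactly this pattern: its getLooper() blocks until the thread's Looper is ready, playing the same role as the CountDownLatch above. A minimal sketch:

// HandlerThread prepares and runs a Looper for you.
HandlerThread workerThread = new HandlerThread("decode");
workerThread.start();
// getLooper() blocks until the worker's Looper exists, like handlerInitLatch.await().
Handler workerHandler = new Handler(workerThread.getLooper()) {
    @Override
    public void handleMessage(Message msg) {
        // runs on the worker thread
    }
};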
Message message = autoFocusHandler.obtainMessage(autoFocusMessage, success);
autoFocusHandler.sendMessageDelayed(message, AUTOFOCUS_INTERVAL_MS);

This obtains a message and sends it after a delay.
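On the receiving side, the handler's handleMessage gets that message; roughly like the sketch below (an assumption: autoFocusMessage is the R.id.auto_focus constant used elsewhere in the client):

// Sketch of the receiving handler.
Handler autoFocusHandler = new Handler() {
    @Override
    public void handleMessage(Message message) {
        if (message.what == R.id.auto_focus) { // assumes autoFocusMessage == R.id.auto_focus
            boolean success = (Boolean) message.obj; // obtainMessage stored it in obj
            // kick off the next focus cycle here
        }
    }
};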
For Android's message-handling mechanism there is a better article here, and another one introducing Looper; both explain it more thoroughly and practically. I also came across a truly comprehensive piece.
// For now, merely tries to distinguish ISO-8859-1, UTF-8 and Shift_JIS,
// which should be by far the most common encodings. ISO-8859-1
// should not have bytes in the 0x80 - 0x9F range, while Shift_JIS
// uses this as a first byte of a two-byte character. If we see this
// followed by a valid second byte in Shift_JIS, assume it is Shift_JIS.
// If we see something else in that second byte, we'll make the risky guess
// that it's UTF-8.

This comment describes the code's basic approach to guessing the encoding; what is missing is a guess for GB2312. GB2312 encodes each character with two bytes: the first byte falls in the range (0xB0, 0xF7) and the second byte that follows in the range (0xA0, 0xF7). Based on this rule we can add a simple check that fixes the garbled text produced when scanning GB2312-encoded QR codes.
for (int i = 0; i < length; i++) {
    int value = bytes[i] & 0xFF;
    if (value > 0x7F) { // above 127: could be GB2312, so inspect this byte and the next
        if (value > 0xB0 && value <= 0xF7) { // first byte in range: check the second byte
            if (i + 1 < length) { // guard against running off the end of the array
                int value2 = bytes[i + 1] & 0xFF;
                if (value2 > 0xA0 && value2 <= 0xF7) {
                    return true;
                }
            }
        }
    }
}

This is a simple check; after adding it to the encoding-guessing code, GB2312-encoded QR codes are identified correctly. Still, the approach feels inefficient and is not really integrated with the ZXing source; it needs more thought.
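Wrapped up, the intended use looks something like this (maybeGB2312 is a hypothetical name for the loop above, and exactly where to hook it into ZXing's StringUtils is left open):

// Hypothetical integration sketch: prefer GB2312 when the heuristic fires.
static String guessEncoding(byte[] bytes) {
    if (maybeGB2312(bytes)) {
        return "GB2312";
    }
    // ... fall through to the existing ISO-8859-1 / UTF-8 / Shift_JIS logic
    return "UTF-8";
}

// Decoding the raw bytes once the charset has been guessed:
// String text = new String(bytes, guessEncoding(bytes));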
byte[] rotatedData = new byte[data.length];
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        // rotate the Y (luminance) plane 90 degrees; ZXing's decoder only reads luminance
        rotatedData[x * height + height - y - 1] = data[x + y * width];
    }
}

This fixes the misplaced ResultPoint markers. Of course, besides setting the orientation and rotating the data, the view layout also needs adjusting so that the scanning frame looks right.
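One detail worth noting: after the rotation the frame's dimensions are transposed, so width and height have to be swapped before handing the data to the decoder. A sketch, assuming the CameraManager.buildLuminanceSource(data, width, height) call used by the ZXing Android client:

// After rotating 90 degrees, what was the width is now the height and vice versa.
int tmp = width;
width = height;
height = tmp;

// Hand the rotated luminance data to the decoder with the swapped dimensions.
PlanarYUVLuminanceSource source =
        CameraManager.get().buildLuminanceSource(rotatedData, width, height);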