Taking MediaServer as an example, the process initializes as follows:
1. Create all Service instances that live in the process
2. Register each service with the service manager (a sketch of this step follows the list)
3. Start a thread pool and wait for binder events to process
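Step 2 is hidden inside each service's instantiate() call. Roughly, instantiate() just creates the service object and hands it to the service manager under a well-known name (shown here for MediaPlayerService; the other services follow the same pattern):

void MediaPlayerService::instantiate()
{
    // register the new service object with servicemanager under "media.player"
    defaultServiceManager()->addService(
            String16("media.player"), new MediaPlayerService());
}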
The MediaServer processing flow is shown in the figure below:
In the figure above, the two mediaserver threads first call ioctl() on the binder device and block, waiting for the driver to wake them up (steps a1 and b1). A client then sends a command to the driver through its BpXXX proxy (steps a2 and b2), and the client thread is suspended. When the binder driver receives the request, it first updates the binder_write_read structure, then wakes up one of the blocked server threads, which calls BnXXX::onTransact() to carry out the request. Once the server side finishes, it notifies the driver, and the driver wakes up the client thread so it can pick up the final result.
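To make the BpXXX side of steps a2/b2 concrete, here is a minimal hypothetical proxy; IFoo, BpFoo and DO_SOMETHING are invented for this sketch, but the remote()->transact() pattern is the standard one used by the generated Bp classes:

#include <binder/IInterface.h>
#include <binder/Parcel.h>

// Hypothetical interface used only for illustration.
class IFoo : public IInterface {
public:
    DECLARE_META_INTERFACE(Foo);
    enum { DO_SOMETHING = IBinder::FIRST_CALL_TRANSACTION };
    virtual int32_t doSomething(int32_t arg) = 0;
};

// Client-side proxy: packs the arguments into a Parcel and hands them to the
// driver via remote()->transact(); the calling thread blocks until the reply
// Parcel comes back (steps a2/b2 in the figure).
class BpFoo : public BpInterface<IFoo> {
public:
    BpFoo(const sp<IBinder>& impl) : BpInterface<IFoo>(impl) {}

    virtual int32_t doSomething(int32_t arg) {
        Parcel data, reply;
        data.writeInterfaceToken(IFoo::getInterfaceDescriptor());
        data.writeInt32(arg);
        remote()->transact(IFoo::DO_SOMETHING, data, &reply);  // client thread blocks here
        return reply.readInt32();
    }
};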
The detailed code path is as follows:
frameworks/base/media/mediaserver/main_mediaserver.cpp
int main(int argc, char** argv)
{
    sp<ProcessState> proc(ProcessState::self());
    sp<IServiceManager> sm = defaultServiceManager();
    LOGI("ServiceManager: %p", sm.get());
    MediaPlayerService::instantiate();   // new MediaPlayerService and register it
    CameraService::instantiate();
    AudioPolicyService::instantiate();
    ProcessState::self()->startThreadPool();   // spawns a pool thread that ends up in IPCThreadState::joinThreadPool(true) -- why?
    IPCThreadState::self()->joinThreadPool();  // the main thread also calls IPCThreadState::joinThreadPool(true)
}
Open question: after startThreadPool() and joinThreadPool() there are two threads, the main thread and one pool thread, both running the message loop and both with isMain == true. Did Google add the second looper simply to spread the load across two threads? People online have reported that commenting out the last line still works fine.
frameworks/base/libs/binder/ProcessState.cpp
ProcessState::ProcessState()
    : mDriverFD(open_driver())
    , mVMStart(MAP_FAILED)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    ...
}

static int open_driver()
{
    if (gSingleProcess) {
        return -1;
    }

    int fd = open("/dev/binder", O_RDWR);
    if (fd >= 0) {
        // FD_CLOEXEC: the fd is closed automatically when the process calls exec(),
        // so it is not passed on to the new program image
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        int vers;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            LOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            LOGE("Binder driver protocol does not match user space protocol!");
            close(fd);
            fd = -1;
        }
#if defined(HAVE_ANDROID_OS)
        size_t maxThreads = 15;   // 15 is the maximum number of threads in the binder thread pool
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            LOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
#endif
    }
    return fd;
}

void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);   // ends up in IPCThreadState::self()->joinThreadPool(true)
    }
}

void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        int32_t s = android_atomic_add(1, &mThreadPoolSeq);
        char buf[32];
        sprintf(buf, "Binder Thread #%d", s);
        sp<Thread> t = new PoolThread(isMain);
        t->run(buf);
    }
}

class PoolThread : public Thread
{
protected:
    virtual bool threadLoop()
    {
        IPCThreadState::self()->joinThreadPool(mIsMain);   // the pool thread spends its life here
        return false;
    }
    const bool mIsMain;
};
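The FD_CLOEXEC flag that open_driver() sets just keeps the binder fd from leaking into exec()'ed programs. A standalone POSIX sketch (not Android code) that shows the effect; listing /proc/self/fd in the new program shows the inherited fd but not the one marked close-on-exec:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main()
{
    int keep = open("/dev/null", O_RDWR);   // inherited across exec
    int drop = open("/dev/null", O_RDWR);
    fcntl(drop, F_SETFD, FD_CLOEXEC);       // closed automatically on exec

    printf("before exec: keep=%d drop=%d\n", keep, drop);
    fflush(stdout);
    // The fd table of the exec'ed program contains 'keep' but not 'drop'.
    execlp("ls", "ls", "-l", "/proc/self/fd", (char*)NULL);
    return 1;                                // only reached if exec fails
}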
void IPCThreadState::joinThreadPool(bool isMain)
{
    // Why would multiple threads share the same Parcel mIn/mOut? (They don't:
    // IPCThreadState::self() returns a per-thread instance kept in TLS, so each
    // pool thread has its own mIn/mOut.)
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    ...
    status_t result;
    do {
        int32_t cmd;
        ...
        // now get the next command to be processed, waiting if necessary
        result = talkWithDriver();
        if (result >= NO_ERROR) {
            size_t IN = mIn.dataAvail();
            if (IN < sizeof(int32_t)) continue;
            cmd = mIn.readInt32();
            result = executeCommand(cmd);
        }
        ...
        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if (result == TIMED_OUT && !isMain) {   // a non-main pool thread leaves the loop on timeout
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    mOut.writeInt32(BC_EXIT_LOOPER);   // tell the binder driver this looper is going away
    talkWithDriver(false);
}

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;
    ...
    case BR_TRANSACTION:   // the central command: an incoming call
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ...
            Parcel reply;
            if (tr.target.ptr) {
                sp<BBinder> b((BBinder*)tr.cookie);
                const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);  // BBinder::transact() -> onTransact()
                if (error < NO_ERROR) reply.setError(error);
            } else {
                const status_t error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
                if (error < NO_ERROR) reply.setError(error);
            }
            if ((tr.flags & TF_ONE_WAY) == 0) {
                sendReply(reply, 0);
            }
            mCallingPid = origPid;
            mCallingUid = origUid;
        }
        break;
    case BR_SPAWN_LOOPER:
        mProcess->spawnPooledThread(false);   // the pool is short of ready threads: create a new one and add it
        break;
    ...
}
frameworks/base/libs/binder/Binder.cpp
status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default:
            err = onTransact(code, data, reply, flags);   // dispatch to the service's onTransact()
            break;
    }

    if (reply != NULL) {
        reply->setDataPosition(0);
    }

    return err;
}
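onTransact() is what a concrete service overrides. To mirror the hypothetical BpFoo sketch shown earlier (IFoo, BnFoo and DO_SOMETHING are again invented names), the server side would look roughly like this:

// Hypothetical server-side stub: unpacks the Parcel delivered by the driver,
// calls the real implementation, and writes the result into the reply Parcel
// that sendReply() pushes back to the client.
class BnFoo : public BnInterface<IFoo> {
public:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags = 0)
    {
        switch (code) {
            case IFoo::DO_SOMETHING: {
                data.enforceInterface(IFoo::getInterfaceDescriptor());
                int32_t arg = data.readInt32();
                reply->writeInt32(doSomething(arg));   // call into the service implementation
                return NO_ERROR;
            }
            default:
                // unknown code: let BBinder handle things like PING_TRANSACTION
                return BBinder::onTransact(code, data, reply, flags);
        }
    }
};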
kernel/omap4/drivers/staging/android/binder.c
static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              void __user *buffer, int size,
                              signed long *consumed, int non_block)
{
    ...
retry:
    // if this thread has no work of its own, make it available for work
    // queued against the whole process
    wait_for_proc_work = thread->transaction_stack == NULL &&
                         list_empty(&thread->todo);
    ...
    thread->looper |= BINDER_LOOPER_STATE_WAITING;   // mark the thread as waiting
    if (wait_for_proc_work)
        proc->ready_threads++;   // one more idle worker available to the process
    mutex_unlock(&binder_lock);

    if (wait_for_proc_work) {
        ...
        if (non_block) {   // the fd was opened non-blocking: do not sleep
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            // sleep until the process has work queued
            ret = wait_event_interruptible_exclusive(proc->wait,
                        binder_has_proc_work(proc, thread));
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            // sleep until this particular thread has work queued for it
            ret = wait_event_interruptible(thread->wait,
                        binder_has_thread_work(thread));
    }

    mutex_lock(&binder_lock);
    if (wait_for_proc_work)
        proc->ready_threads--;   // the thread is running again: one fewer idle worker
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;  // clear the waiting flag
    ...
    if (proc->requested_threads + proc->ready_threads == 0 &&   // no spawn request pending and no idle worker left
        proc->requested_threads_started < proc->max_threads &&  // and the pool has not reached its maximum size
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |     // and this thread is a registered/entered looper
                           BINDER_LOOPER_STATE_ENTERED))
        /* the user-space code fails to spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_debug(BINDER_DEBUG_THREADS,
                     "binder: %d:%d BR_SPAWN_LOOPER\n",
                     proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))   // ask the service process to spawn another pool thread
            return -EFAULT;
    }
    return 0;
}
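Read back to back with the userspace code above, this closes a loop: the driver queues BR_SPAWN_LOOPER, IPCThreadState::executeCommand() calls spawnPooledThread(false), and the new thread reports back with BC_REGISTER_LOOPER. As a reading aid only (not driver code), the spawn predicate can be restated in plain C++ with the field names copied from the struct above:

// Restates the condition under which binder_thread_read() asks userspace to
// grow the pool.
struct PoolState {
    int requested_threads;          // spawn requests sent but not yet acknowledged
    int ready_threads;              // threads currently blocked waiting for work
    int requested_threads_started;  // driver-requested threads that have started
    int max_threads;                // value set via BINDER_SET_MAX_THREADS (15 here)
};

bool should_spawn_looper(const PoolState& p, bool thread_is_looper)
{
    return p.requested_threads + p.ready_threads == 0   // nobody idle, nothing pending
        && p.requested_threads_started < p.max_threads  // pool still below its cap
        && thread_is_looper;                            // only registered/entered loopers trigger a spawn
}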
static int binder_has_proc_work(struct binder_proc *proc,
                                struct binder_thread *thread)
{
    return !list_empty(&proc->todo) ||
           (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

static int binder_has_thread_work(struct binder_thread *thread)
{
    return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
           (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        void __user *buffer, int size, signed long *consumed)
{
    ...
    case BC_REGISTER_LOOPER:
        if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
            thread->looper |= BINDER_LOOPER_STATE_INVALID;
            binder_user_error("binder: %d:%d ERROR:"
                              " BC_REGISTER_LOOPER called "
                              "after BC_ENTER_LOOPER\n",
                              proc->pid, thread->pid);
        } else if (proc->requested_threads == 0) {
            thread->looper |= BINDER_LOOPER_STATE_INVALID;
            binder_user_error("binder: %d:%d ERROR:"
                              " BC_REGISTER_LOOPER called "
                              "without request\n",
                              proc->pid, thread->pid);
        } else {
            proc->requested_threads--;           // one outstanding spawn request to the service process has been satisfied
            proc->requested_threads_started++;   // count of driver-requested threads that have started
        }
        thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
        break;
    case BC_ENTER_LOOPER:
        thread->looper |= BINDER_LOOPER_STATE_ENTERED;
        break;
    ...
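Pulling the userspace and kernel pieces together: the main thread and the thread created by startThreadPool() both call joinThreadPool(true) and therefore announce themselves with BC_ENTER_LOOPER, so they are never counted in requested_threads_started. Only threads spawned in response to BR_SPAWN_LOOPER call joinThreadPool(false), report BC_REGISTER_LOOPER, count against the max_threads limit (15, set in open_driver() above), and leave the loop again on TIMED_OUT when the driver no longer needs them.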