As the foundation of inter-process communication on Android, Binder plays a critical role throughout the system. Understanding how Binder works helps us better understand the Android system architecture, for example how ActivityManagerService and WindowManagerService operate.
The previous article (a detailed look at the startup of servicemanager, Android's system service steward) focused on how servicemanager starts up. So how does a system service get registered with servicemanager, so that other applications can use it? Here we take cameraservice as an example to walk through the whole flow of system service registration, and along the way take a brief look at the native-layer structure of Binder.
At the framework level, Binder consists of a Java layer and a native (C++) layer; the Java layer calls into the native interfaces through JNI. The native layer (the libbinder module) is, broadly speaking, a user-space wrapper around the binder driver that lives in kernel space, and it exposes a set of interfaces and methods for IPC. A simplified Binder architecture is shown below:
Note: this analysis is based on Android 7.0 (N).
- Native: /android/frameworks/native/libs/binder/
- Java: /android/frameworks/base/core/java/android/os/
After the init process starts, it parses the init.rc startup script, which contains the following entry:
service cameraserver /system/bin/cameraserver
    class main
    user cameraserver
    group media_rw audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct shell sdcard_rw system
The entry point for the cameraserver module is the main function in main_cameraserver.cpp:
int main(int argc __unused, char** argv __unused)
{
    signal(SIGPIPE, SIG_IGN);
    // Obtain the ProcessState instance
    sp<ProcessState> proc(ProcessState::self());
    // Obtain the ServiceManager
    sp<IServiceManager> sm = defaultServiceManager();
    ALOGI("ServiceManager: %p", sm.get());
    // Publish cameraservice to the system
    CameraService::instantiate();
    // Start the binder thread pool
    ProcessState::self()->startThreadPool();
    // Join the binder thread pool
    IPCThreadState::self()->joinThreadPool();
}
// This function lives in CameraService's parent class BinderService
static void instantiate() { publish(); }
static status_t publish(bool allowIsolated = false) {
    sp<IServiceManager> sm(defaultServiceManager());
    // Register CameraService with the system under the name "media.camera"
    return sm->addService(
            String16(SERVICE::getServiceName()),
            new SERVICE(), allowIsolated);
}
Source: /android/frameworks/av/camera/cameraserver/main_cameraserver.cpp
Registering cameraservice involves a few main steps, described in the sections that follow.
Binder process state: ProcessState
ProcessState is a singleton, ensuring that only one instance is created per process.
sp<ProcessState> ProcessState::self()
{
    // Scoped lock, ensuring there is only ever a single instance
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {
        return gProcess;
    }
    gProcess = new ProcessState;
    return gProcess;
}
On the first call, gProcess is NULL, so a new ProcessState is created and stored in gProcess:
ProcessState::ProcessState()
    : mDriverFD(open_driver()) // open the binder driver
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // This triggers binder_mmap in the driver, mapping a region of
        // virtual memory used to receive transactions
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
        }
    }
}
During construction, open_driver is called to open the binder driver and obtain its file descriptor:
static int open_driver()
{
    // Open the binder driver and obtain a file descriptor
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        // Query the binder protocol version
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            ALOGE("Binder driver protocol does not match user space protocol!");
            close(fd);
            fd = -1;
        }
        // Set the maximum number of binder threads to the default (15)
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        ....
    } else {
        ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
    }
    return fd;
}
Once binder initialization is complete, the process needs to obtain the servicemanager interface in order to use the functionality servicemanager provides. Let's first look at the definition of IServiceManager. The IServiceManager class inherits from IInterface and declares four pure virtual functions, getService, checkService, addService, and listServices, which must be implemented in a subclass.
class IServiceManager : public IInterface
{
public:
    // Declare the meta-interface; the macro is defined in IInterface.h
    DECLARE_META_INTERFACE(ServiceManager);
    // Retrieve a service by name; may block the calling thread if the service does not exist yet
    virtual sp<IBinder> getService( const String16& name) const = 0;
    // Retrieve a service by name, without blocking
    virtual sp<IBinder> checkService( const String16& name) const = 0;
    // Register a service
    virtual status_t addService( const String16& name,
                                 const sp<IBinder>& service,
                                 bool allowIsolated = false) = 0;
    // List all registered services
    virtual Vector<String16> listServices() = 0;
    ....
};
// Obtain the interface to servicemanager
sp<IServiceManager> defaultServiceManager();
....
Now let's look at the implementation of defaultServiceManager(). The singleton pattern is used here as well: the single instance, once created, is stored in gDefaultServiceManager. So which object does gDefaultServiceManager actually refer to?
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    {
        AutoMutex _l(gDefaultServiceManagerLock);
        // Loop until servicemanager is available; the retry presumably
        // accounts for the servicemanager process being slow to start
        while (gDefaultServiceManager == NULL) {
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }
    return gDefaultServiceManager;
}
First, let's see what ProcessState::self()->getContextObject(NULL) actually returns. The corresponding function, getContextObject(), is found in ProcessState.cpp:
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    // Return a strong reference to the proxy for handle 0, i.e. servicemanager
    return getStrongProxyForHandle(0);
}
// Look up the BpBinder object for the given handle
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
    AutoMutex _l(mLock);
    handle_entry* e = lookupHandleLocked(handle);
    if (e != NULL) {
        // There is no BpBinder for this handle yet; create a new one
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                // Check whether servicemanager is up by pinging it
                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                    return NULL;
            }
            // Create the BpBinder for this handle, and return it
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}
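For reference, the handle_entry returned by lookupHandleLocked is a small bookkeeping record, one per known handle, kept in a per-process vector (declared in ProcessState.h):
// From ProcessState.h: caches the proxy and its weak-reference table per handle
struct handle_entry {
    IBinder* binder;
    RefBase::weakref_type* refs;
};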
So ProcessState::self()->getContextObject(NULL) in fact returns a BpBinder object with handle = 0. Thus:
gDefaultServiceManager = interface_cast<IServiceManager>(
        ProcessState::self()->getContextObject(NULL));
is effectively equivalent to:
gDefaultServiceManager = interface_cast<IServiceManager>(new BpBinder(0));
So how does interface_cast turn a BpBinder(0) into an IServiceManager interface? interface_cast is a template function defined in IInterface.h:
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}
Substituting IServiceManager for INTERFACE, this becomes:
inline sp<IServiceManager> interface_cast(const sp<IBinder>& obj)
{
    return IServiceManager::asInterface(obj);
}
The asInterface() method is declared as part of IServiceManager's declaration (via DECLARE_META_INTERFACE), and defined by the companion macro #define IMPLEMENT_META_INTERFACE(INTERFACE, NAME). IServiceManager.cpp contains this line:
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
Expanding this macro gives:
const android::String16 IServiceManager::descriptor("android.os.IServiceManager");
const android::String16&
        IServiceManager::getInterfaceDescriptor() const {
    return IServiceManager::descriptor;
}
android::sp<IServiceManager> IServiceManager::asInterface(
        const android::sp<android::IBinder>& obj)
{
    android::sp<IServiceManager> intr;
    if (obj != NULL) {
        // BpBinder does not override queryLocalInterface, so this returns NULL
        intr = static_cast<IServiceManager*>(
            obj->queryLocalInterface(
                    IServiceManager::descriptor).get());
        if (intr == NULL) {
            // Create a new BpServiceManager wrapping obj, and return it
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}
IServiceManager::IServiceManager() { }
IServiceManager::~IServiceManager() { }
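The same macro pair works for any binder interface, not just IServiceManager. As a sketch, a hypothetical interface IMyService (name and method invented for illustration) would use them like this:
// Hypothetical interface, for illustration only
class IMyService : public IInterface
{
public:
    // Declares descriptor, asInterface(), getInterfaceDescriptor(), ctor/dtor
    DECLARE_META_INTERFACE(MyService);
    virtual status_t doSomething() = 0;
};
// In IMyService.cpp: generates definitions analogous to the expansion above,
// with BpMyService in place of BpServiceManager
IMPLEMENT_META_INTERFACE(MyService, "com.example.IMyService");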
Putting the above together, gDefaultServiceManager is effectively: new BpServiceManager(new BpBinder(0)). Now look at the definition of BpServiceManager (shown below). Since BpInterface<IServiceManager> inherits from IServiceManager, BpServiceManager can implement the pure virtual interface that IServiceManager declares:
class BpServiceManager : public BpInterface<IServiceManager>
{
public:
    BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
    { }
    virtual sp<IBinder> getService(const String16& name) const
    {
        unsigned n;
        for (n = 0; n < 5; n++){
            sp<IBinder> svc = checkService(name);
            if (svc != NULL) return svc;
            ALOGI("Waiting for service %s...\n", String8(name).string());
            sleep(1);
        }
        return NULL;
    }
    virtual sp<IBinder> checkService( const String16& name) const
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        // Here remote() is mRemote, i.e. the BpBinder(0)
        remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
        return reply.readStrongBinder();
    }
    virtual status_t addService(const String16& name, const sp<IBinder>& service,
            bool allowIsolated)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        data.writeStrongBinder(service);
        data.writeInt32(allowIsolated ? 1 : 0);
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readExceptionCode() : err;
    }
    virtual Vector<String16> listServices()
    {
        Vector<String16> res;
        int n = 0;
        for (;;) {
            Parcel data, reply;
            data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
            data.writeInt32(n++);
            status_t err = remote()->transact(LIST_SERVICES_TRANSACTION, data, &reply);
            if (err != NO_ERROR)
                break;
            res.add(reply.readString16());
        }
        return res;
    }
};
// Declaration in IInterface.h
template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase
{
public:
    BpInterface(const sp<IBinder>& remote);
protected:
    virtual IBinder* onAsBinder();
};
During BpServiceManager's construction, the BpBinder(0) reference is passed up to BpInterface's base class BpRefBase and stored in its private member mRemote; the remote() accessor returns this reference, which is the bridge that connects the client to the server.
- /android/frameworks/native/libs/binder/IServiceManager.cpp
- /android/frameworks/native/include/binder/IInterface.h
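For reference, the relevant part of BpRefBase (abridged from Binder.h) shows how mRemote is stored and exposed:
// Abridged from frameworks/native/include/binder/Binder.h
class BpRefBase : public virtual RefBase
{
protected:
    BpRefBase(const sp<IBinder>& o);  // stores the IBinder into mRemote
    // remote() hands back the proxy's IBinder, here the BpBinder(0)
    inline IBinder* remote() { return mRemote; }
    inline IBinder* remote() const { return mRemote; }
private:
    IBinder* const mRemote;
    ...
};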
Next, CameraService is ready to be registered with the system: CameraService::instantiate() runs. This function is inherited from the template class BinderService<SERVICE>; substituting CameraService for SERVICE gives:
class BinderService
{
public:
    // Publish CameraService to the system
    static status_t publish(bool allowIsolated = false) {
        sp<IServiceManager> sm(defaultServiceManager());
        return sm->addService(
                String16(CameraService::getServiceName()),
                new CameraService(), allowIsolated);
    }
    static void publishAndJoinThreadPool(bool allowIsolated = false) {
        publish(allowIsolated);
        joinThreadPool();
    }
    // Instantiate CameraService and register it with the system
    static void instantiate() { publish(); }
    ...
};
As the previous section showed, defaultServiceManager() actually returns a BpServiceManager(new BpBinder(0)) reference, so sm->addService() in fact invokes BpServiceManager's addService function:
virtual status_t addService(const String16& name, const sp<IBinder>& service,
        bool allowIsolated)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    data.writeInt32(allowIsolated ? 1 : 0);
    // remote() returns the reference to BpBinder(0)
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}
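Note that writeStrongBinder() flattens the local CameraService object into the parcel as a flat_binder_object; when the transaction crosses into servicemanager's process, the driver rewrites it into a handle. For reference, the Android N-era UAPI layout (abridged):
// Abridged from uapi/linux/android/binder.h (Android N era)
struct flat_binder_object {
    __u32 type;   // BINDER_TYPE_BINDER for a local object,
                  // rewritten to BINDER_TYPE_HANDLE by the driver
    __u32 flags;
    union {
        binder_uintptr_t binder;  // local: pointer to the object's weak refs
        __u32 handle;             // remote: driver-assigned handle
    };
    binder_uintptr_t cookie;      // local: pointer to the BBinder itself
};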
After the remote call's arguments have been written into the Parcel, the transact function of BpBinder(0) is called:
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        // Hand the RPC data to the binder thread class IPCThreadState
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}
The thread class IPCThreadState maps one-to-one onto binder threads: every binder thread owns one IPCThreadState instance (implemented with thread-local storage via gTLS). It is responsible for talking to the binder driver, writing client data into the driver (staged in mOut) and reading data back out of the driver (staged in mIn).
IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;
    }
    ...
    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) {
        int key_create_value = pthread_key_create(&gTLS, threadDestructor);
        if (key_create_value != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
                    strerror(key_create_value));
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}
It in turn calls IPCThreadState's transact function:
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();
    flags |= TF_ACCEPT_FDS;
    if (err == NO_ERROR) {
        // Data is valid; stage it for writing to the driver
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }
    if ((flags & TF_ONE_WAY) == 0) {
        // Synchronous call: wait for the driver to return a result
        if (reply) {
            // Wait for the server's reply
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        // One-way (asynchronous) call: return without waiting for a reply
        err = waitForResponse(NULL, NULL);
    }
    return err;
}
waitForResponse keeps exchanging data with the binder driver; once the server responds with a result, it is saved into mIn:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;
    while (1) {
        // Flush the write buffer mOut into the driver
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        cmd = (uint32_t)mIn.readInt32();
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
        ....
        // The server has replied to the request
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;
                ....
            }
            goto finish;
        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }
    return err;
}
talkWithDriver keeps communicating with the binder driver, writing the RPC data in the mOut buffer into the driver:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }
    // The binder read/write request structure
    binder_write_read bwr;
    // Has the read buffer been fully consumed?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    // Only write to the driver if we are not expecting to read, or the read buffer is drained
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();
    if (doReceive && needRead) {
        // We want to read: point bwr's read buffer at mIn's data area
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
    bwr.write_consumed = 0; // bytes already written
    bwr.read_consumed = 0;  // bytes already read
    status_t err;
    do {
#if defined(__ANDROID__)
        // Talk to the driver: write mOut's data into the driver,
        // and store whatever is read back into mIn
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
    } while (err == -EINTR);
    if (err >= NO_ERROR) {
        // Remove the data the driver has consumed from mOut
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        // Record how much data was read back into mIn
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }
    return err;
}
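For reference, the binder_write_read structure that carries both directions of this exchange is defined in the UAPI header:
// From uapi/linux/android/binder.h
struct binder_write_read {
    binder_size_t    write_size;      // bytes available in write_buffer
    binder_size_t    write_consumed;  // bytes the driver consumed
    binder_uintptr_t write_buffer;
    binder_size_t    read_size;       // capacity of read_buffer
    binder_size_t    read_consumed;   // bytes the driver filled in
    binder_uintptr_t read_buffer;
};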
After CameraService has been added to servicemanager, the process starts binder threads and puts them into a ready state, waiting for RPC requests from remote client processes:
// Start the binder thread pool
ProcessState::self()->startThreadPool();
// Join the main thread into the binder thread pool
IPCThreadState::self()->joinThreadPool();
If the pool's main thread has not been started yet, startThreadPool spawns a new thread to serve as it:
void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    // Start the thread pool (main thread) exactly once
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}
// Create and run a new thread, named Binder_1, Binder_2, and so on
void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}
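The PoolThread created here is a thin Thread subclass whose loop simply parks the new thread in the binder loop (abridged from ProcessState.cpp):
// Abridged from ProcessState.cpp
class PoolThread : public Thread
{
public:
    PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }
protected:
    virtual bool threadLoop()
    {
        // The spawned thread joins the pool; returning false
        // means threadLoop runs only once
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;
    }
    const bool mIsMain;
};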
joinThreadPool adds the calling thread to the thread pool, announces it to the binder driver, and waits for client RPC requests:
void IPCThreadState::joinThreadPool(bool isMain)
{
    LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
    // Tell the driver whether this is the pool's main thread or a registered one
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    // Set the scheduling policy of the current thread
    set_sched_policy(mMyThreadId, SP_FOREGROUND);
    status_t result;
    do {
        processPendingDerefs();
        // Keep executing commands fetched from the binder driver, waiting when necessary
        result = getAndExecuteCommand();
        ....
        // A non-main thread that timed out simply exits
        if(result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);
    // Leave the thread pool
    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
getAndExecuteCommand keeps fetching commands from the binder driver and executing them:
status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;
    // Exchange data with the binder driver
    result = talkWithDriver();
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;
        cmd = mIn.readInt32();
        // Increment the count of threads in this process currently executing a command
        pthread_mutex_lock(&mProcess->mThreadCountLock);
        mProcess->mExecutingThreadsCount++;
        if (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads &&
                mProcess->mStarvationStartTimeMs == 0) {
            mProcess->mStarvationStartTimeMs = uptimeMillis();
        }
        pthread_mutex_unlock(&mProcess->mThreadCountLock);
        result = executeCommand(cmd);
        ....
        // Keep the thread in the foreground scheduling group
        set_sched_policy(mMyThreadId, SP_FOREGROUND);
    }
    return result;
}
executeCommand dispatches on the request code sent by the binder driver (BR_XXX commands) and performs the corresponding work; when a reply to the client is required, the result is written back to the driver (BC_XXX commands):
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;
    switch ((uint32_t)cmd) {
    ....
    case BR_ACQUIRE:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        obj->incStrong(mProcess.get());
        mOut.writeInt32(BC_ACQUIRE_DONE);
        mOut.writePointer((uintptr_t)refs);
        mOut.writePointer((uintptr_t)obj);
        break;
    case BR_RELEASE:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        mPendingStrongDerefs.push(obj);
        break;
    case BR_INCREFS:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        refs->incWeak(mProcess.get());
        mOut.writeInt32(BC_INCREFS_DONE);
        mOut.writePointer((uintptr_t)refs);
        mOut.writePointer((uintptr_t)obj);
        break;
    case BR_DECREFS:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        mPendingWeakDerefs.push(refs);
        break;
    case BR_ATTEMPT_ACQUIRE:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        {
            const bool success = refs->attemptIncStrong(mProcess.get());
            mOut.writeInt32(BC_ACQUIRE_RESULT);
            mOut.writeInt32((int32_t)success);
        }
        break;
    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ....
            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
            const pid_t origPid = mCallingPid;
            const uid_t origUid = mCallingUid;
            const int32_t origStrictModePolicy = mStrictModePolicy;
            const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
            mCallingPid = tr.sender_pid;
            mCallingUid = tr.sender_euid;
            mLastTransactionBinderFlags = tr.flags;
            int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
            if (gDisableBackgroundScheduling) {
                if (curPrio > ANDROID_PRIORITY_NORMAL) {
                    setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
                }
            } else {
                if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
                    set_sched_policy(mMyThreadId, SP_BACKGROUND);
                }
            }
            Parcel reply;
            status_t error;
            if (tr.target.ptr) {
                // First try to take a strong reference on the target BBinder,
                // then dispatch the transaction to it
                if (reinterpret_cast<RefBase::weakref_type*>(
                        tr.target.ptr)->attemptIncStrong(this)) {
                    error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                            &reply, tr.flags);
                    reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
                } else {
                    error = UNKNOWN_TRANSACTION;
                }
            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }
            if ((tr.flags & TF_ONE_WAY) == 0) {
                if (error < NO_ERROR) reply.setError(error);
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }
            mCallingPid = origPid;
            mCallingUid = origUid;
            mStrictModePolicy = origStrictModePolicy;
            mLastTransactionBinderFlags = origTransactionBinderFlags;
        }
        break;
    case BR_DEAD_BINDER:
        {
            BpBinder *proxy = (BpBinder*)mIn.readPointer();
            proxy->sendObituary();
            mOut.writeInt32(BC_DEAD_BINDER_DONE);
            mOut.writePointer((uintptr_t)proxy);
        } break;
    case BR_CLEAR_DEATH_NOTIFICATION_DONE:
        {
            BpBinder *proxy = (BpBinder*)mIn.readPointer();
            proxy->getWeakRefs()->decWeak(proxy);
        } break;
    ....
    case BR_SPAWN_LOOPER:
        // The driver asks this process to spawn an additional binder thread
        mProcess->spawnPooledThread(false);
        break;
    default:
        printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
        result = UNKNOWN_ERROR;
        break;
    }
    if (result != NO_ERROR) {
        mLastError = result;
    }
    return result;
}
At this point, CameraService is up and running. To recap the whole startup flow (sketched below):
- Create the ProcessState singleton, open the binder driver, and have the kernel map the memory needed for IPC;
- Obtain a BpBinder(0) reference (wrapped as BpServiceManager) and request that the service be added;
- Start the binder thread pool and join it, waiting for RPC requests from remote clients.
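With the service registered, any client process can look it up from servicemanager by name. A minimal client-side sketch (error handling omitted; the ICameraService cast is shown only as a comment, since that interface is outside this article's scope):
// Client-side sketch: retrieve the registered "media.camera" service
#include <binder/IServiceManager.h>
#include <binder/IBinder.h>

using namespace android;

sp<IBinder> getCameraServiceBinder() {
    // getService() retries up to five times (see BpServiceManager::getService);
    // the returned IBinder is a BpBinder wrapping the handle the driver
    // assigned to CameraService in this client's process
    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.camera"));
    // A real client would then wrap it:
    //   sp<ICameraService> cs = interface_cast<ICameraService>(binder);
    return binder;
}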