Linux offers several inter-process communication (IPC) mechanisms: sockets, named pipes, message queues, signals, and shared memory. Java programs typically use sockets, named pipes, and so on, whereas Android normally uses Binder for IPC. Binder communication is implemented by the Linux binder driver and behaves much like thread migration: an IPC call between two processes looks as if one process enters the other, executes code there, and returns with the result. Binder calls are synchronous rather than asynchronous, and Android chose this mechanism largely for its efficiency. Binder communication in Android is based on a Service and a Client: the two sides exchange data directly through Binder by opening the Binder device, writing the request, and reading the reply, and the transaction is complete. Binder itself is a very low-level concept; the layers above never see it directly.
In Android, to get something done you simply ask a service object capable of it to perform the action, without needing to know how the communication works or where the service lives. Android IPC is therefore essentially an object request broker architecture, shown in Figure 1:
Figure 1: Android IPC architecture
Binder communication in Android is based on a Service and a Client, and every process that participates in Binder communication must implement an IBinder interface. The IBinder implementation on the Android proxy (client) side usually does no more than send service requests to the server, while the IBinder implementation on the Android native (service) side actually performs the service work; the typical entry point is onTransact.
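To make the split concrete, below is a minimal, hypothetical sketch: the interface is deliberately raw (no IInterface), and the name HelloService, the transaction code HELLO_TRANSACTION and the string payload are invented for illustration. The proxy side only packs arguments into a Parcel and calls transact(); the native side unpacks them in onTransact() and does the real work.

#include <binder/Binder.h>    // BBinder
#include <binder/IBinder.h>
#include <binder/Parcel.h>
using namespace android;

enum { HELLO_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION };

// Client side: marshal the request and forward it to the remote object.
status_t sayHello(const sp<IBinder>& remote, const String16& who)
{
    Parcel data, reply;
    data.writeString16(who);                                    // pack the argument
    return remote->transact(HELLO_TRANSACTION, data, &reply);   // ship it to the service
}

// Service side: the real work happens in onTransact().
class HelloService : public BBinder
{
protected:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags)
    {
        switch (code) {
        case HELLO_TRANSACTION: {
            String16 who = data.readString16();   // unpack the argument
            // ... perform the actual service work here ...
            reply->writeInt32(NO_ERROR);          // marshal the result
            return NO_ERROR;
        }
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};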
The native Binder implementation is spread across the following headers:
RefBase.h: reference counting; defines the class RefBase.
Parcel.h: the container for data carried across IPC; defines the class Parcel.
IBinder.h: the abstract interface for Binder objects; defines the class IBinder.
Binder.h: the basic functionality of a BBinder object; defines the classes BBinder (which a service inherits from) and BpRefBase.
BpBinder.h: the functionality of BpBinder; defines the class BpBinder (which a client proxy holds).
IInterface.h: generic classes for interfaces that travel over Binder; defines the class IInterface, the class template BnInterface (which a service inherits from), and the class template BpInterface (which a client proxy inherits from).
ProcessState.h: represents the state of a process; defines the class ProcessState.
IPCThreadState.h: represents the state of an IPC thread; defines the class IPCThreadState.
Before the Android virtual machine starts, the system first launches the Service Manager process. The Service Manager opens the binder driver and informs the binder kernel driver that this process will act as the System Service Manager; the process then enters a loop and waits to handle data arriving from other processes.
The source code is analysed below (Service_manager.c):
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;

    bs = binder_open(128*1024);              // open the /dev/binder driver
    if (binder_become_context_manager(bs)) { // register as the service manager with the binder kernel
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler);
    return 0;
}
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned)); /* tell the binder driver that this thread enters the listening loop */

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); /* block waiting for data from other processes */
        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func); /* parse the received data; for cmd == BR_TRANSACTION the handler func is invoked */
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;

    /* If the handle a client used when looking up a service does not match this,
       the Service Manager refuses the client's request. */
    if (txn->target != svcmgr_handle)
        return -1;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr, "invalid id %s\n", str8(s));
        return -1;
    }

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:            /* look up or check a service */
        s = bio_get_string16(msg, &len);
        ptr = do_find_service(bs, s, len);
        if (!ptr)
            break;
        bio_put_ref(reply, ptr);
        return 0;

    case SVC_MGR_ADD_SERVICE:              /* register (add) a service */
        s = bio_get_string16(msg, &len);
        ptr = bio_get_ref(msg);
        if (do_add_service(bs, s, len, ptr, txn->sender_euid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        unsigned n = bio_get_uint32(msg);
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        LOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
The process first opens the binder driver; binder_become_context_manager then issues an ioctl to tell the binder kernel driver that this is the service manager process, and finally binder_loop is called to wait for data from other processes. BINDER_SERVICE_MANAGER is the handle of the service manager process, defined as:
/* the one magic object */
#define BINDER_SERVICE_MANAGER ((void*)0)
If the handle a client process uses when looking up a service does not match this value, the Service Manager rejects the client's request.
The two operations users most commonly request from the ServiceManager are registering a service (addService) and looking one up (getService). Calling defaultServiceManager returns an interface to the remote ServiceManager; through that interface a system service can call addService to register itself with the Service Manager process, and a client can later call getService to obtain the IBinder object of the target service it wants to connect to.
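As a rough usage sketch (the service name "my.demo.service" is a made-up placeholder; defaultServiceManager(), addService() and getService() are the real framework calls):

#include <binder/IServiceManager.h>
#include <binder/IBinder.h>
#include <utils/String16.h>
using namespace android;

// Server side: publish a BBinder-derived service object under a well-known name.
void publishDemoService(const sp<IBinder>& service)
{
    defaultServiceManager()->addService(String16("my.demo.service"), service);
}

// Client side: look the service up by name; the returned IBinder is the
// handle-backed BpBinder through which all further calls are made.
sp<IBinder> findDemoService()
{
    sp<IServiceManager> sm = defaultServiceManager();
    // getService() blocks for a few seconds if the service is not yet registered.
    return sm->getService(String16("my.demo.service"));
}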
⒈ IServiceManager
The IServiceManager interface mainly declares the ServiceManager service functions; normally both the client side and the service side provide implementations of these functions.
class IServiceManager : public IInterface
{
public:
    DECLARE_META_INTERFACE(ServiceManager);
    /* The macro expands to roughly the following:
     *   static const android::String16 descriptor;
     *   static android::sp<IServiceManager> asInterface(
     *           const android::sp<android::IBinder>& obj);
     *   virtual const android::String16& getInterfaceDescriptor() const;
     *   IServiceManager();
     *   virtual ~IServiceManager();
     */

    /**
     * Retrieve an existing service, blocking for a few seconds
     * if it doesn't yet exist.
     */
    virtual sp<IBinder> getService(const String16& name) const = 0;

    /**
     * Retrieve an existing service, non-blocking.
     */
    virtual sp<IBinder> checkService(const String16& name) const = 0;

    /**
     * Register a service.
     */
    virtual status_t addService(const String16& name,
                                const sp<IBinder>& service) = 0;

    /**
     * Return list of all existing services.
     */
    virtual Vector<String16> listServices() = 0;
    ……
};
#define DECLARE_META_INTERFACE(INTERFACE)                            \
    static const android::String16 descriptor;                       \
    static android::sp<I##INTERFACE> asInterface(                    \
            const android::sp<android::IBinder>& obj);               \
    virtual const android::String16& getInterfaceDescriptor() const; \
    I##INTERFACE();                                                   \
    virtual ~I##INTERFACE();                                          \
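DECLARE_META_INTERFACE is paired with IMPLEMENT_META_INTERFACE in the interface's .cpp file, which supplies the descriptor string and the asInterface() factory. A brief sketch for a hypothetical interface IDemoService (the interface name and the descriptor string are placeholders; the two macros are the real ones from IInterface.h):

// IDemoService.h (hypothetical interface)
class IDemoService : public IInterface
{
public:
    DECLARE_META_INTERFACE(DemoService);   // descriptor, asInterface(), ctor/dtor
    virtual status_t hello(const String16& who) = 0;
};

// IDemoService.cpp
// Defines IDemoService::descriptor and IDemoService::asInterface(); asInterface()
// either returns the local BnDemoService or wraps the IBinder in a BpDemoService.
IMPLEMENT_META_INTERFACE(DemoService, "demo.IDemoService");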
⒉ BpBinder
/* BpBinder inherits from IBinder and implements the client side of Binder communication. Its transact() function, for example, simply packages the request data and sends it to the server for processing; BpBinder is the proxy of the Binder communication mechanism. */
class BpBinder : public IBinder
{
public:
    BpBinder(int32_t handle);

    inline int32_t handle() const { return mHandle; }

    virtual const String16& getInterfaceDescriptor() const;
    virtual bool isBinderAlive() const;
    virtual status_t pingBinder();
    virtual status_t dump(int fd, const Vector<String16>& args);

    virtual status_t transact(uint32_t code,
                              const Parcel& data,
                              Parcel* reply,
                              uint32_t flags = 0);

    virtual status_t linkToDeath(const sp<DeathRecipient>& recipient,
                                 void* cookie = NULL,
                                 uint32_t flags = 0);
    virtual status_t unlinkToDeath(const wp<DeathRecipient>& recipient,
                                   void* cookie = NULL,
                                   uint32_t flags = 0,
                                   wp<DeathRecipient>* outRecipient = NULL);

    virtual void attachObject(const void* objectID,
                              void* object,
                              void* cleanupCookie,
                              object_cleanup_func func);
    virtual void* findObject(const void* objectID) const;
    virtual void detachObject(const void* objectID);

    virtual BpBinder* remoteBinder();

    status_t setConstantData(const void* data, size_t size);
    void sendObituary();
    ……

protected:
    virtual ~BpBinder();
    virtual void onFirstRef();
    virtual void onLastStrongRef(const void* id);
    virtual bool onIncStrongAttempted(uint32_t flags, const void* id);

private:
    const int32_t mHandle;
    ……
};
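The key method is transact(): it does no work of its own and simply hands the call, addressed by the handle of the remote object, to the calling thread's IPCThreadState. A slightly simplified sketch of the implementation in BpBinder.cpp (mAlive is a member elided from the excerpt above):

status_t BpBinder::transact(uint32_t code, const Parcel& data,
                            Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        // Forward the packaged request to the binder driver via the calling
        // thread's IPCThreadState, addressed by this proxy's handle.
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}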
⒊ ProcessState
ProcessState and IPCThreadState form the wrapper layer where IPC talks to the kernel. Everything the C++ side does for IPC starts from the ProcessState object. ProcessState is the foundation of Binder communication and must be created before any Binder communication takes place, on both the client and the server side. The object is stored in the global variable gProcess; each process has exactly one ProcessState, which is responsible for opening the Binder device driver, setting up the thread pool, and so on.
class ProcessState : public virtual RefBase
{
public:
    static sp<ProcessState> self(); // opens the /dev/binder device and maps it into memory

    static void setSingleProcess(bool singleProcess);

    void setContextObject(const sp<IBinder>& object);
    sp<IBinder> getContextObject(const sp<IBinder>& caller);
    void setContextObject(const sp<IBinder>& object,
                          const String16& name);
    sp<IBinder> getContextObject(const String16& name,
                                 const sp<IBinder>& caller);

    bool supportsProcesses() const;
    void startThreadPool();
    ……
    sp<IBinder> getStrongProxyForHandle(int32_t handle);
    wp<IBinder> getWeakProxyForHandle(int32_t handle);
    void expungeHandle(int32_t handle, IBinder* binder);
    ……
    void spawnPooledThread(bool isMain);

private:
    friend class IPCThreadState;

    ProcessState();
    ~ProcessState();
    ProcessState(const ProcessState& o);
    ProcessState& operator=(const ProcessState& o);

    struct handle_entry {
        IBinder* binder;
        RefBase::weakref_type* refs;
    };

    handle_entry* lookupHandleLocked(int32_t handle);

    int mDriverFD;
    void* mVMStart;
    ……
};
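A condensed sketch of how the singleton is created and how the constructor opens the driver (based on ProcessState.cpp; error handling and several members are omitted):

sp<ProcessState> ProcessState::self()
{
    // One ProcessState per process, kept in the global gProcess.
    if (gProcess != NULL) return gProcess;
    AutoMutex _l(gProcessMutex);
    if (gProcess == NULL) gProcess = new ProcessState;
    return gProcess;
}

ProcessState::ProcessState()
    : mDriverFD(open_driver())    // open /dev/binder and record the descriptor
    , mVMStart(MAP_FAILED)
{
    if (mDriverFD >= 0) {
        // Map the binder device into this process so the driver can deliver
        // transaction buffers without extra copies.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ,
                        MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
    }
}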
⒋ IPCThreadState
There is one IPCThreadState per thread; each instance is registered in the thread's local storage (the Linux thread context). IPCThreadState is responsible for reading and writing Binder data and provides the request-handling framework. When an IPCThreadState is constructed it fetches the process's ProcessState and stores it in its member mProcess, through which it can reach the Binder driver handle.
class IPCThreadState
{
public:
    static IPCThreadState* self();

    sp<ProcessState> process();
    ……
    void joinThreadPool(bool isMain = true);

    // Stop the local process.
    void stopProcess(bool immediate = true);

    status_t transact(int32_t handle,
                      uint32_t code, const Parcel& data,
                      Parcel* reply, uint32_t flags);
    ……
private:
    IPCThreadState();
    ~IPCThreadState();

    status_t talkWithDriver(bool doReceive = true);
    status_t writeTransactionData(int32_t cmd,
                                  uint32_t binderFlags,
                                  int32_t handle,
                                  uint32_t code,
                                  const Parcel& data,
                                  status_t* statusBuffer);
    status_t executeCommand(int32_t command);
    ……
    const sp<ProcessState> mProcess;
    const pid_t mMyThreadId;
    Vector<BBinder*> mPendingStrongDerefs;
    Vector<RefBase::weakref_type*> mPendingWeakDerefs;

    Parcel mIn;
    Parcel mOut;
    status_t mLastError;
    pid_t mCallingPid;
    uid_t mCallingUid;
    int32_t mStrictModePolicy;
    int32_t mLastTransactionBinderFlags;
};
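The per-thread instance lives in pthread thread-local storage. A condensed sketch of self() from IPCThreadState.cpp (creation of the TLS key and shutdown handling are omitted):

IPCThreadState* IPCThreadState::self()
{
    // gTLS is a pthread key created once per process; every thread keeps its
    // own IPCThreadState under that key.
    IPCThreadState* st = (IPCThreadState*)pthread_getspecific(gTLS);
    if (st) return st;
    // First use on this thread: the constructor registers the new instance
    // under gTLS and caches ProcessState::self() in mProcess.
    return new IPCThreadState;
}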
When ProcessState is constructed it opens /dev/binder and records the file descriptor in mDriverFD. ProcessState itself does not use this descriptor; the real user of the Binder device descriptor is IPCThreadState, and all Binder operations live in IPCThreadState:
(1) Reading and writing: talkWithDriver() wraps ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr).
(2) Request handling: executeCommand(...).
(3) The loop structure: joinThreadPool().
A simplified sketch of talkWithDriver() follows.
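The sketch below condenses IPCThreadState::talkWithDriver(): it fills a binder_write_read structure from the outgoing Parcel mOut and the incoming Parcel mIn and then issues the BINDER_WRITE_READ ioctl. Buffer bookkeeping, retry logic and error handling are heavily simplified here.

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;

    // Commands queued by writeTransactionData() sit in mOut and are handed
    // to the driver as the write buffer.
    bwr.write_size   = mOut.dataSize();
    bwr.write_buffer = (long unsigned int)mOut.data();

    // If the caller also wants to receive, mIn becomes the read buffer the
    // driver fills with BR_* commands (BR_TRANSACTION, BR_REPLY, ...).
    if (doReceive) {
        bwr.read_size   = mIn.dataCapacity();
        bwr.read_buffer = (long unsigned int)mIn.data();
    } else {
        bwr.read_size   = 0;
        bwr.read_buffer = 0;
    }
    bwr.write_consumed = 0;
    bwr.read_consumed  = 0;

    // A single ioctl both sends the pending commands and blocks for incoming ones.
    if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) < 0)
        return -errno;

    // The Parcels are then advanced/reset according to write_consumed and
    // read_consumed (bookkeeping omitted in this sketch).
    return NO_ERROR;
}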
⒌ defaultServiceManager
Below is the implementation of defaultServiceManager; the function ultimately returns a remote interface to the ServiceManager.
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        if (gDefaultServiceManager == NULL) {
            /* wrap the returned BpBinder in a BpServiceManager */
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
        }
    }
    return gDefaultServiceManager;
}
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
    if (supportsProcesses()) {
        /* get the BpBinder for the service identified by the handle (0 = Service Manager) */
        return getStrongProxyForHandle(0);
    } else {
        return getContextObject(String16("default"), caller);
    }
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);
    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we
        // are unable to acquire a weak reference on this current one. See comment
        // in getWeakProxyForHandle() for more info about this.
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            // This little bit of nastyness is to allow us to add a primary
            // reference to the remote proxy when this team doesn't have one
            // but another team is sending the handle to us.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }

    return result;
}
From the Android IPC architecture it follows that implementing a concrete service means implementing a BnXXXService and a BpXXXService. Using the facilities provided by the Service Manager, a service registers itself through the addService interface, and whenever its functionality is needed the remote service interface can be obtained through getService.
The Android IPC architecture of the MediaService service is shown in the figure below:
The initialization flow of the MediaService server is as follows:
int main(int argc, char** argv)
{
    /* create the ProcessState object, which opens the binder device */
    sp<ProcessState> proc(ProcessState::self());
    /* obtain the remote ServiceManager interface */
    sp<IServiceManager> sm = defaultServiceManager();
    LOGI("ServiceManager: %p", sm.get());
    AudioFlinger::instantiate();
    /* register the MediaPlayerService service */
    MediaPlayerService::instantiate();
    CameraService::instantiate();
    AudioPolicyService::instantiate();
    ProcessState::self()->startThreadPool();   /* start a pooled thread running the service loop */
    IPCThreadState::self()->joinThreadPool();  /* run the service loop in the main thread as well */
}
void IPCThreadState::joinThreadPool(bool isMain)
{
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    // This thread may have been spawned by a thread that was in the background
    // scheduling group, so first we will make sure it is in the default/foreground
    // one to avoid performing an initial transaction in the background.
    androidSetThreadSchedulingGroup(mMyThreadId, ANDROID_TGROUP_DEFAULT);

    status_t result;
    do {
        int32_t cmd;

        // When we've cleared the incoming command queue, process any pending derefs
        // (if the command queue is empty, flush all outstanding deref requests).
        if (mIn.dataPosition() >= mIn.dataSize()) {
            size_t numPending = mPendingWeakDerefs.size();
            if (numPending > 0) {
                for (size_t i = 0; i < numPending; i++) {
                    RefBase::weakref_type* refs = mPendingWeakDerefs[i];
                    refs->decWeak(mProcess.get());
                }
                mPendingWeakDerefs.clear();
            }

            numPending = mPendingStrongDerefs.size();
            if (numPending > 0) {
                for (size_t i = 0; i < numPending; i++) {
                    BBinder* obj = mPendingStrongDerefs[i];
                    obj->decStrong(mProcess.get());
                }
                mPendingStrongDerefs.clear();
            }
        }

        // now get the next command to be processed, waiting if necessary
        result = talkWithDriver();  /* read data from the binder driver to fetch the next command */
        if (result >= NO_ERROR) {
            size_t IN = mIn.dataAvail();
            if (IN < sizeof(int32_t)) continue;
            cmd = mIn.readInt32();
            IF_LOG_COMMANDS() {
                alog << "Processing top-level Command: "
                     << getReturnString(cmd) << endl;
            }
            /* dispatch on the command; for a BR_TRANSACTION the data read from the
               binder identifies the target BBinder object, so the correct service is invoked */
            result = executeCommand(cmd);
        }

        // After executing the command, ensure that the thread is returned to the
        // default cgroup before rejoining the pool. The driver takes care of
        // restoring the priority, but doesn't do anything with cgroups so we
        // need to take care of that here in userspace. Note that we do make
        // sure to go in the foreground after executing a transaction, but
        // there are other callbacks into user code that could have changed
        // our group so we want to make absolutely sure it is put back.
        androidSetThreadSchedulingGroup(mMyThreadId, ANDROID_TGROUP_DEFAULT);

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if (result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);  /* the service loop */

    LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p\n",
        (void*)pthread_self(), getpid(), (void*)result);

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
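For completeness, a heavily condensed sketch of what executeCommand() does when the driver delivers a BR_TRANSACTION (based on IPCThreadState.cpp; Parcel setup around the transaction buffer, reply delivery, reference counting and the other BR_* commands are omitted). The transaction record carries the cookie of the target BBinder, so calling its transact() is what finally reaches the service's onTransact():

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    status_t result = NO_ERROR;

    switch (cmd) {
    case BR_TRANSACTION: {
        binder_transaction_data tr;
        result = mIn.read(&tr, sizeof(tr));   // the driver-supplied transaction record
        if (result != NO_ERROR) break;

        Parcel buffer;   // wraps the transaction data buffer (setup omitted)
        Parcel reply;

        if (tr.target.ptr) {
            // tr.cookie is the BBinder registered for this binder node; its
            // transact() dispatches to the service's onTransact().
            sp<BBinder> b((BBinder*)tr.cookie);
            b->transact(tr.code, buffer, &reply, tr.flags);
        }

        // For a non-oneway call the reply Parcel is then sent back to the
        // caller through the driver (sendReply(), omitted here).
        break;
    }
    default:
        // BR_REPLY, BR_DEAD_BINDER, reference-count commands, ... (omitted)
        break;
    }
    return result;
}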