0.1 The components (classes) a complete native service needs
// Define the capability interface
class IBuddy: public IInterface {
public:
enum {
BUDDY_GET_PID = IBinder::FIRST_CALL_TRANSACTION,
};
virtual int32_t getPid() = 0;
DECLARE_META_INTERFACE(Buddy);
};
// Define the interface proxy, used by a client process to call the interface
class BpBuddy : public BpInterface<IBuddy> {
public:
// Note the parameter type of the constructor
BpBuddy(const sp<IBinder>& impl) : BpInterface<IBuddy>(impl) { }
virtual int32_t getPid() {
Parcel data, reply;
// Write RPC headers. (previously just the interface token)
data.writeInterfaceToken(IBuddy::getInterfaceDescriptor());
remote()->transact(BUDDY_GET_PID, data, &reply);
int32_t res;
status_t status = reply.readInt32(&res);
return status == NO_ERROR ? res : -1;
}
};
// IMPLEMENT_META_INTERFACE references BpBuddy, so it comes after the proxy class
IMPLEMENT_META_INTERFACE(Buddy, "Buddy");
// The class that implements the interface on the server side
class BnBuddy : public BnInterface<IBuddy> {
public:
virtual status_t onTransact(uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags = 0);
};
status_t BnBuddy::onTransact(uint32_t code, const Parcel &data,
Parcel* reply,uint32_t flags) {
// Check that the interface descriptor matches
CHECK_INTERFACE(IBuddy, data, reply);
switch(code) {
case BUDDY_GET_PID: {
int32_t pid = getPid(); // call the local getPid() implementation
reply->writeInt32(pid);
return NO_ERROR;
}
break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
class BuddyService : public BnBuddy {
public:
virtual int32_t getPid() { // implementation: system call that returns the process ID
return getpid();
}
};
0.2 Registering the service with the system
int main(int argc, char **argv)
{
// register the service with the service manager
defaultServiceManager()->addService(String16("BuddyService"), new BuddyService());
// start listening for incoming transactions
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
return 0;
}
RTFSC: let's analyze the source code step by step.
Step 1: defaultServiceManager() is called first; it returns an IServiceManager strong pointer.
sp<IServiceManager> defaultServiceManager() {
// gDefaultServiceManager is defined in /android/frameworks/native/libs/binder/Static.cpp
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
// lock for thread safety
AutoMutex _l(gDefaultServiceManagerLock);
while (gDefaultServiceManager == NULL) {
// This is the key line: what exactly happens here?
gDefaultServiceManager = interface_cast<IServiceManager>(
ProcessState::self()->getContextObject(NULL));
if (gDefaultServiceManager == NULL)
sleep(1);
}
}
return gDefaultServiceManager;
}
// This is a singleton; gProcess is also defined in /android/frameworks/native/libs/binder/Static.cpp
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState;
return gProcess;
}
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
// look up the handle_entry for this handle
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
// here we create and return a BpBinder whose handle == 0
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}
Clearly, this directly calls IServiceManager::asInterface(). So where is asInterface() declared and defined? Let's look at the definition of the IServiceManager class.
// Declaration of IServiceManager
class IServiceManager : public IInterface
{
public:
DECLARE_META_INTERFACE(ServiceManager);
// ... unrelated code omitted
};
// Definition
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
// DECLARE_META_INTERFACE defines a static descriptor variable,
// declares a static asInterface() function,
// and declares a member function that returns the interface descriptor
#define DECLARE_META_INTERFACE(INTERFACE) \
static const android::String16 descriptor; \
static android::sp<I##INTERFACE> asInterface( \
const android::sp<android::IBinder>& obj); \
virtual const android::String16& getInterfaceDescriptor() const; \
I##INTERFACE(); \
virtual ~I##INTERFACE();
// IMPLEMENT_META_INTERFACE initializes the static descriptor variable,
// implements getInterfaceDescriptor(), which simply returns that descriptor,
// and implements asInterface(). For IServiceManager, the obj passed to
// asInterface() is a BpBinder, so queryLocalInterface() returns NULL and a
// Bp##INTERFACE proxy is created around it.
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME) \
const android::String16 I##INTERFACE::descriptor(NAME); \
const android::String16& \
I##INTERFACE::getInterfaceDescriptor() const { \
return I##INTERFACE::descriptor; \
} \
android::sp<I##INTERFACE> I##INTERFACE::asInterface( \
const android::sp<android::IBinder>& obj) \
{ \
android::sp<I##INTERFACE> intr; \
if (obj != NULL) { \
intr = static_cast<I##INTERFACE*>( \
obj->queryLocalInterface( \
I##INTERFACE::descriptor).get()); \
if (intr == NULL) { \
intr = new Bp##INTERFACE(obj); \
} \
} \
return intr; \
} \
I##INTERFACE::I##INTERFACE() { } \
I##INTERFACE::~I##INTERFACE() { }
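For IServiceManager, IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager") therefore expands roughly to the following (getInterfaceDescriptor() and the constructor/destructor are omitted):
// rough expansion of the macro for IServiceManager
const android::String16 IServiceManager::descriptor("android.os.IServiceManager");

android::sp<IServiceManager> IServiceManager::asInterface(
        const android::sp<android::IBinder>& obj)
{
    android::sp<IServiceManager> intr;
    if (obj != NULL) {
        // obj is the BpBinder(handle == 0) returned by getContextObject(), so
        // queryLocalInterface() finds no local implementation and returns NULL
        intr = static_cast<IServiceManager*>(
                obj->queryLocalInterface(IServiceManager::descriptor).get());
        if (intr == NULL) {
            // wrap the BpBinder in a BpServiceManager proxy
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}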
#define CHECK_INTERFACE(interface, data, reply) \
if (!data.checkInterface(this)) { return PERMISSION_DENIED; }
// android/frameworks/native/libs/binder/IServiceManager.cpp
virtual status_t addService(const String16& name, const sp<IBinder>& service, bool allowIsolated)
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
data.writeStrongBinder(service);
data.writeInt32(allowIsolated ? 1 : 0);
// calls BpBinder::transact
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
Step 2: IPCThreadState::self()->joinThreadPool();
void IPCThreadState::joinThreadPool(bool isMain = true)
{
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
//...............
status_t result;
// loop, fetching and executing incoming commands
do {
processPendingDerefs();
// now get the next command to be processed, waiting if necessary
result = getAndExecuteCommand();
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
abort();
}
// Let this thread exit the thread pool if it is no longer
// needed and it is not the main process thread.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
mOut.writeInt32(BC_EXIT_LOOPER);
talkWithDriver(false);
}
status_t IPCThreadState::getAndExecuteCommand()
{
status_t result;
int32_t cmd;
result = talkWithDriver();
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) return result;
cmd = mIn.readInt32();
result = executeCommand(cmd);
//...............
}
return result;
}
Step 3: ProcessState::self()->startThreadPool();
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
spawnPooledThread(true);
}
}
String8 ProcessState::makeBinderThreadName() {
int32_t s = android_atomic_add(1, &mThreadPoolSeq);
String8 name;
name.appendFormat("Binder_%X", s);
return name;
}
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
String8 name = makeBinderThreadName();
ALOGV("Spawning new pooled thread, name=%s\n", name.string());
sp<Thread> t = new PoolThread(isMain);
t->run(name.string());
}
}
class PoolThread : public Thread
{
public:
PoolThread(bool isMain)
: mIsMain(isMain)
{
}
protected:
virtual bool threadLoop()
{
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
const bool mIsMain;
};
0.3 Getting a service through the service manager
Let's look at the following piece of code:
sp<IServiceManager> sm = defaultServiceManager();
// find the buddy service and call its function
sp<IBinder> binder = sm->getService(String16("BuddyService"));
sp<IBuddy> mBuddy = interface_cast<IBuddy>(binder);
The code above is very similar to how defaultServiceManager() obtains IServiceManager, so we will not repeat that analysis here; the implementations of getService and addService are covered below.
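With the IBuddy proxy in hand, the client calls the interface as if it were a local object. Below is a minimal client sketch (hypothetical code: it assumes the IBuddy/BpBuddy definitions from section 0.1 are compiled into the client and that BuddyService has already been registered):
#include <binder/IServiceManager.h>
#include <binder/IInterface.h>
#include <utils/String16.h>
#include <stdio.h>
using namespace android;

int main()
{
    // ask the context manager (handle 0) for the IServiceManager proxy
    sp<IServiceManager> sm = defaultServiceManager();
    // look the service up under the name it was registered with
    sp<IBinder> binder = sm->getService(String16("BuddyService"));
    if (binder == NULL) return -1;
    // wrap the returned BpBinder in a BpBuddy proxy
    sp<IBuddy> buddy = interface_cast<IBuddy>(binder);
    // BpBuddy::getPid() writes a Parcel, transacts BUDDY_GET_PID through the
    // driver, and reads the remote service's pid out of the reply Parcel
    printf("BuddyService pid = %d\n", buddy->getPid());
    return 0;
}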
In the earlier native service example we saw that adding a user-defined native service to the system, and getting it back from the system, both revolve around one class: IServiceManager. IServiceManager is the foundation of Binder communication at the application layer. First, take a look at the IServiceManager class diagram; some details are not drawn there, see the native service class diagram.
virtual status_t BpServiceManager::addService(const String16& name, const sp<IBinder>& service, bool allowIsolated = false)
{
// Parcel is a key data structure for Binder IPC; it is covered in detail later
Parcel data, reply;
// write the interface descriptor
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
// write the service name
data.writeString16(name);
// write the service instance (a strong binder); the later IPC all depends on this
data.writeStrongBinder(service);
// write the allowIsolated flag
data.writeInt32(allowIsolated ? 1 : 0);
// call BpBinder's transact(); the transaction codes are defined as: enum {
// GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
// CHECK_SERVICE_TRANSACTION,
// ADD_SERVICE_TRANSACTION,
// LIST_SERVICES_TRANSACTION,
//};
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
status_t BpBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
{
// Once a binder has died, it will never come back to life.
// Initialized in the constructor: mAlive == 1, mHandle == 0 for the service manager; flags defaults to 0
if (mAlive) {
// this goes through IPCThreadState
status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
Now let's look at the implementation in IPCThreadState:
status_t IPCThreadState::transact(int32_t handle, uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
// these transaction_flags constants are defined in /kernel/drivers/staging/android/binder.h
//enum transaction_flags {
//TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
//TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
//TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
//TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
//};
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
// write the request into mOut; BC_TRANSACTION is also defined in the kernel source: BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data)
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
if (reply) { // reply != NULL;
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
The writeTransactionData function packs the request into a binder_transaction_data and appends it to the output Parcel mOut.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags, int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.handle = handle; // 0
tr.code = code; // ADD_SERVICE_TRANSACTION
tr.flags = binderFlags; // flags == 0|TF_ACCEPT_FDS;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) { // statusBuffer == NULL on this call path
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = statusBuffer;
tr.offsets_size = 0;
tr.data.ptr.offsets = NULL;
} else {
return (mLastError = err);
}
// BC_TRANSACTION == cmd
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
The packed structure, binder_transaction_data, is defined in /kernel/drivers/staging/android/binder.h and is one of the key data structures of Binder IPC. Later we will also look at binder_write_read and flat_binder_object, which are defined in the same kernel header.
struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
size_t handle; /* target descriptor of command transaction */
void *ptr; /* target descriptor of return transaction */
} target;
void *cookie; /* target object cookie */
unsigned int code; /* transaction command */
/* General information about the transaction. */
unsigned int flags;
pid_t sender_pid;
uid_t sender_euid;
size_t data_size; /* number of bytes of data */
size_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
const void *buffer;
/* offsets from buffer to flat_binder_object structs */
const void *offsets;
} ptr;
uint8_t buf[8];
} data;
};
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult=NULL)
{
int32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
// after talkWithDriver() returns, the reply data is available in mIn
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
// read the response command type
cmd = mIn.readInt32();
switch (cmd) {
//........................
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
// fill the reply Parcel with the response data
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer), tr.data_size,
reinterpret_cast<const size_t*>(tr.data.ptr.offsets), tr.offsets_size/sizeof(size_t),
freeBuffer, this);
} else {
//.......................
}
} else {
//........................
continue;
}
}
goto finish;
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
status_t IPCThreadState::talkWithDriver(bool doReceive = true)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
// this structure is defined in the kernel: /kernel/drivers/staging/android/binder.h
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
// mOut holds what writeTransactionData wrote
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (long unsigned int)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (long unsigned int)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
// write the data and block waiting to read the reply
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
// mDriverFD is opened by open_driver() when ProcessState is constructed
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
} while (err == -EINTR);
if (err >= NO_ERROR) {
//reset mOut
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < (ssize_t)mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
// the driver wrote the reply into mIn's buffer; set its size and rewind the position
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
The data structure actually used when talking to the binder driver:
/*
* On 64-bit platforms where user code may run in 32-bits the driver must
* translate the buffer (and local binder) addresses appropriately.
*/
// The buffer fields are plain integers that carry pointer values (memory addresses):
// a crude but efficient way to hand read/write buffers to the driver.
struct binder_write_read {
signed long write_size; /* bytes to write */
signed long write_consumed; /* bytes consumed by driver */
unsigned long write_buffer;
signed long read_size; /* bytes to read */
signed long read_consumed; /* bytes consumed by driver */
unsigned long read_buffer;
};
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch (cmd) {
//...... some code omitted
case BR_TRANSACTION:
{
binder_transaction_data tr;
// read the binder_transaction_data delivered by the binder driver
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer), tr.data_size,
reinterpret_cast<const size_t*>(tr.data.ptr.offsets), tr.offsets_size/sizeof(size_t),
freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
//..................
Parcel reply;
if (tr.target.ptr) {
// BBinder::transact ==> BnBuddy::onTransact, i.e. our BuddyService
sp b((BBinder*)tr.cookie);
const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
if (error < NO_ERROR) reply.setError(error);
} else {
const status_t error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
if (error < NO_ERROR) reply.setError(error);
}
if ((tr.flags & TF_ONE_WAY) == 0) {
// the server sends the reply back
sendReply(reply, 0);
} else {
}
mCallingPid = origPid;
mCallingUid = origUid;
}
break;
//...... some code omitted
default:
result = UNKNOWN_ERROR;
break;
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
status_t err;
status_t statusBuffer;
err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
if (err < NO_ERROR) return err;
return waitForResponse(NULL, NULL);
}
getService: fetching a service from the service manager.
virtual sp<IBinder> BpServiceManager::getService(const String16& name) const
{
unsigned n;
for (n = 0; n < 5; n++){
sp<IBinder> svc = checkService(name);
if (svc != NULL) return svc;
ALOGI("Waiting for service %s...\n", String8(name).string());
sleep(1);
}
return NULL;
}
virtual sp<IBinder> checkService(const String16& name) const
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
return reply.readStrongBinder();
}
getService() calls checkService(), which again goes through remote()->transact(); we will not repeat that path. What we do want to look at here is reply.readStrongBinder().
1.2 Parcel: an important data structure in client/server communication
Parcel defines many read/write interfaces; for brevity we pick a few frequently used, important ones to analyze.
status_t Parcel::writeInterfaceToken(const String16& interface)
{
writeInt32(IPCThreadState::self()->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
// currently the interface identification token is just its name as a string
return writeString16(interface);
}
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& proc, const sp<IBinder>& binder, Parcel* out)
{
flat_binder_object obj;
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
if (binder != NULL) {
// For a Bn (local) object, localBinder() returns this; for a BpBinder, remoteBinder() returns this.
IBinder *local = binder->localBinder();
if (!local) {
BpBinder *proxy = binder->remoteBinder();
if (proxy == NULL) {
ALOGE("null proxy");
}
const int32_t handle = proxy ? proxy->handle() : 0;
obj.type = BINDER_TYPE_HANDLE;
obj.handle = handle;
obj.cookie = NULL;
} else {
obj.type = BINDER_TYPE_BINDER;
obj.binder = local->getWeakRefs();
obj.cookie = local;
}
} else {
obj.type = BINDER_TYPE_BINDER;
obj.binder = NULL;
obj.cookie = NULL;
}
return finish_flatten_binder(binder, obj, out);
}
inline static status_t finish_flatten_binder(const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)
{
return out->writeObject(flat, false);
}
sp<IBinder> Parcel::readStrongBinder() const
{
sp val;
unflatten_binder(ProcessState::self(), *this, &val);
return val;
}
status_t unflatten_binder(const sp<ProcessState>& proc, const Parcel& in, sp<IBinder>* out)
{
const flat_binder_object* flat = in.readObject(false);
if (flat) {
switch (flat->type) {
case BINDER_TYPE_BINDER:
*out = static_cast<IBinder*>(flat->cookie);
return finish_unflatten_binder(NULL, *flat, in);
case BINDER_TYPE_HANDLE:
*out = proc->getStrongProxyForHandle(flat->handle);
return finish_unflatten_binder(
static_cast<BpBinder*>(out->get()), *flat, in);
}
}
return BAD_TYPE;
}
1.3 The service manager implementation side
//android/frameworks/native/cmds/servicemanager/service_manager.c
int main(int argc, char **argv)
{
// struct binder_state
//{
//int fd;
//void *mapped;
//unsigned mapsize;
//};
struct binder_state *bs;
///* the one magic object */
//#define BINDER_SERVICE_MANAGER ((void*) 0)
void *svcmgr = BINDER_SERVICE_MANAGER;
bs = binder_open(128*1024);
// become the binder context manager (creates the root binder_node in the driver)
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
svcmgr_handle = svcmgr;
binder_loop(bs, svcmgr_handler);
return 0;
}
struct binder_state *binder_open(unsigned mapsize)
{
struct binder_state *bs;
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return 0;
}
// open the binder driver and get its fd
bs->fd = open("/dev/binder", O_RDWR);
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open device (%s)\n",
strerror(errno));
goto fail_open;
}
// size of the memory mapping
bs->mapsize = mapsize;
// map the driver's transaction buffer into this process
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
return bs;
//..............
}
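binder_become_context_manager(), called from main() above, is a single ioctl telling the driver that this process owns handle 0; a sketch matching the servicemanager binder.c of this era:
int binder_become_context_manager(struct binder_state *bs)
{
    /* register this process as the context manager (binder handle 0) */
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}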
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
// a 32 * 4 = 128 byte read buffer
unsigned readbuf[32];
// nothing to write, we only read, so the write fields are all 0
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
// tell the driver to enter the looper and start reading
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(unsigned));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (unsigned) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
// parse the data that was read
res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
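binder_write(), used above to send BC_ENTER_LOOPER, is just a write-only BINDER_WRITE_READ; a sketch of its shape, based on the servicemanager binder.c of the same era:
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    /* only the write half is filled in; read_size == 0 means "do not block reading" */
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}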
int binder_parse(struct binder_state *bs, struct binder_io *bio, uint32_t *ptr, uint32_t size, binder_handler func)
{
int r = 1;
// size is in bytes
uint32_t *end = ptr + (size / 4);
while (ptr < end) {
// the sender wrote: mOut.writeInt32(cmd); then mOut.write(&tr, sizeof(tr));
uint32_t cmd = *ptr++;
// after the ++, ptr points at a binder_transaction_data
switch(cmd) {
//...................................
case BR_TRANSACTION: {
// binder_txn and binder_transaction_data are distinct types but share the same memory layout
struct binder_txn *txn = (void *) ptr;
if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4]; // 256 bytes, i.e. 64 uint32_t words
//struct binder_io
//{
// char *data; /* pointer to read/write from */
// uint32_t *offs; /* array of offsets */
// uint32_t data_avail; /* bytes available in data buffer */
// uint32_t offs_avail; /* entries available in offsets array */
//
// char *data0; /* start of data buffer */
// uint32_t *offs0; /* start of offsets buffer */
// uint32_t flags;
// uint32_t unused;
//};
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
// call back svcmgr_handler
res = func(bs, txn, &msg, &reply);
// send reply ==>binder_write==>ioctl.
binder_send_reply(bs, &reply, txn->data, res);
}
ptr += sizeof(*txn) / sizeof(uint32_t);
break;
}
case BR_REPLY: {
struct binder_txn *txn = (void*) ptr;
if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
ALOGE("parse: reply too small!\n");
return -1;
}
binder_dump_txn(txn);
if (bio) {
bio_init_from_txn(bio, txn);
bio = 0;
} else {
/* todo FREE BUFFER */
}
ptr += (sizeof(*txn) / sizeof(uint32_t));
r = 0;
break;
}
//............................
default:
ALOGE("parse: OOPS %d\n", cmd);
return -1;
}
}
return r;
}
int svcmgr_handler(struct binder_state *bs,
struct binder_txn *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
unsigned len;
void *ptr;
uint32_t strict_policy;
int allow_isolated;
if (txn->target != svcmgr_handle)
return -1;
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
// s is the descriptor of the service manager interface; svcmgr_id =
//uint16_t svcmgr_id[] = {
//'a','n','d','r','o','i','d','.','o','s','.',
//'I','S','e','r','v','i','c','e','M','a','n','a','g','e','r'
//};
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s));
return -1;
}
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
// the name of the service to look up
s = bio_get_string16(msg, &len);
ptr = do_find_service(bs, s, len, txn->sender_euid);
if (!ptr)
break;
bio_put_ref(reply, ptr);
return 0;
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
ptr = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, ptr, txn->sender_euid, allow_isolated))
return -1;
break;
case SVC_MGR_LIST_SERVICES: {
unsigned n = bio_get_uint32(msg);
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
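The registry behind do_find_service()/do_add_service() is just the singly linked svclist used above; a simplified sketch of the node type and the lookup, matching the service_manager.c of this era:
struct svcinfo
{
    struct svcinfo *next;
    void *ptr;                  /* binder handle/reference of the service */
    struct binder_death death;  /* death-notification record */
    int allow_isolated;
    unsigned len;
    uint16_t name[0];           /* UTF-16 service name */
};

struct svcinfo *svclist = 0;    /* global head of the registry */

struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
    struct svcinfo *si;
    /* walk svclist and compare the UTF-16 name */
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return 0;
}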
1.4 The complete IPC process