Binder Source Code Walkthrough 01 (Starting the First Binder)

In this first part of the binder series we start from the user-space service_manager.c and look at how the first binder is started and run.

service_manager.c :: main

int main(int argc, char** argv)
{
    struct binder_state *bs; 
    union selinux_callback cb;
    char *driver;

    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder"; //拿到binder 驱动文件位置
    }

    bs = binder_open(driver, 128*1024); // open the driver and map 128 KiB
     ...
}

binder.c :: binder_open

struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs)); // allocate memory for the binder state
    ...
    bs->fd = open(driver, O_RDWR | O_CLOEXEC); // open the binder driver through its open() entry point and keep the fd in bs
    ...
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) || // query the driver's protocol version via ioctl
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
            ...
    }

    bs->mapsize = mapsize; // record the size of the mapping, i.e. the 128*1024 requested above
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0); // call the driver's mmap to map a region shared with the kernel and keep the pointer in bs
    ...
    return bs;
  ...
}
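To make the sequence of system calls concrete, here is a minimal standalone sketch of the same open → BINDER_VERSION → mmap pattern. This is an illustrative rewrite, not the servicemanager code; the constants come from the kernel UAPI header <linux/android/binder.h> and the helper name is made up:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android/binder.h>

/* hypothetical helper: open the driver, check its protocol version, map a window */
int open_binder_example(const char *driver, size_t mapsize, void **mapped)
{
    struct binder_version vers;

    int fd = open(driver, O_RDWR | O_CLOEXEC); /* open the device node */
    if (fd < 0)
        return -1;

    /* ask the driver for its protocol version and compare with the header we built against */
    if (ioctl(fd, BINDER_VERSION, &vers) == -1 ||
        vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
        close(fd);
        return -1;
    }

    /* read-only, process-private mapping backed by the binder driver */
    *mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, fd, 0);
    if (*mapped == MAP_FAILED) {
        close(fd);
        return -1;
    }
    return fd;
}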

Back to service_manager.c

service_manager.c :: main

int main(int argc, char** argv)
{
    ...
    bs = binder_open(driver, 128*1024);
    ...
    if (binder_become_context_manager(bs)) { // this also goes down to the driver via ioctl, registering this binder as the context manager
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    ... // selinux-related code omitted for now
    binder_loop(bs, svcmgr_handler); // enter binder_loop and start looping
    return 0;
}
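binder_become_context_manager itself is only a thin wrapper. In older versions of binder.c it is essentially a single ioctl (newer versions first try BINDER_SET_CONTEXT_MGR_EXT and fall back); a simplified sketch:

int binder_become_context_manager(struct binder_state *bs)
{
    /* tell the driver that this process is the context manager (handle 0) */
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}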

Entering the loop

binder.c :: binder_loop

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr; // declare a binder_write_read struct, used to carry data to and from the driver
    uint32_t readbuf[32]; // a small working buffer (32 * 4 = 128 bytes)

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER; // put the BC_ENTER_LOOPER command into the buffer
    binder_write(bs, readbuf, sizeof(uint32_t)); // call binder_write to tell the driver this thread is entering the loop
  ...
}
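For reference, binder_write_read is the carrier struct for the BINDER_WRITE_READ ioctl, declared in the kernel UAPI header roughly as follows; in a single call the driver consumes write_buffer and fills read_buffer:

struct binder_write_read {
    binder_size_t    write_size;     /* bytes to write */
    binder_size_t    write_consumed; /* bytes consumed by the driver */
    binder_uintptr_t write_buffer;
    binder_size_t    read_size;      /* bytes available for reading */
    binder_size_t    read_consumed;  /* bytes the driver actually filled in */
    binder_uintptr_t read_buffer;
};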

binder.c :: binder_write

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0; // fill in the binder_write_read struct: write-only, nothing to read back
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // issue the driver's BINDER_WRITE_READ ioctl to push the data into the binder we just opened
    ...
    return res;
}

With the driver notified, user space now enters the loop proper.

binder.c :: binder_loop

void binder_loop(struct binder_state *bs, binder_handler func)
{
    ...
    for (;;) { // loop forever
        bwr.read_size = sizeof(readbuf); // the buffer used for writing earlier is free again, so reuse it to read the same amount of data back from the driver
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf; // receive into readbuf

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // try to read data from the binder driver in kernel space
        ...
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func); // parse the data read from kernel space and decide whether to stay in the loop
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

Finally, let's see how service_manager parses the data it read from kernel space.

binder.c :: binder_parse

This function parses the information read from the kernel. There are several cases; let's first get an overview.

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) { // walk through the whole chunk returned by the driver
        uint32_t cmd = *(uint32_t *) ptr; // the first 4 bytes of each entry are the command
        ptr += sizeof(uint32_t); // advance and keep reading
...
        switch(cmd) { // dispatch on the command sent up by the driver
        ...
        }
    }
    return r;
}

Now let's go through the individual commands.

BR_NOOP | BR_TRANSACTION_COMPLETE

case BR_NOOP:
     break;
case BR_TRANSACTION_COMPLETE:
     break;

In these two cases the default value 1 is returned; as the code in binder_loop shows, the loop then keeps reading from the driver and parsing.

BR_INCREFS | BR_ACQUIRE | BR_RELEASE | BR_DECREFS

case BR_INCREFS:
case BR_ACQUIRE:
case BR_RELEASE:
case BR_DECREFS:
     ptr += sizeof(struct binder_ptr_cookie);
     break;

The pointer is advanced past the binder_ptr_cookie that follows the command, and binder_loop keeps looping and reading.
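binder_ptr_cookie, as declared in the binder UAPI header, is just a pointer/cookie pair, so skipping it means advancing past two binder_uintptr_t values:

struct binder_ptr_cookie {
    binder_uintptr_t ptr;    /* the binder object the reference-count change applies to */
    binder_uintptr_t cookie; /* the cookie registered along with it */
};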

BR_TRANSACTION

case BR_TRANSACTION: {
    struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr; // the binder_transaction_data handed up by the driver
    if ((end - ptr) < sizeof(*txn)) { // make sure the remaining bytes can hold a full binder_transaction_data
        ALOGE("parse: txn too small!\n");
        return -1;
    }
    binder_dump_txn(txn); // log the transaction
    if (func) { // if the caller of binder_parse supplied a handler callback
        ...
    }
    ptr += sizeof(*txn); // advance the pointer; the loop keeps reading
    break;
}

The comments above are brief, so let's look at this case more carefully.

First comes the binder_transaction_data struct, which holds the data of a binder transaction:

binder_transaction_data
struct binder_transaction_data {
    union {
        __u32   handle; /* target descriptor of the command transaction */
        binder_uintptr_t ptr; /* target descriptor of the return transaction */
    } target;
    binder_uintptr_t    cookie; /* target object cookie */
    __u32       code;       /* transaction command */

    /* General information about the transaction. */
    __u32           flags;
    pid_t       sender_pid;     /* pid of the sending process */
    uid_t       sender_euid;    /* euid of the sending process */
    binder_size_t   data_size;  /* size of the data being sent */
    binder_size_t   offsets_size;   /* size of the offsets array */

    /* If this transaction is inline, the data immediately
     * follows here; otherwise, it ends with a pointer to
     * the data buffer.
     */
    union {
        struct {
            /* transaction data */
            binder_uintptr_t    buffer;
            /* offsets from buffer to flat_binder_object structs */
            binder_uintptr_t    offsets;
        } ptr;
        __u8    buf[8];
    } data;
};
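To see how data_size and offsets_size work together, here is a hedged sketch (not servicemanager code) of how a receiver could walk the offsets array to find the flat_binder_object entries embedded in the data buffer; depending on the kernel headers the type field is obj->hdr.type or obj->type:

#include <stddef.h>
#include <stdint.h>
#include <linux/android/binder.h>

static void walk_binder_objects(const struct binder_transaction_data *txn)
{
    const char *data = (const char *)(uintptr_t)txn->data.ptr.buffer;
    const binder_size_t *offs = (const binder_size_t *)(uintptr_t)txn->data.ptr.offsets;
    size_t count = txn->offsets_size / sizeof(binder_size_t);

    for (size_t i = 0; i < count; i++) {
        /* each entry of the offsets array is a byte offset into the data buffer,
         * pointing at a flat_binder_object (a local binder object or a remote handle) */
        const struct flat_binder_object *obj =
            (const struct flat_binder_object *)(data + offs[i]);
        (void)obj; /* inspect its type here, e.g. BINDER_TYPE_BINDER or BINDER_TYPE_HANDLE */
    }
}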

Once the transaction and its payload have been read from the driver, the func callback supplied by the caller of binder_parse handles it:

unsigned rdata[256/4];   // 256 bytes of stack space backing the reply
struct binder_io msg;    // wraps the incoming transaction data
struct binder_io reply;  // collects the data for the outgoing reply
int res;

bio_init(&reply, rdata, sizeof(rdata), 4);   // reply: empty binder_io over rdata, with room for 4 offsets
bio_init_from_txn(&msg, txn);                // msg: binder_io pointing straight at the transaction buffers
res = func(bs, txn, &msg, &reply);           // hand off to the caller-supplied handler (svcmgr_handler)
if (txn->flags & TF_ONE_WAY) {
    binder_free_buffer(bs, txn->data.ptr.buffer);              // one-way: no reply needed, just free the kernel buffer
} else {
    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);  // otherwise send the reply back to the caller
}

First, the binder_io struct:

binder_io
struct binder_io
{
    char *data;            /* pointer to read/write from */
    binder_size_t *offs;   /* array of offsets */
    size_t data_avail;     /* bytes available in data buffer */
    size_t offs_avail;     /* entries available in offsets array */
    char *data0;           /* start of data buffer */
    binder_size_t *offs0;  /* start of offsets buffer */
    uint32_t flags;
    uint32_t unused;
};

It holds the data being marshalled in a binder transfer.

And the two functions that initialize a binder_io:

//the first one initializes a binder_io directly from the buffers of a binder_transaction_data (no copy, it just points at them)
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED; // mark the data as shared (owned by the driver)
}
//the second one builds an empty binder_io on top of a caller-supplied buffer
void bio_init(struct binder_io *bio, void *data,
              size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t);

    if (n > maxdata) {
        bio->flags = BIO_F_OVERFLOW;
        bio->data_avail = 0;
        bio->offs_avail = 0;
        return;
    }

    bio->data = bio->data0 = (char *) data + n;
    bio->offs = bio->offs0 = data;
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}
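A small worked example of bio_init, using the rdata buffer shown above: with maxdata = 256 and maxoffs = 4 the front of rdata is reserved for the offsets array and the rest becomes the data area (a sketch assuming a 64-bit build where sizeof(size_t) == 8):

unsigned rdata[256/4];   /* 256 bytes of backing storage */
struct binder_io reply;

/* n = 4 * sizeof(size_t) = 32 bytes are reserved for offsets at the front */
bio_init(&reply, rdata, sizeof(rdata), 4);

/* after the call:
 *   reply.offs0 == (binder_size_t *) rdata   -- offsets array at the start
 *   reply.data0 == (char *) rdata + 32       -- payload area right after it
 *   reply.offs_avail == 4, reply.data_avail == 224                        */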

After msg and reply are initialized, the caller-supplied function func is invoked. Tracing it back, func is the svcmgr_handler function in service_manager.c; we will skip it for now and move on.

if (txn->flags & TF_ONE_WAY) { //根据此事务时候有TF_ONE_WAY的标记位决定
    binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}

void binder_free_buffer(struct binder_state *bs,
                        binder_uintptr_t buffer_to_free)
{
    ...
    binder_write(bs, &data, sizeof(data));
}

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    ...
    binder_write(bs, &data, sizeof(data));
}

As we can see, both paths ultimately write data down to the driver via binder_write.
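As a hedged sketch of the reply path (field names follow the binder UAPI header; treat the details as illustrative rather than the exact upstream body), the idea is to pack a BC_FREE_BUFFER command and a BC_REPLY transaction into one buffer and push both down with a single binder_write:

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;                  /* BC_FREE_BUFFER: return the kernel buffer */
        binder_uintptr_t buffer;
        uint32_t cmd_reply;                 /* BC_REPLY: the reply transaction itself */
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        /* on error, report just a status word */
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        /* on success, hand back whatever was accumulated in reply */
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char *)reply->offs) - ((char *)reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}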

That concludes the BR_TRANSACTION case in binder_parse. Let's continue.

BR_REPLY

case BR_REPLY: {
    struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
    ...
    ptr += sizeof(*txn); // advance the pointer
    r = 0; 
    break;
}

In this case binder_parse returns 0, and binder_loop logs an unexpected reply and breaks out of the loop.

BR_DEAD_BINDER | BR_FAILED_REPLY | BR_DEAD_REPLY

case BR_DEAD_BINDER: {
    struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr; // read the binder_death pointer out of the incoming data
    ptr += sizeof(binder_uintptr_t); // advance the pointer
    death->func(bs, death->ptr); // invoke the death callback that was registered for this binder
    break;
}
case BR_FAILED_REPLY:
    r = -1;
    break;
case BR_DEAD_REPLY:
    r = -1;
    break;

These report an error (r = -1), and binder_loop breaks out of the loop.
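For context, binder_death is a small record defined in servicemanager's binder.h, pairing the callback with the cookie it was registered with (roughly):

struct binder_death {
    void (*func)(struct binder_state *bs, void *ptr); /* invoked when the watched binder dies */
    void *ptr;                                        /* cookie passed back to func */
};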

That wraps up our rough walk-through of binder_parse, and with it the startup and main loop of the user-space service_manager. The sequence diagram below summarizes the flow.


(sequence diagram: binder1.png)
