The startup process of a Service component

Registering the Service component

// FregService Demo
class FregService : public BnFregService
{
public:
    FregService()
        : mVal(99)
    {
        printf(" %s X ", __func__);
    }

    virtual ~FregService()
    {
        printf(" %s X ", __func__);
    }

    static void instantiate()
    {
        defaultServiceManager()->addService(String16(FREG_SERVICE), new FregService());
    }

    int32_t getVal()
    {
        return mVal;
    }

    void setVal(int32_t val)
    {
        mVal = val;
    }

private:
    int32_t mVal;
};
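
For context, here is a client-side sketch for this demo: it fetches the proxy from the ServiceManager and calls through it. IFregService and FREG_SERVICE come from the demo's (hypothetical) interface header, so treat those names as illustrative.

#include <stdio.h>

#include <binder/IServiceManager.h>
#include <binder/IInterface.h>

int main()
{
    // Ask the ServiceManager for the binder registered under FREG_SERVICE.
    android::sp<android::IBinder> binder =
            android::defaultServiceManager()->getService(android::String16(FREG_SERVICE));
    if (binder == NULL) {
        printf("failed to get the freg service\n");
        return -1;
    }

    // interface_cast<> wraps the returned BpBinder in a BpFregService proxy.
    android::sp<IFregService> service = android::interface_cast<IFregService>(binder);
    printf("val = %d\n", service->getVal());  // one synchronous Binder transaction
    return 0;
}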

We have already examined defaultServiceManager: it returns a BpServiceManager, the proxy side of IServiceManager.

Let's now look at BpServiceManager's implementation of addService:

class BpServiceManager : public BpInterface<IServiceManager>
{
public:
    explicit BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
    {
    }

    virtual sp<IBinder> getService(const String16& name) const
    {
        unsigned n;
        for (n = 0; n < 5; n++){
            if (n > 0) {
                if (!strcmp(ProcessState::self()->getDriverName().c_str(), "/dev/vndbinder")) {
                    ALOGI("Waiting for vendor service %s...", String8(name).string());
                    CallStack stack(LOG_TAG);
                } else {
                    ALOGI("Waiting for service %s...", String8(name).string());
                }
                sleep(1);
            }
            sp<IBinder> svc = checkService(name);
            if (svc != NULL) return svc;
        }
        return NULL;
    }

    virtual sp<IBinder> checkService( const String16& name) const
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
        return reply.readStrongBinder();
    }

    virtual status_t addService(const String16& name, const sp<IBinder>& service,
            bool allowIsolated)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        data.writeStrongBinder(service);
        data.writeInt32(allowIsolated ? 1 : 0);
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readExceptionCode() : err;
    }

    virtual Vector<String16> listServices()
    {
        Vector<String16> res;
        int n = 0;

        for (;;) {
            Parcel data, reply;
            data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
            data.writeInt32(n++);
            status_t err = remote()->transact(LIST_SERVICES_TRANSACTION, data, &reply);
            if (err != NO_ERROR)
                break;
            res.add(reply.readString16());
        }
        return res;
    }
};

Communication between a Client and a Service takes the following five steps (summarized in the sketch after this list):

  • The Client packages the communication data into a Parcel object.
  • The Client sends BC_TRANSACTION to the Driver. Once the Driver has located the target Service from the protocol contents, it sends BR_TRANSACTION_COMPLETE back to the Client, indicating that the IPC request has been accepted; the Client then returns to user space to handle that return command.
  • While sending BR_TRANSACTION_COMPLETE to the Client process, the Driver also sends the BR_TRANSACTION return protocol to the Server process, asking it to handle the request.
  • The Server process handles the request and then sends BC_REPLY to the Driver. The Driver locates the target Client process from the protocol contents and sends the Server process a BR_TRANSACTION_COMPLETE, indicating that the returned result has been received.
  • At the same time, the Driver sends BR_REPLY to the Client process, delivering the result of this communication.
    Why BR_TRANSACTION_COMPLETE exists:
    it lets a process return to user space immediately after sending BC_TRANSACTION or BC_REPLY, which improves concurrency.
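
As a purely illustrative summary (this is not driver code; the strings below just restate the five steps), the ordering each thread observes for one synchronous transaction is:

#include <cstdio>

int main()
{
    const char* client[] = { "write BC_TRANSACTION",
                             "read  BR_TRANSACTION_COMPLETE (back to user space)",
                             "read  BR_REPLY (the result arrives)" };
    const char* server[] = { "read  BR_TRANSACTION (the request arrives)",
                             "write BC_REPLY",
                             "read  BR_TRANSACTION_COMPLETE (back to user space)" };
    for (const char* s : client) printf("client: %s\n", s);
    for (const char* s : server) printf("server: %s\n", s);
    return 0;
}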

addService---Packaging the IPC data

  • writeInterfaceToken

Every communication request first calls writeInterfaceToken to write the RPC header.

// Write RPC headers.  (previously just the interface token)
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    writeInt32(IPCThreadState::self()->getStrictModePolicy() |
               STRICT_MODE_PENALTY_GATHER);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);  // the interface descriptor.
}

Strict Mode Policy:

In an Android application we can define Strict Mode policies for a thread. If the thread violates one of these policies at runtime, the system issues a warning, which helps developers debug and optimize the application. For example, disk I/O can be forbidden on the application's UI thread, because disk access is slow and hurts the user experience. We can give the UI thread a Strict Mode policy that disallows disk access; whenever the UI thread touches the disk, the system warns us, pointing out exactly what to optimize.

When a Client asks a Server to perform an operation, it is really asking a Server thread to execute that operation on its behalf. Binder therefore tries to make the Server thread impersonate the Client thread as closely as possible, which requires propagating certain attributes of the Client thread to the Server thread; the Strict Mode policy is one of those attributes. A sketch of the native hook involved follows.
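
A minimal sketch, assuming the policy bit values themselves are defined by StrictMode on the Java side and carried opaquely down here:

#include <binder/IPCThreadState.h>

// Store the caller's policy on its IPCThreadState; writeInterfaceToken()
// later ORs getStrictModePolicy() into the RPC header, and the server thread
// temporarily adopts it for the duration of the call.
void adoptStrictModeExample(int32_t policyBits)  // policyBits: hypothetical value from StrictMode
{
    android::IPCThreadState::self()->setStrictModePolicy(policyBits);
}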

  • writeStrongBinder
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    // Low byte (0x7f): the minimum priority at which the Server thread handling
    // this node may run; FLAT_BINDER_FLAG_ACCEPTS_FDS means the payload may
    // contain file descriptors.
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;

    if (binder != NULL) { // binder is our FregService, which derives from BBinder.
        IBinder *local = binder->localBinder();  // returns the BBinder instance; non-NULL here.
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}

inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

/*
 * mData: the data buffer; it may hold integers, strings, and Binder objects,
 *        each Binder object being a flat_binder_object.
 * mObjects: the offsets array, recording where in mData each Binder object lives.
*/
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // remember if it's a file descriptor
        if (val.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {  // the data contains a Binder object.
            mObjects[mObjectsSize] = mDataPos;  // record where the object lives in the data buffer.
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize); // take a reference on the Binder object.
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object)); // update the data buffer bookkeeping.
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY;   // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}
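
To make the relationship between mData and mObjects concrete, here is a toy model; the real Parcel's bookkeeping (growing, ashmem, object lifetime) is far more involved:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Toy model of a Parcel's two buffers.
struct ToyParcel {
    uint8_t data[256];     // "mData": the mixed payload
    size_t  pos = 0;       // "mDataPos"
    size_t  objects[8];    // "mObjects": offsets of flattened Binder objects
    size_t  objCount = 0;  // "mObjectsSize"

    void writeInt32(int32_t v) { memcpy(data + pos, &v, sizeof(v)); pos += sizeof(v); }
    void writeObject(const void* obj, size_t size) {
        objects[objCount++] = pos;  // remember where the object starts
        memcpy(data + pos, obj, size);
        pos += size;
    }
};

int main()
{
    ToyParcel p;
    p.writeInt32(0);                         // e.g. the strict-mode word
    uint8_t fakeFlatBinderObject[24] = {};   // stands in for flat_binder_object
    p.writeObject(fakeFlatBinderObject, sizeof(fakeFlatBinderObject));
    // The driver receives exactly these two views: the payload and the offsets.
    printf("object #0 at offset %zu, payload is %zu bytes\n", p.objects[0], p.pos);
    return 0;
}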

addService---Sending and handling the BC_TRANSACTION command protocol

// IServiceManager.cpp
    virtual status_t addService(const String16& name, const sp<IBinder>& service,
            bool allowIsolated)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        data.writeStrongBinder(service);
        data.writeInt32(allowIsolated ? 1 : 0);
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readExceptionCode() : err;
    }

// remote() returns the BpBinder; tracing that is straightforward.

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
// handle : 0, because the target is the ServiceManager
// code   : ADD_SERVICE_TRANSACTION
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();

    flags |= TF_ACCEPT_FDS;

    if (err == NO_ERROR) {
        // Package the data into a binder_transaction_data and queue it on mOut.
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }

    return err;
}

Several key functions are involved here: waitForResponse, talkWithDriver, executeCommand, transact, sendReply, and writeTransactionData.

  • writeTransactionData
/*
 * Packages the data into a binder_transaction_data and writes it into mOut,
 * which is itself a Parcel.
*/
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;                // BC_TRANSACTION, BC_REPLY
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;

    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    // An empty read buffer means we need to fetch data from the Driver.

    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    /*
     * If the read buffer is not empty, it still holds unprocessed data, and the
     * caller must not fetch the next batch yet. That is the case
     * doReceive == true, needRead == false: bwr.read_size and bwr.write_size
     * both end up 0, so we return without doing any I/O.
    */
    /*
     * 1. doReceive == true,  needRead == false: no I/O, return immediately.
     * 2. doReceive == true,  needRead == true:  write whatever is in mOut, then read.
     * 3. doReceive == false, needRead == false: a pure write, since read_size == 0.
     * 4. doReceive == false, needRead == true:  also a pure write.
    */
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }

    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;

    ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr);

    {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }

        return NO_ERROR;
    }
}
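
The ioctl argument above is a struct binder_write_read. Sketched from its uapi definition (the real fields use binder_size_t and binder_uintptr_t; fixed-width types are used here as an assumption for a standalone build), it shows why a single ioctl can both write commands and read returns:

#include <cstdint>

struct binder_write_read_sketch {
    uint64_t write_size;      // bytes of BC_* commands available in write_buffer (mOut)
    uint64_t write_consumed;  // set by the Driver: how many bytes it consumed
    uint64_t write_buffer;    // user address of the command buffer
    uint64_t read_size;       // capacity of read_buffer (mIn)
    uint64_t read_consumed;   // set by the Driver: bytes of BR_* returns produced
    uint64_t read_buffer;     // user address of the return buffer
};
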
  • binder_ioctl_write_read
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }

    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        binder_inner_proc_lock(proc);
        if (!binder_worklist_empty_ilocked(&proc->todo))
            binder_wakeup_proc_ilocked(proc);
        binder_inner_proc_unlock(proc);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }

    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    struct binder_context *context = proc->context;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error.cmd == BR_OK) {
        int ret;

        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    
        switch (cmd) {
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr,
                       cmd == BC_REPLY, 0);
            break;
        }
        }
        *consumed = ptr - buffer;
    }
    return 0;
}

// binder_transaction
static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply,
                   binder_size_t extra_buffers_size)
{
    int ret;
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    binder_size_t *offp, *off_end, *off_start;
    binder_size_t off_min;
    u8 *sg_bufp, *sg_buf_end;
    struct binder_proc *target_proc = NULL;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct binder_transaction *in_reply_to = NULL;
    uint32_t return_error = 0;
    uint32_t return_error_param = 0;
    uint32_t return_error_line = 0;
    struct binder_buffer_object *last_fixup_obj = NULL;
    binder_size_t last_fixup_min_off = 0;
    struct binder_context *context = proc->context;

    if (reply) {
        binder_inner_proc_lock(proc);
        in_reply_to = thread->transaction_stack;
        if (in_reply_to == NULL) {
            binder_inner_proc_unlock(proc);
            binder_user_error("%d:%d got reply transaction with no transaction stack\n",
                      proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            return_error_param = -EPROTO;
            return_error_line = __LINE__;
            goto err_empty_call_stack;
        }
        if (in_reply_to->to_thread != thread) {
            spin_lock(&in_reply_to->lock);
            binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
                proc->pid, thread->pid, in_reply_to->debug_id,
                in_reply_to->to_proc ?
                in_reply_to->to_proc->pid : 0,
                in_reply_to->to_thread ?
                in_reply_to->to_thread->pid : 0);
            spin_unlock(&in_reply_to->lock);
            binder_inner_proc_unlock(proc);
            return_error = BR_FAILED_REPLY;
            return_error_param = -EPROTO;
            return_error_line = __LINE__;
            in_reply_to = NULL;
            goto err_bad_call_stack;
        }
        thread->transaction_stack = in_reply_to->to_parent;
        binder_inner_proc_unlock(proc);
        target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
        if (target_thread == NULL) {
            return_error = BR_DEAD_REPLY;
            return_error_line = __LINE__;
            goto err_dead_binder;
        }
        if (target_thread->transaction_stack != in_reply_to) {
            binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
                proc->pid, thread->pid,
                target_thread->transaction_stack ?
                target_thread->transaction_stack->debug_id : 0,
                in_reply_to->debug_id);
            binder_inner_proc_unlock(target_thread->proc);
            return_error = BR_FAILED_REPLY;
            return_error_param = -EPROTO;
            return_error_line = __LINE__;
            in_reply_to = NULL;
            target_thread = NULL;
            goto err_dead_binder;
        }
        target_proc = target_thread->proc;
        target_proc->tmp_ref++;
        binder_inner_proc_unlock(target_thread->proc);
    } else {
        /*
         * BC_TRANSACTION
        */

        // First, find the binder_ref that corresponds to the handle.
        if (tr->target.handle) {
            struct binder_ref *ref;

            /*
             * There must already be a strong ref
             * on this node. If so, do a strong
             * increment on the node to ensure it
             * stays alive until the transaction is
             * done.
             */
            binder_proc_lock(proc);
            ref = binder_get_ref_olocked(proc, tr->target.handle,
                             true);
            if (ref) {
                target_node = binder_get_node_refs_for_txn(
                        ref->node, &target_proc,
                        &return_error);
                // target_node is obtained from ref->node.
            } else {
                binder_user_error("%d:%d got transaction to invalid handle\n",
                          proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
            }
            binder_proc_unlock(proc);
        } else {
            /*
             * A handle of 0 means the target is the ServiceManager.
            */
            mutex_lock(&context->context_mgr_node_lock);
            target_node = context->binder_context_mgr_node;  // i.e. the ServiceManager's binder node
            if (target_node)
                target_node = binder_get_node_refs_for_txn(
                        target_node, &target_proc,
                        &return_error);
            else
                return_error = BR_DEAD_REPLY;
            mutex_unlock(&context->context_mgr_node_lock);
        }

        binder_inner_proc_lock(proc);
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
            struct binder_transaction *tmp;

            tmp = thread->transaction_stack;
            if (tmp->to_thread != thread) {
                // error
            }
            while (tmp) {
                struct binder_thread *from;

                spin_lock(&tmp->lock);
                from = tmp->from;
                if (from && from->proc == target_proc) {
                    atomic_inc(&from->tmp_ref);
                    target_thread = from;
                    spin_unlock(&tmp->lock);
                    break;
                }
                spin_unlock(&tmp->lock);
                tmp = tmp->from_parent;
            }
        }
        binder_inner_proc_unlock(proc);
    }

    /* TODO: reuse incoming transaction for reply */
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    
    binder_stats_created(BINDER_STAT_TRANSACTION);
    spin_lock_init(&t->lock);
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;  // a synchronous transaction records its sending thread.
    else
        t->from = NULL;
    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    if (!(t->flags & TF_ONE_WAY) &&
        binder_supported_policy(current->policy)) {
        /* Inherit supported policies for synchronous transactions */
        t->priority.sched_policy = current->policy;
        t->priority.prio = current->normal_prio;
    } else {
        /* Otherwise, fall back to the default priority */
        t->priority = target_proc->default_priority;
    }

    /*
     * This is the crucial memory step: the buffer is allocated with
     * target_proc's allocator, which means the data is copied directly
     * into the target process's binder_buffer.
    */
    t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
        tr->offsets_size, extra_buffers_size,
        !reply && (t->flags & TF_ONE_WAY));

    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    off_start = (binder_size_t *)(t->buffer->data +
                      ALIGN(tr->data_size, sizeof(void *)));
    offp = off_start;

    // The two direct copies: payload and offsets go straight into the target's buffer.
    copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
               tr->data.ptr.buffer, tr->data_size);
    copy_from_user(offp, (const void __user *)(uintptr_t)
               tr->data.ptr.offsets, tr->offsets_size);
    
    off_end = (void *)off_start + tr->offsets_size;
    sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
    sg_buf_end = sg_bufp + extra_buffers_size;
    off_min = 0;

    /*
     * The Driver does not care about plain user data, but every
     * flat_binder_object recorded in the offsets buffer must be processed:
     * for each one that wraps a BBinder it must create a binder_node and
     * manage the reference counts.
    */
    for (; offp < off_end; offp++) {
        struct binder_object_header *hdr;
        size_t object_size = binder_validate_object(t->buffer, *offp);

        hdr = (struct binder_object_header *)(t->buffer->data + *offp);
        off_min = *offp + object_size;
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            /*
             * The data contains a BBinder object.
            */
            struct flat_binder_object *fp;

            fp = to_flat_binder_object(hdr);
            binder_translate_binder(fp, t, thread);  // create the binder_node and binder_ref, and maintain the reference counts.
        } break;
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            /*
             * The data contains a BpBinder object.
            */
            struct flat_binder_object *fp;

            fp = to_flat_binder_object(hdr);
            binder_translate_handle(fp, t, thread);  // maintain the reference counts.
        } break;

        ...
        }
    }
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    binder_enqueue_work(proc, tcomplete, &thread->todo);
    /*
     * Queue the BINDER_WORK_TRANSACTION_COMPLETE work item on the todo list
     * of the *source* thread. I used to think it could only be picked up by
     * the source thread's next ioctl, but that is wrong: a single ioctl is
     * not limited to either reading or writing. Look closely at
     * binder_ioctl_write_read: it can write first and then read, so every
     * write gets its return command within the same ioctl. The return
     * command for BC_TRANSACTION is BR_TRANSACTION_COMPLETE.
     */

    t->work.type = BINDER_WORK_TRANSACTION;

    if (reply) {
        binder_inner_proc_lock(target_proc);
        if (target_thread->is_dead) {
            binder_inner_proc_unlock(target_proc);
            goto err_dead_proc_or_thread;
        }
        BUG_ON(t->buffer->async_transaction != 0);
        binder_pop_transaction_ilocked(target_thread, in_reply_to);
        binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
        binder_inner_proc_unlock(target_proc);
        wake_up_interruptible_sync(&target_thread->wait);
        binder_restore_priority(current, in_reply_to->saved_priority);
        binder_free_transaction(in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        binder_inner_proc_lock(proc);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
        binder_inner_proc_unlock(proc);
        /*
         * Send the transaction to the target process and wake it up
         * to handle it.
        */
        if (!binder_proc_transaction(t, target_proc, target_thread)) {
            binder_inner_proc_lock(proc);
            binder_pop_transaction_ilocked(thread, t);
            binder_inner_proc_unlock(proc);
            goto err_dead_proc_or_thread;
        }
    } else {
        BUG_ON(target_node == NULL);
        BUG_ON(t->buffer->async_transaction != 1);
        if (!binder_proc_transaction(t, target_proc, NULL))
            goto err_dead_proc_or_thread;
    }
    if (target_thread)
        binder_thread_dec_tmpref(target_thread);
    binder_proc_dec_tmpref(target_proc);
    if (target_node)
        binder_dec_node_tmpref(target_node);
    /*
     * write barrier to synchronize with initialization
     * of log entry
     */
    smp_wmb();
    WRITE_ONCE(e->debug_id_done, t_debug_id);
    return;
}
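
binder_proc_transaction is what actually delivers the work item to the target. An abridged sketch of its logic (the real function also throttles async transactions on the node and applies priority inheritance):

/* Abridged sketch of binder_proc_transaction(): pick a target thread if one
 * is waiting, queue the work, and wake the target. Returns false if the
 * target process or thread is dead. */
static bool binder_proc_transaction_sketch(struct binder_transaction *t,
                                           struct binder_proc *proc,
                                           struct binder_thread *thread)
{
    bool oneway = !!(t->flags & TF_ONE_WAY);

    binder_inner_proc_lock(proc);
    if (proc->is_dead || (thread && thread->is_dead)) {
        binder_inner_proc_unlock(proc);
        return false;
    }
    if (!thread && !oneway)
        thread = binder_select_thread_ilocked(proc); /* an idle looper, if any */

    if (thread)
        binder_enqueue_work_ilocked(&t->work, &thread->todo);
    else
        binder_enqueue_work_ilocked(&t->work, &proc->todo);

    binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
    binder_inner_proc_unlock(proc);
    return true;
}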

Back in waitForResponse:

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        /*
         * What talkWithDriver has done by this point:
         * 1. sent the BC_TRANSACTION command;
         * 2. received BR_TRANSACTION_COMPLETE.
        */
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
            /*
             * We are back in user space here, but we immediately call
             * talkWithDriver again. Is that an infinite loop? No: BC_TRANSACTION
             * must wait for its result, so we keep looping until the Driver
             * hands us BR_REPLY.
            */
        }
    }
finish:
    return err;
}

addService---Sending and handling the BR_TRANSACTION return protocol

  • The ServiceManager client sends BC_TRANSACTION to the Driver, carrying a BBinder.
  • The Driver creates the binder_node and binder_ref, and copies the BC_TRANSACTION data into the buffer of the target process's binder_proc.
  • The Driver then queues a BINDER_WORK_TRANSACTION work item on the target process, i.e. the native ServiceManager process.
  • The target process calls binder_ioctl, which reads and handles that work item:
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    ......

    while (1) {
        ......
        w = binder_dequeue_work_head_ilocked(list);

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            binder_inner_proc_unlock(proc);
            t = container_of(w, struct binder_transaction, work);
        } break;
        }

        if (t->buffer->target_node) { // target_node is non-NULL here
            struct binder_node *target_node = t->buffer->target_node;
            struct binder_priority node_prio;

            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            node_prio.sched_policy = target_node->sched_policy;
            node_prio.prio = target_node->min_priority;
            binder_transaction_priority(current, t, node_prio,
                            target_node->inherit_rt);
            cmd = BR_TRANSACTION;  // set the return command
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY;
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

        t_from = binder_get_txn_from(t);
        if (t_from) {
            struct task_struct *sender = t_from->proc->tsk;

            tr.sender_pid = task_tgid_nr_ns(sender,
                            task_active_pid_ns(current));
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (binder_uintptr_t)
            ((uintptr_t)t->buffer->data +
            binder_alloc_get_user_buffer_offset(&proc->alloc));
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                        sizeof(void *));

        put_user(cmd, (uint32_t __user *)ptr); // write BR_TRANSACTION into user space.
        ptr += sizeof(uint32_t);
        copy_to_user(ptr, &tr, sizeof(tr));
        /*
         * Copy the binder_transaction_data into the target process's user
         * space. The payload itself is not copied again; only this struct is.
         * Its data pointers are translated from the kernel buffer to the
         * corresponding user-space address, where the target process can read
         * the payload directly.
         */
        ptr += sizeof(tr);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            binder_inner_proc_lock(thread->proc);
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;         // push t onto the target thread's transaction_stack.
            binder_inner_proc_unlock(thread->proc);
        } else {
            binder_free_transaction(t);
        }
        break;
    }

done:

    *consumed = ptr - buffer;
    binder_inner_proc_lock(proc);
    if (proc->requested_threads == 0 &&
        list_empty(&thread->proc->waiting_threads) &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_inner_proc_unlock(proc);
        binder_debug(BINDER_DEBUG_THREADS,
                 "%d:%d BR_SPAWN_LOOPER\n",
                 proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
        binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
    } else
        binder_inner_proc_unlock(proc);
    return 0;
}
  • binder_parse
struct binder_io
{
    char *data;            /* pointer to read/write from */
    binder_size_t *offs;   /* array of offsets */
    size_t data_avail;     /* bytes available in data buffer */
    size_t offs_avail;     /* entries available in offsets array */

    char *data0;           /* start of data buffer */
    binder_size_t *offs0;  /* start of offsets buffer */
    uint32_t flags;
    uint32_t unused;
};

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);

        switch(cmd) {
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                if (txn->flags & TF_ONE_WAY) {
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ptr += sizeof(*txn);
            break;
        }
        }
    }

    return r;
}
  • bio_init
  • bio_init_from_txn

These just initialize the binder_io structure; nothing complicated in themselves.
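
As a sketch (after servicemanager's binder.c, abridged), bio_init_from_txn simply points the binder_io at the buffers the driver handed over:

/* BIO_F_SHARED marks the buffer as driver-owned, so it must be released
 * with BC_FREE_BUFFER later. */
void bio_init_from_txn_sketch(struct binder_io *bio,
                              struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(uintptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(uintptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(binder_size_t);
    bio->flags = BIO_F_SHARED;
}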

int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    s = bio_get_string16(msg, &len);

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);

        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}

do_add_service essentially describes each Service with a struct svcinfo and keeps every registered Service on a global svcinfo list.
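
For reference, the record behind that list looks roughly like this (after AOSP's service_manager.c):

/* svclist is a singly linked global list of these records. */
struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;            /* the binder handle obtained via bio_get_ref() */
    struct binder_death death;  /* for death notification when the service dies */
    int allow_isolated;
    size_t len;
    uint16_t name[0];           /* UTF-16 service name, allocated inline */
};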

addService---Sending and handling the BC_REPLY command protocol

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;  // ask the Kernel to free the transaction buffer.
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}
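
binder_write, used above, is a thin write-only wrapper around the same BINDER_WRITE_READ ioctl; roughly (after servicemanager's binder.c):

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;            /* the packed BC_FREE_BUFFER + BC_REPLY blob */
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;               /* pure write: nothing is read back here */
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}
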
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    ......

        case BC_FREE_BUFFER: {
            binder_uintptr_t data_ptr;
            struct binder_buffer *buffer;

            if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(binder_uintptr_t);

            buffer = binder_alloc_prepare_to_free(&proc->alloc,
                                  data_ptr);

            if (buffer->transaction) {
                buffer->transaction->buffer = NULL;
                buffer->transaction = NULL;
            }
            if (buffer->async_transaction && buffer->target_node) {
                struct binder_node *buf_node;
                struct binder_work *w;

                buf_node = buffer->target_node;
                binder_node_inner_lock(buf_node);
                BUG_ON(!buf_node->has_async_transaction);
                BUG_ON(buf_node->proc != proc);
                w = binder_dequeue_work_head_ilocked(
                        &buf_node->async_todo);
                if (!w) {
                    buf_node->has_async_transaction = 0;
                } else {
                    binder_enqueue_work_ilocked(
                            w, &proc->todo);
                    binder_wakeup_proc_ilocked(proc);
                }
                binder_node_inner_unlock(buf_node);
            }
            binder_transaction_buffer_release(proc, buffer, NULL);
            binder_alloc_free_buf(&proc->alloc, buffer);
            break;
        }

        ...
}
  • binder_transaction_buffer_release: walks the same offsets array and drops the references that binder_transaction() took on every Binder object embedded in the buffer.

addService---Sending and handling the BR_REPLY return protocol

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();

        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }

        switch (cmd) {
        
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
        }
    }

finish:
    return err;
}
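
Note that the reply Parcel does not copy the payload either: ipcSetDataReference points it at the mapped kernel buffer and registers freeBuffer as the release callback, so the buffer is returned with BC_FREE_BUFFER once the reply Parcel is done with it. An abridged sketch:

// Abridged sketch of IPCThreadState::freeBuffer (see IPCThreadState.cpp):
// invoked when the reply Parcel releases its data reference.
void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data,
                                size_t /*dataSize*/,
                                const binder_size_t* /*objects*/,
                                size_t /*objectsSize*/, void* /*cookie*/)
{
    if (parcel != NULL) parcel->closeFileDescriptors();
    IPCThreadState* state = self();
    state->mOut.writeInt32(BC_FREE_BUFFER);
    state->mOut.writePointer((uintptr_t)data);  // flushed on the next talkWithDriver()
}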

Starting the Binder thread pool

int main(int argc __unused, char** argv __unused)
{
    ......
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();

    /*
     * Note that there are two threads serving requests here:
     * startThreadPool spawns and registers a new pool thread, and the
     * main thread of main() also registers itself with the Driver via
     * joinThreadPool. So requests are actually handled by two threads,
     * and the Driver can request more later via BR_SPAWN_LOOPER.
     */

    return 0;
}

A Service needs threads that keep polling the Driver for data and processing it.

void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}

void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}

class PoolThread : public Thread
{
public:
    explicit PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }
    
protected:
    virtual bool threadLoop()
    {
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;
    }
    
    const bool mIsMain;
};
void IPCThreadState::joinThreadPool(bool isMain)
{
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    status_t result;
    do {
        processPendingDerefs();
        // now get the next command to be processed, waiting if necessary
        result = getAndExecuteCommand();

    } while (result != -ECONNREFUSED && result != -EBADF);

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}

status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    result = talkWithDriver();
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;
        cmd = mIn.readInt32();

        result = executeCommand(cmd);
    }

    return result;
}
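
getAndExecuteCommand hands each return command to executeCommand; for BR_TRANSACTION this is where a request finally reaches the Service component, ending up in BnFregService::onTransact for our demo. An abridged sketch (target lookup and error handling elided):

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    status_t result = NO_ERROR;

    switch ((uint32_t)cmd) {
    case BR_TRANSACTION: {
        binder_transaction_data tr;
        result = mIn.read(&tr, sizeof(tr));
        if (result != NO_ERROR) break;

        Parcel buffer;
        buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size / sizeof(binder_size_t), freeBuffer, this);

        Parcel reply;
        // tr.cookie is the BBinder pointer that was flattened on the sending
        // side (flat_binder_object.cookie); call into the local Service.
        sp<BBinder> b(reinterpret_cast<BBinder*>(tr.cookie));
        b->transact(tr.code, buffer, &reply, tr.flags);

        if ((tr.flags & TF_ONE_WAY) == 0)
            sendReply(reply, 0);  // BC_REPLY back to the Driver
        break;
    }
    default:
        break;
    }
    return result;
}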
