Related articles:
1. Android Framework - Getting Started
2. Android Binder Driver - Adding the Media Service
3. Android Binder Driver - Starting the ServiceManager Process
4. Android Binder Driver - Kernel Driver Layer Source Analysis
5. Android Binder Driver - Analyzing Service Addition from the Driver Layer
6. Android Binder Driver - Tracing Service Lookup from the Java Layer
Relevant source files:
// app process
/frameworks/base/core/java/android/app/Activity.java
/frameworks/base/core/java/android/app/Instrumentation.java
/frameworks/base/core/java/android/app/ActivityManagerNative.java
/frameworks/base/core/java/android/os/Binder.java
/frameworks/native/libs/binder/BpBinder.cpp
/frameworks/native/libs/binder/ProcessState.cpp
/frameworks/native/libs/binder/IPCThreadState.cpp
/frameworks/base/core/jni/android_os_Parcel.cpp
// ServiceManager process
/frameworks/native/cmds/servicemanager/service_manager.c
/frameworks/native/cmds/servicemanager/binder.c
// binder driver layer
/drivers/android/binder.c
/drivers/staging/android/binder.c
The Binder driver is everywhere in Android: even the most common operations, such as startActivity and startService, go through inter-process communication several times. Using the Activity launch flow as the entry point, this article traces from the Java layer how a service is looked up and returned:
public void startActivityForResult(Intent intent, int requestCode,
@Nullable Bundle options) {
if (mParent == null) {
        Instrumentation.ActivityResult ar = mInstrumentation.execStartActivity(this,
            mMainThread.getApplicationThread(), mToken, this, intent, requestCode, options);
...
} else {
...
}
}
public ActivityResult execStartActivity(Context who, IBinder contextThread,
        IBinder token, Activity target, Intent intent, int requestCode, Bundle options) {
...
try {
int result = ActivityManagerNative.getDefault()
.startActivity(whoThread, who.getBasePackageName(), intent,
                intent.resolveTypeIfNeeded(who.getContentResolver()), token,
                target != null ? target.mEmbeddedID : null, requestCode, 0, null, options);
checkStartActivityResult(result, intent);
} catch (RemoteException e) {
throw new RuntimeException("Failure from system", e);
}
return null;
}
static public IActivityManager getDefault() {
return gDefault.get();
}
private static final Singleton<IActivityManager> gDefault = new Singleton<IActivityManager>() {
protected IActivityManager create() {
IBinder b = ServiceManager.getService("activity");
if (false) {
Log.v("ActivityManager", "default service binder = " + b);
}
IActivityManager am = asInterface(b);
if (false) {
Log.v("ActivityManager", "default service = " + am);
}
return am;
}
};
The code above involves two cross-process calls: first the app process asks the ServiceManager process to look up and return the IBinder object of ActivityManagerService, then the app process sends the start-Activity request to the ActivityManagerService process. This article focuses on how the app process obtains the system service; how it then communicates with the ActivityManagerService process will be analyzed in later articles.
public static IBinder getService(String name) {
try {
IBinder service = sCache.get(name);
if (service != null) {
return service;
} else {
return getIServiceManager().getService(name);
}
} catch (RemoteException e) {
Log.e(TAG, "error in getService", e);
}
return null;
}
private static IServiceManager getIServiceManager() {
if (sServiceManager != null) {
return sServiceManager;
}
// Find the service manager
sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject());
return sServiceManager;
}
// From here we follow the Native-layer source
public static final native IBinder getContextObject();
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
    sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
AutoMutex _l(mProxyLock);
    // mClass = android/os/BinderProxy; this effectively news up a Java BinderProxy
    jobject object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
if (object != NULL) {
        // Store the address of BpBinder(0) into the BinderProxy's mObject field
env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
}
return object;
}
What getIServiceManager() returns is essentially new BpBinder(0); seen from the Java layer it is just an IBinder (BinderProxy) object. ServiceManagerNative.asInterface() then wraps that BinderProxy in a ServiceManagerProxy, which keeps it as its mRemote field, so the getService() below is ServiceManagerProxy.getService(). Let's keep tracing its concrete implementation.
public IBinder getService(String name) throws RemoteException {
    // Obtain one Parcel for the outgoing data and one for the reply
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
    // Write the request data into data
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
final class BinderProxy implements IBinder {
...
public native boolean transactNative(int code, Parcel data, Parcel reply,
int flags) throws RemoteException;
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
return transactNative(code, data, reply, flags);
}
    // mObject holds the native pointer, here new BpBinder(0)
private long mObject;
...
}
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags) {
    // Fetch the native-layer Parcels backing the Java ones
    Parcel* data = parcelForJavaObject(env, dataObj);
    Parcel* reply = parcelForJavaObject(env, replyObj);
    // Fetch BpBinder(0), stored in BinderProxy.mObject
    IBinder* target = (IBinder*) env->GetLongField(obj, gBinderProxyOffsets.mObject);
    // Call BpBinder's transact method
    status_t err = target->transact(code, *data, reply, flags);
    return err == NO_ERROR ? JNI_TRUE : JNI_FALSE;
}
status_t BpBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
if (mAlive) {
status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
// Covered in detail in "Android Binder Driver - Adding the Media Service"
status_t IPCThreadState::transact(int32_t handle, uint32_t code,
    const Parcel& data, Parcel* reply, uint32_t flags) {
...
}
The full source of IPCThreadState::transact was analyzed in detail in "Android Binder Driver - Adding the Media Service", so we won't rehash it here. Let's focus instead on how the ServiceManager process replies to us.
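For context, ServiceManager spends its life in binder_loop(), blocking in the BINDER_WRITE_READ ioctl and feeding every command buffer it reads to binder_parse(), with svcmgr_handler passed in as the func callback. An abridged sketch from /frameworks/native/cmds/servicemanager/binder.c of the same era (elisions mine):
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
    // Tell the driver this thread enters the looper
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        // Block until the driver hands us commands (e.g. BR_TRANSACTION)
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        ...
        // Dispatch what was read; func == svcmgr_handler
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        ...
    }
}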
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
                // First run the callback (func == svcmgr_handler)
                res = func(bs, txn, &msg, &reply);
                // Then write the result back into the binder driver
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        }
    }
return r;
}
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
    uint16_t *s;
    size_t len;
    uint32_t handle;
    switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
...
}
bio_put_uint32(reply, 0);
return 0;
}
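What does do_find_service() actually do? It is a plain linear lookup over ServiceManager's in-memory service list, and the handle it returns is whatever was recorded when the service was registered. A sketch abridged from service_manager.c (permission checks elided; exact details vary by Android version):
// find_svc() walks the global svclist, comparing UTF-16 names
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len,
                         uid_t uid, pid_t spid)
{
    struct svcinfo *si = find_svc(s, len);
    if (!si || !si->handle)
        return 0;
    ...   // isolated-uid and SELinux checks elided
    // The handle recorded at registration (do_add_service) time
    return si->handle;
}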
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
struct flat_binder_object *obj;
if (handle)
obj = bio_alloc_obj(bio);
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
    // Write a flat_binder_object into reply
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->type = BINDER_TYPE_HANDLE;
obj->handle = handle;
obj->cookie = 0;
}
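For reference, here is the layout of the flat_binder_object being filled in, taken from the binder UAPI header (field comments are mine):
struct flat_binder_object {
    __u32            type;       /* BINDER_TYPE_BINDER, BINDER_TYPE_HANDLE, ... */
    __u32            flags;
    union {
        binder_uintptr_t binder; /* local object: its address */
        __u32            handle; /* remote object: a process-local reference */
    };
    binder_uintptr_t cookie;     /* extra data, used for local objects */
};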
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
    // The reply command
data.cmd_reply = BC_REPLY;
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
...
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data));
}
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
    // Write data to the driver layer; read_size = 0, so this is a pure write
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
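binder_write_read is the argument block of the BINDER_WRITE_READ ioctl in both directions; with read_size == 0 the call returns as soon as the driver has consumed the write buffer. Its layout, again from the UAPI header (field comments mine):
struct binder_write_read {
    binder_size_t    write_size;     /* bytes to write */
    binder_size_t    write_consumed; /* bytes consumed by the driver */
    binder_uintptr_t write_buffer;
    binder_size_t    read_size;      /* bytes available to read */
    binder_size_t    read_consumed;  /* bytes the driver filled in */
    binder_uintptr_t read_buffer;
};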
From the source above we can see that the ServiceManager process ultimately writes a flat_binder_object down to the driver layer, the crucial step being the assignment of handle. So how exactly does the binder driver notify our app process?
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
if (reply) {
        // This branch handles the reply path
in_reply_to = thread->transaction_stack;
target_thread = in_reply_to->from;
target_proc = target_thread->proc;
} else {
...
}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
...
}
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
...
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
...
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
...
}
if (ref->node->proc == target_proc) {
...
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
fp->handle = new_ref->desc;
trace_binder_transaction_ref_to_ref(t, ref, new_ref);
}
} break;
...
        }
    }
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
...
} else {
...
}
    // Queue the transaction for the waiting app process, then wake it up
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
    // And queue BINDER_WORK_TRANSACTION_COMPLETE on ServiceManager's own thread
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
}
So the ServiceManager process hands its reply data to the binder driver, and the driver forwards it to wake up the app process. The command ServiceManager wrote was BC_REPLY; once the app receives the data it carries on with the corresponding handling.
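Where does the BC_REPLY written by ServiceManager become the BR_REPLY the app reads? When the woken app thread drains its todo list in binder_thread_read(), a transaction whose buffer carries no target_node (which is exactly the reply case) is delivered under the BR_REPLY command. Abridged from the driver source (elisions mine):
// In binder_thread_read(), handling a BINDER_WORK_TRANSACTION work item:
t = container_of(w, struct binder_transaction, work);
if (t->buffer->target_node) {
    ...
    cmd = BR_TRANSACTION;   // a normal incoming call
} else {
    tr.target.ptr = 0;
    tr.cookie = 0;
    cmd = BR_REPLY;         // a reply: no target node
}
Back in the app process, IPCThreadState::waitForResponse() then reads this command: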
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
...
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
                    reply->ipcSetDataReference(
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t),
                        freeBuffer, this);
} else {
...
}
} else {
...
}
}
goto finish;
}
}
finish:
    ...
    return err;
}
// Here the objects array holds exactly one entry: our flat_binder_object
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
binder_size_t minOffset = 0;
freeDataNoInit();
mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
mDataSize = mDataCapacity = dataSize;
//ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
mDataPos = 0;
ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
mObjectsSize = mObjectsCapacity = objectsCount;
mNextObjectHint = 0;
mOwner = relFunc;
mOwnerCookie = relCookie;
for (size_t i = 0; i < mObjectsSize; i++) {
binder_size_t offset = mObjects[i];
if (offset < minOffset) {
ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
__func__, (uint64_t)offset, (uint64_t)minOffset);
mObjectsSize = 0;
break;
}
minOffset = offset + sizeof(flat_binder_object);
}
scanForFds();
}
// Back to the original getService code
public IBinder getService(String name) throws RemoteException {
    // Obtain one Parcel for the outgoing data and one for the reply
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
    // Write the request data into data
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
static jobject android_os_Parcel_readStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr)
{
    Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
if (parcel != NULL) {
return javaObjectForIBinder(env, parcel->readStrongBinder());
}
return NULL;
}
sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
unflatten_binder(ProcessState::self(), *this, &val);
return val;
}
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
const flat_binder_object* flat = in.readObject(false);
if (flat) {
switch (flat->type) {
case BINDER_TYPE_BINDER:
...
case BINDER_TYPE_HANDLE:
*out = proc->getStrongProxyForHandle(flat->handle);
            return finish_unflatten_binder(static_cast<BpBinder*>(out->get()), *flat, in);
}
}
return BAD_TYPE;
}
// Handles are cached per process; what comes back is BpBinder(handle)
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
At this point the whole source trail has been covered: the system service (AMS) we finally get back is new BpBinder(handle). One last question is worth thinking through: is the handle value we receive the same handle value that ServiceManager holds? It is not. Handles are process-local: as binder_transaction() above shows, when the reply passes through the driver, binder_get_ref_for_node() creates (or reuses) a binder_ref for the app process and rewrites fp->handle = new_ref->desc, so the app ends up with its own descriptor that the driver resolves to the same AMS binder_node.
Video link: https://pan.baidu.com/s/1CHysxz7sKoLftpzXkbNAmw
Video access code: 5cfz