In the file frameworks/native/cmds/servicemanager/Android.bp:
cc_binary {
name: "servicemanager",
defaults: ["servicemanager_defaults"],
init_rc: ["servicemanager.rc"],
srcs: ["main.cpp"],
}
cc_binary {
name: "vndservicemanager",
defaults: ["servicemanager_defaults"],
init_rc: ["vndservicemanager.rc"],
vendor: true,
cflags: [
"-DVENDORSERVICEMANAGER=1",
],
required: [
"vndservice",
],
srcs: ["main.cpp"],
}
The Android.bp builds two binaries from main.cpp: servicemanager and vndservicemanager. Each binary has its own *.rc file: frameworks/native/cmds/servicemanager/servicemanager.rc and frameworks/native/cmds/servicemanager/vndservicemanager.rc.
The contents of the two files are:
service servicemanager /system/bin/servicemanager
class core animation
user system
group system readproc
critical
onrestart restart apexd
onrestart restart audioserver
onrestart restart gatekeeperd
onrestart class_restart main
onrestart class_restart hal
onrestart class_restart early_hal
writepid /dev/cpuset/system-background/tasks
shutdown critical
service vndservicemanager /vendor/bin/vndservicemanager /dev/vndbinder
class core
user system
group system readproc
writepid /dev/cpuset/system-background/tasks
onrestart class_restart main
onrestart class_restart hal
onrestart class_restart early_hal
shutdown critical
The main() function in servicemanager's main.cpp:
frameworks/native/cmds/servicemanager/main.cpp
int main(int argc, char** argv) {
if (argc > 2) {
LOG(FATAL) << "usage: " << argv[0] << " [binder driver]";
}
// 1. Pick the device from the argument: binder or vndbinder
const char* driver = argc == 2 ? argv[1] : "/dev/binder";
// 2. Initialize the driver device
sp<ProcessState> ps = ProcessState::initWithDriver(driver);
// Tell the driver the max thread count; servicemanager's pool size is set to 0
ps->setThreadPoolMaxThreadCount(0);
// Set the call restriction: FATAL_IF_NOT_ONEWAY means abort the process on a blocking call.
// That is, binder calls initiated by ServiceManager must be oneway; otherwise it is fatal and a stack trace is logged
ps->setCallRestriction(ProcessState::CallRestriction::FATAL_IF_NOT_ONEWAY);
// 3. The core object: instantiate ServiceManager, passing in an Access object for SELinux permission checks
sp<ServiceManager> manager = sp<ServiceManager>::make(std::make_unique<Access>());
// Register servicemanager itself as a special service
if (!manager->addService("manager", manager, false /*allowIsolated*/, IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT).isOk()) {
LOG(ERROR) << "Could not self register servicemanager";
}
// 4. Store the ServiceManager in IPCThreadState's global variable
IPCThreadState::self()->setTheContextObject(manager);
// Register with the driver to become the binder context manager
ps->becomeContextManager();
// Prepare the looper
sp<Looper> looper = Looper::prepare(false /*allowNonCallbacks*/);
// 5. Send BC_ENTER_LOOPER to the driver and watch the driver fd; handleEvent is called back to process binder calls
BinderCallback::setupTo(looper);
// Handles callbacks for service registration notifications
ClientCallbackCallback::setupTo(looper, manager);
// Run the looper, polling forever; the process never exits unless an abnormal condition causes an abort
while(true) {
looper->pollAll(-1);
}
// should not be reached
return EXIT_FAILURE;
}
This main() function is the entry point of the servicemanager program.
The implementation of ProcessState::initWithDriver() is in:
frameworks/native/libs/binder/ProcessState.cpp
sp<ProcessState> ProcessState::initWithDriver(const char* driver)
{
// Returns an sp to the ProcessState singleton
return init(driver, true /*requireDefault*/);
}
ProcessState manages **process state**; each binder-using process has exactly one such object (the gProcess singleton). It is responsible for:
1. Initializing the driver device
2. Recording the driver's name and fd
3. Recording the upper limit on the process's thread count
4. Recording the binder context object
5. Starting binder threads
frameworks/native/libs/binder/ProcessState.cpp
sp<ProcessState> ProcessState::init(const char *driver, bool requireDefault)
{
[[clang::no_destroy]] static sp<ProcessState> gProcess;
[[clang::no_destroy]] static std::mutex gProcessMutex;
if (driver == nullptr) {
std::lock_guard<std::mutex> l(gProcessMutex);
return gProcess;
}
[[clang::no_destroy]] static std::once_flag gProcessOnce;
// std::call_once ensures this code runs exactly once, even in a multithreaded environment
std::call_once(gProcessOnce, [&](){
// Check whether the driver node is readable: access() returns 0 on success, -1 on failure
if (access(driver, R_OK) == -1) {
ALOGE("Binder driver %s is unavailable. Using /dev/binder instead.", driver);
driver = "/dev/binder";
}
std::lock_guard<std::mutex> l(gProcessMutex);
// Instantiate the ProcessState singleton
gProcess = sp<ProcessState>::make(driver);
});
if (requireDefault) {
// Detect if we are trying to initialize with a different driver, and
// consider that an error. ProcessState will only be initialized once above.
LOG_ALWAYS_FATAL_IF(gProcess->getDriverName() != driver,
"ProcessState was already initialized with %s,"
" can't initialize with %s.",
gProcess->getDriverName().c_str(), driver);
}
return gProcess;
}
The parameter is the driver device name, /dev/binder or /dev/vndbinder. The logic is simple: since each process has exactly one ProcessState instance, if the instance already exists, verify that the driver it opened matches the driver name being initialized; if it does not exist yet, create one.
The most important part of init() is instantiating ProcessState, which invokes the ProcessState constructor:
frameworks/native/libs/binder/ProcessState.cpp
ProcessState::ProcessState(const char *driver)
: mDriverName(String8(driver))
, mDriverFD(open_driver(driver)) // open_driver opens the driver
, mVMStart(MAP_FAILED)
, mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
, mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
, mExecutingThreadsCount(0)
, mWaitingForThreads(0)
, mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
, mStarvationStartTimeMs(0)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
, mCallRestriction(CallRestriction::NONE)
{
if (mDriverFD >= 0) {
// mmap the binder, providing a chunk of virtual address space to receive transactions.
// Map virtual memory; this ends up in the driver's binder_mmap()
// The mapping size is 1 MB minus 2 pages
mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
if (mVMStart == MAP_FAILED) {
// *sigh*
ALOGE("Using %s failed: unable to mmap transaction memory.\n", mDriverName.c_str());
close(mDriverFD);
mDriverFD = -1;
mDriverName.clear();
}
}
#ifdef __ANDROID__
LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver '%s' could not be opened. Terminating.", driver);
#endif
}
This constructor does the following:
In the initializer list, it opens the device driver by calling open_driver().
If the open succeeds and mDriverFD is valid, it mmaps a buffer of size BINDER_VM_SIZE for receiving transaction data. The size is defined as:
frameworks/native/libs/binder/ProcessState.cpp
#define BINDER_VM_SIZE ((1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2) // 1 MB minus 2 pages
This size can be verified from the command line. Assuming servicemanager's pid is 510, then
cat /proc/510/maps shows:
748c323000-748c421000 r--p 00000000 00:1f 4 /dev/binderfs/binder
Don't be surprised that the path is not /dev/binder; /dev/binder is just a symlink:
lrwxrwxrwx 1 root root 20 1970-01-01 05:43 binder -> /dev/binderfs/binder
lrwxrwxrwx 1 root root 22 1970-01-01 05:43 hwbinder -> /dev/binderfs/hwbinder
lrwxrwxrwx 1 root root 22 1970-01-01 05:43 vndbinder -> /dev/binderfs/vndbinder
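As a quick sanity check on the arithmetic, a minimal sketch (assuming the usual 4 KB page size):

#include <cstdio>
#include <unistd.h>

int main() {
    long page = sysconf(_SC_PAGE_SIZE);        // typically 4096
    long vmSize = 1 * 1024 * 1024 - 2 * page;  // same formula as BINDER_VM_SIZE
    printf("BINDER_VM_SIZE = %ld (0x%lx)\n", vmSize, vmSize);
    // With 4 KB pages: 1048576 - 8192 = 1040384 = 0xfe000, which matches the
    // mapping above: 0x748c421000 - 0x748c323000 = 0xfe000.
    return 0;
}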
For both the binder and vndbinder devices, the ProcessState constructor calls open_driver() in its initializer list to open and initialize the device.
static int open_driver(const char *driver)
{
// Open /dev/binder read-write, with the close-on-exec flag enabled
// on the new file descriptor so that it is not accidentally leaked
// to child processes created by fork()
int fd = open(driver, O_RDWR | O_CLOEXEC);
if (fd >= 0) {
int vers = 0;
// Query the binder version (handled by binder_ioctl() in the driver); after a successful open, check that the version matches
status_t result = ioctl(fd, BINDER_VERSION, &vers);
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)! ioctl() return value: %d",
vers, BINDER_CURRENT_PROTOCOL_VERSION, result);
close(fd);
fd = -1;
}
size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
// If the binder version is current, tell the driver the maximum thread count
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
uint32_t enable = DEFAULT_ENABLE_ONEWAY_SPAM_DETECTION;
// Enable oneway spam detection
result = ioctl(fd, BINDER_ENABLE_ONEWAY_SPAM_DETECTION, &enable);
if (result == -1) {
ALOGD("Binder ioctl to enable oneway spam detection failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '%s' failed: %s\n", driver, strerror(errno));
}
return fd;
}
Note that the maximum number of binder threads each process creates is DEFAULT_MAX_BINDER_THREADS:
frameworks/native/libs/binder/ProcessState.cpp
#define DEFAULT_MAX_BINDER_THREADS 15
For servicemanager, main() sets this to 0, meaning servicemanager serves binder calls directly on its main thread; an ordinary service is limited to at most 15 binder threads.
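For comparison, an ordinary native service usually does the opposite: it keeps (or adjusts) the pool size and spins up binder threads. A minimal sketch using the standard libbinder calls, with the service object itself omitted:

#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>

using android::IPCThreadState;
using android::ProcessState;
using android::sp;

int main() {
    sp<ProcessState> ps = ProcessState::self();  // opens the default /dev/binder
    ps->setThreadPoolMaxThreadCount(4);          // override the default of 15
    ps->startThreadPool();                       // spawn the first pool thread
    IPCThreadState::self()->joinThreadPool();    // the main thread joins the pool too
    return 0;
}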
ProcessState is obtained via self(). Because binder and vndbinder share the same code, a process that needs vndbinder must call initWithDriver() to specify the driver device name before the first call to self(). If self() is called first, the singleton it returns is bound to kDefaultDriver:
frameworks/native/libs/binder/ProcessState.cpp
#ifdef __ANDROID_VNDK__
const char* kDefaultDriver = "/dev/vndbinder";
#else
const char* kDefaultDriver = "/dev/binder";
#endif
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != nullptr) {
return gProcess;
}
gProcess = new ProcessState(kDefaultDriver);
return gProcess;
}
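So a vendor process that wants to talk to vndservicemanager must pin the driver before anything else touches ProcessState. A minimal sketch:

#include <binder/ProcessState.h>

using android::ProcessState;
using android::sp;

int main() {
    // Must run before the first ProcessState::self(); otherwise the singleton
    // is already bound to kDefaultDriver.
    sp<ProcessState> ps = ProcessState::initWithDriver("/dev/vndbinder");
    // From here on, self() returns the vndbinder-backed instance.
    return 0;
}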
frameworks/native/libs/binder/ProcessState.cpp
status_t ProcessState::setThreadPoolMaxThreadCount(size_t maxThreads) {
LOG_ALWAYS_FATAL_IF(mThreadPoolStarted && maxThreads < mMaxThreads,
"Binder threadpool cannot be shrunk after starting");
status_t result = NO_ERROR;
if (ioctl(mDriverFD, BINDER_SET_MAX_THREADS, &maxThreads) != -1) {
mMaxThreads = maxThreads;
} else {
result = -errno;
ALOGE("Binder ioctl to set max threads failed: %s", strerror(-result));
}
return result;
}
When ProcessState is constructed, open_driver() runs; the servicemanager process then sets its binder thread count to 0.
The core interface is in ServiceManager.cpp. Construction takes a newly created Access object:
frameworks/native/cmds/servicemanager/ServiceManager.cpp
ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {
// TODO(b/151696835): reenable performance hack when we solve bug, since with
// this hack and other fixes, it is unlikely we will see even an ephemeral
// failure when the manifest parse fails. The goal is that the manifest will
// be read incorrectly and cause the process trying to register a HAL to
// fail. If this is in fact an early boot kernel contention issue, then we
// will get no failure, and by its absence, be signalled to invest more
// effort in re-adding this performance hack.
// #ifndef VENDORSERVICEMANAGER
// // can process these at any times, don't want to delay first VINTF client
// std::thread([] {
// vintf::VintfObject::GetDeviceHalManifest();
// vintf::VintfObject::GetFrameworkHalManifest();
// }).detach();
// #endif // !VENDORSERVICEMANAGER
}
ServiceManager::~ServiceManager() {
// this should only happen in tests
for (const auto& [name, callbacks] : mNameToRegistrationCallback) {
CHECK(!callbacks.empty()) << name;
for (const auto& callback : callbacks) {
CHECK(callback != nullptr) << name;
}
}
for (const auto& [name, service] : mNameToService) {
CHECK(service.binder != nullptr) << name;
}
}
The Access instance is moved into the member mAccess via std::move; mAccess is used to check servicemanager's permissions through SELinux.
frameworks/native/cmds/servicemanager/main.cpp
// Register servicemanager itself as a special service
if (!manager->addService("manager", manager, false /*allowIsolated*/, IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT).isOk()) {
LOG(ERROR) << "Could not self register servicemanager";
}
When any other service registers with servicemanager, the call goes through IServiceManager and reaches ServiceManager::addService() over binder; here, servicemanager registers itself by calling addService() directly on the ServiceManager object.
Status ServiceManager::addService(const std::string& name, const sp<IBinder>& binder, bool allowIsolated, int32_t dumpPriority) {
auto ctx = mAccess->getCallingContext();
// apps cannot add services
// UID check: verify whether an app is allowed to register a service
if (multiuser_get_app_id(ctx.uid) >= AID_APP) {
return Status::fromExceptionCode(Status::EX_SECURITY);
}
// SELinux check: is the caller allowed to register this name as SELABEL_CTX_ANDROID_SERVICE?
if (!mAccess->canAdd(ctx, name)) {
return Status::fromExceptionCode(Status::EX_SECURITY);
}
// The IBinder being registered must not be nullptr
if (binder == nullptr) {
return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT);
}
// Validate the name: only 0-9, a-z, A-Z, underscore, dash, dot and slash are allowed, and the length must not exceed 127
if (!isValidServiceName(name)) {
LOG(ERROR) << "Invalid service name: " << name;
return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT);
}
// For servicemanager (not vndservicemanager), check the VINTF manifest declaration
#ifndef VENDORSERVICEMANAGER
if (!meetsDeclarationRequirements(binder, name)) {
// already logged
return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT);
}
#endif // !VENDORSERVICEMANAGER
// For remote binders: register a death notification via linkToDeath() to track the service's state
// implicitly unlinked when the binder is removed
if (binder->remoteBinder() != nullptr &&
binder->linkToDeath(sp<ServiceManager>::fromExisting(this)) != OK) {
LOG(ERROR) << "Could not linkToDeath when adding " << name;
return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
}
// Overwrite the old service if it exists
// Add a new Service struct to the map, recording the service's info in servicemanager.
// mNameToService is simply a name->service map: when an addService() call reaches
// ServiceManager, the new service ends up stored here, and a client can later fetch
// the service for a given name through getService().
mNameToService[name] = Service {
.binder = binder,
.allowIsolated = allowIsolated,
.dumpPriority = dumpPriority,
.debugPid = ctx.debugPid,
};
// Supports the cross-process waitForService() mentioned in the architecture: if registration callbacks exist for this name, invoke them
auto it = mNameToRegistrationCallback.find(name);
if (it != mNameToRegistrationCallback.end()) {
for (const sp<IServiceCallback>& cb : it->second) {
mNameToService[name].guaranteeClient = true;
// permission checked in registerForNotifications
cb->onRegistration(name, binder);
}
}
return Status::ok();
}
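For contrast, here is how an ordinary process registers a service over the normal binder path, via IServiceManager. This is a minimal sketch: the name "myservice" and the bare BBinder are placeholders; a real service would subclass BBinder or use an AIDL-generated stub:

#include <binder/Binder.h>
#include <binder/IServiceManager.h>

using namespace android;

int main() {
    sp<IBinder> service = sp<BBinder>::make();  // placeholder service object
    // Travels over /dev/binder and lands in ServiceManager::addService().
    status_t err = defaultServiceManager()->addService(String16("myservice"), service);
    return err == OK ? 0 : 1;
}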
// Store the ServiceManager in IPCThreadState's global variable
IPCThreadState::self()->setTheContextObject(manager);
// Register with the driver to become the binder context manager
ps->becomeContextManager();
The first line creates the IPCThreadState and stores the ServiceManager in it for later use in transact().
The second line tells the binder driver that servicemanager will become the system's service steward, corresponding to handle 0: result = ioctl(mDriverFD, BINDER_SET_CONTEXT_MGR, &unused);
First, look at IPCThreadState::self():
frameworks/native/libs/binder/IPCThreadState.cpp
// After a thread is created, pthread_getspecific() checks whether an IPCThreadState
// already exists in TLS; if so it is returned directly, otherwise a new one is created.
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS.load(std::memory_order_acquire)) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
// Instantiate and return an IPCThreadState object
return new IPCThreadState;
}
// Racey, heuristic test for simultaneous shutdown.
if (gShutdown.load(std::memory_order_relaxed)) {
ALOGW("Calling IPCThreadState::self() during shutdown is dangerous, expect a crash.\n");
return nullptr;
}
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS.load(std::memory_order_relaxed)) {
int key_create_value = pthread_key_create(&gTLS, threadDestructor);
if (key_create_value != 0) {
pthread_mutex_unlock(&gTLSMutex);
ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
strerror(key_create_value));
return nullptr;
}
gHaveTLS.store(true, std::memory_order_release);
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}
frameworks/native/libs/binder/IPCThreadState.cpp
void IPCThreadState::setTheContextObject(const sp<BBinder>& obj)
{
the_context_object = obj;
}
This assigns the newly created ServiceManager to the_context_object.
bool ProcessState::becomeContextManager()
{
AutoMutex _l(mLock);
flat_binder_object obj {
.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX,
};
int result = ioctl(mDriverFD, BINDER_SET_CONTEXT_MGR_EXT, &obj);
// fallback to original method
if (result != 0) {
android_errorWriteLog(0x534e4554, "121035042");
int unused = 0;
result = ioctl(mDriverFD, BINDER_SET_CONTEXT_MGR, &unused);
}
if (result == -1) {
ALOGE("Binder ioctl to become context manager failed: %s\n", strerror(errno));
}
return result == 0;
}
Issuing the BINDER_SET_CONTEXT_MGR_EXT command tells the driver to execute binder_ioctl_set_ctx_mgr:
drivers/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
...
case BINDER_SET_CONTEXT_MGR_EXT: {
struct flat_binder_object fbo;
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
ret = -EINVAL;
goto err;
}
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
if (ret)
goto err;
break;
}
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
...
...
}
The main difference between BINDER_SET_CONTEXT_MGR and BINDER_SET_CONTEXT_MGR_EXT is whether a flat_binder_object is passed along; both end up calling binder_ioctl_set_ctx_mgr():
drivers/android/binder.c
static int binder_ioctl_set_ctx_mgr(struct file *filp,
struct flat_binder_object *fbo)
{
int ret = 0;
// The process's binder_proc; here it is ServiceManager's binder_proc, created earlier by open("/dev/binder")
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
struct binder_node *new_node;
kuid_t curr_euid = current_euid(); // the calling thread's effective uid
mutex_lock(&context->context_mgr_node_lock);
// Normally null on the first call; if non-null, this process already set a context manager, so bail out
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto out;
}
// Check whether the current process has the SEAndroid permission to register as Context Manager
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto out;
if (uid_valid(context->binder_context_mgr_uid)) {
// Compare binder_context_mgr_uid with the current euid; error out if they differ
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
context->binder_context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
context->binder_context_mgr_uid = curr_euid;
}
// Create the binder_node object
new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
}
binder_node_lock(new_node);
new_node->local_weak_refs++;
new_node->local_strong_refs++;
new_node->has_strong_ref = 1;
new_node->has_weak_ref = 1;
// Store the new node in context->binder_context_mgr_node; it becomes ServiceManager's binder management entity
context->binder_context_mgr_node = new_node;
binder_node_unlock(new_node);
binder_put_node(new_node);
out:
mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
The flow of binder_ioctl_set_ctx_mgr() is also fairly simple:
1. Check whether the current process has the SEAndroid permission to register as Context Manager.
2. If it has the SELinux permission, create a dedicated binder_node for the system-wide context manager, with its strong and weak references set to 1.
3. Record the newly created binder_node in context->binder_context_mgr_node, i.e. the context binder node of the ServiceManager process, making it servicemanager's binder management entity.
// Prepare the looper
sp<Looper> looper = Looper::prepare(false /*allowNonCallbacks*/);
The driver fd is then watched via epoll, which Looper wraps:
class BinderCallback : public LooperCallback {
public:
static sp<BinderCallback> setupTo(const sp<Looper>& looper) {
sp<BinderCallback> cb = sp<BinderCallback>::make();
int binder_fd = -1;
// Get the main thread's binder fd and send BC_ENTER_LOOPER to the driver
IPCThreadState::self()->setupPolling(&binder_fd);
LOG_ALWAYS_FATAL_IF(binder_fd < 0, "Failed to setupPolling: %d", binder_fd);
// Add binder_fd to the looper's epoll set and register the callback; handleEvent will be called back
int ret = looper->addFd(binder_fd,
Looper::POLL_CALLBACK,
Looper::EVENT_INPUT,
cb,
nullptr /*data*/);
LOG_ALWAYS_FATAL_IF(ret != 1, "Failed to add binder FD to Looper");
return cb;
}
// Called back when epoll reports an event on this fd
int handleEvent(int /* fd */, int /* events */, void* /* data */) override {
IPCThreadState::self()->handlePolledCommands();
return 1; // Continue receiving callbacks.
}
};
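To see the Looper mechanism in isolation from binder, here is a sketch that watches an ordinary pipe fd the same way BinderCallback watches the binder fd (PipeCallback is a made-up name, not part of the platform):

#include <cstdio>
#include <unistd.h>
#include <utils/Looper.h>

using android::Looper;
using android::LooperCallback;
using android::sp;

class PipeCallback : public LooperCallback {
    int handleEvent(int fd, int /*events*/, void* /*data*/) override {
        char buf[16];
        ssize_t n = read(fd, buf, sizeof(buf));
        printf("fd %d became readable (%zd bytes)\n", fd, n);
        return 1; // keep receiving callbacks for this fd
    }
};

int main() {
    int fds[2];
    pipe(fds);
    sp<Looper> looper = Looper::prepare(false /*allowNonCallbacks*/);
    looper->addFd(fds[0], Looper::POLL_CALLBACK, Looper::EVENT_INPUT,
                  sp<PipeCallback>::make(), nullptr /*data*/);
    write(fds[1], "x", 1); // make the read end readable
    looper->pollAll(-1);   // epoll_wait() inside; dispatches handleEvent() once
    return 0;
}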
In fact, every ordinary service, once created, calls ProcessState::startThreadPool() to produce a main IPC thread, which in turn spawns further binder threads via IPCThreadState::joinThreadPool(). servicemanager needs no extra threads, so it simply keeps listening on the main thread with a Looper.
The core job of every IPCThreadState is to listen for and process the traffic exchanged with the binder driver, and all of that happens in **getAndExecuteCommand()**.
Next we analyze:
IPCThreadState::self()->setupPolling(&binder_fd);
IPCThreadState::self()->handlePolledCommands();
and the classes and methods involved.
Like ProcessState, IPCThreadState records per-thread state, and a process may have many binder threads. After every BINDER_WRITE_READ call, the driver decides whether the process needs to spawn another thread; each PoolThread created is managed by an IPCThreadState, and all operations on a binder thread go through its IPCThreadState.
frameworks/native/libs/binder/IPCThreadState.cpp
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
mServingStackPointer(nullptr),
mWorkSource(kUnsetWorkSource),
mPropagateWorkSource(false),
mIsLooper(false),
mIsFlushing(false),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0),
mCallRestriction(mProcess->mCallRestriction) {
pthread_setspecific(gTLS, this);
clearCaller();
mIn.setDataCapacity(256);
mOut.setDataCapacity(256);
}
As shown in IPCThreadState::self() earlier, after a thread is created it uses pthread_getspecific() to check whether an IPCThreadState already exists in TLS; if so it is returned directly, otherwise a new one is created.
// Does two things: queues BC_ENTER_LOOPER to notify the driver that this thread enters the loop, and returns the driver fd
status_t IPCThreadState::setupPolling(int* fd)
{
if (mProcess->mDriverFD < 0) {
return -EBADF;
}
mOut.writeInt32(BC_ENTER_LOOPER);
// Flush the queued command to the driver; here it is BC_ENTER_LOOPER
flushCommands();
*fd = mProcess->mDriverFD;
return 0;
}
void IPCThreadState::flushCommands()
{
if (mProcess->mDriverFD < 0)
return;
talkWithDriver(false);
// The flush could have caused post-write refcount decrements to have
// been executed, which in turn could result in BC_RELEASE/BC_DECREFS
// being queued in mOut. So flush again, if we need to.
if (mOut.dataSize() > 0) {
talkWithDriver(false);
}
if (mOut.dataSize() > 0) {
ALOGW("mOut.dataSize() > 0 after flushCommands()");
}
}
frameworks/native/libs/binder/IPCThreadState.cpp
// IPCThreadState holds two buffers: mIn for reading data from the driver,
// mOut for writing data to the driver. The core is the do...while loop that
// talks to the driver via BINDER_WRITE_READ; if the ioctl is not interrupted
// by a signal, the loop returns after processing completes
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD < 0) {
return -EBADF;
}
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
if (outAvail != 0) {
alog << "Sending commands to driver: " << indent;
const void* cmds = (const void*)bwr.write_buffer;
const void* end = ((const uint8_t*)cmds)+bwr.write_size;
alog << HexDump(cmds, bwr.write_size) << endl;
while (cmds < end) cmds = printCommand(alog, cmds);
alog << dedent;
}
alog << "Size of receive buffer: " << bwr.read_size
<< ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
IF_LOG_COMMANDS() {
alog << "About to read/write, write size = " << mOut.dataSize() << endl;
}
#if defined(__ANDROID__)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD < 0) {
err = -EBADF;
}
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);
IF_LOG_COMMANDS() {
alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
<< bwr.write_consumed << " (of " << mOut.dataSize()
<< "), read consumed: " << bwr.read_consumed << endl;
}
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
LOG_ALWAYS_FATAL("Driver did not consume write buffer. "
"err: %s consumed: %zu of %zu",
statusToString(err).c_str(),
(size_t)bwr.write_consumed,
mOut.dataSize());
else {
mOut.setDataSize(0);
processPostWriteDerefs();
}
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
alog << "Remaining data size: " << mOut.dataSize() << endl;
alog << "Received commands from driver: " << indent;
const void* cmds = mIn.data();
const void* end = mIn.data() + mIn.dataSize();
alog << HexDump(cmds, mIn.dataSize()) << endl;
while (cmds < end) cmds = printReturnCommand(alog, cmds);
alog << dedent;
}
return NO_ERROR;
}
return err;
}
This completes the trace of the setupPolling() path used in BinderCallback::setupTo(looper).
Next, IPCThreadState::self()->handlePolledCommands():
frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::handlePolledCommands()
{
status_t result;
do {
result = getAndExecuteCommand();
} while (mIn.dataPosition() < mIn.dataSize());
processPendingDerefs();
flushCommands();
return result;
}
Inside its do {} while() loop, this method calls getAndExecuteCommand():
status_t IPCThreadState::getAndExecuteCommand()
{
status_t result;
int32_t cmd;
// Step 1: talk to the binder driver and wait for it to return
result = talkWithDriver();
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) return result;
// Step 2: parse the reply command from the binder driver
cmd = mIn.readInt32();
IF_LOG_COMMANDS() {
alog << "Processing top-level Command: "
<< getReturnString(cmd) << endl;
}
// Step 3: track the number of executing binder threads.
// system_server kicks a watchdog; when the executing thread count reaches the maximum, the monitor blocks until enough threads are free
pthread_mutex_lock(&mProcess->mThreadCountLock);
mProcess->mExecutingThreadsCount++;
if (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads &&
mProcess->mStarvationStartTimeMs == 0) {
mProcess->mStarvationStartTimeMs = uptimeMillis();
}
pthread_mutex_unlock(&mProcess->mThreadCountLock);
// Step 4: the core userspace handler for binder communication; dispatch on the reply command
result = executeCommand(cmd);
// Step 5: when executeCommand() finishes, decrement the thread count and broadcast the condition variable when needed
pthread_mutex_lock(&mProcess->mThreadCountLock);
mProcess->mExecutingThreadsCount--;
if (mProcess->mExecutingThreadsCount < mProcess->mMaxThreads &&
mProcess->mStarvationStartTimeMs != 0) {
int64_t starvationTimeMs = uptimeMillis() - mProcess->mStarvationStartTimeMs;
if (starvationTimeMs > 100) {
ALOGE("binder thread pool (%zu threads) starved for %" PRId64 " ms",
mProcess->mMaxThreads, starvationTimeMs);
}
mProcess->mStarvationStartTimeMs = 0;
}
// Cond broadcast can be expensive, so don't send it every time a binder
// call is processed. b/168806193
if (mProcess->mWaitingForThreads > 0) {
pthread_cond_broadcast(&mProcess->mThreadCountDecrement);
}
pthread_mutex_unlock(&mProcess->mThreadCountLock);
}
return result;
}
The logic is fairly simple and falls into three parts:
1. talkWithDriver() exchanges data with the binder driver and checks whether the return value indicates an error;
2. the executing thread count is tracked (system_server uses this to feed its watchdog);
3. executeCommand() performs the core processing.
The core of a binder thread is the handling of results after **talkWithDriver()**:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_ERROR:
result = mIn.readInt32();
break;
case BR_OK:
break;
case BR_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
obj->incStrong(mProcess.get());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_ACQUIRE from driver on %p", obj);
obj->printRefs();
}
mOut.writeInt32(BC_ACQUIRE_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
case BR_RELEASE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_RELEASE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_RELEASE from driver on %p", obj);
obj->printRefs();
}
mPendingStrongDerefs.push(obj);
break;
case BR_INCREFS:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
refs->incWeak(mProcess.get());
mOut.writeInt32(BC_INCREFS_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
case BR_DECREFS:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
// NOTE: This assertion is not valid, because the object may no
// longer exist (thus the (BBinder*)cast above resulting in a different
// memory address).
//ALOG_ASSERT(refs->refBase() == obj,
// "BR_DECREFS: object %p does not match cookie %p (expected %p)",
// refs, obj, refs->refBase());
mPendingWeakDerefs.push(refs);
break;
case BR_ATTEMPT_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
{
const bool success = refs->attemptIncStrong(mProcess.get());
ALOG_ASSERT(success && refs->refBase() == obj,
"BR_ATTEMPT_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
mOut.writeInt32(BC_ACQUIRE_RESULT);
mOut.writeInt32((int32_t)success);
}
break;
case BR_TRANSACTION_SEC_CTX:
case BR_TRANSACTION:
{
binder_transaction_data_secctx tr_secctx;
binder_transaction_data& tr = tr_secctx.transaction_data;
if (cmd == (int) BR_TRANSACTION_SEC_CTX) {
result = mIn.read(&tr_secctx, sizeof(tr_secctx));
} else {
result = mIn.read(&tr, sizeof(tr));
tr_secctx.secctx = 0;
}
ALOG_ASSERT(result == NO_ERROR,
"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer);
const void* origServingStackPointer = mServingStackPointer;
mServingStackPointer = &origServingStackPointer; // anything on the stack
const pid_t origPid = mCallingPid;
const char* origSid = mCallingSid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
const int32_t origWorkSource = mWorkSource;
const bool origPropagateWorkSet = mPropagateWorkSource;
// Calling work source will be set by Parcel#enforceInterface. Parcel#enforceInterface
// is only guaranteed to be called for AIDL-generated stubs so we reset the work source
// here to never propagate it.
clearCallingWorkSource();
clearPropagateWorkSource();
mCallingPid = tr.sender_pid;
mCallingSid = reinterpret_cast<const char*>(tr_secctx.secctx);
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
// ALOGI(">>>> TRANSACT from pid %d sid %s uid %d\n", mCallingPid,
// (mCallingSid ? mCallingSid : ""), mCallingUid);
Parcel reply;
status_t error;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_TRANSACTION thr " << (void*)pthread_self()
<< " / obj " << tr.target.ptr << " / code "
<< TypeCode(tr.code) << ": " << indent << buffer
<< dedent << endl
<< "Data addr = "
<< reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
<< ", offsets addr="
<< reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
}
// A non-zero handle (tr.target.ptr set) is handled through that BBinder's transact().
// For handle 0, i.e. a BR_TRANSACTION received by servicemanager, transact() is invoked
// on the_context_object (servicemanager saved its context object in this global at startup).
// Either way, the transaction ultimately goes through BBinder::transact().
// After transact() returns, unless the call was TF_ONE_WAY, the server passes the
// result back to the client via sendReply().
if (tr.target.ptr) {
// We only have a weak reference on the target object, so we must first try to
// safely acquire a strong reference before doing anything else with it.
if (reinterpret_cast<RefBase::weakref_type*>(
tr.target.ptr)->attemptIncStrong(this)) {
error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
&reply, tr.flags);
reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
} else {
error = UNKNOWN_TRANSACTION;
}
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
//ALOGI("<<<< TRANSACT from pid %d restore pid %d sid %s uid %d\n",
// mCallingPid, origPid, (origSid ? origSid : ""), origUid);
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
constexpr uint32_t kForwardReplyFlags = TF_CLEAR_BUF;
sendReply(reply, (tr.flags & kForwardReplyFlags));
} else {
if (error != OK) {
alog << "oneway function results for code " << tr.code
<< " on binder at "
<< reinterpret_cast<void*>(tr.target.ptr)
<< " will be dropped but finished with status "
<< statusToString(error);
// ideally we could log this even when error == OK, but it
// causes too much logspam because some manually-written
// interfaces have clients that call methods which always
// write results, sometimes as oneway methods.
if (reply.dataSize() != 0) {
alog << " and reply parcel size " << reply.dataSize();
}
alog << endl;
}
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
mServingStackPointer = origServingStackPointer;
mCallingPid = origPid;
mCallingSid = origSid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
mWorkSource = origWorkSource;
mPropagateWorkSource = origPropagateWorkSet;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
<< tr.target.ptr << ": " << indent << reply << dedent << endl;
}
}
break;
case BR_DEAD_BINDER:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->sendObituary();
mOut.writeInt32(BC_DEAD_BINDER_DONE);
mOut.writePointer((uintptr_t)proxy);
} break;
case BR_CLEAR_DEATH_NOTIFICATION_DONE:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->getWeakRefs()->decWeak(proxy);
} break;
case BR_FINISHED:
result = TIMED_OUT;
break;
case BR_NOOP:
break;
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
break;
default:
ALOGE("*** BAD COMMAND %d received from Binder driver\n", cmd);
result = UNKNOWN_ERROR;
break;
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
This completes the walkthrough of the ServiceManager process startup. The basic flow:
1. Based on the command-line argument, start on /dev/binder or /dev/vndbinder.
2. Open and initialize the device driver via ProcessState::initWithDriver(), and set the process's maximum thread count to 0.
3. Instantiate ServiceManager and register it, as a special service, into its own mNameToService map.
4. Store the special context object in IPCThreadState, and notify the driver through ProcessState that this process is the context manager.
5. Through BinderCallback, notify the driver that servicemanager is ready with BC_ENTER_LOOPER.
6. Add the driver fd to the Looper's epoll set, with handleEvent() as the callback.
7. In handleEvent(), process the polled commands and handle all incoming traffic.
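Once servicemanager is looping, any client can reach it through handle 0. A minimal client-side sketch ("myservice" is again a placeholder name):

#include <binder/IServiceManager.h>

using namespace android;

int main() {
    // defaultServiceManager() wraps handle 0; checkService() does a
    // non-blocking lookup in servicemanager's mNameToService map.
    sp<IBinder> binder = defaultServiceManager()->checkService(String16("myservice"));
    return binder != nullptr ? 0 : 1;
}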