On the server side of Binder IPC you will usually find a statement like this:
IPCThreadState::self()->joinThreadPool();
As the name suggests, it is thread-related. Like ProcessState::self(), IPCThreadState::self() is a static member function of the IPCThreadState class: it belongs to the class, not to any particular object.
class IPCThreadState
{
public:
    static IPCThreadState* self();
    ...
};
Its implementation looks like this:
IPCThreadState* IPCThreadState::self()
{
    // gHaveTLS records whether the TLS (Thread Local Storage) key has been created yet.
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        // The key exists, so look up the value this thread stored under gTLS.
        // This is a one-to-many relationship: all threads share the same key,
        // but the value stored under it (a pointer to an IPCThreadState object)
        // is private to each thread, so every thread gets its own instance.
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        // This thread has no value under gTLS yet, so create a new IPCThreadState;
        // its constructor stores the object into this thread's TLS slot for gTLS.
        return new IPCThreadState;
    }
    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) { // gHaveTLS is false: the process has no TLS key yet, so create one.
        // Create the thread-local storage key gTLS.
        int key_create_value = pthread_key_create(&gTLS, threadDestructor);
        if (key_create_value != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            return NULL;
        }
        gHaveTLS = true; // The key now exists, so mark gHaveTLS accordingly.
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}
The analysis above repeatedly mentions thread-local storage (TLS). It is storage private to each thread: one thread cannot reach into another thread's slot, so each slot is an independent space (consult any pthreads reference for details; they are not covered here). Here it is used to hold the IPCThreadState object that every thread owns independently. The pattern, stripped of the Binder specifics, is sketched below.
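A minimal, self-contained sketch of this one-instance-per-thread pattern follows. It uses pthread_once where the original uses a mutex plus the gHaveTLS flag, and PerThread is a made-up name; it is an illustration of the technique, not Android code:

#include <pthread.h>

class PerThread {
public:
    static PerThread* self() {
        pthread_once(&sOnce, makeKey);         // create the key exactly once, process-wide
        void* p = pthread_getspecific(sKey);   // this thread's private slot
        if (p) return static_cast<PerThread*>(p);
        PerThread* st = new PerThread;         // first call on this thread
        pthread_setspecific(sKey, st);         // remember it in this thread's slot
        return st;
    }
private:
    static void makeKey() { pthread_key_create(&sKey, destructor); }
    static void destructor(void* p) { delete static_cast<PerThread*>(p); }
    static pthread_once_t sOnce;
    static pthread_key_t sKey;
};

pthread_once_t PerThread::sOnce = PTHREAD_ONCE_INIT;
pthread_key_t PerThread::sKey;

The only structural difference from IPCThreadState::self() is that the real code stores the object from inside the IPCThreadState constructor rather than at the call site.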
Now look at the IPCThreadState constructor:
IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this); // Store this IPCThreadState in the thread's gTLS slot.
    clearCaller();
    mIn.setDataCapacity(256);  // Initial size of this thread's private input buffer.
    mOut.setDataCapacity(256); // Initial size of this thread's private output buffer.
}
The purpose of IPCThreadState::joinThreadPool() is to put the thread into a loop: wait for a command, parse it, and respond. Its body is:
void IPCThreadState::joinThreadPool(bool isMain) // isMain defaults to true in the declaration
{
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    status_t result;
    do {
        processPendingDerefs();
        // now get the next command to be processed, waiting if necessary
        result = getAndExecuteCommand(); // The main work of joinThreadPool() happens here.
    } while (result != -ECONNREFUSED && result != -EBADF);
    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
As the code shows, IPCThreadState::joinThreadPool() sits in a do…while() loop, continuously reading command data, parsing it, and issuing the corresponding response. A typical native server ends up here as shown below.
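For context, here is a minimal sketch of how a native service's main thread typically enters this loop (service creation and registration are omitted; the skeleton itself is the standard native-service pattern):

#include <binder/ProcessState.h>
#include <binder/IPCThreadState.h>

using namespace android;

int main()
{
    // Spawn the process's pooled binder threads (each spawned thread also
    // ends up in joinThreadPool()), then donate the main thread to the pool.
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool(); // isMain defaults to true
    return 0;
}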
status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;
    result = talkWithDriver(); // Exchange data with the driver (read the next command).
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;
        cmd = mIn.readInt32();
        IF_LOG_COMMANDS() {
            alog << "Processing top-level Command: "
                 << getReturnString(cmd) << endl;
        }
        // Count this thread as busy; if every thread in the pool is now busy,
        // record when that started (thread-pool starvation detection).
        pthread_mutex_lock(&mProcess->mThreadCountLock);
        mProcess->mExecutingThreadsCount++;
        if (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads &&
            mProcess->mStarvationStartTimeMs == 0) {
            mProcess->mStarvationStartTimeMs = uptimeMillis();
        }
        pthread_mutex_unlock(&mProcess->mThreadCountLock);
        result = executeCommand(cmd); // Execute the command and produce the response.
        pthread_mutex_lock(&mProcess->mThreadCountLock);
        mProcess->mExecutingThreadsCount--;
        if (mProcess->mExecutingThreadsCount < mProcess->mMaxThreads &&
            mProcess->mStarvationStartTimeMs != 0) {
            int64_t starvationTimeMs = uptimeMillis() - mProcess->mStarvationStartTimeMs;
            if (starvationTimeMs > 100) {
                ALOGE("binder thread pool (%zu threads) starved for %" PRId64 " ms",
                      mProcess->mMaxThreads, starvationTimeMs);
            }
            mProcess->mStarvationStartTimeMs = 0;
        }
        pthread_cond_broadcast(&mProcess->mThreadCountDecrement);
        pthread_mutex_unlock(&mProcess->mThreadCountLock);
    }
    return result;
}
The two functions talkWithDriver() and executeCommand(cmd) are covered in detail later in this section, so they are not expanded here.
In Binder IPC, every data send on the client side eventually goes through this function:
IPCThreadState::transact(int32_t handle, uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
so its implementation is worth analyzing. The command it mainly sends is BC_TRANSACTION, where the BC prefix stands for Binder Command.
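For orientation, the BC_* and BR_* codes used throughout this section come from two enums in the kernel's binder UAPI header. An abridged excerpt (the exact contents vary slightly across kernel versions):

/* Abridged from the kernel's binder UAPI header. */
enum binder_driver_command_protocol {
    BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
    BC_REPLY       = _IOW('c', 1, struct binder_transaction_data),
    /* ... */
};

enum binder_driver_return_protocol {
    BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
    BR_REPLY       = _IOR('r', 3, struct binder_transaction_data),
    /* ... */
};

With that in mind, here is the implementation: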
status_t IPCThreadState::transact(int32_t handle, uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();
    flags |= TF_ACCEPT_FDS;
    if (err == NO_ERROR) {
        // Pack flags, handle and code together with the payload into a
        // binder_transaction_data structure, then write it into the Parcel mOut.
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    // TF_ONE_WAY marks a one-way call that needs no reply.
    if ((flags & TF_ONE_WAY) == 0) { // TF_ONE_WAY not set: a reply is expected.
        if (reply) { // The reply data will be stored in the Parcel reply.
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply; // If no Parcel reply was passed in, a temporary one is created to receive the reply data.
            err = waitForResponse(&fakeReply);
        }
    } else { // TF_ONE_WAY is set: no reply is expected.
        err = waitForResponse(NULL, NULL);
    }
    return err;
}
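From an application's point of view this function is reached through IBinder::transact() on a proxy (BpBinder), which forwards to the IPCThreadState::transact() shown above. A hypothetical client-side call, where the service name "demo.service" and the use of FIRST_CALL_TRANSACTION as a "hello" code are made up for illustration:

#include <binder/IServiceManager.h>
#include <binder/Parcel.h>
#include <utils/String16.h>
#include <utils/Errors.h>

using namespace android;

status_t callDemoService()
{
    // Look up a (hypothetical) service registered as "demo.service".
    sp<IBinder> binder = defaultServiceManager()->getService(String16("demo.service"));
    if (binder == NULL) return NAME_NOT_FOUND;

    Parcel data, reply;
    data.writeString16(String16("hello"));
    // BpBinder::transact() forwards to IPCThreadState::self()->transact(),
    // the function analyzed above; the reply Parcel is filled from BR_REPLY.
    status_t err = binder->transact(IBinder::FIRST_CALL_TRANSACTION, data, &reply);
    if (err == NO_ERROR) {
        int32_t result = reply.readInt32();
        (void)result;
    }
    return err;
}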
Next, IPCThreadState::writeTransactionData() deserves a closer look:
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;
    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle; // Handle of the Binder reference on the proxy side.
    tr.code = code;            // The action the server side is asked to perform.
    tr.flags = binderFlags;    // Transaction flags.
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;
    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }
    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr)); // Finally write the binder_transaction_data tr into the Parcel mOut.
    return NO_ERROR;
}
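For reference, the structure being filled here is defined in the kernel's binder UAPI header; slightly abridged, it looks like this:

struct binder_transaction_data {
    union {
        __u32            handle; /* target descriptor of a command transaction */
        binder_uintptr_t ptr;    /* target descriptor of a return transaction */
    } target;
    binder_uintptr_t cookie;     /* target object cookie */
    __u32 code;                  /* transaction command */
    __u32 flags;                 /* e.g. TF_ONE_WAY, TF_STATUS_CODE, TF_ACCEPT_FDS */
    pid_t sender_pid;            /* filled in by the driver */
    uid_t sender_euid;           /* filled in by the driver */
    binder_size_t data_size;     /* number of bytes of data */
    binder_size_t offsets_size;  /* number of bytes of object offsets */
    union {
        struct {
            binder_uintptr_t buffer;  /* transaction payload */
            binder_uintptr_t offsets; /* offsets of flat_binder_object structures in the payload */
        } ptr;
        __u8 buf[8];
    } data;
};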
After IPCThreadState::writeTransactionData() has queued the data, IPCThreadState::waitForResponse() is called to send it to the Binder driver and wait for the driver's response; both sending and receiving happen inside this function:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break; // Exchange data with the Binder driver.
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        cmd = (uint32_t)mIn.readInt32();
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;
            goto finish;
        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;
            goto finish;
        case BR_ACQUIRE_RESULT:
            {
                const int32_t result = mIn.readInt32();
                if (!acquireResult) continue;
                *acquireResult = result ? NO_ERROR : INVALID_OPERATION;
            }
            goto finish;
        case BR_REPLY: // Parse the reply from the server side, delivered by the Binder driver.
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;
                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;
        default:
            err = executeCommand(cmd); // Handles the remaining return commands, notably an incoming BR_TRANSACTION delivered while waiting.
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }
    return err;
}
Two key functions remain: IPCThreadState::talkWithDriver() and executeCommand(cmd). talkWithDriver() is the one that actually communicates with the Binder driver, while executeCommand(cmd) handles the remaining return commands coming from the driver, chiefly BR_TRANSACTION. The BR prefix stands for Binder Return, the driver's response protocol.
status_t IPCThreadState::talkWithDriver(bool doReceive) // doReceive defaults to true in the declaration
{
    binder_write_read bwr;
    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize(); // mIn's read position has reached the end of its data.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    bwr.write_size = outAvail;                 // Number of bytes to send.
    bwr.write_buffer = (uintptr_t)mOut.data(); // Start address of the data to send.
    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();      // Capacity available for the response data.
        bwr.read_buffer = (uintptr_t)mIn.data(); // The response is read into mIn's buffer, starting at mIn.data().
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        // The ioctl that actually exchanges data with the Binder driver. The
        // BINDER_WRITE_READ command both sends data and reads the driver's
        // response data (when a response is expected) in a single call.
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
    } while (err == -EINTR);
    if (err >= NO_ERROR) {
        // The driver reports how much it consumed: drop the commands it took
        // from mOut, and expose the data it returned through mIn.
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }
    return err;
}
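The structure driving this exchange is also part of the kernel's binder UAPI header:

struct binder_write_read {
    binder_size_t    write_size;     /* bytes to write (the BC_* stream from mOut) */
    binder_size_t    write_consumed; /* bytes actually consumed by the driver */
    binder_uintptr_t write_buffer;
    binder_size_t    read_size;      /* bytes available to read (the BR_* stream into mIn) */
    binder_size_t    read_consumed;  /* bytes actually filled by the driver */
    binder_uintptr_t read_buffer;
};

Because one ioctl carries both directions, a round trip such as BC_TRANSACTION out and BR_REPLY back can complete in very few system calls.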
The main job of executeCommand(cmd) on the server side is to handle BR_TRANSACTION, which is how the driver delivers a client's BC_TRANSACTION to the target process. The function body is as follows:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;
    switch ((uint32_t)cmd) {
    case BR_ERROR:
        result = mIn.readInt32();
        break;
    case BR_OK:
        break;
    case BR_ACQUIRE:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        obj->incStrong(mProcess.get());
        mOut.writeInt32(BC_ACQUIRE_DONE);
        mOut.writePointer((uintptr_t)refs);
        mOut.writePointer((uintptr_t)obj);
        break;
    case BR_RELEASE:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        mPendingStrongDerefs.push(obj);
        break;
    case BR_INCREFS:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        refs->incWeak(mProcess.get());
        mOut.writeInt32(BC_INCREFS_DONE);
        mOut.writePointer((uintptr_t)refs);
        mOut.writePointer((uintptr_t)obj);
        break;
    case BR_DECREFS:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        mPendingWeakDerefs.push(refs);
        break;
    case BR_ATTEMPT_ACQUIRE:
        refs = (RefBase::weakref_type*)mIn.readPointer();
        obj = (BBinder*)mIn.readPointer();
        {
            const bool success = refs->attemptIncStrong(mProcess.get());
            mOut.writeInt32(BC_ACQUIRE_RESULT);
            mOut.writeInt32((int32_t)success);
        }
        break;
    case BR_TRANSACTION: // Delivered by the driver when a client has sent BC_TRANSACTION to this process.
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            if (result != NO_ERROR) break;
            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
            const pid_t origPid = mCallingPid;
            const uid_t origUid = mCallingUid;
            const int32_t origStrictModePolicy = mStrictModePolicy;
            const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
            mCallingPid = tr.sender_pid;
            mCallingUid = tr.sender_euid;
            mLastTransactionBinderFlags = tr.flags;
            Parcel reply;
            status_t error;
            if (tr.target.ptr) {
                // We only have a weak reference on the target object, so we must first try to
                // safely acquire a strong reference before doing anything else with it.
                if (reinterpret_cast<RefBase::weakref_type*>(
                        tr.target.ptr)->attemptIncStrong(this)) {
                    error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                            &reply, tr.flags);
                    reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
                } else {
                    error = UNKNOWN_TRANSACTION;
                }
            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }
            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                if (error < NO_ERROR) reply.setError(error);
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }
            mCallingPid = origPid;
            mCallingUid = origUid;
            mStrictModePolicy = origStrictModePolicy;
            mLastTransactionBinderFlags = origTransactionBinderFlags;
        }
        break;
    case BR_DEAD_BINDER:
        {
            BpBinder *proxy = (BpBinder*)mIn.readPointer();
            proxy->sendObituary();
            mOut.writeInt32(BC_DEAD_BINDER_DONE);
            mOut.writePointer((uintptr_t)proxy);
        }
        break;
    case BR_CLEAR_DEATH_NOTIFICATION_DONE:
        {
            BpBinder *proxy = (BpBinder*)mIn.readPointer();
            proxy->getWeakRefs()->decWeak(proxy);
        }
        break;
    case BR_FINISHED:
        result = TIMED_OUT;
        break;
    case BR_NOOP:
        break;
    case BR_SPAWN_LOOPER:
        mProcess->spawnPooledThread(false);
        break;
    default:
        ALOGE("*** BAD COMMAND %d received from Binder driver\n", cmd);
        result = UNKNOWN_ERROR;
        break;
    }
    if (result != NO_ERROR) {
        mLastError = result;
    }
    return result;
}
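The transact() call made in the BR_TRANSACTION branch above lands in BBinder::transact(), which dispatches to the service's onTransact() override. A hypothetical service sketch, where the class name and the reuse of FIRST_CALL_TRANSACTION as a "hello" code are made up, matching the client sketch earlier:

#include <binder/Binder.h>
#include <binder/Parcel.h>
#include <utils/String16.h>

using namespace android;

class DemoService : public BBinder {
protected:
    // Invoked (via BBinder::transact) with the Parcel that executeCommand()
    // built from tr.data.ptr.buffer in the BR_TRANSACTION branch above.
    status_t onTransact(uint32_t code, const Parcel& data,
                        Parcel* reply, uint32_t flags) override
    {
        switch (code) {
        case FIRST_CALL_TRANSACTION: { // hypothetical "hello" transaction
            String16 msg = data.readString16();
            if (reply) reply->writeInt32(static_cast<int32_t>(msg.size()));
            return NO_ERROR; // For a non-one-way call, reply is sent back via sendReply() as BR_REPLY.
        }
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};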