The previous article covered how Vold starts up. This one looks at how SM (StorageManager) and Vold, and the Kernel and Vold, establish their connections, and at how messages are received and processed along the way.
As mentioned earlier, when NM (NetlinkManager) starts, it calls startListener to begin listening for kernel uevents. Let's look in detail at how these message events are received and handled.
int SocketListener::startListener() {
    return startListener(4);
}

int SocketListener::startListener(int backlog) {
    if (!mSocketName && mSock == -1) {
        SLOGE("Failed to start unbound listener");
        errno = EINVAL;
        return -1;
    } else if (mSocketName) {
        if ((mSock = android_get_control_socket(mSocketName)) < 0) {
            SLOGE("Obtaining file descriptor socket '%s' failed: %s",
                  mSocketName, strerror(errno));
            return -1;
        }
        SLOGV("got mSock = %d for %s", mSock, mSocketName);
        fcntl(mSock, F_SETFD, FD_CLOEXEC);
    }

    if (mListen && listen(mSock, backlog) < 0) {
        SLOGE("Unable to listen on socket (%s)", strerror(errno));
        return -1;
    } else if (!mListen)
        mClients[mSock] = new SocketClient(mSock, false, mUseCmdNum);

    if (pipe2(mCtrlPipe, O_CLOEXEC)) {
        SLOGE("pipe failed (%s)", strerror(errno));
        return -1;
    }

    if (pthread_create(&mThread, nullptr, SocketListener::threadStart, this)) {
        SLOGE("pthread_create (%s)", strerror(errno));
        return -1;
    }

    return 0;
}

void *SocketListener::threadStart(void *obj) {
    SocketListener *me = reinterpret_cast<SocketListener *>(obj);

    me->runListener();
    pthread_exit(nullptr);
    return nullptr;
}
NetlinkHandler inherits from NetlinkListener, which is in turn a subclass of SocketListener, so it is ultimately SocketListener's startListener that gets called. This function sets up the listening socket and then spawns a thread to run runListener, which loops reading socket events and dispatching them to each registered client; NetlinkListener, being one of those clients, processes the event further once its callback fires.
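For context, the socket being polled here is, in NetlinkManager's case, a netlink uevent socket that NetlinkManager creates before handing it down through the NetlinkHandler/NetlinkListener constructors. Below is a minimal sketch of how such a socket is typically set up (standard Linux netlink API, simplified from what NetlinkManager::start does; the real code also forces a large receive buffer, so treat the exact options as illustrative):

#include <cstring>
#include <linux/netlink.h>
#include <sys/socket.h>
#include <unistd.h>

// Sketch: create a PF_NETLINK socket subscribed to kernel uevent broadcasts.
// The returned fd is what SocketListener ends up poll()ing.
int openUeventSocket() {
    struct sockaddr_nl nladdr;
    memset(&nladdr, 0, sizeof(nladdr));
    nladdr.nl_family = AF_NETLINK;
    nladdr.nl_pid = getpid();
    nladdr.nl_groups = 0xffffffff;  // subscribe to all multicast groups

    int sock = socket(PF_NETLINK, SOCK_DGRAM | SOCK_CLOEXEC,
                      NETLINK_KOBJECT_UEVENT);
    if (sock < 0) return -1;

    int on = 1;  // SO_PASSCRED lets the receiver check the sender's credentials
    setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));

    if (bind(sock, (struct sockaddr*)&nladdr, sizeof(nladdr)) < 0) {
        close(sock);
        return -1;
    }
    return sock;
}

With that fd in place, the thread created above executes runListener, the receive loop: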
void SocketListener::runListener() {
    while (true) {
        std::vector<pollfd> fds;
        pthread_mutex_lock(&mClientsLock);
        fds.reserve(2 + mClients.size());
        fds.push_back({.fd = mCtrlPipe[0], .events = POLLIN});
        if (mListen) fds.push_back({.fd = mSock, .events = POLLIN});
        for (auto pair : mClients) {
            // NB: calling out to another object with mClientsLock held (safe)
            const int fd = pair.second->getSocket();
            if (fd != pair.first) SLOGE("fd mismatch: %d != %d", fd, pair.first);
            fds.push_back({.fd = fd, .events = POLLIN});
        }
        pthread_mutex_unlock(&mClientsLock);

        SLOGV("mListen=%d, mSocketName=%s", mListen, mSocketName);
        int rc = TEMP_FAILURE_RETRY(poll(fds.data(), fds.size(), -1));
        if (rc < 0) {
            SLOGE("poll failed (%s) mListen=%d", strerror(errno), mListen);
            sleep(1);
            continue;
        }

        if (fds[0].revents & (POLLIN | POLLERR)) {
            char c = CtrlPipe_Shutdown;
            TEMP_FAILURE_RETRY(read(mCtrlPipe[0], &c, 1));
            if (c == CtrlPipe_Shutdown) {
                break;
            }
            continue;
        }
        if (mListen && (fds[1].revents & (POLLIN | POLLERR))) {
            int c = TEMP_FAILURE_RETRY(accept4(mSock, nullptr, nullptr, SOCK_CLOEXEC));
            if (c < 0) {
                SLOGE("accept failed (%s)", strerror(errno));
                sleep(1);
                continue;
            }
            pthread_mutex_lock(&mClientsLock);
            mClients[c] = new SocketClient(c, true, mUseCmdNum);
            pthread_mutex_unlock(&mClientsLock);
        }

        // Add all active clients to the pending list first, so we can release
        // the lock before invoking the callbacks.
        std::vector<SocketClient*> pending;
        pthread_mutex_lock(&mClientsLock);
        const int size = fds.size();
        for (int i = mListen ? 2 : 1; i < size; ++i) {
            const struct pollfd& p = fds[i];
            if (p.revents & (POLLIN | POLLERR)) {
                auto it = mClients.find(p.fd);
                if (it == mClients.end()) {
                    SLOGE("fd vanished: %d", p.fd);
                    continue;
                }
                SocketClient* c = it->second;
                pending.push_back(c);
                c->incRef();
            }
        }
        pthread_mutex_unlock(&mClientsLock);

        for (SocketClient* c : pending) {
            // Process it; if false is returned, remove it from the map
            SLOGV("processing fd %d", c->getSocket());
            if (!onDataAvailable(c)) {
                release(c, false);
            }
            c->decRef();
        }
    }
}
Walking through this loop: mCtrlPipe[0] serves as the read end of the control pipe and is registered for POLLIN events, and each client's socket file descriptor is likewise pushed into the vector. poll is then used to wait on every file descriptor in that vector.
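The control pipe deserves a quick note: stopListener() writes a CtrlPipe_Shutdown byte into mCtrlPipe[1], which wakes poll() and lets the loop exit cleanly. A distilled, self-contained sketch of that pattern (all names here are made up for illustration):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int gCtrlPipe[2];  // [0] = read end polled by the loop, [1] = write end

bool initCtrlPipe() {
    return pipe2(gCtrlPipe, O_CLOEXEC) == 0;
}

// Listener thread: block until a shutdown byte arrives.
void runLoop() {
    pollfd fds[1] = {{.fd = gCtrlPipe[0], .events = POLLIN}};
    while (poll(fds, 1, -1) >= 0) {
        char c = 0;
        read(gCtrlPipe[0], &c, 1);
        if (c == 'S') break;  // shutdown requested
    }
}

// Any other thread: wake the loop and ask it to stop.
void requestShutdown() {
    char c = 'S';
    write(gCtrlPipe[1], &c, 1);
}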
Back in runListener, revents determines whether an event actually arrived on a descriptor: if nothing arrived, the loop simply goes around again; if something did, the client owning that descriptor is collected, and the onDataAvailable callback is finally invoked for it. Let's continue with onDataAvailable.
bool NetlinkListener::onDataAvailable(SocketClient *cli)
{
    int socket = cli->getSocket();
    ssize_t count;
    uid_t uid = -1;

    bool require_group = true;
    if (mFormat == NETLINK_FORMAT_BINARY_UNICAST) {
        require_group = false;
    }

    count = TEMP_FAILURE_RETRY(uevent_kernel_recv(socket,
            mBuffer, sizeof(mBuffer), require_group, &uid));
    if (count < 0) {
        SLOGE("recvmsg failed (%s)", strerror(errno));
        return false;
    }

    NetlinkEvent *evt = new NetlinkEvent();
    if (evt->decode(mBuffer, count, mFormat)) {
        onEvent(evt);
    } else if (mFormat != NETLINK_FORMAT_BINARY) {
        // Don't complain if parseBinaryNetlinkMessage returns false. That can
        // just mean that the buffer contained no messages we're interested in.
        SLOGE("Error decoding NetlinkEvent");
    }

    delete evt;
    return true;
}
void NetlinkHandler::onEvent(NetlinkEvent* evt) {
    VolumeManager* vm = VolumeManager::Instance();
    const char* subsys = evt->getSubsystem();
    if (!subsys) {
        LOG(WARNING) << "No subsystem found in netlink event";
        return;
    }

    if (std::string(subsys) == "block") {
        vm->handleBlockEvent(evt);
    }
}
onDataAvailable is where the data is actually received: it takes the socket from the given client, reads the payload with uevent_kernel_recv, decodes it into a NetlinkEvent, and dispatches it onward via onEvent. This is where NetlinkHandler enters the picture: it grabs the VolumeManager (VM) singleton and calls handleBlockEvent to do the real event handling. The details of that handling are not covered here.
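To make the decode step concrete: a kernel uevent arrives as a NUL-separated buffer consisting of an "action@devpath" header followed by KEY=value fields. Below is a simplified parsing sketch; it mirrors only the key/value portion of what NetlinkEvent::decode handles, and parseUevent is a hypothetical helper:

#include <cstring>
#include <map>
#include <string>

// A block uevent buffer looks roughly like:
//   "add@/devices/.../block/sdb\0ACTION=add\0SUBSYSTEM=block\0DEVTYPE=disk\0..."
// Hypothetical helper: split the NUL-separated KEY=value fields into a map.
std::map<std::string, std::string> parseUevent(const char* buf, size_t len) {
    std::map<std::string, std::string> params;
    const char* end = buf + len;
    const char* s = buf + strlen(buf) + 1;  // skip the "action@devpath" header
    while (s < end && *s) {
        if (const char* eq = strchr(s, '=')) {
            params.emplace(std::string(s, eq), std::string(eq + 1));
        }
        s += strlen(s) + 1;  // advance past this NUL-terminated field
    }
    return params;
}

From fields like ACTION and SUBSYSTEM, NetlinkEvent populates what getSubsystem() and friends return, which is exactly what onEvent consults before handing block events to VolumeManager.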
The first half above covered the connection between the Kernel and Vold; now let's look at the connection between SM and Vold.
@Override
public void onStart() {
    mStorageManagerService = new StorageManagerService(getContext());
    publishBinderService("mount", mStorageManagerService);
    mStorageManagerService.start();
}
// From StorageManagerService's vold-connection logic, after the "vold"
// binder has been fetched from ServiceManager:
if (binder != null) {
    mVold = IVold.Stub.asInterface(binder);
    try {
        mVold.setListener(mListener);
    } catch (RemoteException e) {
        mVold = null;
        Slog.w(TAG, "vold listener rejected; trying again", e);
    }
} else {
    Slog.w(TAG, "vold not found; trying again");
}
In SM, when SystemServer starts the StorageManagerService, the onStart method is called back. SM then obtains the IVold interface over binder and registers mListener with it. IVold is Vold's AIDL interface, implemented on the native side by the VoldNativeService class.
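For this lookup to succeed, Vold must already have registered a binder service under the name "vold". A sketch of how a native daemon typically publishes such a service (generic libbinder API; Vold actually wraps this in a BinderService helper, but the effect is the same):

#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
#include <utils/String16.h>

using android::IBinder;
using android::sp;

// Sketch: register a service with servicemanager so that system_server can
// find it via ServiceManager.getService("vold") on the Java side.
void publishService(const sp<IBinder>& service) {
    android::defaultServiceManager()->addService(android::String16("vold"),
                                                 service);
    android::ProcessState::self()->startThreadPool();  // serve incoming calls
}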
After the BOOT_COMPLETED broadcast goes out, SM performs its lock-user or unlock-user operations, then adds the internal (built-in) storage and resets Vold, which in practice means calling VM's reset function. Finally the user is started: the corresponding directories are created and linked to the matching data storage partition.
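The "start user" part amounts, roughly, to preparing a per-user mount directory and pointing a primary symlink at that user's view of emulated storage. A hypothetical, simplified sketch (loosely based on VolumeManager's onUserStarted/linkPrimary; the helper name and error handling are made up, and the real code uses fs_prepare_dir):

#include <string>
#include <sys/stat.h>
#include <unistd.h>

// Hypothetical sketch: expose the started user's primary storage view.
void startUserStorage(int userId) {
    std::string userDir = "/mnt/user/" + std::to_string(userId);
    mkdir(userDir.c_str(), 0755);  // fs_prepare_dir in the real code

    std::string source = "/storage/emulated/" + std::to_string(userId);
    std::string target = userDir + "/primary";
    unlink(target.c_str());  // drop any stale link first
    symlink(source.c_str(), target.c_str());
}

The reset half of this sequence lives in resetIfBootedAndConnected: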
private void resetIfBootedAndConnected() {
    if (mBootCompleted && mDaemonConnected) {
        final List<UserInfo> users = mContext.getSystemService(UserManager.class).getUsers();
        killMediaProvider(users);

        final int[] systemUnlockedUsers;
        synchronized (mLock) {
            systemUnlockedUsers = mSystemUnlockedUsers;

            mDisks.clear();
            mVolumes.clear();

            addInternalVolumeLocked();
        }

        try {
            mVold.reset();

            // Tell vold about all existing and started users
            for (UserInfo user : users) {
                mVold.onUserAdded(user.id, user.serialNumber);
            }
            for (int userId : systemUnlockedUsers) {
                mVold.onUserStarted(userId);
                mStoraged.onUserStarted(userId);
            }
            mVold.onSecureKeyguardStateChanged(mSecureKeyguardShowing);
            mStorageManagerInternal.onReset(mVold);
        } catch (Exception e) {
            Slog.wtf(TAG, e);
        }
    }
}
binder::Status VoldNativeService::reset() {
    ENFORCE_UID(AID_SYSTEM);
    ACQUIRE_LOCK;

    return translate(VolumeManager::Instance()->reset());
}
Now let's look at the reset process. During VM's startup earlier, mInternalEmulated->create() called getListener() to fetch the registered listener, but SystemServer had not started at that point and SM had not registered anything yet, so no notification made it up to the framework.
Once SM has finished starting and executes reset, the mInternalEmulated->create() path runs again; this time the notification does reach SM, which triggers the internal emulated volume to be mounted.
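The crux can be modeled in a few lines: before setListener() has been called the listener is null and the creation event is simply dropped, which is exactly why reset() must re-run create() after SM connects. A hypothetical, simplified model (the real interface is the IVoldListener AIDL callback; these types are stand-ins):

#include <memory>
#include <string>

// Stand-in for the IVoldListener binder callback interface.
struct IVoldListener {
    virtual ~IVoldListener() = default;
    virtual void onVolumeCreated(const std::string& volId, int type,
                                 const std::string& diskId,
                                 const std::string& partGuid) = 0;
};

// Set when SM calls IVold::setListener(); null until SystemServer is up.
std::shared_ptr<IVoldListener> gListener;

void notifyVolumeCreated(const std::string& volId, int type,
                         const std::string& diskId,
                         const std::string& partGuid) {
    if (gListener) {
        gListener->onVolumeCreated(volId, type, diskId, partGuid);
    }
    // else: nobody is listening yet and the event is dropped, which is why
    // reset() has to destroy()/create() the volumes again after SM connects.
}

With the listener registered, reset simply tears everything down and re-creates it: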
int VolumeManager::reset() {
    // Tear down all existing disks/volumes and start from a blank slate so
    // newly connected framework hears all events.
    if (mInternalEmulated != nullptr) {
        mInternalEmulated->destroy();
        mInternalEmulated->create();
    }
    for (const auto& disk : mDisks) {
        disk->destroy();
        disk->create();
    }
    updateVirtualDisk();
    mAddedUsers.clear();
    mStartedUsers.clear();
    return 0;
}
Once the mount is triggered, EmulatedVolume's doMount() function eventually runs, which forks the sdcard process to carry out the actual mount.
status_t EmulatedVolume::doMount() {
    // We could have migrated storage to an adopted private volume, so always
    // call primary storage "emulated" to avoid media rescans.
    std::string label = mLabel;
    if (getMountFlags() & MountFlags::kPrimary) {
        label = "emulated";
    }

    mFuseDefault = StringPrintf("/mnt/runtime/default/%s", label.c_str());
    mFuseRead = StringPrintf("/mnt/runtime/read/%s", label.c_str());
    mFuseWrite = StringPrintf("/mnt/runtime/write/%s", label.c_str());
    mFuseFull = StringPrintf("/mnt/runtime/full/%s", label.c_str());

    setInternalPath(mRawPath);
    setPath(StringPrintf("/storage/%s", label.c_str()));

    if (fs_prepare_dir(mFuseDefault.c_str(), 0700, AID_ROOT, AID_ROOT) ||
        fs_prepare_dir(mFuseRead.c_str(), 0700, AID_ROOT, AID_ROOT) ||
        fs_prepare_dir(mFuseWrite.c_str(), 0700, AID_ROOT, AID_ROOT) ||
        fs_prepare_dir(mFuseFull.c_str(), 0700, AID_ROOT, AID_ROOT)) {
        PLOG(ERROR) << getId() << " failed to create mount points";
        return -errno;
    }

    dev_t before = GetDevice(mFuseFull);

    if (!(mFusePid = fork())) {
        // Child process: exec the FUSE daemon (kFusePath points at the sdcard
        // binary), roughly: sdcard -u 1023 -g 1023 -m -w -G -i -o <rawPath> <label>
        // clang-format off
        if (execl(kFusePath, kFusePath,
                "-u", "1023", // AID_MEDIA_RW
                "-g", "1023", // AID_MEDIA_RW
                "-m",
                "-w",
                "-G",
                "-i",
                "-o",
                mRawPath.c_str(),
                label.c_str(),
                NULL)) {
            // clang-format on
            PLOG(ERROR) << "Failed to exec";
        }

        LOG(ERROR) << "FUSE exiting";
        _exit(1);
    }

    if (mFusePid == -1) {
        PLOG(ERROR) << getId() << " failed to fork";
        return -errno;
    }

    nsecs_t start = systemTime(SYSTEM_TIME_BOOTTIME);
    while (before == GetDevice(mFuseFull)) {
        LOG(DEBUG) << "Waiting for FUSE to spin up...";
        usleep(50000); // 50ms

        nsecs_t now = systemTime(SYSTEM_TIME_BOOTTIME);
        if (nanoseconds_to_milliseconds(now - start) > 5000) {
            LOG(WARNING) << "Timed out while waiting for FUSE to spin up";
            return -ETIMEDOUT;
        }
    }
    /* sdcardfs will have exited already. FUSE will still be running */
    TEMP_FAILURE_RETRY(waitpid(mFusePid, nullptr, 0));
    mFusePid = 0;

    return OK;
}
The above covers the connections between the Kernel and Vold and between SM and Vold, and how the two sides interact: the former reports events upward by listening for kernel uevents, while the latter communicates over binder, with SM sending commands to Vold and Vold reporting messages back through the listener registered with it.