muduo的源代码对于一个初学者来说还是有一些复杂的,其中有很多的回调函数以及交叉的组件,下面我将追踪一次TCP连接过程中发生的事情,不会出现用户态的源码,都是库内部的运行机制。下文笔者将描述一次连接发生的过程,直到将Channel加入loop循环为止。
TcpServer向Acceptor注册的回调代码主要作用是:当一个新的连接到来时,根据Acceptor创建的可连接描述符和客户的地址,创建一个Connection对象,并且将这个对象加入到TcpServer的ConnectionMap中,由TcpServer来管理上述新建con对象。但是现在监听套接字的事件分发对象Channel还没有加入loop,就先不多提这个新的连接到来时的处理过程。
// TcpServer constructor: wires up the acceptor, the IO thread pool and the
// library-default connection/message callbacks.  `loop` is the user-supplied
// base loop; the Acceptor (and its listening Channel) live on it.
TcpServer::TcpServer(EventLoop* loop,const InetAddress& listenAddr,const string& nameArg,Option option)
: loop_(CHECK_NOTNULL(loop)),
ipPort_(listenAddr.toIpPort()),name_(nameArg),acceptor_(new Acceptor(loop, listenAddr, option == kReusePort)),
threadPool_(new EventLoopThreadPool(loop, name_)),
connectionCallback_(defaultConnectionCallback),
messageCallback_(defaultMessageCallback),
nextConnId_(1)
{// `loop` above is the loop provided by the user
acceptor_->setNewConnectionCallback(
boost::bind(&TcpServer::newConnection, this, _1, _2));// callback handed to the acceptor;
}// it fires each time the Acceptor accepts a new connection
void TcpServer::newConnection(int sockfd, const InetAddress& peerAddr)
{//将本函数注册个acceptor
loop_->assertInLoopThread();//断言是否在IO线程
EventLoop* ioLoop = threadPool_->getNextLoop();//获得线程池中的一个loop
char buf[64];//获得线程池map中的string索引
snprintf(buf, sizeof buf, "-%s#%d", ipPort_.c_str(), nextConnId_);
++nextConnId_;
string connName = name_ + buf;
LOG_INFO << "TcpServer::newConnection [" << name_
<< "] - new connection [" << connName
<< "] from " << peerAddr.toIpPort();
InetAddress localAddr(sockets::getLocalAddr(sockfd));//获得本地的地址,用于构建Connection
// FIXME poll with zero timeout to double confirm the new connection
// FIXME use make_shared if necessary
TcpConnectionPtr conn(new TcpConnection(ioLoop,
connName,
sockfd,
localAddr,
peerAddr));//构建了一个connection
connections_[connName] = conn;//将新构建的con加入server的map中
conn->setConnectionCallback(connectionCallback_);//muduo默认的
conn->setMessageCallback(messageCallback_);//moduo默认的
conn->setWriteCompleteCallback(writeCompleteCallback_);//??
conn->setCloseCallback(
boost::bind(&TcpServer::removeConnection, this, _1)); // FIXME: unsafe
ioLoop->runInLoop(boost::bind(&TcpConnection::connectEstablished, conn));//在某个线程池的loop中加入这个con
}
// Acceptor constructor: creates the non-blocking listening socket, wraps its
// fd in a Channel on `loop`, and pre-opens an idle fd ("/dev/null") that is
// later used to gracefully drop connections when the process runs out of
// file descriptors (see Acceptor::handleRead).
Acceptor::Acceptor(EventLoop* loop, const InetAddress& listenAddr, bool reuseport)
: loop_(loop),
acceptSocket_(sockets::createNonblockingOrDie(listenAddr.family())),
acceptChannel_(loop, acceptSocket_.fd()),
listenning_(false),
idleFd_(::open("/dev/null", O_RDONLY | O_CLOEXEC))
{
assert(idleFd_ >= 0);
acceptSocket_.setReuseAddr(true);
acceptSocket_.setReusePort(reuseport);
acceptSocket_.bindAddress(listenAddr);
acceptChannel_.setReadCallback(
boost::bind(&Acceptor::handleRead, this));// invoked by the Channel when the listening fd becomes readable
}
// Called on the IO thread when the listening fd is readable: accepts one
// pending connection and hands it to the callback registered by TcpServer.
void Acceptor::handleRead()
{
loop_->assertInLoopThread();// must run on the loop's own IO thread
InetAddress peerAddr;// filled in with the client's address by accept()
//FIXME loop until no more
int connfd = acceptSocket_.accept(&peerAddr);// fd of the newly accepted connection
if (connfd >= 0)
{
// string hostport = peerAddr.toIpPort();
// LOG_TRACE << "Accepts of " << hostport;
if (newConnectionCallback_)
{
newConnectionCallback_(connfd, peerAddr);// registered by TcpServer: builds a TcpConnection and stores it in the server's ConnectionMap
}
else
{
sockets::close(connfd);// no one wants the connection: close it immediately
}
}
else
{
LOG_SYSERR << "in Acceptor::handleRead";
// Read the section named "The special problem of
// accept()ing when you can't" in libev's doc.
// By Marc Lehmann, author of libev.
if (errno == EMFILE)
{
// Out of file descriptors: release the reserved idle fd, accept the
// pending connection into that freed slot, close it at once (so the
// peer sees an orderly close instead of the fd staying forever
// readable), then re-reserve the idle fd for next time.
::close(idleFd_);
idleFd_ = ::accept(acceptSocket_.fd(), NULL, NULL);
::close(idleFd_);
idleFd_ = ::open("/dev/null", O_RDONLY | O_CLOEXEC);
}
}
}
// Channel constructor: a Channel dispatches IO events for exactly one fd on
// one loop; it does not own the fd.  All members start in the "not yet
// watched" state.
Channel::Channel(EventLoop* loop, int fd__)
: loop_(loop),
fd_(fd__),
events_(0),// no events of interest yet
revents_(0),// no events currently pending
index_(-1),// -1: not yet added to the poller's pollfd list
logHup_(true),
tied_(false),// not tied to an owner object yet (see Channel::tie)
eventHandling_(false),
addedToLoop_(false)
{
}
// Set how many IO threads the internal EventLoopThreadPool will start.
// Must be non-negative; 0 means all IO stays on the base loop's thread.
void TcpServer::setThreadNum(int numThreads)
{
assert(0 <= numThreads);
threadPool_->setThreadNum(numThreads);
}
// Start the server: spin up the IO thread pool and begin listening.
// Idempotent -- the atomic started_ flag makes repeated calls no-ops.
void TcpServer::start()
{
if (started_.getAndSet(1) == 0)// atomically flips started_; only the first caller proceeds
{
threadPool_->start(threadInitCallback_);// launch the IO threads
assert(!acceptor_->listenning());// must not already be in listening state
loop_->runInLoop(
boost::bind(&Acceptor::listen, get_pointer(acceptor_)));// start listening on the base loop's own thread
}
}
// Put the acceptor into listening mode, then register read interest so the
// loop will dispatch incoming-connection events to handleRead().
void Acceptor::listen()
{
loop_->assertInLoopThread();// must run on the IO thread
listenning_ = true;// enter listening state
acceptSocket_.listen();
acceptChannel_.enableReading();// watch the listening fd for readability
}
void enableReading() { events_ |= kReadEvent; update(); }// add kReadEvent to the interest set, then push the change to the poller via update()
// Propagate this channel's (changed) interest set to its owning loop's poller.
void Channel::update()
{
addedToLoop_ = true;// mark as registered with the loop
loop_->updateChannel(this);// the loop forwards the change to its poller
}
// Forward a channel update to the poller, after sanity checks.
void EventLoop::updateChannel(Channel* channel)
{
assert(channel->ownerLoop() == this);// the channel must belong to this loop
assertInLoopThread();// must run on this loop's IO thread
poller_->updateChannel(channel);// poller syncs its fd set with the channel
}
// Synchronize one channel's interest set with the poller's pollfds_ array.
// A channel with index() < 0 is new and gets a fresh pollfd appended;
// otherwise the pollfd at the channel's remembered slot is refreshed.
void PollPoller::updateChannel(Channel* channel)
{
Poller::assertInLoopThread();// aborts if called off the loop thread
LOG_TRACE << "fd = " << channel->fd() << " events = " << channel->events();
if (channel->index() < 0)// negative index: channel not yet in pollfds_
{
// a new one, add to pollfds_
assert(channels_.find(channel->fd()) == channels_.end());
struct pollfd pfd;// fresh pollfd bound to this channel
pfd.fd = channel->fd();
pfd.events = static_cast<short>(channel->events());// watch what the channel is interested in
pfd.revents = 0;// nothing pending yet
pollfds_.push_back(pfd);// append to the watched-fd list
int idx = static_cast<int>(pollfds_.size())-1;// slot the pollfd was appended at
channel->set_index(idx);// channel remembers its slot in pollfds_
channels_[pfd.fd] = channel;// register in the fd -> Channel map
}
else
{
// update existing one
assert(channels_.find(channel->fd()) != channels_.end());
assert(channels_[channel->fd()] == channel);// map entry must point back at this channel
int idx = channel->index();// slot saved when the channel was first added
assert(0 <= idx && idx < static_cast<int>(pollfds_.size()));
struct pollfd& pfd = pollfds_[idx];// the pollfd to refresh
assert(pfd.fd == channel->fd() || pfd.fd == -channel->fd()-1);// fd may be negated if the entry was being ignored
pfd.events = static_cast<short>(channel->events());// refresh the interest set
pfd.revents = 0;// clear any stale pending events
if (channel->isNoneEvent())// channel temporarily interested in nothing
{
// ignore this pollfd
pfd.fd = -channel->fd()-1;// a negative fd makes poll(2) skip this entry; the -fd-1 encoding is reversible
}
}
}
// The core event loop: repeatedly poll, dispatch the ready channels, then
// run functors queued from other threads, until quit() is called.
void EventLoop::loop()
{
assert(!looping_);// re-entering loop() is a bug
assertInLoopThread();// loop() must run on the thread that owns this EventLoop
looping_ = true;// enter looping state
quit_ = false; // FIXME: what if someone calls quit() before loop() ?
LOG_TRACE << "EventLoop " << this << " start looping";
while (!quit_)
{
activeChannels_.clear();// drop last iteration's ready list
pollReturnTime_ = poller_->poll(kPollTimeMs, &activeChannels_);// block in poll; fills activeChannels_ with ready channels
++iteration_;// bookkeeping: number of poll cycles so far
if (Logger::logLevel() <= Logger::TRACE)
{
printActiveChannels();
}
// TODO sort channel by priority
eventHandling_ = true;// entering the dispatch phase
for (ChannelList::iterator it = activeChannels_.begin();
it != activeChannels_.end(); ++it)
{
currentActiveChannel_ = *it;// channel currently being dispatched
currentActiveChannel_->handleEvent(pollReturnTime_);// dispatch, passing the time poll() returned
}
currentActiveChannel_ = NULL;// no channel being dispatched any more
eventHandling_ = false;// leave the dispatch phase
doPendingFunctors();// run callbacks queued by other threads via runInLoop/queueInLoop
}
LOG_TRACE << "EventLoop " << this << " stop looping";
looping_ = false;// leave looping state
}
一个监听套接字已经进入循环,如果此时一个新的连接到来又会发生什么事情呢?
// Entry point for event dispatch.  When the channel is tied to an owner
// (e.g. a TcpConnection), promote the weak reference first so the owner
// cannot be destroyed while its callbacks run; otherwise dispatch directly.
void Channel::handleEvent(Timestamp receiveTime)
{
  if (!tied_)
  {
    // Untied channel: nothing to keep alive, dispatch straight away.
    handleEventWithGuard(receiveTime);
    return;
  }
  // A successful lock() proves the owner still exists; the guard keeps it
  // alive until this function returns.
  boost::shared_ptr<void> guard = tie_.lock();
  if (guard)
  {
    handleEventWithGuard(receiveTime);
  }
}
// Dispatch the revents_ reported by the poller to the registered callbacks.
// Checked in order: hangup/close, invalid fd, error, read, write.
void Channel::handleEventWithGuard(Timestamp receiveTime)
{
eventHandling_ = true;// flag: callbacks are currently running
LOG_TRACE << reventsToString();
if ((revents_ & POLLHUP) && !(revents_ & POLLIN))// peer hung up and there is nothing left to read
{
if (logHup_)
{
LOG_WARN << "fd = " << fd_ << " Channel::handle_event() POLLHUP";
}
if (closeCallback_) closeCallback_();
}
if (revents_ & POLLNVAL)// fd not open -- indicates a bookkeeping bug
{
LOG_WARN << "fd = " << fd_ << " Channel::handle_event() POLLNVAL";
}
if (revents_ & (POLLERR | POLLNVAL))
{
if (errorCallback_) errorCallback_();
}
if (revents_ & (POLLIN | POLLPRI | POLLRDHUP))// readable / urgent data / peer shut down its writing end
{
if (readCallback_) readCallback_(receiveTime);
}
if (revents_ & POLLOUT)// writable
{
if (writeCallback_) writeCallback_();
}
eventHandling_ = false;
}
// Called on the IO thread when the listening fd is readable: accepts one
// pending connection and forwards it to the registered callback.
void Acceptor::handleRead()
{
loop_->assertInLoopThread();// must run on the loop's own IO thread
InetAddress peerAddr;// filled in with the client's address by accept()
//FIXME loop until no more
int connfd = acceptSocket_.accept(&peerAddr);// fd of the newly accepted connection
if (connfd >= 0)
{
// string hostport = peerAddr.toIpPort();
// LOG_TRACE << "Accepts of " << hostport;
if (newConnectionCallback_)
{
newConnectionCallback_(connfd, peerAddr);// the key step: this callback was registered by TcpServer (newConnection)
}
else
{
sockets::close(connfd);// no consumer registered: close the connection
}
}
else
{
LOG_SYSERR << "in Acceptor::handleRead";
// Read the section named "The special problem of
// accept()ing when you can't" in libev's doc.
// By Marc Lehmann, author of libev.
if (errno == EMFILE)
{
// Out of file descriptors: free the reserved idle fd, accept the
// pending connection into the freed slot, close it immediately so the
// peer gets an orderly close, then re-reserve the idle fd.
::close(idleFd_);
idleFd_ = ::accept(acceptSocket_.fd(), NULL, NULL);
::close(idleFd_);
idleFd_ = ::open("/dev/null", O_RDONLY | O_CLOEXEC);
}
}
}
// Registered with the Acceptor as its new-connection callback: builds a
// TcpConnection for the accepted fd and hands it to an IO loop from the pool.
void TcpServer::newConnection(int sockfd, const InetAddress& peerAddr)
{
loop_->assertInLoopThread();// runs on the acceptor's (base) loop thread
EventLoop* ioLoop = threadPool_->getNextLoop();// pick the IO loop that will own the connection
char buf[64];// scratch for the per-connection id suffix
snprintf(buf, sizeof buf, "-%s#%d", ipPort_.c_str(), nextConnId_);
++nextConnId_;// next serial number for naming connections
string connName = name_ + buf;
LOG_INFO << "TcpServer::newConnection [" << name_
<< "] - new connection [" << connName
<< "] from " << peerAddr.toIpPort();
InetAddress localAddr(sockets::getLocalAddr(sockfd));// local side address, needed to build the TcpConnection
// FIXME poll with zero timeout to double confirm the new connection
// FIXME use make_shared if necessary
TcpConnectionPtr conn(new TcpConnection(ioLoop,
connName,
sockfd,
localAddr,
peerAddr));// the connection will live on ioLoop, not the base loop
connections_[connName] = conn;// server keeps every live connection in its map
conn->setConnectionCallback(connectionCallback_);// user-supplied or muduo default
conn->setMessageCallback(messageCallback_);// user-supplied or muduo default
conn->setWriteCompleteCallback(writeCompleteCallback_);// may be unset
conn->setCloseCallback(
boost::bind(&TcpServer::removeConnection, this, _1)); // FIXME: unsafe
ioLoop->runInLoop(boost::bind(&TcpConnection::connectEstablished, conn));// finish setup on the connection's own IO loop
}
// Runs on the connection's own IO loop once the TcpConnection has been
// created: flips the state to connected and starts watching the socket.
void TcpConnection::connectEstablished()
{
loop_->assertInLoopThread();// must run on this connection's IO loop
assert(state_ == kConnecting);// only legal while the connection is being set up
setState(kConnected);
channel_->tie(shared_from_this());// channel holds a weak ref so event dispatch can verify the connection is alive
channel_->enableReading();// register read interest with the poller
connectionCallback_(shared_from_this());// notify the user (muduo supplies a default)
}
void enableReading() { events_ |= kReadEvent; update(); }// add kReadEvent to the interest set, then push the change to the poller via update()
// Propagate this channel's (changed) interest set to its owning loop's poller.
void Channel::update()
{
addedToLoop_ = true;// mark as registered with the loop
loop_->updateChannel(this);// the loop forwards the change to its poller
}
// Forward a channel update to the poller, after sanity checks.
void EventLoop::updateChannel(Channel* channel)
{
assert(channel->ownerLoop() == this);// the channel must belong to this loop
assertInLoopThread();// must run on this loop's IO thread
poller_->updateChannel(channel);// poller syncs its fd set with the channel
}
// Synchronize one channel's interest set with the poller's pollfds_ array.
// A channel with index() < 0 is new and gets a fresh pollfd appended;
// otherwise the pollfd at the channel's remembered slot is refreshed.
void PollPoller::updateChannel(Channel* channel)
{
Poller::assertInLoopThread();// aborts if called off the loop thread
LOG_TRACE << "fd = " << channel->fd() << " events = " << channel->events();
if (channel->index() < 0)// negative index: channel not yet in pollfds_
{
// a new one, add to pollfds_
assert(channels_.find(channel->fd()) == channels_.end());
struct pollfd pfd;// fresh pollfd bound to this channel
pfd.fd = channel->fd();
pfd.events = static_cast<short>(channel->events());// watch what the channel is interested in
pfd.revents = 0;// nothing pending yet
pollfds_.push_back(pfd);// append to the watched-fd list
int idx = static_cast<int>(pollfds_.size())-1;// slot the pollfd was appended at
channel->set_index(idx);// channel remembers its slot in pollfds_
channels_[pfd.fd] = channel;// register in the fd -> Channel map
}
else
{
// update existing one
assert(channels_.find(channel->fd()) != channels_.end());
assert(channels_[channel->fd()] == channel);// map entry must point back at this channel
int idx = channel->index();// slot saved when the channel was first added
assert(0 <= idx && idx < static_cast<int>(pollfds_.size()));
struct pollfd& pfd = pollfds_[idx];// the pollfd to refresh
assert(pfd.fd == channel->fd() || pfd.fd == -channel->fd()-1);// fd may be negated if the entry was being ignored
pfd.events = static_cast<short>(channel->events());// refresh the interest set
pfd.revents = 0;// clear any stale pending events
if (channel->isNoneEvent())// channel temporarily interested in nothing
{
// ignore this pollfd
pfd.fd = -channel->fd()-1;// a negative fd makes poll(2) skip this entry; the -fd-1 encoding is reversible
}
}
}
最后一个连接的channel加入loop循环,新的循环已经开始了。