The EventLoop class implements the basic Reactor pattern. Its private member functions and data members are declared as follows:
void abortNotInLoopThread(); // abort: not in the main I/O thread
void handleRead(); // waked up // drain the wakeup descriptor so it can signal again
void doPendingFunctors(); // run the tasks handed over to the I/O thread
void printActiveChannels() const; // DEBUG // log the events that occurred
typedef std::vector<Channel*> ChannelList; // list of channels (event dispatchers)
bool looping_; /* atomic */ // whether the loop is running
bool quit_; /* atomic and shared between threads, okay on x86, I guess. */ // whether to quit the event loop
bool eventHandling_; /* atomic */ // whether events are currently being handled
bool callingPendingFunctors_; /* atomic */ // whether pending functors are currently being run
int64_t iteration_; // number of event-loop iterations
const pid_t threadId_; // ID of the thread that runs loop()
Timestamp pollReturnTime_; // time at which poll() returned
boost::scoped_ptr<Poller> poller_; // I/O multiplexing
boost::scoped_ptr<TimerQueue> timerQueue_; // timer queue
int wakeupFd_; // wakeup descriptor (eventfd)
// unlike in TimerQueue, which is an internal class,
// we don't expose Channel to client.
boost::scoped_ptr<Channel> wakeupChannel_; // channel wrapping wakeupFd_
boost::any context_;
// scratch variables
ChannelList activeChannels_; // channels with active events
Channel* currentActiveChannel_; // channel currently being handled
mutable MutexLock mutex_; // mutex protecting pendingFunctors_
std::vector<Functor> pendingFunctors_ GUARDED_BY(mutex_); // tasks that must run in the main I/O thread
EventLoop manages the poller_, timerQueue_, and wakeupChannel_ objects through boost's scoped_ptr smart pointer, which makes memory leaks less likely. The member pendingFunctors_ holds the tasks that must be executed in the I/O thread; for example, the timer add/cancel operations described above are first placed in this container and then executed by the main I/O thread. After loop() is called, however, the main thread blocks inside poller_->poll(), so how do we wake the I/O thread up? muduo uses the Linux eventfd facility to wake it.
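To make the mechanism concrete, here is a minimal, self-contained sketch (not muduo code, just an illustration of the eventfd idea): writing 8 bytes to an eventfd makes it readable, so a thread blocked in poll() on that descriptor returns immediately, and the reader then drains the counter so the descriptor can signal again.
#include <sys/eventfd.h>
#include <poll.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
int main()
{
  int efd = ::eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);  // create the notification descriptor
  struct pollfd pfd = { efd, POLLIN, 0 };
  uint64_t one = 1;
  ::write(efd, &one, sizeof one);   // what EventLoop::wakeup() does: make efd readable
  int n = ::poll(&pfd, 1, 10000);   // would block up to 10s, but returns at once because efd is readable
  if (n > 0 && (pfd.revents & POLLIN))
  {
    uint64_t value = 0;
    ::read(efd, &value, sizeof value);  // what EventLoop::handleRead() does: drain the counter
    printf("woken up, counter = %llu\n", (unsigned long long)value);
  }
  ::close(efd);
  return 0;
}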
The main workflow of EventLoop is as follows:
1. First, call updateChannel to register the events we care about (internally this calls poller_->updateChannel() to register the channel with the poller).
2. Then call loop() to run the event loop. Inside the loop the thread blocks in poller_->poll(); the Poller fills the activeChannels_ list with the channels that have active events.
3. For each active channel, Channel::handleEvent() is called to invoke the callbacks registered for the events that occurred. After the event callbacks, doPendingFunctors() runs the tasks that must be executed by the I/O thread.
Of course, along the way we can register ordinary events, register timed events through the run* functions, and add or remove channels with updateChannel and removeChannel, as the usage sketch below shows.
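A minimal usage sketch of this workflow (the onTimeout callback is made up for illustration; the EventLoop calls themselves come from the interface shown below):
#include <muduo/net/EventLoop.h>
#include <muduo/base/Logging.h>
#include <boost/bind.hpp>
using namespace muduo;
using namespace muduo::net;
void onTimeout()  // hypothetical timer callback
{
  LOG_INFO << "timer fired";
}
int main()
{
  EventLoop loop;                  // must be created and looped in the same thread
  loop.runAfter(1.0, onTimeout);   // one-shot timer, fires after 1 second
  loop.runEvery(3.0, onTimeout);   // repeating timer, fires every 3 seconds
  loop.runAfter(10.0, boost::bind(&EventLoop::quit, &loop));  // quit the loop after 10 seconds
  loop.loop();                     // blocks here, dispatching timer and I/O events until quit()
  return 0;
}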
EventLoop.h:
#ifndef MUDUO_NET_EVENTLOOP_H
#define MUDUO_NET_EVENTLOOP_H
#include <vector>
#include <boost/any.hpp>
#include <boost/function.hpp>
#include <boost/noncopyable.hpp>
#include <boost/scoped_ptr.hpp>
#include <muduo/base/Mutex.h>
#include <muduo/base/CurrentThread.h>
#include <muduo/base/Timestamp.h>
#include <muduo/net/Callbacks.h>
#include <muduo/net/TimerId.h>
namespace muduo
{
namespace net
{
class Channel;
class Poller;
class TimerQueue;
/// Reactor, at most one per thread.
///
/// This is an interface class, so don't expose too much details.
class EventLoop : boost::noncopyable
{
public:
typedef boost::function<void()> Functor;
EventLoop();
~EventLoop(); // force out-line dtor, for scoped_ptr members.
/// Runs the main loop; must be called in the thread that created the object.
void loop();
/// Quits the main loop.
void quit();
/// Time at which poll() last returned.
Timestamp pollReturnTime() const { return pollReturnTime_; }
/// Number of loop iterations.
int64_t iteration() const { return iteration_; }
/// Runs the callback in the loop thread: immediately if already in it, otherwise queued.
void runInLoop(const Functor& cb);
/// Appends the callback to the loop's task queue; it runs in the loop thread.
void queueInLoop(const Functor& cb);
size_t queueSize() const;
/// Runs the timer callback at the given time point.
TimerId runAt(const Timestamp& time, const TimerCallback& cb);
/// Runs the timer callback after `delay` seconds.
TimerId runAfter(double delay, const TimerCallback& cb);
/// Runs the timer callback every `interval` seconds.
TimerId runEvery(double interval, const TimerCallback& cb);
/// Cancels a timer.
void cancel(TimerId timerId);
void wakeup(); // wake up the loop thread via the wakeup (eventfd) descriptor
void updateChannel(Channel* channel); // add or update a channel (event dispatcher)
void removeChannel(Channel* channel); // remove a channel
bool hasChannel(Channel* channel);
// pid_t threadId() const { return threadId_; }
void assertInLoopThread() // aborts the program if called outside the I/O thread
{
if (!isInLoopThread())
{
abortNotInLoopThread();
}
}
bool isInLoopThread() const { return threadId_ == CurrentThread::tid(); } // whether the calling thread is the I/O thread
// bool callingPendingFunctors() const { return callingPendingFunctors_; }
bool eventHandling() const { return eventHandling_; } // whether events are currently being handled
void setContext(const boost::any& context)
{ context_ = context; }
const boost::any& getContext() const
{ return context_; }
boost::any* getMutableContext()
{ return &context_; }
static EventLoop* getEventLoopOfCurrentThread(); // returns the EventLoop of the current thread, or NULL if there is none
private:
void abortNotInLoopThread(); // abort: not in the main I/O thread
void handleRead(); // waked up // drain the wakeup descriptor so it can signal again
void doPendingFunctors(); // run the tasks handed over to the I/O thread
void printActiveChannels() const; // DEBUG // log the events that occurred
typedef std::vector<Channel*> ChannelList; // list of channels (event dispatchers)
bool looping_; /* atomic */ // whether the loop is running
bool quit_; // whether to quit the event loop
bool eventHandling_; /* atomic */ // whether events are currently being handled
bool callingPendingFunctors_; /* atomic */ // whether pending functors are currently being run
int64_t iteration_; // number of event-loop iterations
const pid_t threadId_; // ID of the thread that runs loop()
Timestamp pollReturnTime_; // time at which poll() returned
boost::scoped_ptr<Poller> poller_; // I/O multiplexing
boost::scoped_ptr<TimerQueue> timerQueue_; // timer queue
int wakeupFd_; // wakeup descriptor (eventfd)
// unlike in TimerQueue, which is an internal class,
// we don't expose Channel to client.
boost::scoped_ptr<Channel> wakeupChannel_; // channel wrapping wakeupFd_
boost::any context_;
// scratch variables
ChannelList activeChannels_; // channels with active events
Channel* currentActiveChannel_; // channel currently being handled
mutable MutexLock mutex_; // mutex protecting pendingFunctors_
std::vector<Functor> pendingFunctors_ GUARDED_BY(mutex_); // tasks that must run in the main I/O thread
};
}
}
#endif // MUDUO_NET_EVENTLOOP_H
EventLoop.cc:
#include <muduo/net/EventLoop.h>
#include <muduo/base/Logging.h>
#include <muduo/base/Mutex.h>
#include <muduo/net/Channel.h>
#include <muduo/net/Poller.h>
#include <muduo/net/SocketsOps.h>
#include <muduo/net/TimerQueue.h>
#include <boost/bind.hpp>
#include <algorithm>
#include <signal.h>
#include <sys/eventfd.h>
#include <unistd.h>
using namespace muduo;
using namespace muduo::net;
namespace
{
// pointer to the EventLoop object of the current thread
// thread-local storage
__thread EventLoop* t_loopInThisThread = 0;
const int kPollTimeMs = 10000; // poll() timeout in milliseconds
int createEventfd()
{
int evtfd = ::eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (evtfd < 0)
{
LOG_SYSERR << "Failed in eventfd";
abort();
}
return evtfd;
}
#pragma GCC diagnostic ignored "-Wold-style-cast"
class IgnoreSigPipe
{
public:
IgnoreSigPipe()
{
::signal(SIGPIPE, SIG_IGN);
// LOG_TRACE << "Ignore SIGPIPE";
}
};
#pragma GCC diagnostic error "-Wold-style-cast"
IgnoreSigPipe initObj;
}
EventLoop* EventLoop::getEventLoopOfCurrentThread()
{
return t_loopInThisThread;
}
EventLoop::EventLoop() // constructor: initialize the event loop
: looping_(false),
quit_(false),
eventHandling_(false),
callingPendingFunctors_(false),
iteration_(0),
threadId_(CurrentThread::tid()),
poller_(Poller::newDefaultPoller(this)),
timerQueue_(new TimerQueue(this)),
wakeupFd_(createEventfd()),
wakeupChannel_(new Channel(this, wakeupFd_)),
currentActiveChannel_(NULL)
{
LOG_DEBUG << "EventLoop created " << this << " in thread " << threadId_;
if (t_loopInThisThread)
{
LOG_FATAL << "Another EventLoop " << t_loopInThisThread
<< " exists in this thread " << threadId_;
}
else
{
t_loopInThisThread = this;
}
wakeupChannel_->setReadCallback(
boost::bind(&EventLoop::handleRead, this)); // register the read callback of the wakeup channel
// we are always reading the wakeupfd
wakeupChannel_->enableReading(); // register interest in the readable event
}
EventLoop::~EventLoop()
{
LOG_DEBUG << "EventLoop " << this << " of thread " << threadId_
<< " destructs in thread " << CurrentThread::tid();
wakeupChannel_->disableAll();
wakeupChannel_->remove();
::close(wakeupFd_);
t_loopInThisThread = NULL;
}
void EventLoop::loop()
{
assert(!looping_);
assertInLoopThread();
looping_ = true;
quit_ = false; // FIXME: what if someone calls quit() before loop() ?
LOG_TRACE << "EventLoop " << this << " start looping";
while (!quit_)
{
activeChannels_.clear(); // clear the list of active channels
// poller_->poll() fills activeChannels_ with the channels that are ready; the poller also
// records the event types that occurred in each channel's revents_, which Channel::handleEvent
// uses to invoke the corresponding callbacks
pollReturnTime_ = poller_->poll(kPollTimeMs, &activeChannels_);
++iteration_;
if (Logger::logLevel() <= Logger::TRACE)
{
printActiveChannels(); // log the events that occurred
}
// TODO sort channel by priority
eventHandling_ = true;
// handle the ready events
for (ChannelList::iterator it = activeChannels_.begin();
it != activeChannels_.end(); ++it)
{
currentActiveChannel_ = *it;
// call the channel's handleEvent, which invokes the user callbacks according to the event types the poller recorded
currentActiveChannel_->handleEvent(pollReturnTime_);
}
currentActiveChannel_ = NULL;
eventHandling_ = false;
doPendingFunctors(); // run the tasks queued for the I/O thread
}
LOG_TRACE << "EventLoop " << this << " stop looping";
looping_ = false;
}
void EventLoop::quit()
{
quit_ = true;
// There is a chance that loop() just executes while(!quit_) and exits,
// then EventLoop destructs, then we are accessing an invalid object.
// Can be fixed using mutex_ in both places.
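// If quit() is called from another thread, the loop may be blocked in poll();
// wake it up so that it re-checks quit_ promptly.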
if (!isInLoopThread())
{
wakeup();
}
}
void EventLoop::runInLoop(const Functor& cb)
{
if (isInLoopThread())
{
cb();
}
else
{
queueInLoop(cb);
}
}
void EventLoop::queueInLoop(const Functor& cb)
{
{
MutexLockGuard lock(mutex_);
pendingFunctors_.push_back(cb);
}
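// Wake up the loop thread if the caller is in a different thread, or if the loop
// thread is currently executing pending functors (otherwise the newly queued
// functor would have to wait for the next poll() to return on its own).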
if (!isInLoopThread() || callingPendingFunctors_)
{
wakeup();
}
}
size_t EventLoop::queueSize() const
{
MutexLockGuard lock(mutex_);
return pendingFunctors_.size();
}
TimerId EventLoop::runAt(const Timestamp& time, const TimerCallback& cb)
{
return timerQueue_->addTimer(cb, time, 0.0);
}
TimerId EventLoop::runAfter(double delay, const TimerCallback& cb)
{
Timestamp time(addTime(Timestamp::now(), delay));
return runAt(time, cb);
}
TimerId EventLoop::runEvery(double interval, const TimerCallback& cb)
{
Timestamp time(addTime(Timestamp::now(), interval));
return timerQueue_->addTimer(cb, time, interval);
}
void EventLoop::cancel(TimerId timerId)
{
return timerQueue_->cancel(timerId);
}
void EventLoop::updateChannel(Channel* channel) // add or update a channel in the poller's map
{
assert(channel->ownerLoop() == this);
assertInLoopThread();
poller_->updateChannel(channel);
}
void EventLoop::removeChannel(Channel* channel) // remove a channel from the poller's map
{
assert(channel->ownerLoop() == this);
assertInLoopThread();
if (eventHandling_)
{
assert(currentActiveChannel_ == channel ||
std::find(activeChannels_.begin(), activeChannels_.end(), channel) == activeChannels_.end());
}
poller_->removeChannel(channel);
}
bool EventLoop::hasChannel(Channel* channel) // check whether the channel is registered with the poller
{
assert(channel->ownerLoop() == this);
assertInLoopThread();
return poller_->hasChannel(channel);
}
void EventLoop::abortNotInLoopThread()
{
LOG_FATAL << "EventLoop::abortNotInLoopThread - EventLoop " << this
<< " was created in threadId_ = " << threadId_
<< ", current thread id = " << CurrentThread::tid();
}
void EventLoop::wakeup()
{
uint64_t one = 1;
ssize_t n = sockets::write(wakeupFd_, &one, sizeof one);
if (n != sizeof one)
{
LOG_ERROR << "EventLoop::wakeup() writes " << n << " bytes instead of 8";
}
}
void EventLoop::handleRead()
{
uint64_t one = 1;
ssize_t n = sockets::read(wakeupFd_, &one, sizeof one);
if (n != sizeof one)
{
LOG_ERROR << "EventLoop::handleRead() reads " << n << " bytes instead of 8";
}
}
void EventLoop::doPendingFunctors()
{
std::vector<Functor> functors;
callingPendingFunctors_ = true;
{
MutexLockGuard lock(mutex_);
functors.swap(pendingFunctors_); // swap to shorten the critical section and avoid deadlock if a functor calls queueInLoop()
}
for (size_t i = 0; i < functors.size(); ++i)
{
functors[i]();
}
callingPendingFunctors_ = false;
}
void EventLoop::printActiveChannels() const // log the events that occurred
{
for (ChannelList::const_iterator it = activeChannels_.begin();
it != activeChannels_.end(); ++it)
{
const Channel* ch = *it;
LOG_TRACE << "{" << ch->reventsToString() << "} ";
}
}