A Simple High-Concurrency Model on Linux: Epoll + Thread Pool

First, a locker.h file that wraps a semaphore, a mutex, and a condition variable.

The thread pool's task queue needs mutex protection. When a task arrives in the queue, one thread blocked in pthread_cond_wait() must be woken up; when the pool stops, all threads must be woken up, which is what pthread_cond_broadcast() is for.
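As a quick illustration of that protocol, here is a minimal sketch (not part of the original code; it assumes only the cond_locker class defined below, and the function and variable names are made up for illustration):

#include "locker.h"

cond_locker queue_cond;        // guards the "a task is available" condition
bool pool_stopping = false;    // set when the pool shuts down

// Producer side: after pushing a task onto the mutex-protected queue,
// wake exactly one worker blocked in pthread_cond_wait().
void notify_one_task()
{
    queue_cond.signal();
}

// Shutdown side: wake every worker so each one sees the stop flag and exits.
void notify_stop()
{
    pool_stopping = true;
    queue_cond.broadcast();
}

// Worker side: sleep whenever the queue turns out to be empty.
void worker_loop()
{
    while(!pool_stopping)
    {
        // ...try to pop a task under the queue mutex; if none was available:
        queue_cond.wait();     // blocks until signal() or broadcast()
    }
}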


The locker.h file:

#ifndef _LOCKER_H_
#define _LOCKER_H_

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

/* Semaphore wrapper */
class sem_locker
{
private:
    sem_t m_sem;

public:
    //initialize the semaphore
    sem_locker()
    {
	if(sem_init(&m_sem, 0, 0) != 0)
	    printf("sem init error\n");
    }
    //destroy the semaphore
    ~sem_locker()
    {
	sem_destroy(&m_sem);
    }

    //wait on the semaphore
    bool wait()
    {
	return sem_wait(&m_sem) == 0;
    }
    //post the semaphore
    bool add()
    {
	return sem_post(&m_sem) == 0;
    }
};


/* Mutex locker */
class mutex_locker
{
private:
    pthread_mutex_t m_mutex;

public:
    mutex_locker()
    {
    	if(pthread_mutex_init(&m_mutex, NULL) != 0)
	    printf("mutex init error!");
    }
    ~mutex_locker()
    {
	pthread_mutex_destroy(&m_mutex);
    }

    bool mutex_lock()  //lock mutex
    {
	return pthread_mutex_lock(&m_mutex) == 0;
    }
    bool mutex_unlock()   //unlock
    {
	return pthread_mutex_unlock(&m_mutex) == 0;
    }
};

/* Condition variable locker */
class cond_locker
{
private:
    pthread_mutex_t m_mutex;
    pthread_cond_t m_cond;

public:
    // initialize m_mutex and m_cond
    cond_locker()
    {
	if(pthread_mutex_init(&m_mutex, NULL) != 0)
	    printf("mutex init error");
	if(pthread_cond_init(&m_cond, NULL) != 0)
	{   //if the condition variable failed to initialize, release the mutex that was already initialized
	    pthread_mutex_destroy(&m_mutex);
	    printf("cond init error");
	}
    }
    // destroy mutex and cond
    ~cond_locker()
    {
	pthread_mutex_destroy(&m_mutex);
	pthread_cond_destroy(&m_cond);
    }
    //wait on the condition variable
    bool wait()
    {
	int ans = 0;
	pthread_mutex_lock(&m_mutex);
	ans = pthread_cond_wait(&m_cond, &m_mutex);
	pthread_mutex_unlock(&m_mutex);
	return ans == 0;
    }
    //wake up one thread waiting on the condition variable
    bool signal()
    {
	return pthread_cond_signal(&m_cond) == 0;
    }

    //wake up all threads waiting on the condition variable
    bool broadcast()
    {
            return pthread_cond_broadcast(&m_cond) == 0;
    }
};

#endif


The thread_pool.h file.

It creates threadnum threads and detaches each one with pthread_detach(), so a thread's resources are reclaimed automatically when it exits. (The thread pool in my previous post was buggy and incomplete: on shutdown it could not get all of the threads to exit cleanly.)
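For orientation, here is a minimal usage sketch of the pool on its own (MyTask is a hypothetical task type invented for this example; the real task classes used by the server are BaseTask/Task in EpollServer.h below):

#include "thread_pool.h"
#include <stdio.h>
#include <unistd.h>

// Hypothetical task type: the pool only requires a doit() method,
// because threadpool<T>::run() simply calls task->doit().
struct MyTask
{
    int id;
    MyTask(int i) : id(i) {}
    void doit() { printf("running task %d\n", id); }
};

int main()
{
    threadpool<MyTask> pool(4);          // 4 worker threads
    pool.start();                        // create and detach the workers
    for(int i = 0; i < 10; ++i)
        pool.append_task(new MyTask(i)); // tasks are raw heap pointers
    sleep(1);                            // crude: give the workers time to run
    pool.stop();                         // set is_stop and broadcast to all workers
    return 0;
}

Note that the pool stores raw pointers and never deletes a task after doit() returns, so whoever creates a task is responsible for its lifetime (the demo server below simply leaks them).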


#ifndef _PTHREAD_POOL_
#define _PTHREAD_POOL_

#include "locker.h"
#include <pthread.h>
#include <queue>
#include <stdio.h>
#include <exception>

template <typename T>
class threadpool
{
private:
    int thread_number;  //number of threads in the pool
    //int max_task_number;  //maximum number of tasks in the queue
    pthread_t *all_threads;   //array of thread ids
    std::queue<T *> task_queue; //task queue
    mutex_locker queue_mutex_locker;  //mutex protecting the queue
    //sem_locker queue_sem_locker;   //semaphore
    cond_locker queue_cond_locker; //condition variable
    bool is_stop; //whether the pool should stop
public:
    threadpool(int thread_num = 20);
    ~threadpool();
    bool append_task(T *task);  //append a task
    void start();              //start the thread pool
    void stop();               //stop the thread pool
private:
    //function each thread runs; it calls run()
    static void *worker(void *arg);
    void run();
    T *getTask();   //fetch a task
};

template <typename T>
threadpool<T>::threadpool(int thread_num):
	thread_number(thread_num), all_threads(NULL), is_stop(false)
{       //constructor
    if(thread_num <= 0)
	printf("threadpool can't init because thread_number <= 0");

    all_threads = new pthread_t[thread_number];
    if(all_threads == NULL)
    	printf("can't init threadpool because thread array can't new");
}

template <typename T>
threadpool<T>::~threadpool()
{
    delete []all_threads;
    stop();
}

template <typename T>
void threadpool<T>::stop() //stop the thread pool
{
        is_stop = true;
        //queue_sem_locker.add();
        queue_cond_locker.broadcast();
}

template <typename T>
void threadpool<T>::start()  //start the thread pool
{
    for(int i = 0; i < thread_number; ++i)
    {
	//printf("create the %dth pthread\n", i);
	if(pthread_create(all_threads + i, NULL, worker, this) != 0)
	{//thread creation failed: release the allocated resources and throw an exception
	    delete []all_threads;
	    throw std::exception();
	}
	if(pthread_detach(all_threads[i]))
	{//detach the thread; on failure, release the allocated resources and throw an exception
	    delete []all_threads;
	    throw std::exception();
	}
    }
}
//append a task to the task queue
template <typename T>
bool threadpool<T>::append_task(T *task)   //append a task
{   //acquire the mutex
    queue_mutex_locker.mutex_lock();
    
    bool is_signal = task_queue.empty();
    //push into the queue
    task_queue.push(task);
    queue_mutex_locker.mutex_unlock();
    //wake up a thread waiting for a task
    if(is_signal)
    {
            queue_cond_locker.signal();
    }
    return true;
}

template <typename T>
void *threadpool<T>::worker(void *arg)  //thread worker function
{
    threadpool<T> *pool = (threadpool<T> *)arg;
    pool->run();
    return pool;
}

template <typename T>
T* threadpool<T>::getTask()   //fetch a task from the task queue
{
    T *task = NULL;
    queue_mutex_locker.mutex_lock();
    if(!task_queue.empty())
    {
        task = task_queue.front();
        task_queue.pop();
    }
    queue_mutex_locker.mutex_unlock();
    return task;
}

template <typename T>
void threadpool<T>::run()
{
    while(!is_stop){
        T *task = getTask();
        if(task == NULL)  //queue is empty, wait
                queue_cond_locker.wait();
        else              //execute the task
                task->doit();
    }
    //for test
    //printf("exit%d\n", (unsigned long)pthread_self());
}

#endif



Next, the epoll wrapper.

The BaseTask and Task classes in EpollServer.h really belong in their own BaseTask.h and Task.h files; they are kept here for convenience.

#ifndef _EPOLL_SERVER_H_
#define _EPOLL_SERVER_H_

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/epoll.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <stdlib.h>
#include <errno.h>

#include "thread_pool.h"

#define MAX_EVENT 1024   //maximum number of epoll events
#define MAX_BUFFER 2048  //maximum buffer size in bytes

class BaseTask
{
public:
	virtual void doit() = 0;
};

class Task : public BaseTask
{
private:
	int sockfd;
	char order[MAX_BUFFER];
public:
	Task(char *str, int fd) : sockfd(fd)
	{
		memset(order, '\0', MAX_BUFFER);
		strcpy(order, str);
	}
	void doit()  //the task's execution function
	{
		//do something of the order
		//printf("%s\n", order);
		snprintf(order, MAX_BUFFER - 1, "somedata\n");
		write(sockfd, order, strlen(order));
	}
};

class EpollServer
{
private:
	bool is_stop;   //flag to stop the epoll_wait loop
	int threadnum;   //number of threads
	int sockfd;     //listening fd
	int port;      //port
	int epollfd;    //epoll fd
	threadpool<BaseTask> *pool;   //pointer to the thread pool
	//char address[20];
	epoll_event events[MAX_EVENT];  //array of epoll events
	struct sockaddr_in bindAddr;   //the address to bind

public://constructors
	EpollServer()
	{}
	EpollServer(int ports, int thread) : is_stop(false) , threadnum(thread) ,
		port(ports), pool(NULL)
	{
	}
	~EpollServer()  //destructor
	{
		delete pool;
	}

	void init();

	void epoll();

	static int setnonblocking(int fd)  //set fd to non-blocking
	{
		int old_option = fcntl(fd, F_GETFL);
		int new_option = old_option | O_NONBLOCK;
		fcntl(fd, F_SETFL, new_option);
		return old_option;
	}

	static void addfd(int epollfd, int sockfd, bool oneshot)  //add an fd to epoll
	{//oneshot: whether only one thread may handle the fd at a time; all reads happen in the main thread here, so every call passes false
		epoll_event event;
		event.data.fd = sockfd;
		event.events = EPOLLIN | EPOLLET;
		if(oneshot)
		{
			event.events |= EPOLLONESHOT;
		}
		epoll_ctl(epollfd, EPOLL_CTL_ADD, sockfd, &event); //add the fd
		EpollServer::setnonblocking(sockfd);
	}

};

void EpollServer::init()   //initialize the EpollServer
{
	bzero(&bindAddr, sizeof(bindAddr));
	bindAddr.sin_family = AF_INET;
	bindAddr.sin_port = htons(port);
	bindAddr.sin_addr.s_addr = htonl(INADDR_ANY);
        //create the socket
	sockfd = socket(AF_INET, SOCK_STREAM, 0);
	if(sockfd < 0)
	{
		printf("EpollServer socket init error\n");
		return;
	}
	int ret = bind(sockfd, (struct sockaddr *)&bindAddr, sizeof(bindAddr));
	if(ret < 0)
	{
		printf("EpollServer bind init error\n");
		return;
	}
	ret = listen(sockfd, 10);
	if(ret < 0)
	{
		printf("EpollServer listen init error\n");
		return;
	}
        //create Epoll
	epollfd = epoll_create(1024);
	if(epollfd < 0)
	{
		printf("EpollServer epoll_create init error\n");
		return;
	}
	pool = new threadpool<BaseTask>(threadnum);  //create the thread pool
}

void EpollServer::epoll()
{
	pool->start();   //start the thread pool
	//
	addfd(epollfd, sockfd, false);
	while(!is_stop)
	{//call epoll_wait
		int ret = epoll_wait(epollfd, events, MAX_EVENT, -1);
		if(ret < 0)  //error handling
		{
			printf("epoll_wait error\n");
			break;
		}
		for(int i = 0; i < ret; ++i)
		{
			int fd = events[i].data.fd;
			if(fd == sockfd)  //a new connection has arrived
			{
				struct sockaddr_in clientAddr;
				socklen_t len = sizeof(clientAddr);
				int confd = accept(sockfd, (struct sockaddr *)
					&clientAddr, &len);

				EpollServer::addfd(epollfd, confd, false);
			}
			else if(events[i].events & EPOLLIN)  //data is readable on some fd
			{
				char buffer[MAX_BUFFER];
		readagain:	memset(buffer, 0, sizeof(buffer));
				int ret = read(fd, buffer, MAX_BUFFER - 1);
				if(ret == 0)  //the peer closed the connection: remove the fd from epoll and shut it down
				{
					struct epoll_event ev;
					ev.events = EPOLLIN;
					ev.data.fd = fd;
					epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &ev);
					shutdown(fd, SHUT_RDWR);
					printf("%d logout\n", fd);
					continue;
				}
				else if(ret < 0)//read error, try the read again
				{
					if(errno == EAGAIN)
					{
						printf("read error! read again\n");
						goto readagain;
					}
				}
				else//read succeeded: append a task to the thread pool
				{
					BaseTask *task = new Task(buffer, fd);
					pool->append_task(task);
				}
			}
			else
			{
				printf("something else had happened\n");
			}
		}
	}
	close(sockfd);//finished

	pool->stop();
}

#endif


Next, a simple demo to test it.

#include "EpollServer.h"

int main(int argc, char const *argv[])
{
	if(argc != 3)
	{
		printf("usage %s port threadnum\n", argv[0]);
		return -1;
	}
	int port = atoi(argv[1]);
	if(port == 0)
	{
		printf("port must be Integer\n");
		return -1;
	}
	int threadnum = atoi(argv[2]);
	if(threadnum == 0)
	{
		printf("threadnum must be Integer\n");
		return -1;
	}
	EpollServer *epoll = new EpollServer(port, threadnum);

	epoll->init();

	epoll->epoll();
	return 0;
}


The code compiles on Ubuntu. A follow-up post will look at how much concurrency it can actually handle.
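For reference, assuming the demo above is saved as main.cpp next to locker.h, thread_pool.h, and EpollServer.h, it can be built with something like g++ main.cpp -o epoll_server -pthread, started as ./epoll_server 8888 4, and smoke-tested with nc 127.0.0.1 8888: each line sent should trigger the "somedata" reply that Task::doit() writes back.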

