A High-Performance HTTP Server

Recently I have been working on a high-performance HTTP server. The first step is to build a high-performance TCP network programming framework; once that framework is done, adding the HTTP-specific features on top of it is all that remains.

Design requirements:

  • Use the reactor model, with epoll as the event-dispatching implementation.
  • Multithreading must be supported, so the framework can run either as a single-threaded, single-reactor model or as a multi-threaded master-slave reactor model, and I/O events on sockets can be spread across multiple threads.
  • Encapsulate read and write operations in a Buffer object.

Based on these requirements, the design is split into three modules, explained in turn below: the reactor pattern design, the I/O model and multithreading model design, and the data read/write encapsulation and buffer design.

Reactor Pattern Design

The reactor pattern part is mainly about designing a reactor framework based on event dispatching and callbacks. The main objects in this framework are:

  • event_loop

You can think of the event_loop object as an endless event loop bound to a thread; you will see this event_loop abstraction in many languages. What does that mean? Simply put, it is an event dispatcher that loops forever: as soon as an event occurs, it calls back the handler functions defined in advance to process the event. Concretely, event_loop uses poll or epoll to block a thread while it waits for various I/O events to occur.

Below is the code for event_loop.h:

#ifndef EVENT_LOOP_H
#define EVENT_LOOP_H

#include <pthread.h>
#include "channel.h"
#include "event_dispatcher.h"
#include "common.h"

extern const struct event_dispatcher poll_dispatcher;
extern const struct event_dispatcher epoll_dispatcher;

// linked list of pending channel events
struct channel_element {
    int type; // operation type (1: add  2: delete  3: update)
    struct channel *channel; // the channel object
    struct channel_element *next; // next pointer
};
// The event_loop object is an endlessly running event dispatcher: as soon as an event occurs,
// it calls back the handler functions registered in advance to process the event.
// Concretely, event_loop uses poll or epoll to block a thread while waiting for I/O events.
struct event_loop {
    int quit;
    // event dispatcher
    const struct event_dispatcher *eventDispatcher;

    /** data private to the chosen event dispatcher. **/
    void *event_dispatcher_data;
    struct channel_map *channelMap;

    /** list of events waiting to be registered. **/
    int is_handle_pending;
    struct channel_element *pending_head;
    struct channel_element *pending_tail;

    pthread_t owner_thread_id;
    pthread_mutex_t mutex;
    pthread_cond_t cond;

    // socket pair used by the main thread to wake up the sub-reactor thread
    int socketPair[2];
    char *thread_name;
};

// event_loop initialization (unnamed)
struct event_loop *event_loop_init();

// event_loop initialization (named)
struct event_loop *event_loop_init_with_name(char * thread_name);

// start the event_loop
int event_loop_run(struct event_loop *eventLoop);

void event_loop_wakeup(struct event_loop *eventLoop);

// entry point for adding a channel event
int event_loop_add_channel_event(struct event_loop *eventLoop, int fd, struct channel *channel1);

// entry point for removing a channel event
int event_loop_remove_channel_event(struct event_loop *eventLoop, int fd, struct channel *channel1);

// entry point for updating a channel event
int event_loop_update_channel_event(struct event_loop *eventLoop, int fd, struct channel *channel1);

// actual implementation of add
int event_loop_handle_pending_add(struct event_loop *eventLoop, int fd, struct channel *channel);

// actual implementation of remove
int event_loop_handle_pending_remove(struct event_loop *eventLoop, int fd, struct channel *channel);

// actual implementation of update
int event_loop_handle_pending_update(struct event_loop *eventLoop, int fd, struct channel *channel);

// called by the dispatcher after an event fires, so that event_loop runs the callbacks for that event
// res: EVENT_READ | EVENT_WRITE, etc.
int channel_event_activate(struct event_loop *eventLoop, int fd, int res);

#endif

Below is the code for event_loop.c:

#include <assert.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/socket.h>
#include "event_loop.h"
#include "common.h"
#include "log.h"
#include "event_dispatcher.h"
#include "channel.h"
#include "utils.h"

// register the pending events stored in the pending_head list
int event_loop_handle_pending_channel(struct event_loop *eventLoop) {
    // get the lock
    pthread_mutex_lock(&eventLoop->mutex);
    // mark that pending events are being handled
    eventLoop->is_handle_pending = 1;

    // walk the pending_head list and register each entry with the event_loop's dispatcher
    struct channel_element *channelElement = eventLoop->pending_head;
    while (channelElement != NULL) {
        //save into event_map
        struct channel *channel = channelElement->channel;
        int fd = channel->fd;
        if (channelElement->type == 1) {
            event_loop_handle_pending_add(eventLoop, fd, channel);
        } else if (channelElement->type == 2) {
            event_loop_handle_pending_remove(eventLoop, fd, channel);
        } else if (channelElement->type == 3) {
            event_loop_handle_pending_update(eventLoop, fd, channel);
        }
        // free the processed list node (it was malloc'ed in event_loop_channel_buffer_nolock)
        struct channel_element *processed = channelElement;
        channelElement = channelElement->next;
        free(processed);
    }

    // the pending list is empty once processing is done
    eventLoop->pending_head = eventLoop->pending_tail = NULL;
    // clear the handling flag
    eventLoop->is_handle_pending = 0;

    // release the lock
    pthread_mutex_unlock(&eventLoop->mutex);

    return 0;
}

// buffer the pending event into the pending list (caller must hold the lock)
void event_loop_channel_buffer_nolock(struct event_loop *eventLoop, int fd, struct channel *channel1, int type) {
    // append a channel_element node to the pending list
    struct channel_element *channelElement = malloc(sizeof(struct channel_element));
    channelElement->channel = channel1;
    channelElement->type = type;
    channelElement->next = NULL;
    //first element
    if (eventLoop->pending_head == NULL) {
        eventLoop->pending_head = eventLoop->pending_tail = channelElement;
    } else {
        eventLoop->pending_tail->next = channelElement;
        eventLoop->pending_tail = channelElement;
    }
}

// queue the event, then either handle it directly or wake up the owning thread
int event_loop_do_channel_event(struct event_loop *eventLoop, int fd, struct channel *channel1, int type) {
    // get the lock
    pthread_mutex_lock(&eventLoop->mutex);
    assert(eventLoop->is_handle_pending == 0);
    // first buffer the event into the pending list
    event_loop_channel_buffer_nolock(eventLoop, fd, channel1, type);
    //release the lock
    pthread_mutex_unlock(&eventLoop->mutex);
	
    if (!isInSameThread(eventLoop)) {
        event_loop_wakeup(eventLoop);
    } else {
        event_loop_handle_pending_channel(eventLoop);
    }

    return 0;

}

int event_loop_add_channel_event(struct event_loop *eventLoop, int fd, struct channel *channel1) {
    return event_loop_do_channel_event(eventLoop, fd, channel1, 1);
}

int event_loop_remove_channel_event(struct event_loop *eventLoop, int fd, struct channel *channel1) {
    return event_loop_do_channel_event(eventLoop, fd, channel1, 2);
}

int event_loop_update_channel_event(struct event_loop *eventLoop, int fd, struct channel *channel1) {
    return event_loop_do_channel_event(eventLoop, fd, channel1, 3);
}

// in the i/o thread
// add the channel to the event_loop's channel_map and register it with the dispatcher
int event_loop_handle_pending_add(struct event_loop *eventLoop, int fd, struct channel *channel) {
    yolanda_msgx("add channel fd == %d, %s", fd, eventLoop->thread_name);

    // fetch the event_loop's channel_map
    struct channel_map *map = eventLoop->channelMap;

    if (fd < 0)
        return 0;
    // if the map is too small for this fd, grow it
    if (fd >= map->nentries) {
        if (map_make_space(map, fd, sizeof(struct channel *)) == -1)
            return (-1);
    }

    //first time this fd is seen: add it
    if ((map)->entries[fd] == NULL) {
        // store the channel in the map
        map->entries[fd] = channel;
        // then register the channel with the dispatcher (e.g. epoll)
        const struct event_dispatcher *eventDispatcher = eventLoop->eventDispatcher;
        eventDispatcher->add(eventLoop, channel);
        return 1;
    }

    return 0;
}

// in the i/o thread
// remove the channel from the event_loop's channel_map and from the dispatcher
int event_loop_handle_pending_remove(struct event_loop *eventLoop, int fd, struct channel *channel1) {
    struct channel_map *map = eventLoop->channelMap;
    assert(fd == channel1->fd);

    if (fd < 0)
        return 0;

    if (fd >= map->nentries)
        return (-1);

    struct channel *channel2 = map->entries[fd];

    //update dispatcher(multi-thread)here
    const struct event_dispatcher *eventDispatcher = eventLoop->eventDispatcher;

    int retval = 0;
    if (eventDispatcher->del(eventLoop, channel2) == -1) {
        // deletion failed
        retval = -1;
    } else {
        // deletion succeeded
        retval = 1;
    }

    // clear the map slot for this fd
    map->entries[fd] = NULL;
    return retval;
}

// in the i/o thread
// update the registered events for the channel's descriptor fd
int event_loop_handle_pending_update(struct event_loop *eventLoop, int fd, struct channel *channel) {
    yolanda_msgx("update channel fd == %d, %s", fd, eventLoop->thread_name);
    struct channel_map *map = eventLoop->channelMap;

    if (fd < 0)
        return 0;

    if ((map)->entries[fd] == NULL) {
        return (-1);
    }
    // free(map->entries[fd]);
    // map->entries[fd] = channel;
    // update channel
    const struct event_dispatcher *eventDispatcher = eventLoop->eventDispatcher;
    eventDispatcher->update(eventLoop, channel);
    return 0;
}

// invoked when a registered event fires, to run the corresponding callbacks
// the dispatcher calls this after dispatching an event, so that event_loop executes the callbacks for it
int channel_event_activate(struct event_loop *eventLoop, int fd, int revents) {
    // fetch the event_loop's channel_map
    struct channel_map *map = eventLoop->channelMap;
    yolanda_msgx("activate channel fd == %d, revents=%d, %s", fd, revents, eventLoop->thread_name);

    // sanity checks
    if (fd < 0)
        return 0;

    if (fd >= map->nentries)
        return (-1);

    // find the channel object for this descriptor
    struct channel *channel = map->entries[fd];
    // the channel must refer to the same descriptor
    assert(fd == channel->fd);

    // handle the read event
    if (revents & (EVENT_READ)) {
        // run the read callback registered in the channel
        if (channel->eventReadCallback) channel->eventReadCallback(channel->data);
    }
    // handle the write event
    if (revents & (EVENT_WRITE)) {
        // run the write callback registered in the channel
        if (channel->eventWriteCallback) channel->eventWriteCallback(channel->data);
    }

    return 0;
}

// wake up the sub-reactor's event_loop by writing one byte into the socket pair
void event_loop_wakeup(struct event_loop *eventLoop) {
    char one = 'a';
    ssize_t n = write(eventLoop->socketPair[0], &one, sizeof one);
    if (n != sizeof one) {
        LOG_ERR("wakeup event loop thread failed");
    }
}

// read callback for the wakeup socket: consume the single byte
int handleWakeup(void *data) {
    struct event_loop *eventLoop = (struct event_loop *) data;
    char one;
    ssize_t n = read(eventLoop->socketPair[1], &one, sizeof one);
    if (n != sizeof one) {
        LOG_ERR("handleWakeup  failed");
    }
    yolanda_msgx("wakeup, %s", eventLoop->thread_name);
    return 0;
}

// initialization (unnamed)
struct event_loop *event_loop_init() {
    return event_loop_init_with_name(NULL);
}
// initialization (named)
struct event_loop *event_loop_init_with_name(char *thread_name) {
    struct event_loop *eventLoop = malloc(sizeof(struct event_loop));
    pthread_mutex_init(&eventLoop->mutex, NULL);
    pthread_cond_init(&eventLoop->cond, NULL);

    if (thread_name != NULL) {
        eventLoop->thread_name = thread_name;
    } else {
        eventLoop->thread_name = "main thread";
    }

    eventLoop->quit = 0;
    eventLoop->channelMap = malloc(sizeof(struct channel_map));
    map_init(eventLoop->channelMap);

#ifdef EPOLL_ENABLE
    yolanda_msgx("set epoll as dispatcher, %s", eventLoop->thread_name);
    eventLoop->eventDispatcher = &epoll_dispatcher;
#else
    yolanda_msgx("set poll as dispatcher, %s", eventLoop->thread_name);
    eventLoop->eventDispatcher = &poll_dispatcher;
#endif
    eventLoop->event_dispatcher_data = eventLoop->eventDispatcher->init(eventLoop);

    // once the event_loop object is set up, create the socket pair and register its read end with the dispatcher
    eventLoop->owner_thread_id = pthread_self();
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, eventLoop->socketPair) < 0) {
        LOG_ERR("socketpair set failed");
    }
    eventLoop->is_handle_pending = 0;
    eventLoop->pending_head = NULL;
    eventLoop->pending_tail = NULL;

    // socketPair[1] is the read end; handleWakeup consumes the byte written by event_loop_wakeup
    struct channel *channel = channel_new(eventLoop->socketPair[1], EVENT_READ, handleWakeup, NULL, eventLoop);
    event_loop_add_channel_event(eventLoop, eventLoop->socketPair[1], channel);

    return eventLoop;
}

/**
 *
 * 1. validate arguments
 * 2. let the dispatcher dispatch events, then invoke the callbacks of the active channels
 */
// this is also the loop that every sub-reactor thread runs
int event_loop_run(struct event_loop *eventLoop) {
    assert(eventLoop != NULL);

    const struct event_dispatcher *dispatcher = eventLoop->eventDispatcher;

    if (eventLoop->owner_thread_id != pthread_self()) {
        exit(1);
    }

    yolanda_msgx("event loop run, %s", eventLoop->thread_name);
    struct timeval timeval;
    timeval.tv_sec = 1;
    timeval.tv_usec = 0;

    while (!eventLoop->quit) {
        // dispatch events first
        // block here to wait I/O event, and get active channels
        dispatcher->dispatch(eventLoop, &timeval);

        // then handle the events queued in the pending list
        // handle the pending channel
        event_loop_handle_pending_channel(eventLoop);
    }

    yolanda_msgx("event loop end, %s", eventLoop->thread_name);
    return 0;
}


  • channel

Everything registered on an event_loop is abstracted as a channel: for example, the listening event registered on an event_loop, or the read/write events of a socket registered on an event_loop. You will see a channel object in the APIs of many languages, and by and large it expresses the same idea as the design here.

Below is the code for channel.h:

#ifndef CHANNEL_H
#define CHANNEL_H

#include "common.h"
#include "event_loop.h"
#include "buffer.h"

#define EVENT_TIMEOUT    0x01
/** Wait for a socket or FD to become readable */
#define EVENT_READ        0x02
/** Wait for a socket or FD to become writeable */
#define EVENT_WRITE    0x04
/** Wait for a POSIX signal to be raised*/
#define EVENT_SIGNAL    0x08

// function pointer types for event callbacks: return int, take a void* argument
typedef int (*event_read_callback)(void *data);

typedef int (*event_write_callback)(void *data);

// channel: any object registered on an event_loop (e.g. the listening event registered on an
// event_loop, or a connected socket's read/write events registered on an event_loop)
struct channel {
    // descriptor the events are registered on
    int fd;

    // type of the registered events
    int events;

    // pointer to the read-event callback
    event_read_callback eventReadCallback;

    // pointer to the write-event callback
    event_write_callback eventWriteCallback;
    void *data; //callback data: may be the event_loop, a tcp_server or a tcp_connection
};

// create and return a new channel object
struct channel *
channel_new(int fd, int events, event_read_callback eventReadCallback, event_write_callback eventWriteCallback,
            void *data);

int channel_write_event_is_enabled(struct channel *channel);

int channel_write_event_enable(struct channel *channel);

int channel_write_event_disable(struct channel *channel);


#endif

Below is the code for channel.c:

#include <stdlib.h>
#include "channel.h"

// create and initialize a channel object
struct channel *
channel_new(int fd, int events, event_read_callback eventReadCallback, event_write_callback eventWriteCallback,
            void *data) {
    struct channel *chan = malloc(sizeof(struct channel));
    chan->fd = fd;
    chan->events = events;
    chan->eventReadCallback = eventReadCallback;
    chan->eventWriteCallback = eventWriteCallback;
    chan->data = data;
    return chan;
}

// is the channel currently interested in EVENT_WRITE?
int channel_write_event_is_enabled(struct channel *channel) {
    return channel->events & EVENT_WRITE;
}

// enable write events for a channel registered on an event_loop
int channel_write_event_enable(struct channel *channel) {
    struct event_loop *eventLoop = (struct event_loop *) channel->data;
    channel->events = channel->events | EVENT_WRITE;
    return event_loop_update_channel_event(eventLoop, channel->fd, channel);
}

// disable write events for a channel registered on an event_loop
int channel_write_event_disable(struct channel *channel) {
    struct event_loop *eventLoop = (struct event_loop *) channel->data;
    channel->events = channel->events & ~EVENT_WRITE;
    return event_loop_update_channel_event(eventLoop, channel->fd, channel);
}


  • acceptor

The acceptor object represents the server-side listener. It is eventually wrapped in a channel object and registered on the event_loop, so that connection-established events can be detected and dispatched.

  • event_dispatcher

event_dispatcher is an abstraction over the event dispatching mechanism: we can implement a poll-based poll_dispatcher as well as an epoll-based epoll_dispatcher. Here we design a single event_dispatcher struct to abstract these behaviors.

Below is the code for event_dispatcher.h:

#ifndef EVENT_DISPATCHER_H
#define EVENT_DISPATCHER_H

#include "channel.h"

/** Abstract event_dispatcher struct; concrete implementations use I/O multiplexing such as select, poll or epoll. */
// event_dispatcher abstracts the event dispatching mechanism: one can implement a poll-based
// poll_dispatcher as well as an epoll-based epoll_dispatcher
struct event_dispatcher {
    /**  name of the implementation */
    const char *name;

    /**  initialization */
    void *(*init)(struct event_loop * eventLoop);

    /** tell the dispatcher to add a channel event */
    int (*add)(struct event_loop * eventLoop, struct channel * channel);

    /** tell the dispatcher to delete a channel event */
    int (*del)(struct event_loop * eventLoop, struct channel * channel);

    /** tell the dispatcher to update the events of a channel */
    int (*update)(struct event_loop * eventLoop, struct channel * channel);

    /** dispatch events, then call event_loop's channel_event_activate to run the callbacks */
    int (*dispatch)(struct event_loop * eventLoop, struct timeval *);

    /** clean up */
    void (*clear)(struct event_loop * eventLoop);
};

#endif

Below is epoll_dispatcher.c, the epoll-based implementation of the event dispatching mechanism:

#include <sys/epoll.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <error.h>
#include "event_dispatcher.h"
#include "event_loop.h"
#include "log.h"

#define MAXEVENTS 128

// private data for the epoll-based dispatcher
typedef struct {
    // number of events in the events array
    int event_count;
    int nfds;
    int realloc_copy;
    // epoll instance
    int efd;
    // array filled by epoll_wait with the ready I/O events
    struct epoll_event *events;
} epoll_dispatcher_data;

// initialization
static void *epoll_init(struct event_loop *);

// add/delete/update monitored events via epoll_ctl
static int epoll_add(struct event_loop *, struct channel *channel1);

static int epoll_del(struct event_loop *, struct channel *channel1);

static int epoll_update(struct event_loop *, struct channel *channel1);

// start dispatching: call epoll_wait and handle the ready I/O events
static int epoll_dispatch(struct event_loop *, struct timeval *);

static void epoll_clear(struct event_loop *);

const struct event_dispatcher epoll_dispatcher = {
        "epoll",
        epoll_init,
        epoll_add,
        epoll_del,
        epoll_update,
        epoll_dispatch,
        epoll_clear,
};

/**  initialization */
// returns an epoll_dispatcher_data object (owned by the event_loop)
void *epoll_init(struct event_loop *eventLoop) {
    epoll_dispatcher_data *epollDispatcherData = malloc(sizeof(epoll_dispatcher_data));

    epollDispatcherData->event_count = 0;
    epollDispatcherData->nfds = 0;
    epollDispatcherData->realloc_copy = 0;
    epollDispatcherData->efd = 0;

    // create an epoll instance
    epollDispatcherData->efd = epoll_create1(0);
    // returns a non-negative descriptor for the epoll instance on success, or -1 on error
    if (epollDispatcherData->efd == -1) {
        error(1, errno, "epoll create failed");
    }

    // allocate the array that epoll_wait will fill with ready events
    epollDispatcherData->events = calloc(MAXEVENTS, sizeof(struct epoll_event));

    // return the epoll_dispatcher_data object
    return epollDispatcherData;
}

/** tell the dispatcher to add a channel event */
int epoll_add(struct event_loop *eventLoop, struct channel *channel1) {
    // fetch the dispatcher data from the event_loop (cast to epoll_dispatcher_data)
    epoll_dispatcher_data *pollDispatcherData = (epoll_dispatcher_data *) eventLoop->event_dispatcher_data;

    // descriptor the event is registered on
    int fd = channel1->fd;
    // translate the channel's event types into epoll flags
    int events = 0;
    if (channel1->events & EVENT_READ) {
        events = events | EPOLLIN;
    }
    if (channel1->events & EVENT_WRITE) {
        events = events | EPOLLOUT;
    }
    // build the epoll event object
    struct epoll_event event;
    event.data.fd = fd;
    event.events = events;

    // to use edge-triggered mode instead:
    // event.events = events | EPOLLET;

    // call epoll_ctl to add the monitored event to the epoll instance (op: EPOLL_CTL_ADD)
    if (epoll_ctl(pollDispatcherData->efd, EPOLL_CTL_ADD, fd, &event) == -1) {
        error(1, errno, "epoll_ctl add  fd failed");
    }

    return 0;
}

/** tell the dispatcher to delete a channel event */
int epoll_del(struct event_loop *eventLoop, struct channel *channel1) {
    // fetch the dispatcher data from the event_loop (cast to epoll_dispatcher_data)
    epoll_dispatcher_data *pollDispatcherData = (epoll_dispatcher_data *) eventLoop->event_dispatcher_data;

    // descriptor the event is registered on
    int fd = channel1->fd;

    // translate the channel's event types into epoll flags
    int events = 0;
    if (channel1->events & EVENT_READ) {
        events = events | EPOLLIN;
    }

    if (channel1->events & EVENT_WRITE) {
        events = events | EPOLLOUT;
    }

    // build the epoll event object
    struct epoll_event event;
    event.data.fd = fd;
    event.events = events;

    // to use edge-triggered mode instead:
    //  event.events = events | EPOLLET;

    // call epoll_ctl to remove the monitored event from the epoll instance (op: EPOLL_CTL_DEL)
    if (epoll_ctl(pollDispatcherData->efd, EPOLL_CTL_DEL, fd, &event) == -1) {
        error(1, errno, "epoll_ctl delete fd failed");
    }

    return 0;
}

/** tell the dispatcher to update the events of a channel */
int epoll_update(struct event_loop *eventLoop, struct channel *channel1) {
    // fetch the dispatcher data from the event_loop (cast to epoll_dispatcher_data)
    epoll_dispatcher_data *pollDispatcherData = (epoll_dispatcher_data *) eventLoop->event_dispatcher_data;

    // descriptor the event is registered on
    int fd = channel1->fd;

    // translate the channel's event types into epoll flags
    int events = 0;
    if (channel1->events & EVENT_READ) {
        events = events | EPOLLIN;
    }
    if (channel1->events & EVENT_WRITE) {
        events = events | EPOLLOUT;
    }

    // build the epoll event object
    struct epoll_event event;
    event.data.fd = fd;
    event.events = events;

    // to use edge-triggered mode instead:
    //  event.events = events | EPOLLET;

    // call epoll_ctl to modify the monitored events for this descriptor (op: EPOLL_CTL_MOD)
    if (epoll_ctl(pollDispatcherData->efd, EPOLL_CTL_MOD, fd, &event) == -1) {
        error(1, errno, "epoll_ctl modify fd failed");
    }

    return 0;
}

/** dispatch events, then call event_loop's channel_event_activate to run the callbacks */
int epoll_dispatch(struct event_loop *eventLoop, struct timeval *timeval) {
    // fetch the dispatcher data from the event_loop (cast to epoll_dispatcher_data)
    epoll_dispatcher_data *epollDispatcherData = (epoll_dispatcher_data *) eventLoop->event_dispatcher_data;
    int i, n;

    // wait for the kernel to report I/O events (note: the -1 timeout blocks indefinitely,
    // so the timeval argument is ignored by this implementation)
    n = epoll_wait(epollDispatcherData->efd, epollDispatcherData->events, MAXEVENTS, -1);
    // the ready I/O events are returned in the epollDispatcherData->events array, n entries in total
    yolanda_msgx("epoll_wait wakeup, %s", eventLoop->thread_name);
    // handle each ready I/O event in turn
    for (i = 0; i < n; i++) {
        // on error or hang-up, close the descriptor
        if ((epollDispatcherData->events[i].events & EPOLLERR) || (epollDispatcherData->events[i].events & EPOLLHUP)) {
            fprintf(stderr, "epoll error\n");
            close(epollDispatcherData->events[i].data.fd);
            continue;
        }
        // the descriptor is readable
        if (epollDispatcherData->events[i].events & EPOLLIN) {
            yolanda_msgx("get message channel fd==%d for read, %s", epollDispatcherData->events[i].data.fd, eventLoop->thread_name);
            channel_event_activate(eventLoop, epollDispatcherData->events[i].data.fd, EVENT_READ);
        }
        // the descriptor is writable
        if (epollDispatcherData->events[i].events & EPOLLOUT) {
            yolanda_msgx("get message channel fd==%d for write, %s", epollDispatcherData->events[i].data.fd,eventLoop->thread_name);
            channel_event_activate(eventLoop, epollDispatcherData->events[i].data.fd, EVENT_WRITE);
        }
    }

    return 0;
}

/** clean up */
void epoll_clear(struct event_loop *eventLoop) {
    // fetch the dispatcher data from the event_loop (cast to epoll_dispatcher_data)
    epoll_dispatcher_data *epollDispatcherData = (epoll_dispatcher_data *) eventLoop->event_dispatcher_data;
    // free the array of pending I/O events
    free(epollDispatcherData->events);
    // close the epoll descriptor efd
    close(epollDispatcherData->efd);
    // free the epoll_dispatcher_data object itself
    free(epollDispatcherData);
    // reset the pointer held by the event_loop
    eventLoop->event_dispatcher_data = NULL;

    return;
}


  • channel_map

channel_map keeps the mapping from descriptors to channel objects, so that when an event fires we can quickly find, via the socket that triggered it, the handler functions stored in the channel object.

Below is the code for channel_map.h:

#ifndef CHANNEL_MAP_H
#define CHANNEL_MAP_H


#include "channel.h"

/**
 * channel map, keyed by the socket descriptor
 */
// channel_map keeps the mapping from descriptors to channels, so when an event fires the channel
// (and its callbacks) can be found quickly from the socket that triggered it.
struct channel_map {
    void **entries;

    /* The number of entries available in entries */
    int nentries;
};


int map_make_space(struct channel_map *map, int slot, int msize);

void map_init(struct channel_map *map);

void map_clear(struct channel_map *map);

#endif

Below is the code for channel_map.c:

#include <stdlib.h>
#include <string.h>
#include "channel_map.h"

// slot is the target index (usually the fd about to be registered); msize is the size of a channel pointer
int map_make_space(struct channel_map *map, int slot, int msize) {
    if (map->nentries <= slot) {
        // if nentries is 0, start with a default capacity of 32
        int nentries = map->nentries ? map->nentries : 32;
        void **tmp;
        // keep doubling until the capacity exceeds slot
        while (nentries <= slot)
            nentries <<= 1;
        // grow the existing memory with realloc
        tmp = (void **) realloc(map->entries, nentries * msize);
        if (tmp == NULL)
            return (-1);
        // zero out the newly added part of the allocation
        memset(&tmp[map->nentries], 0,
               (nentries - map->nentries) * msize);

        map->nentries = nentries;
        map->entries = tmp;
    }

    return (0);
}

void map_init(struct channel_map *map) {
    map->nentries = 0;
    map->entries = NULL;
}

void map_clear(struct channel_map *map) {
    if (map->entries != NULL) {
        int i;
        for (i = 0; i < map->nentries; ++i) {
            if (map->entries[i] != NULL)
                free(map->entries[i]);
        }
        free(map->entries);
        map->entries = NULL;
    }
    map->nentries = 0;
}
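
To make the growth policy concrete, here is a tiny standalone check (a hypothetical test file, not part of the project): registering descriptor 100 into an empty map rounds the capacity up to the next power of two, i.e. 128.

#include <stdio.h>
#include "channel_map.h"

int main() {
    struct channel_map map;
    map_init(&map);                                      // nentries == 0, entries == NULL
    map_make_space(&map, 100, sizeof(struct channel *)); // grows 32 -> 64 -> 128
    printf("nentries = %d\n", map.nentries);             // prints 128
    map_clear(&map);
    return 0;
}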


Event-Driven Pattern (the Reactor Pattern)

Let's start with the event-driven pattern. Reactor is a common pattern for handling concurrent I/O with synchronous I/O. Its central idea is to register all I/O events to be handled on a central I/O multiplexer, with the main thread/process blocked on that multiplexer; as soon as an I/O event arrives or becomes ready (a file descriptor or socket becomes readable or writable), the multiplexer returns and dispatches the pre-registered event to its handler. The figure below shows a single reactor thread that dispatches both the acceptor's events and the I/O events of the connected sockets.

[Figure: single reactor thread]

Master-Slave Reactor Pattern

The core idea of the master-slave pattern is that the main reactor thread is only responsible for dispatching connection-establishment events on the acceptor, while I/O events on connected sockets are dispatched by the sub-reactors. The number of sub-reactors can be tuned flexibly to the number of CPU cores.

For example, on a four-core CPU we can set the number of sub-reactors to 4, which is like having four capable reactor threads working at the same time and greatly improves the efficiency of I/O dispatching. Moreover, events for a given socket are always dispatched on the same reactor thread, which greatly reduces locking overhead in concurrent processing.

[Figure: master-slave reactor pattern]

To explain the figure: the main reactor thread keeps sensing connection-establishment events. When a connection is established, the main reactor thread obtains the connected socket via accept, picks a sub-reactor thread according to some algorithm, and adds the connected socket to the chosen sub-reactor thread. The main reactor thread's only job is to call accept to obtain connected sockets and to hand them over to the sub-reactor threads.

I/O Model and Multithreading Model Design

We adopt the master-slave reactor pattern because, under a single-reactor model, one reactor has to dispatch both the connection-establishment events on the acceptor and the I/O events of established connections, which can noticeably lower the connection success rate for clients. The master-slave reactor pattern solves this I/O dispatching efficiency problem: the main reactor thread only dispatches connection establishment on the acceptor, while the I/O events of connected sockets are dispatched by the sub-reactors.

Going one step further, we introduce worker threads and upgrade the model to: master-slave reactor + worker threads.

Master-Slave Reactor + Worker Threads Pattern

If the master-slave reactor pattern solves the efficiency of I/O dispatching, then worker threads solve the coupling between business logic and I/O dispatching. Combining the two strategies gives the pattern most widely used in practice; the well-known Netty is an implementation that pushes this pattern to the extreme.

The diagram below describes the master-slave reactor + worker threads pattern:

[Figure: master-slave reactor + worker threads pattern]

The I/O threads and the multithreading model mainly address which thread the event_loop runs on, and on which thread event dispatching and callbacks execute.

  • thread_pool

thread_pool maintains a list of sub-reactor threads for the main reactor thread to use. Every time a new connection is established, a thread can be taken from the thread_pool and used to register the read/write events of the new connection's socket, separating the I/O threads from the main reactor thread.

Below is the code for thread_pool.h:

#ifndef THREAD_POOL_H
#define THREAD_POOL_H

#include "event_loop.h"
#include "event_loop_thread.h"

// thread_pool maintains a list of sub-reactor threads for the main reactor thread to use:
// every time a new connection is established, a thread can be taken from the pool to register
// the read/write events of the new connection's socket, separating the I/O threads from the
// main reactor thread
struct thread_pool {
    //the main thread's event_loop, which created this thread_pool
    struct event_loop *mainLoop;
    //whether the pool has been started
    int started;
    //number of threads
    int thread_number;
    //pointer to the array of created event_loop_thread objects
    struct event_loop_thread *eventLoopThreads;

    //position in the array, used to decide which event_loop_thread serves next
    int position;

};

// create a new thread pool
struct thread_pool *thread_pool_new(struct event_loop *mainLoop, int threadNumber);

// start the thread pool
void thread_pool_start(struct thread_pool *);

// pick a thread from the pool according to some algorithm
struct event_loop *thread_pool_get_loop(struct thread_pool *);

#endif

Below is the code for thread_pool.c:

#include <assert.h>
#include <stdlib.h>
#include "utils.h"
#include "thread_pool.h"

// construct a new thread_pool object
struct thread_pool *thread_pool_new(struct event_loop *mainLoop, int threadNumber) {

    struct thread_pool *threadPool = malloc(sizeof(struct thread_pool));
    threadPool->mainLoop = mainLoop;
    threadPool->position = 0;
    threadPool->thread_number = threadNumber;
    threadPool->started = 0;
    threadPool->eventLoopThreads = NULL;
    return threadPool;
}

//must be called from the main thread
void thread_pool_start(struct thread_pool *threadPool) {
    // sanity check: the pool must not have been started yet
    assert(!threadPool->started);
    // check that the calling thread is the main thread that owns mainLoop
    assertInSameThread(threadPool->mainLoop);

    // mark the pool as started
    threadPool->started = 1;
    void *tmp;

    // nothing to do if the thread count is zero or negative
    if (threadPool->thread_number <= 0) {
        return;
    }
    // allocate the array of event_loop_thread objects
    threadPool->eventLoopThreads = malloc(threadPool->thread_number * sizeof(struct event_loop_thread));
    for (int i = 0; i < threadPool->thread_number; ++i) {
        // initialize each thread
        event_loop_thread_init(&threadPool->eventLoopThreads[i], i);
        // start each thread
        event_loop_thread_start(&threadPool->eventLoopThreads[i]);
    }
}

//must be called from the main thread: pick a thread from the pool in round-robin order
struct event_loop *thread_pool_get_loop(struct thread_pool *threadPool) {
    assert(threadPool->started);
    assertInSameThread(threadPool->mainLoop);

    //default to the main thread's own loop
    struct event_loop *selected = threadPool->mainLoop;

    //pick the next thread from the pool in round-robin order
    if (threadPool->thread_number > 0) {
        selected = threadPool->eventLoopThreads[threadPool->position].eventLoop;
        if (++threadPool->position >= threadPool->thread_number) {
            threadPool->position = 0;
        }
    }

    return selected;
}


  • event_loop_thread

event_loop_thread is the thread implementation of a reactor; the read/write event detection for connected sockets happens on this thread.

Below is the code for event_loop_thread.h:

#ifndef EVENT_LOOP_THREAD_H
#define EVENT_LOOP_THREAD_H

#include <pthread.h>

// a reactor thread
struct event_loop_thread {
    // the reactor (event_loop) owned by this thread
    struct event_loop *eventLoop;
    // thread id
    pthread_t thread_tid;        /* thread ID */
    // mutex
    pthread_mutex_t mutex;
    // condition variable
    pthread_cond_t cond;
    // thread name
    char * thread_name;
    long thread_count;    /* # connections handled */
};

//initialize an event_loop_thread whose memory has already been allocated
int event_loop_thread_init(struct event_loop_thread *, int);

//called by the main thread: initialize a sub thread and let it start running its event_loop
struct event_loop *event_loop_thread_start(struct event_loop_thread *);

#endif

Below is the code for event_loop_thread.c:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include "event_loop_thread.h"
#include "event_loop.h"

// entry function of a reactor thread
void *event_loop_thread_run(void *arg) {
    struct event_loop_thread *eventLoopThread = (struct event_loop_thread *) arg;
    // lock the mutex
    pthread_mutex_lock(&eventLoopThread->mutex);

    // initialize the eventLoop object
    eventLoopThread->eventLoop = event_loop_init_with_name(eventLoopThread->thread_name);
    yolanda_msgx("event loop thread init and signal, %s", eventLoopThread->thread_name);
    // signal the main thread that this sub thread has finished initializing
    pthread_cond_signal(&eventLoopThread->cond);
    // unlock the mutex
    pthread_mutex_unlock(&eventLoopThread->mutex);

    //run the sub thread's event loop
    event_loop_run(eventLoopThread->eventLoop);
    return NULL;
}

//initialize an event_loop_thread whose memory has already been allocated
int event_loop_thread_init(struct event_loop_thread *eventLoopThread, int i) {
    // initialize the mutex and condition variable
    pthread_mutex_init(&eventLoopThread->mutex, NULL);
    pthread_cond_init(&eventLoopThread->cond, NULL);
    eventLoopThread->eventLoop = NULL;
    eventLoopThread->thread_count = 0;
    eventLoopThread->thread_tid = 0;

    char *buf = malloc(16);
    sprintf(buf, "Thread-%d", i + 1);
    eventLoopThread->thread_name = buf;

    return 0;
}


//called by the main thread: initialize a sub thread and let it start running its event_loop
struct event_loop *event_loop_thread_start(struct event_loop_thread *eventLoopThread) {
    // create and start the thread
    pthread_create(&eventLoopThread->thread_tid, NULL, &event_loop_thread_run, eventLoopThread);

    // lock the thread's mutex
    assert(pthread_mutex_lock(&eventLoopThread->mutex) == 0);

    // wait until the sub thread has created its event_loop
    while (eventLoopThread->eventLoop == NULL) {
        assert(pthread_cond_wait(&eventLoopThread->cond, &eventLoopThread->mutex) == 0);
    }

    assert(pthread_mutex_unlock(&eventLoopThread->mutex) == 0);

    yolanda_msgx("event loop thread started, %s", eventLoopThread->thread_name);
    return eventLoopThread->eventLoop;
}
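
With event_loop and event_loop_thread in place, the cross-thread wakeup path described earlier can be exercised in isolation. The following is a minimal sketch (a hypothetical test program, not part of the original project): the main thread starts one sub-reactor thread and then registers a read event for standard input on that thread's loop. Because the caller is not the loop's owner thread, event_loop_add_channel_event buffers the request and wakes the sub-reactor through the socket pair, which then registers the channel and starts reporting input. Note that epoll only accepts pollable descriptors, so this works with a terminal on fd 0, not with a redirected regular file.

#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include "event_loop.h"
#include "event_loop_thread.h"
#include "channel.h"

// read callback for standard input: just report that data arrived
int on_stdin_readable(void *data) {
    char buf[128];
    ssize_t n = read(0, buf, sizeof(buf));
    printf("stdin readable, %zd bytes\n", n);
    return 0;
}

int main() {
    // start one sub-reactor thread; it blocks in its own event_loop_run
    struct event_loop_thread loopThread;
    event_loop_thread_init(&loopThread, 0);
    struct event_loop *subLoop = event_loop_thread_start(&loopThread);

    // register stdin (fd 0) on the sub-reactor's loop from the main thread;
    // this goes through the pending list plus the socketpair wakeup shown above
    struct channel *chan = channel_new(0, EVENT_READ, on_stdin_readable, NULL, subLoop);
    event_loop_add_channel_event(subLoop, 0, chan);

    // keep the main thread alive while the sub-reactor works
    pthread_join(loopThread.thread_tid, NULL);
    return 0;
}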

Since the main reactor thread is only responsible for dispatching connection events on the server's listening socket, we create a tcp_connection object for every connection. This tcp_connection object can be understood as sitting one-to-one on top of a channel object. On to the code:

Below is the code for tcp_connection.h:

#ifndef TCP_CONNECTION
#define TCP_CONNECTION

#include "event_loop.h"
#include "channel.h"
#include "buffer.h"
#include "tcp_server.h"

struct tcp_connection {
    // the event_loop this connection belongs to
    struct event_loop *eventLoop;
    // the channel registered for this connection
    struct channel *channel;
    char *name;
    struct buffer *input_buffer;   //receive buffer
    struct buffer *output_buffer;  //send buffer

    // callback invoked when the connection is completed
    connection_completed_call_back connectionCompletedCallBack;
    // callback invoked when data has been read
    message_call_back messageCallBack;
    // callback invoked when a write completes
    write_completed_call_back writeCompletedCallBack;
    // callback invoked when the connection is closed
    connection_closed_call_back connectionClosedCallBack;

    void * data; //for callback use: http_server
    void * request; // for callback use
    void * response; // for callback use
};

struct tcp_connection *
tcp_connection_new(int fd, struct event_loop *eventLoop, connection_completed_call_back connectionCompletedCallBack,
                   connection_closed_call_back connectionClosedCallBack,
                   message_call_back messageCallBack, write_completed_call_back writeCompletedCallBack);

//application-level entry point for sending raw data
int tcp_connection_send_data(struct tcp_connection *tcpConnection, void *data, int size);

//application-level entry point for sending a buffer
int tcp_connection_send_buffer(struct tcp_connection *tcpConnection, struct buffer * buffer);

// shut down the connection
void tcp_connection_shutdown(struct tcp_connection * tcpConnection);
//int tcp_connection_append_buffer(struct tcp_connection *tcpConnection);
#endif

Below is the code for tcp_connection.c:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/socket.h>
#include "tcp_connection.h"
#include "utils.h"


int handle_connection_closed(struct tcp_connection *tcpConnection) {
    struct event_loop *eventLoop = tcpConnection->eventLoop;
    struct channel *channel = tcpConnection->channel;
    // remove the events registered for this descriptor from the event_loop
    event_loop_remove_channel_event(eventLoop, channel->fd, channel);
    if (tcpConnection->connectionClosedCallBack != NULL) {
        tcpConnection->connectionClosedCallBack(tcpConnection);
    }
    return 0;
}

// when the connected descriptor becomes readable, read the data into the receive buffer and invoke
// the message callback; if nothing can be read, close the connection
int handle_read(void *data) {
    struct tcp_connection *tcpConnection = (struct tcp_connection *) data;
    struct buffer *input_buffer = tcpConnection->input_buffer;
    struct channel *channel = tcpConnection->channel;

    if (buffer_socket_read(input_buffer, channel->fd) > 0) {
        //the application consumes the data in the buffer here
        if (tcpConnection->messageCallBack != NULL) {
            tcpConnection->messageCallBack(input_buffer, tcpConnection);
        }
    } else {
        handle_connection_closed(tcpConnection);
    }
    return 0;
}

//the socket is writable again:
//keep pushing the data in the channel's output_buffer out to the socket
int handle_write(void *data) {
    struct tcp_connection *tcpConnection = (struct tcp_connection *) data;
    struct event_loop *eventLoop = tcpConnection->eventLoop;
    assertInSameThread(eventLoop);

    struct buffer *output_buffer = tcpConnection->output_buffer;
    struct channel *channel = tcpConnection->channel;

    ssize_t nwrited = write(channel->fd, output_buffer->data + output_buffer->readIndex,
                            buffer_readable_size(output_buffer));
    if (nwrited > 0) {
        //nwrited bytes have been consumed from the buffer
        output_buffer->readIndex += nwrited;
        //if everything has been sent, stop watching for writability
        if (buffer_readable_size(output_buffer) == 0) {
            channel_write_event_disable(channel);
        }
        //invoke writeCompletedCallBack
        if (tcpConnection->writeCompletedCallBack != NULL) {
            tcpConnection->writeCompletedCallBack(tcpConnection);
        }
    } else {
        yolanda_msgx("handle_write for tcp connection %s", tcpConnection->name);
    }
    return 0;
}

// create a tcp_connection object
struct tcp_connection *
tcp_connection_new(int connected_fd, struct event_loop *eventLoop,
                   connection_completed_call_back connectionCompletedCallBack,
                   connection_closed_call_back connectionClosedCallBack,
                   message_call_back messageCallBack, write_completed_call_back writeCompletedCallBack) {
    struct tcp_connection *tcpConnection = malloc(sizeof(struct tcp_connection));
    // set the four callbacks
    tcpConnection->writeCompletedCallBack = writeCompletedCallBack;
    tcpConnection->messageCallBack = messageCallBack;
    tcpConnection->connectionCompletedCallBack = connectionCompletedCallBack;
    tcpConnection->connectionClosedCallBack = connectionClosedCallBack;
    // the event_loop this connection will live on
    tcpConnection->eventLoop = eventLoop;
    // create the receive and send buffers
    tcpConnection->input_buffer = buffer_new();
    tcpConnection->output_buffer = buffer_new();

    // name the connection: connection-<connected fd>
    char *buf = malloc(16);
    sprintf(buf, "connection-%d", connected_fd);
    tcpConnection->name = buf;

    // add event read for the new connection
    // create the channel for read events on the connected socket
    struct channel *channel1 = channel_new(connected_fd, EVENT_READ, handle_read, handle_write, tcpConnection);
    tcpConnection->channel = channel1;

    //connectionCompletedCallBack callback
    if (tcpConnection->connectionCompletedCallBack != NULL) {
        tcpConnection->connectionCompletedCallBack(tcpConnection);
    }
    // register the connected socket's read-event channel with the event_loop
    event_loop_add_channel_event(tcpConnection->eventLoop, connected_fd, tcpConnection->channel);
    return tcpConnection;
}

//application-level entry point for sending data
int tcp_connection_send_data(struct tcp_connection *tcpConnection, void *data, int size) {
    ssize_t nwrited = 0;
    size_t nleft = size;
    int fault = 0;

    struct channel *channel = tcpConnection->channel;
    struct buffer *output_buffer = tcpConnection->output_buffer;

    //try writing directly to the socket first
    if (!channel_write_event_is_enabled(channel) && buffer_readable_size(output_buffer) == 0) {
        nwrited = write(channel->fd, data, size);
        if (nwrited >= 0) {
            nleft = nleft - nwrited;
        } else {
            nwrited = 0;
            if (errno != EWOULDBLOCK) {
                if (errno == EPIPE || errno == ECONNRESET) {
                    fault = 1;
                }
            }
        }
    }

    if (!fault && nleft > 0) {
        //copy the rest into the output buffer; the framework now owns the data
        buffer_append(output_buffer, data + nwrited, nleft);
        if (!channel_write_event_is_enabled(channel)) {
            channel_write_event_enable(channel);
        }
    }

    return nwrited;
}

int tcp_connection_send_buffer(struct tcp_connection *tcpConnection, struct buffer *buffer) {
    int size = buffer_readable_size(buffer);
    int result = tcp_connection_send_data(tcpConnection, buffer->data + buffer->readIndex, size);
    buffer->readIndex += size;
    return result;
}

void tcp_connection_shutdown(struct tcp_connection *tcpConnection) {
    if (shutdown(tcpConnection->channel->fd, SHUT_WR) < 0) {
        yolanda_msgx("tcp_connection_shutdown failed, socket == %d", tcpConnection->channel->fd);
    }
}

Finally, here is the TCPserver object created by the main reactor; it is where all the modules designed above come together to provide the service.

Below is the code for tcp_server.h:

#ifndef TCP_SERVER_H
#define TCP_SERVER_H

typedef int (*connection_completed_call_back)(struct tcp_connection *tcpConnection);

typedef int (*message_call_back)(struct buffer *buffer, struct tcp_connection *tcpConnection);

typedef int (*write_completed_call_back)(struct tcp_connection *tcpConnection);

typedef int (*connection_closed_call_back)(struct tcp_connection *tcpConnection);

#include "acceptor.h"
#include "event_loop.h"
#include "thread_pool.h"
#include "buffer.h"
#include "tcp_connection.h"

struct TCPserver {
    // port number
    int port;
    // the main thread's eventLoop object
    struct event_loop *eventLoop;
    // the acceptor object, i.e. the server-side listener
    struct acceptor *acceptor;
    // the four connection-related callbacks
    connection_completed_call_back connectionCompletedCallBack;
    message_call_back messageCallBack;
    write_completed_call_back writeCompletedCallBack;
    connection_closed_call_back connectionClosedCallBack;
    int threadNum;
    struct thread_pool *threadPool;
    void * data; //for callback use: http_server
};


//prepare the listening socket
struct TCPserver *
tcp_server_init(struct event_loop *eventLoop, struct acceptor *acceptor,
                connection_completed_call_back connectionCallBack,
                message_call_back messageCallBack,
                write_completed_call_back writeCompletedCallBack,
                connection_closed_call_back connectionClosedCallBack,
                int threadNum);

//start listening
void tcp_server_start(struct TCPserver *tcpServer);

//set the callback data
void tcp_server_set_data(struct TCPserver *tcpServer, void * data);

#endif

Below is the code for tcp_server.c:

#include <stdlib.h>
#include <strings.h>
#include <signal.h>
#include <fcntl.h>
#include <errno.h>
#include <error.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "common.h"
#include "tcp_server.h"
#include "thread_pool.h"
#include "utils.h"
#include "tcp_connection.h"

int tcp_server(int port) {
    int listenfd;
    listenfd = socket(AF_INET, SOCK_STREAM, 0);

    struct sockaddr_in server_addr;
    bzero(&server_addr, sizeof(server_addr));
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    server_addr.sin_port = htons(port);

    int on = 1;
    setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

    int rt1 = bind(listenfd, (struct sockaddr *) &server_addr, sizeof(server_addr));
    if (rt1 < 0) {
        error(1, errno, "bind failed ");
    }

    int rt2 = listen(listenfd, LISTENQ);
    if (rt2 < 0) {
        error(1, errno, "listen failed ");
    }

    signal(SIGPIPE, SIG_IGN);

    int connfd;
    struct sockaddr_in client_addr;
    socklen_t client_len = sizeof(client_addr);

    if ((connfd = accept(listenfd, (struct sockaddr *) &client_addr, &client_len)) < 0) {
        error(1, errno, "accept failed ");
    }

    return connfd;
}

// create a TCP server listening socket (blocking)
int tcp_server_listen(int port) {
    int listenfd;
    listenfd = socket(AF_INET, SOCK_STREAM, 0);

    struct sockaddr_in server_addr;
    bzero(&server_addr, sizeof(server_addr));
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    server_addr.sin_port = htons(port);

    int on = 1;
    setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

    int rt1 = bind(listenfd, (struct sockaddr *) &server_addr, sizeof(server_addr));
    if (rt1 < 0) {
        error(1, errno, "bind failed ");
    }

    int rt2 = listen(listenfd, LISTENQ);
    if (rt2 < 0) {
        error(1, errno, "listen failed ");
    }

    signal(SIGPIPE, SIG_IGN);

    return listenfd;
}

// create a TCP server listening socket (non-blocking)
int tcp_nonblocking_server_listen(int port) {
    int listenfd;
    listenfd = socket(AF_INET, SOCK_STREAM, 0);

    // the key difference is this call: make the listening socket non-blocking
    make_nonblocking(listenfd);

    struct sockaddr_in server_addr;
    bzero(&server_addr, sizeof(server_addr));
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    server_addr.sin_port = htons(port);

    int on = 1;
    setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

    int rt1 = bind(listenfd, (struct sockaddr *) &server_addr, sizeof(server_addr));
    if (rt1 < 0) {
        error(1, errno, "bind failed ");
    }

    int rt2 = listen(listenfd, LISTENQ);
    if (rt2 < 0) {
        error(1, errno, "listen failed ");
    }

    signal(SIGPIPE, SIG_IGN);

    return listenfd;
}

void make_nonblocking(int fd) {
    fcntl(fd, F_SETFL, O_NONBLOCK);
}


struct TCPserver *
tcp_server_init(struct event_loop *eventLoop, struct acceptor *acceptor,
                connection_completed_call_back connectionCompletedCallBack,
                message_call_back messageCallBack,
                write_completed_call_back writeCompletedCallBack,
                connection_closed_call_back connectionClosedCallBack,
                int threadNum) {
    struct TCPserver *tcpServer = malloc(sizeof(struct TCPserver));
    tcpServer->eventLoop = eventLoop;
    tcpServer->acceptor = acceptor;
    tcpServer->connectionCompletedCallBack = connectionCompletedCallBack;
    tcpServer->messageCallBack = messageCallBack;
    tcpServer->writeCompletedCallBack = writeCompletedCallBack;
    tcpServer->connectionClosedCallBack = connectionClosedCallBack;
    tcpServer->threadNum = threadNum;
    tcpServer->threadPool = thread_pool_new(eventLoop, threadNum);
    tcpServer->data = NULL;

    return tcpServer;
}

// callback invoked when a new connection arrives (data is the TCPserver passed to the acceptor's channel)
int handle_connection_established(void *data) {
    struct TCPserver *tcpServer = (struct TCPserver *) data;
    struct acceptor *acceptor = tcpServer->acceptor;
    int listenfd = acceptor->listen_fd;

    // accept the connected socket connected_fd and make it non-blocking
    struct sockaddr_in client_addr;
    socklen_t client_len = sizeof(client_addr);
    int connected_fd = accept(listenfd, (struct sockaddr *) &client_addr, &client_len);
    make_nonblocking(connected_fd);

    yolanda_msgx("new connection established, socket == %d", connected_fd);

    // pick a thread from the TCPserver's thread pool and get its event_loop
    // choose event loop from the thread pool
    struct event_loop *eventLoop = thread_pool_get_loop(tcpServer->threadPool);

    // create a new tcp connection
    struct tcp_connection *tcpConnection = tcp_connection_new(connected_fd, eventLoop,
                                                              tcpServer->connectionCompletedCallBack,
                                                              tcpServer->connectionClosedCallBack,
                                                              tcpServer->messageCallBack,
                                                              tcpServer->writeCompletedCallBack);
    //for callback use
    if (tcpServer->data != NULL) {
        tcpConnection->data = tcpServer->data;
    }
    return 0;
}

//start listening
void tcp_server_start(struct TCPserver *tcpServer) {
    struct acceptor *acceptor = tcpServer->acceptor;
    struct event_loop *eventLoop = tcpServer->eventLoop;

    //start the sub-reactor threads
    thread_pool_start(tcpServer->threadPool);

    // once the thread pool is up, register the listening socket's read event on the main event_loop;
    // the acceptor stays on the main thread, and tcpServer is passed to the channel as callback data
    struct channel *channel = channel_new(acceptor->listen_fd, EVENT_READ, handle_connection_established, NULL,
                                          tcpServer);
    event_loop_add_channel_event(eventLoop, channel->fd, channel);
    return;
}

//set the callback data
void tcp_server_set_data(struct TCPserver *tcpServer, void *data) {
    if (data != NULL) {
        tcpServer->data = data;
    }
}
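
None of the listings above show how the pieces are assembled, and the acceptor source is not included in this post, so its exact interface is an assumption here: I assume an acceptor that simply owns the listening descriptor (something like struct acceptor { int listen_port; int listen_fd; } created by a hypothetical acceptor_new(port) that wraps tcp_nonblocking_server_listen). Under those assumptions, a minimal echo-style program wiring everything together might look like this (the callback names and port 8080 are mine, purely for illustration):

#include <stdio.h>
#include "event_loop.h"
#include "acceptor.h"
#include "tcp_server.h"
#include "buffer.h"
#include "tcp_connection.h"

// connection established: nothing special to do for a plain echo server
int on_connection_completed(struct tcp_connection *tcpConnection) {
    printf("connection completed: %s\n", tcpConnection->name);
    return 0;
}

// data arrived: echo the receive buffer straight back to the peer
int on_message(struct buffer *input, struct tcp_connection *tcpConnection) {
    tcp_connection_send_buffer(tcpConnection, input);
    return 0;
}

int on_write_completed(struct tcp_connection *tcpConnection) {
    return 0;
}

int on_connection_closed(struct tcp_connection *tcpConnection) {
    printf("connection closed: %s\n", tcpConnection->name);
    return 0;
}

int main(int argc, char **argv) {
    // the main thread's event_loop: the main reactor
    struct event_loop *eventLoop = event_loop_init();

    // listener on port 8080 (acceptor_new is assumed, see above)
    struct acceptor *acceptor = acceptor_new(8080);

    // one acceptor thread (the main reactor) plus 4 sub-reactor I/O threads
    struct TCPserver *tcpServer = tcp_server_init(eventLoop, acceptor,
                                                  on_connection_completed, on_message,
                                                  on_write_completed, on_connection_closed, 4);
    tcp_server_start(tcpServer);

    // the main thread now runs the main reactor loop and never returns
    event_loop_run(eventLoop);
    return 0;
}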

Data Read/Write Encapsulation and Buffer Design

To be added in a follow-up post.
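
Until that follow-up is written, here is the buffer interface that the code above already relies on (tcp_connection.c uses buffer_new, buffer_readable_size, buffer_append, buffer_socket_read, and the data/readIndex fields). This is only a sketch inferred from that usage, not the final design; in particular the writeIndex and total_size fields are my assumptions.

// a growable byte buffer; bytes in [readIndex, writeIndex) are readable (assumed layout)
struct buffer {
    char *data;
    int readIndex;
    int writeIndex;
    int total_size;
};

// allocate an empty buffer
struct buffer *buffer_new();

// number of bytes available to read
int buffer_readable_size(struct buffer *buffer);

// append size bytes of data to the buffer, growing it if needed
int buffer_append(struct buffer *buffer, void *data, int size);

// read as much as possible from the socket fd into the buffer;
// returns the number of bytes read, 0 on EOF, -1 on error
int buffer_socket_read(struct buffer *buffer, int fd);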
