A recent project needed a web framework, and performance was a key criterion. While evaluating candidates I ran some performance comparisons; the test code and results are shared below.
When web frameworks come up, many people think of Tomcat and Jetty, the two most widely used Java-based web servers; their performance is not in the same league as nginx, so they are skipped here.
Test environment:
CPU: 2.0 GHz
VMware virtual machine
nginx (one worker process)
ab command: ab -c 500 -n 100000 http://10.10.73.210:80/index.html
QPS: 11002
Since the test machine is a VMware virtual machine, performance may be somewhat lower than on physical hardware; even so, nginx reaches a QPS above 10,000.
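The nginx configuration itself is not part of the original notes; a minimal sketch consistent with the setup above (a single worker serving a static index.html) might look roughly like this, where the connection limit and document root are assumptions:

worker_processes  1;            # single worker, matching the test setup

events {
    worker_connections  10240;  # assumed value, not recorded in the test
}

http {
    server {
        listen  80;
        location / {
            root   /usr/share/nginx/html;   # assumed docroot containing index.html
            index  index.html;
        }
    }
}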
tornado: in the Python world, tornado's performance has long been well regarded. It was included as a candidate mainly for the development efficiency that Python offers.
The test code is as follows:
import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import sys

class MainHandler(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    def get(self):
        print("one test client come")
        self.write("Hello, world shen")
        self.finish()

application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    port = sys.argv[1]
    print port
    application.listen(port)
    tornado.ioloop.IOLoop.instance().start()
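The script takes the listen port as its first command-line argument, so it was presumably started and benchmarked along these lines (the file name and port here are illustrative, not taken from the original notes):

python tornado_server.py 8000
ab -c 500 -n 100000 http://10.10.73.210:8000/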
QPS: roughly 1158
The test shows that although tornado's performance is decent, it still lags far behind nginx.
Since nginx owes much of its speed to an event-driven I/O model, the next question was how fast a standalone, hand-written TCP server could go. For such a server, raw epoll is the obvious first choice on Linux, but coding against it directly is quite tedious (a rough sketch of what that involves is shown below). The main alternatives in the libevent family are libev and libuv; their test code and results follow.
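To give a sense of why raw epoll is considered tedious, here is a minimal, illustrative skeleton of the bookkeeping a hand-written epoll loop requires (non-blocking accept, fd registration, read handling). It is not the benchmark code; the HTTP response and most error handling are omitted, and the port is illustrative.

/*
 * Minimal raw-epoll skeleton (illustrative only). Everything here is
 * hand-written bookkeeping that libev/libuv would otherwise handle.
 */
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define MAX_EVENTS 64

int main(void)
{
    int listen_fd = socket(AF_INET, SOCK_STREAM, 0);

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(8090);          /* illustrative port */
    addr.sin_addr.s_addr = INADDR_ANY;
    bind(listen_fd, (struct sockaddr *)&addr, sizeof(addr));
    listen(listen_fd, 512);

    int epfd = epoll_create1(0);
    struct epoll_event ev, events[MAX_EVENTS];
    ev.events = EPOLLIN;
    ev.data.fd = listen_fd;
    epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);

    for (;;) {
        int n = epoll_wait(epfd, events, MAX_EVENTS, -1);
        for (int i = 0; i < n; i++) {
            int fd = events[i].data.fd;
            if (fd == listen_fd) {
                /* new connection: accept, set non-blocking, register for reads */
                int conn = accept(listen_fd, NULL, NULL);
                if (conn < 0)
                    continue;
                fcntl(conn, F_SETFL, fcntl(conn, F_GETFL, 0) | O_NONBLOCK);
                ev.events = EPOLLIN;
                ev.data.fd = conn;
                epoll_ctl(epfd, EPOLL_CTL_ADD, conn, &ev);
            } else {
                /* readable connection: read the request, then reply and close */
                char buf[1024];
                ssize_t len = recv(fd, buf, sizeof(buf), 0);
                if (len <= 0) {
                    epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL);
                    close(fd);
                    continue;
                }
                /* ... parse the request, send a canned HTTP response, close ... */
            }
        }
    }
    return 0;
}

First, the libev test code: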
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <math.h>
#include <netinet/in.h>
#include <time.h>
#include <unistd.h>
#include <sys/resource.h>
#include <sys/time.h>
#include "ev.h"

#define BACKLOG     512
#define LSN_PORT    8090
#define BUFFER_SIZE 1024

static size_t g_ses_count;

typedef struct ses_t {
    // self info
    size_t sid;
} ses_t;

int create_listen_fd()
{
    // create user listen socket fd
    int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
    if (-1 == listen_fd) {
        printf("create socket failed");
        return -1;
    }

    int yes = 1;
    int ret = setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int));
    if (-1 == ret) {
        printf("set socket opt SO_REUSEADDR failed");
        return -1;
    }

    struct sockaddr_in server_addr;
    memset(&server_addr, 0, sizeof(struct sockaddr_in));
    server_addr.sin_family = AF_INET;              // host byte order
    server_addr.sin_port = htons(LSN_PORT);        // short, network byte order
    server_addr.sin_addr.s_addr = INADDR_ANY;      // automatically fill with my IP
    memset(server_addr.sin_zero, '\0', sizeof(server_addr.sin_zero));

    ret = bind(listen_fd, (struct sockaddr *)&server_addr, sizeof(server_addr));
    if (-1 == ret) {
        printf("bind failed");
        return -1;
    }

    ret = listen(listen_fd, BACKLOG);
    if (-1 == ret) {
        printf("listen failed");
        return -1;
    }

    return listen_fd;
}

void close_ses(struct ev_loop *loop, struct ev_io *watcher, ses_t *ses)
{
    if (watcher->fd > 0) {
        close(watcher->fd);
    }

    // free watcher
    if (watcher) {
        ev_io_stop(loop, watcher);
        free(watcher);
    }

    free(ses);
}

void user_cb_test(struct ev_loop *loop, struct ev_io *watcher, int revents)
{
    ses_t *ses = (ses_t *)watcher->data;

    if (EV_ERROR & revents) {
        printf("[%lu]: error event in user_cb", ses->sid);
        return;
    }

    if (EV_READ & revents) {
        ssize_t buf_tmp_len = 0;
        char buf_tmp[BUFFER_SIZE] = {0};

        buf_tmp_len = recv(watcher->fd, buf_tmp, BUFFER_SIZE, 0);
        if (buf_tmp_len <= 0) {
            printf("[%lu]: recv error, will close ses", ses->sid);
            close_ses(loop, watcher, ses);
            return;
        }
        printf("%s", buf_tmp);

        if (strstr((char *)buf_tmp, "\r\n\r\n")) {
#if 0
            int i = 0;
            int c = 0;
            for (i = 0; i <= 1000000; i++) {
                int a = 0;
                int b = 1;
                c = a + b + i;
            }
            printf("[%lu][%d]: new user has came\n", ses->sid, c);
#endif
            char *resp = "HTTP/1.1 200 OK\r\n\
Server: nginx/1.9.3\r\n\
Date: Thu, 13 Aug 2015 03:04:20 GMT\r\n\
Content-Type: text/plain\r\n\
Content-Length: 17\r\n\
Connection: keep-alive\r\n\r\n\
hello_world, shen";
            send(watcher->fd, resp, strlen(resp), 0);
            close_ses(loop, watcher, ses);
            return;
        }
    }

    if (EV_WRITE & revents) {
        // ad_rp_send_data_to_user(ses);
    }

    return;
}

void accept_cb_test(struct ev_loop *loop, struct ev_io *watcher, int revents)
{
    if (EV_ERROR & revents) {
        printf("error event in accept_cb_test");
        return;
    }

    struct sockaddr_in user_addr;
    memset(&user_addr, 0, sizeof(struct sockaddr_in));
    socklen_t user_len = sizeof(user_addr);

    int user_fd = accept(watcher->fd, (struct sockaddr *)&user_addr, &user_len);
    if (user_fd < 0) {
        printf("accept return error");
        return;
    }

    int flags = fcntl(user_fd, F_GETFL, NULL);
    if (flags == -1) {
        printf("failed in fcntl");
        return;
    }
    flags |= O_NONBLOCK;
    if (fcntl(user_fd, F_SETFL, flags) == -1) {
        printf("failed in fcntl");
        return;
    }

    struct ev_io *user_w = (struct ev_io *)calloc(1, sizeof(struct ev_io));
    ses_t *new_ses = calloc(1, sizeof(ses_t));
    new_ses->sid = g_ses_count++;
    user_w->data = new_ses;

    ev_io_init(user_w, user_cb_test, user_fd, EV_READ);
    ev_io_start(loop, user_w);

    printf("new user accept\n");
    return;
}

int main(int argc, char *argv[])
{
    printf("=========main start=============\n");

    struct ev_loop *loop = ev_default_loop(0);

    int listen_fd = create_listen_fd();
    if (listen_fd < 0) {
        printf("failed in create user listen fd");
        return -1;
    }

    struct ev_io lst_w;
    ev_io_init(&lst_w, accept_cb_test, listen_fd, EV_READ);
    ev_io_start(loop, &lst_w);

    ev_run(loop, 0);
    return 1;
}
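The libev example listens on port 8090 (LSN_PORT). It was presumably built and benchmarked along these lines, with the source file name being illustrative:

gcc -o libev_server libev_server.c -lev
ab -c 500 -n 100000 http://10.10.73.210:8090/

Next, the libuv version: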
#include "uv.h" #include <stdio.h> #include <stdlib.h> #include <string.h> typedef struct { uv_write_t req; uv_buf_t buf; } write_req_t; static uv_tcp_t tcpServer; static uv_loop_t* loop; static uv_handle_t* server; static void echo_alloc(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) { printf("echo alloc called, size[%d]\n", suggested_size); buf->base = malloc(suggested_size); buf->len = suggested_size; } static void after_write(uv_write_t* req, int status) { write_req_t* wr; /* Free the read/write buffer and the request */ wr = (write_req_t*) req; // free(wr->buf.base); free(wr); printf("after_write is called\n"); if (status == 0) return; fprintf(stderr, "uv_write error: %s - %s\n", uv_err_name(status), uv_strerror(status)); } static void on_close(uv_handle_t* peer) { free(peer); } static void after_read(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) { int i; write_req_t *wr; uv_shutdown_t* sreq; free(buf->base); printf("1111---after_read is called\n"); if (nread == UV_EOF) { printf("22222---after_read is called\n"); return; } printf("3333---after_read is called\n"); char * resp = "HTTP/1.1 200 OK\r\n\ Server: nginx/1.9.3\r\n\ Date: Thu, 13 Aug 2015 03:04:20 GMT\r\n\ Content-Type: text/plain\r\n\ Content-Length: 17\r\n\ Connection: keep-alive\r\n\r\n\ hello_world, shen"; wr = (write_req_t*) malloc(sizeof *wr); wr->buf = uv_buf_init(resp, strlen(resp)); if (uv_write(&wr->req, handle, &wr->buf, 1, after_write)) { printf("uv_write failed\n"); } printf("4444---after_read is called\n"); uv_close((uv_handle_t*)handle, on_close); } static void on_connection(uv_stream_t* server, int status) { uv_stream_t* stream; int r; if (status != 0) { fprintf(stderr, "Connect error %s\n", uv_err_name(status)); } stream = malloc(sizeof(uv_tcp_t)); r = uv_tcp_init(loop, (uv_tcp_t*)stream); /* associate server with stream */ stream->data = server; r = uv_accept(server, stream); r = uv_read_start(stream, echo_alloc, after_read); } int main() { loop = uv_default_loop(); struct sockaddr_in addr; int r; int port = 7700; uv_ip4_addr("0.0.0.0", port, &addr); server = (uv_handle_t*)&tcpServer; r = uv_tcp_init(loop, &tcpServer); if (r) { /* TODO: Error codes */ fprintf(stderr, "Socket creation error\n"); return 1; } r = uv_tcp_bind(&tcpServer, (const struct sockaddr*) &addr, 0); if (r) { /* TODO: Error codes */ fprintf(stderr, "Bind error\n"); return 1; } r = uv_listen((uv_stream_t*)&tcpServer, SOMAXCONN, on_connection); if (r) { /* TODO: Error codes */ fprintf(stderr, "Listen error %s\n", uv_err_name(r)); return 1; } printf("start loop\n"); uv_run(loop, UV_RUN_DEFAULT); return 0; }ab命令:ab -c 500 -n 100000 http://10.10.73.210:7700/helloworld
QPS comparison: libev (15960) > libuv (15879) > nginx (11002) > tornado (1158)
1. The first three are roughly on par, and nginx coming in slightly lower does not prove much, since its processing pipeline is somewhat more complex (a request passes through a number of modules between arriving and being answered).
2. Tornado version matters: version 4.1 gave 750+ QPS while version 3.2 gave 1100+ QPS, so 3.2 performs slightly better than 4.1.