libuv echo server performance benchmark

Server code

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <uv.h>

#define DEFAULT_PORT 7000
#define DEFAULT_BACKLOG 128

uv_loop_t *loop;
struct sockaddr_in addr;

typedef struct {
    uv_write_t req;
    uv_buf_t buf;
} write_req_t;

void free_write_req(uv_write_t *req) {
    write_req_t *wr = (write_req_t*) req;
    free(wr->buf.base);
    free(wr);
}

void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
    buf->base = (char*) malloc(suggested_size);
    buf->len = suggested_size;
}

void on_close(uv_handle_t* handle) {
    free(handle);
}

void echo_write(uv_write_t *req, int status) {
    if (status) {
        fprintf(stderr, "Write error %s\n", uv_strerror(status));
    }
    free_write_req(req);
}

// A client socket became readable: data has arrived
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
    if (nread > 0) {
        write_req_t *req = (write_req_t*) malloc(sizeof(write_req_t));
        req->buf = uv_buf_init(buf->base, nread);
        // Write the data we just read back to the same client (echo)
        uv_write((uv_write_t*) req, client, &req->buf, 1, echo_write);
        return;
    }
    if (nread < 0) {
        if (nread != UV_EOF)
            fprintf(stderr, "Read error %s\n", uv_err_name(nread));
        uv_close((uv_handle_t*) client, on_close);
    }

    free(buf->base);
}

// The listening socket becoming readable means a new connection has arrived
void on_new_connection(uv_stream_t *server, int status) {
    if (status < 0) {
        fprintf(stderr, "New connection error %s\n", uv_strerror(status));
        // error!
        return;
    }

    uv_tcp_t *client = (uv_tcp_t*) malloc(sizeof(uv_tcp_t));
    uv_tcp_init(loop, client);
    // Accept the connection and start watching its socket for readable events
    if (uv_accept(server, (uv_stream_t*) client) == 0) {
        uv_read_start((uv_stream_t*) client, alloc_buffer, echo_read);
    }
    else {
        uv_close((uv_handle_t*) client, on_close);
    }
}

int main() {
    loop = uv_default_loop();
    
    // Initialize the TCP server handle
    uv_tcp_t server;
    uv_tcp_init(loop, &server);
    
    // Set up the IPv4 address
    uv_ip4_addr("0.0.0.0", DEFAULT_PORT, &addr);
    
    // Bind the address to the server handle
    uv_tcp_bind(&server, (const struct sockaddr*)&addr, 0);
    // Turn the socket into a listening socket
    int r = uv_listen((uv_stream_t*) &server, DEFAULT_BACKLOG, on_new_connection);
    if (r) {
        fprintf(stderr, "Listen error %s\n", uv_strerror(r));
        return 1;
    }
    // Run the loop: wait for the listening socket to become readable, accept connections, then echo data back on each connection socket
    return uv_run(loop, UV_RUN_DEFAULT);
}

Makefile

CC=g++
SRC = $(wildcard *.cpp)
OBJS = $(patsubst %.cpp, %.o, $(SRC))
FLAG = -g -O2 -Werror -I. -I/root/libuv/include -pthread -luv -ltcmalloc
TARGET = a.out

$(TARGET):$(OBJS)
        $(CC) -o $@ $^ $(FLAG)

%.o:%.cpp
        $(CC) -o $@ -c $(FLAG) $< -g -MD -MF $@.d

clean:
        rm -rf $(TARGET) $(OBJS)

Client

rust_echo_bench

Run

cargo run --release -- --address "127.0.0.1:7000" --number 1 --duration 60 --length 20480
    Finished release [optimized] target(s) in 0.00s
     Running `target/release/echo_bench --address '127.0.0.1:7000' --number 1 --duration 60 --length 20480`
Benchmarking: 127.0.0.1:7000
1 clients, running 20480 bytes, 60 sec.

Speed: 40022 request/sec, 40022 response/sec
Requests: 2401365
Responses: 2401364

NIC configuration

[root@localhost rust_echo_bench]# ethtool enp0s3
Settings for enp0s3:
        Supported ports: [ TP ]
        Supported link modes:   10baseT/Half 10baseT/Full
                                100baseT/Half 100baseT/Full
                                1000baseT/Full
        Supported pause frame use: No
        Supports auto-negotiation: Yes
        Supported FEC modes: Not reported
        Advertised link modes:  10baseT/Half 10baseT/Full
                                100baseT/Half 100baseT/Full
                                1000baseT/Full
        Advertised pause frame use: No
        Advertised auto-negotiation: Yes
        Advertised FEC modes: Not reported
        Speed: 1000Mb/s
        Duplex: Full
        Port: Twisted Pair
        PHYAD: 0
        Transceiver: internal
        Auto-negotiation: on
        MDI-X: off (auto)
        Supports Wake-on: umbg
        Wake-on: d
        Current message level: 0x00000007 (7)
                               drv probe link
        Link detected: yes

Analysis

Client request rate: ~40k QPS

Payload size: 20480 bytes = 20 KB (163,840 bits)

NIC speed: 1 Gb/s

40k requests/s × 20 KB ≈ 820 MB/s ≈ 6.6 Gbit/s in each direction, several times more than a 1 Gb/s NIC can carry; the run only sustains this because client and server sit on the same machine and the traffic stays on loopback (127.0.0.1), never touching the NIC.

So at the current NIC speed, a test over a real network would hit the link limit long before libuv's, and this setup cannot reveal libuv's performance ceiling.
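
For reference, a throwaway sketch of the bandwidth arithmetic; the qps and payload values are taken from the benchmark output above, everything else is just unit conversion:

#include <stdio.h>

int main(void) {
    double qps   = 40022.0;   /* measured requests per second */
    double bytes = 20480.0;   /* --length 20480 from the benchmark run */

    double mb_per_s   = qps * bytes / 1e6;        /* ~820 MB/s per direction */
    double gbit_per_s = qps * bytes * 8.0 / 1e9;  /* ~6.6 Gbit/s per direction */

    printf("%.0f MB/s, %.2f Gbit/s per direction\n", mb_per_s, gbit_per_s);
    return 0;
}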

Conclusion

With a 1 Gb/s NIC, if requests are sent as a continuous stream:

Assuming 1 KB per packet, the NIC caps the server at roughly 100k QPS (1 Gb/s ≈ 125 MB/s, i.e. about 120k one-KB packets per second).

If the NIC were 10 Gb/s, the theoretical maximum would be about 1M QPS.
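
A quick sketch of where those ceiling figures come from, assuming the 1 KB packet size stated above and counting payload bytes only (no TCP/IP overhead):

#include <stdio.h>

/* NIC-limited QPS ceiling: line rate divided by per-request payload. */
static double max_qps(double nic_gbit_per_s, double packet_bytes) {
    return nic_gbit_per_s * 1e9 / 8.0 / packet_bytes;
}

int main(void) {
    printf("1 Gb/s,  1 KB packets: ~%.0fk QPS\n", max_qps(1.0, 1024.0) / 1e3);   /* ~122k */
    printf("10 Gb/s, 1 KB packets: ~%.2fM QPS\n", max_qps(10.0, 1024.0) / 1e6);  /* ~1.2M */
    return 0;
}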

However, according to reported measurements, an epoll-based server tops out at around 300k QPS.

If io_uring is used to handle the network packets instead, the server's QPS can reach about 1M/s.
