Notes on multi-threaded libuv usage

I recently had a requirement to write a cross-platform Redis client. After digging through some material, I decided to go with libuv and got to work.

Approach:

For business reasons, the uv_loop runs inside a worker thread, and cross-thread sends are implemented with handles set up via uv_async_init.

Caveats:

1. Variables such as uv_loop_t, uv_work_t, and uv_async_t are best allocated inside the worker thread. Be sure to call uv_default_loop in that thread, and make uv_run the thread's last call so the thread blocks in it. uv_async_t handles should be allocated ahead of time (before I added the event/wait mechanism, new-ing a uv_async_t for every uv_async_send would crash, which looks like a thread-safety issue). The first sketch after this list shows the whole pattern.

2. uv_async_send is a thread-safe, non-blocking function, and arguments can be passed into the callback through uv_async_t->data. One problem, though: if uv_async_send is called faster than the callback can process, some of the calls never execute (libuv coalesces pending wakeups). I therefore recommend an event/wait mechanism for synchronization (meaning cross-thread signaling only), as in the first sketch below.

3. Multiple threads are supported, but the number of worker threads caps out at 4 (environment: Windows 10; CPU: 6-core/12-thread AMD).

4. There is no direct disconnect callback; disconnects have to be inferred indirectly from uv_read, which is not intuitive. (This is the main reason I switched to libevent; the second sketch below shows the workaround.)
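
To make notes 1 and 2 concrete, here is a minimal sketch of the pattern. It substitutes libuv's own uv_sem_t for the Win32 event/wait pair used in the full listing below (my substitution, for portability): the handle is allocated up front, the loop lives entirely on the worker thread, and the sender blocks until the callback has consumed its request.

#include <cstdio>
#include <uv.h>

// Request handed to the loop thread; the semaphore plays the role of the
// Win32 event/wait pair in the full listing below.
struct AsyncReq {
    uv_sem_t done;
    const char * msg;
};

static uv_async_t g_async; // pre-allocated once, per note 1
static uv_sem_t g_ready;   // signals that uv_async_init has run

static void async_cb(uv_async_t * handle)
{
    AsyncReq * req = static_cast<AsyncReq *>(handle->data);
    std::printf("handled on loop thread: %s\n", req->msg);
    uv_sem_post(&req->done); // release the sender, per note 2
}

static void loop_thread(void * arg)
{
    // Loop resources are created on the thread that runs them (note 1).
    uv_loop_t * loop = uv_default_loop();
    uv_async_init(loop, &g_async, async_cb);
    uv_sem_post(&g_ready);        // the handle is now safe to send on
    uv_run(loop, UV_RUN_DEFAULT); // blocks here until the loop is stopped
}

int main()
{
    uv_thread_t tid;
    uv_sem_init(&g_ready, 0);
    uv_thread_create(&tid, loop_thread, nullptr);
    uv_sem_wait(&g_ready);        // never send before uv_async_init has run

    AsyncReq req;
    req.msg = "hello";
    uv_sem_init(&req.done, 0);
    g_async.data = &req;
    uv_async_send(&g_async);      // thread-safe; repeated sends may coalesce
    uv_sem_wait(&req.done);       // so block until the callback has consumed this one
    uv_sem_destroy(&req.done);
    return 0;                     // sketch only: loop/handle teardown omitted
}

The g_ready semaphore closes the gap between thread start and uv_async_init; without it, the first send could hit an uninitialized handle.

And for note 4, the indirect detection amounts to treating an error or UV_EOF from the read callback as the disconnect notification. A sketch, assuming a connected uv_tcp_t is already reading (alloc_cb and on_read are my names, not libuv's):

#include <cstdio>
#include <uv.h>

static void alloc_cb(uv_handle_t * handle, size_t suggested, uv_buf_t * buf)
{
    buf->base = new char[suggested];
    buf->len = static_cast<decltype(buf->len)>(suggested); // ULONG on Windows, size_t elsewhere
}

static void on_read(uv_stream_t * stream, ssize_t nread, const uv_buf_t * buf)
{
    if (nread < 0) {
        // UV_EOF (peer closed) or a genuine error: this is the missing
        // "disconnect callback", surfaced through the read path.
        std::printf("disconnected: %s\n", uv_strerror((int)nread));
        uv_close((uv_handle_t *)stream, nullptr);
    } else if (nread > 0) {
        // normal data path: parse the reply in buf->base[0..nread)
    }
    delete[] buf->base;
}

// After uv_tcp_connect succeeds (e.g. inside uv_connect_cb):
//     uv_read_start((uv_stream_t *)req->handle, alloc_cb, on_read);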
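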

Analysis:

I have not read through the libuv source; my guess is that a global event queue is responsible. The thread-count cap will take a closer reading of the source to explain. This post just records the pitfalls I ran into in practice, so I don't step into them again.
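
One concrete lead on note 3 that doesn't require reading the source: per the libuv documentation, uv_queue_work runs its work on libuv's internal thread pool, which defaults to 4 threads and is sized by the UV_THREADPOOL_SIZE environment variable, read once when the pool first starts. If that is the cap observed above, raising the variable before any work is queued should lift it; a sketch:

#include <cstdlib>
#include <uv.h>

int main()
{
    // Must happen before the first uv_queue_work (or fs/getaddrinfo) call,
    // because the pool size is latched when the pool spins up.
    _putenv_s("UV_THREADPOOL_SIZE", "16"); // MSVC CRT; use setenv() elsewhere
    // ... create the loop and queue work as usual ...
    return 0;
}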

Appendix: code (Windows test code):


#include <iostream>
#include <string>
#include <map>
#include <functional>
#include <thread>
#include <windows.h>

#include "../libuv/include/uv.h"

#pragma comment(lib, "Ws2_32.lib")
#pragma comment(lib, "IPHLPAPI.lib") // GetAdaptersAddresses
//#pragma comment(lib, "Kernel32.lib") // GetProcessMemoryInfo
#pragma comment(lib, "Psapi.lib") // GetProcessMemoryInfo
#pragma comment(lib, "Userenv.lib") // GetUserProfileDirectory
#pragma comment(lib, "Secur32.lib") // GetUserNameExA

#pragma comment(lib, "../libuv/Debug/lib/libuv-v141-mtd.lib")


typedef std::function<void(const std::string & channel, const std::string & payload)> SubMsgCallback; // callback signature assumed


class RedisClient
{
public:
    typedef enum UV_ASYNC_TYPE {
        UV_ASYNC_UNKNOWN = 0,
        UV_ASYNC_CONNECT = 1,
        UV_ASYNC_WRITE = 2,
    } UV_ASYNC_TYPE;

    typedef struct uv_async_ctx {
        HANDLE hEvent[2];  // event/wait pair for synchronizing with the loop thread
        RedisClient * pThis;
        int32_t eType;     // UV_ASYNC_TYPE
        void * pData;
        size_t nSize;
    } uv_async_ctx;

public:
    RedisClient(const std::string & ip, const uint16_t port, const std::string & password = "") :
        _srvipv4(ip), _srvport(port), _password(password)
    {
        _workThread = new uv_thread_t;

        // All loop resources are allocated inside the worker thread (see note 1).
        uv_thread_create(_workThread, WorkThreadProc, this);
        uv_thread_detach(_workThread);
    }
    virtual ~RedisClient()
    {
    }

public:
    void Login()
    {
        int32_t iRet = -1;
        uv_async_ctx * ctx = new uv_async_ctx;

        ctx->hEvent[0] = CreateEventA(NULL, FALSE, FALSE, NULL);
        ctx->hEvent[1] = CreateEventA(NULL, FALSE, FALSE, NULL);
        ctx->pThis = this;
        ctx->eType = UV_ASYNC_CONNECT;
        ctx->nSize = sizeof(struct sockaddr_in);
        ctx->pData = new char[ctx->nSize];

        iRet = uv_ip4_addr(_srvipv4.c_str(), _srvport, (sockaddr_in *)ctx->pData);

        _asyncList[1]->data = ctx;
        uv_async_send(_asyncList[1]);

        // One event per connection; both are signalled from uv_connect_cb.
        WaitForSingleObject(ctx->hEvent[0], INFINITE);
        WaitForSingleObject(ctx->hEvent[1], INFINITE);

        CloseHandle(ctx->hEvent[0]);
        CloseHandle(ctx->hEvent[1]);
        delete[] (char *)ctx->pData;
        delete ctx;
    }

    void Logout()
    {
    }

    void Publish(const std::string & channel, const std::string & data)
    {
        uv_async_ctx * ctx = new uv_async_ctx;

        ctx->hEvent[0] = CreateEventA(NULL, FALSE, FALSE, NULL);
        ctx->hEvent[1] = nullptr;
        ctx->pThis = this;
        ctx->eType = UV_ASYNC_WRITE;
        ctx->pData = new char[256];
        ctx->nSize = 256;

        snprintf((char *)ctx->pData, 256, "publish %s %s\r\n", channel.c_str(), data.c_str());

        for (int i = 0; i != 1024; ++i)
        {
            std::cout << "Publish thread: " << std::this_thread::get_id() << ", index: " << i + 1 << std::endl;
            _asyncList[0]->data = ctx;
            Sleep(1);
            uv_async_send(_asyncList[0]);
            WaitForSingleObject(ctx->hEvent[0], INFINITE); // without this wait, a slow callback gets some of the sends coalesced away
        }

        CloseHandle(ctx->hEvent[0]);
        delete[] (char *)ctx->pData;
        delete ctx;
    }

    void Subscribe(const std::string & channel, const SubMsgCallback & cbfn)
    {
    }

    void Unsubscribe(const std::string & channel)
    {
    }

private:
    static void WorkThreadProc(void * arg)
    {
        RedisClient * pThis = (RedisClient *)arg;
        pThis->_cbfnLocker = new uv_mutex_t;
        pThis->_req = new uv_work_t;
        pThis->_connect1 = new uv_connect_t;
        pThis->_connect2 = new uv_connect_t;
        pThis->_tcp1 = new uv_tcp_t;
        pThis->_tcp2 = new uv_tcp_t;

        for (int i = 0; i != 1024; ++i)
            pThis->_asyncList[i] = new uv_async_t;

        // uv_default_loop() returns an already-initialized loop,
        // so no uv_loop_init call is needed here.
        pThis->_loop = uv_default_loop();

        for (int i = 0; i != 1024; ++i)
            uv_async_init(pThis->_loop, pThis->_asyncList[i], uv_async_cb);
        uv_queue_work(pThis->_loop, pThis->_req, uv_work_cb, after_work_cb);

        uv_run(pThis->_loop, UV_RUN_DEFAULT); // blocks here for the thread's lifetime (note 1)
    }

private:
    static void uv_async_cb(uv_async_t * handle)
    {
        uv_async_ctx * ctx = (uv_async_ctx *)handle->data;

        static int num = 0;
        std::cout << "uv_async_cb thread: " << std::this_thread::get_id() << ", index: " << ++num << std::endl;

        switch (ctx->eType)
        {
        case UV_ASYNC_CONNECT:
        {
            RedisClient * pThis = ctx->pThis;
            sockaddr_in * addr = (sockaddr_in *)ctx->pData;
            pThis->_connect1->data = ctx;
            pThis->_connect2->data = ctx;

            uv_tcp_init(pThis->_loop, pThis->_tcp1);
            uv_tcp_init(pThis->_loop, pThis->_tcp2);
            uv_tcp_connect(pThis->_connect1, pThis->_tcp1, (const sockaddr *)addr, uv_connect_cb);
            uv_tcp_connect(pThis->_connect2, pThis->_tcp2, (const sockaddr *)addr, uv_connect_cb);

            //SetEvent(ctx->hEvent[0]); // signalled from uv_connect_cb instead
            break;
        }
        case UV_ASYNC_WRITE:
        {
            //Sleep(10); // simulate a slow callback
            SetEvent(ctx->hEvent[0]);
            break;
        }
        default:
            break;
        }
    }

    static void uv_work_cb(uv_work_t * req)
    {
    }

    static void after_work_cb(uv_work_t * req, int status)
    {
    }

    static void uv_connect_cb(uv_connect_t * req, int status)
    {
        printf("uv_connect_cb.\n");

        uv_async_ctx * ctx = (uv_async_ctx *)req->data;
        SetEvent(ctx->hEvent[0]);
        SetEvent(ctx->hEvent[1]);
    }

protected:
    std::map<std::string, SubMsgCallback> _subCbfnList;
    uv_mutex_t * _cbfnLocker;
    uv_thread_t * _workThread;
    uv_loop_t * _loop;
    uv_work_t * _req;
    uv_timer_t * _timer;
    uv_async_t * _asyncList[1024];
    uv_mutex_t * _asyncLocker;
    uv_connect_t * _connect1;
    uv_connect_t * _connect2;
    uv_tcp_t * _tcp1;
    uv_tcp_t * _tcp2;
    uv_stream_t * _stream;
    std::string _srvipv4;
    uint16_t _srvport;
    std::string _password;
};

int main()
{
    RedisClient cli("192.167.0.184", 6379);

    Sleep(1000); // give the worker thread time to initialize the async handles

    cli.Login();

    Sleep(10000);

    cli.Publish("key", "data");

    Sleep(INFINITE); // park the main thread

    return 0;
}

Reposted from: https://www.cnblogs.com/MookerLee/p/11114044.html
