1. TCP client connection steps:
①. Connection method
uv_loop_t* loop = uv_default_loop();
uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof(uv_tcp_t));
uv_connect_t* connect_req = (uv_connect_t*)malloc(sizeof(uv_connect_t));
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", 5432);
uv_tcp_init(loop, client);
uv_tcp_connect(connect_req, client, addr, connect_cb);
uv_run(loop);
getchar(); // the server side doesn't need this; I still don't understand why
②. Callback functions
static void connect_cb(uv_connect_t* req, int status)
{
    int r;
    uv_buf_t buf = uv_buf_init("just test", 10);
    // This must be heap-allocated: uv_write assigns to this pointer internally.
    uv_write_t* reqw = (uv_write_t*)malloc(sizeof *reqw);
    // The allocated memory is freed in write_cb.
    r = uv_write(reqw, (uv_stream_t*)req->handle, &buf, 1, write_cb);
}
static void write_cb(uv_write_t* req, int status)
{
    free(req); // release the uv_write_t allocated in connect_cb
}
static void read_cb(uv_stream_t* tcp, ssize_t nread, uv_buf_t buf)
{
}
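read_cb is never hooked up above; a minimal sketch of how it might be, assuming the 0.x-style alloc callback that returns a uv_buf_t (alloc_cb is an illustrative name):
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t suggested_size)
{
    // libuv asks us for a buffer before every read
    return uv_buf_init((char*)malloc(suggested_size), suggested_size);
}
// e.g. at the end of connect_cb:
// uv_read_start((uv_stream_t*)req->handle, alloc_cb, read_cb);
// and inside read_cb: if (nread < 0) close the handle; free(buf.base) when done.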
uv_connect_t is a subclass of uv_req_t
2. TCP server connection steps
①. Connection method
loop = uv_default_loop();
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", 5432);
int r;
server = (uv_handle_t*)&tcpServer;
r = uv_tcp_init(loop, &tcpServer);
if (r) {
    std::cout << "Socket creation error" << std::endl;
    return;
}
r = uv_tcp_bind(&tcpServer, addr);
if (r) {
    std::cout << "Bind error" << std::endl;
    return;
}
r = uv_listen((uv_stream_t*)&tcpServer, 10, on_connection);
if (r) {
    std::cout << "Listen error" << std::endl;
    return;
}
uv_run(loop);
②. Callback functions
static void on_connection(uv_stream_t* server, int status)
{
    uv_stream_t* stream;
    int r;
    // initialize a new connection this way:
    stream = (uv_stream_t*)malloc(sizeof(uv_tcp_t));
    ASSERT(stream != NULL);
    r = uv_tcp_init(loop, (uv_tcp_t*)stream);
    stream->data = server;
    r = uv_accept(server, stream);
}
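on_connection accepts the connection but never reads from it; a hedged sketch of the follow-on step, reusing the alloc_cb/read_cb pattern from the client notes (echo_read_cb is an illustrative name):
static void echo_read_cb(uv_stream_t* stream, ssize_t nread, uv_buf_t buf)
{
    if (nread > 0) {
        // echo the bytes back; the write request is freed in write_cb
        uv_write_t* wr = (uv_write_t*)malloc(sizeof *wr);
        uv_buf_t out = uv_buf_init(buf.base, nread);
        uv_write(wr, stream, &out, 1, write_cb);
        // note: buf.base must stay alive until the write completes
    } else if (nread < 0) {
        free(buf.base);
        uv_close((uv_handle_t*)stream, NULL);
    }
}
// at the end of on_connection:
// r = uv_read_start(stream, alloc_cb, echo_read_cb);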
* uv_stream_t is a subclass of uv_handle_t
*
* uv_stream is an abstract class.
*
* uv_stream_t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t, and
* soon uv_file_t.
In the client connection:
uv_tcp_connect(connect_req, …); connect_req is a uv_connect_t* argument, and the first parameter of the matching connect_cb is uv_connect_t* (uv_connect_t is a subclass of uv_req_t).
In the server connection:
uv_listen((uv_stream_t*)&tcpServer, 10, on_connection); the first parameter of the matching on_connection is uv_stream_t*.
It seems Node.js's design is very much like ACE's: request objects and connection objects are wrapped up as conceptually different things.
3. uv_tcp_open
Create the socket yourself and call uv_tcp_open; a subsequent uv_tcp_connect then won't create a socket internally. That is all it does.
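A minimal sketch of that flow (platform socket headers and error handling omitted; addr and connect_cb as in the client notes above):
uv_os_sock_t sock = socket(AF_INET, SOCK_STREAM, 0); // create the fd ourselves
uv_tcp_t handle;
uv_connect_t connect_req;
uv_tcp_init(uv_default_loop(), &handle);
uv_tcp_open(&handle, sock);   // hand the existing socket to libuv
// uv_tcp_connect now reuses sock instead of creating a new one:
uv_tcp_connect(&connect_req, &handle, addr, connect_cb);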
4. uv_read_stop
uv_read_stop((uv_stream_t*)&tcp_handle);
uv_close((uv_handle_t*)&tcp_handle, NULL);
UDP client:
uv_udp_send_t req;
r = uv_udp_init(uv_default_loop(), &client);
ASSERT(r == 0);
buf = uv_buf_init("PING", 4);
r = uv_udp_send(&req, &client, &buf, 1, addr, cl_send_cb);

void cl_send_cb(uv_udp_send_t* req, int status)
{}
UDP server:
r = uv_udp_init(uv_default_loop(), &server);
ASSERT(r == 0);
r = uv_udp_bind(&server, addr, 0);
ASSERT(r == 0);
r = uv_udp_recv_start(&server, alloc_cb, sv_recv_cb);
ASSERT(r == 0);

static void sv_recv_cb(uv_udp_t* handle,
                       ssize_t nread,
                       uv_buf_t buf,
                       struct sockaddr* addr,
                       unsigned flags)
{}
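To answer the sender, sv_recv_cb can send straight back to addr; a filled-in version could look like this, assuming the 0.x signatures used here (sv_send_cb is an illustrative name):
static void sv_send_cb(uv_udp_send_t* req, int status)
{
    free(req); // release the send request
}
static void sv_recv_cb(uv_udp_t* handle, ssize_t nread, uv_buf_t buf,
                       struct sockaddr* addr, unsigned flags)
{
    if (nread > 0) {
        uv_udp_send_t* req = (uv_udp_send_t*)malloc(sizeof *req);
        uv_buf_t out = uv_buf_init(buf.base, nread);
        // echo the datagram back to whoever sent it
        uv_udp_send(req, handle, &out, 1, *(struct sockaddr_in*)addr, sv_send_cb);
    }
}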
Timers:
int64_t start_time = uv_now(uv_default_loop());
void never_cb(uv_timer_t* handle, int status)
{
std::cout << "never_cb should never be called"<< std::endl;
}
static void once_close_cb(uv_handle_t* handle)
{}
static void once_cb(uv_timer_t* handle, int status)
{
uv_close((uv_handle_t*)handle, once_close_cb);
uv_update_time(uv_default_loop());
}
r = uv_timer_init(uv_default_loop(), &never);
ASSERT(r == 0);
r = uv_timer_start(&never, never_cb, 100, 100);
ASSERT(r == 0);
r = uv_timer_stop(&never);
ASSERT(r == 0);
uv_unref((uv_handle_t*)&never);
uv_run(uv_default_loop());
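once_cb above is declared but never started; a one-shot timer that would use it looks like this (repeat = 0 means fire once):
uv_timer_t once;
r = uv_timer_init(uv_default_loop(), &once);
ASSERT(r == 0);
r = uv_timer_start(&once, once_cb, 100, 0); // 100 ms timeout, no repeat
ASSERT(r == 0);
uv_run(uv_default_loop()); // once_cb fires, then closes its handle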
Synchronization objects:
uv_cond_init(&signal_cond);
uv_cond_destroy(&signal_cond);
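A condition variable is normally paired with a mutex; a minimal wait/signal sketch (mutex and ready are illustrative names):
uv_mutex_t mutex;
int ready = 0;

uv_mutex_init(&mutex);
uv_cond_init(&signal_cond);

// waiting side
uv_mutex_lock(&mutex);
while (!ready)                          // guard against spurious wakeups
    uv_cond_wait(&signal_cond, &mutex);
uv_mutex_unlock(&mutex);

// signaling side (another thread)
uv_mutex_lock(&mutex);
ready = 1;
uv_cond_signal(&signal_cond);
uv_mutex_unlock(&mutex);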
If libuv has been compiled with debugging enabled, uv_mutex_destroy(), uv_mutex_lock() and uv_mutex_unlock() will abort() on error. Similarly uv_mutex_trylock() will abort if the error is anything other than EAGAIN.
Note:
libuv also has a read/write lock: uv_rwlock_t numlock;
Warning
mutexes and rwlocks DO NOT work inside a signal handler, whereas uv_async_send does.
Use uv_async_t for communication between threads.
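A sketch of that (async_cb, worker and tid are illustrative names; in the 0.x API the async callback also takes an int status):
uv_async_t async;

static void async_cb(uv_async_t* handle, int status)
{
    // runs on the loop thread; safe place to touch loop-owned state
}
static void worker(void* arg)
{
    // uv_async_send is the one libuv call that is safe from any thread
    uv_async_send(&async);
}
// on the loop thread:
// uv_async_init(uv_default_loop(), &async, async_cb);
// uv_thread_create(&tid, worker, NULL);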
Thread objects:
uv_thread_t tid;
int r;
r = uv_thread_create(&tid, thread_entry, (void*)42);
ASSERT(r == 0);
r = uv_thread_join(&tid);
Thread pool:
r = uv_queue_work(uv_default_loop(), &work_req, NULL, after_work_cb); // with the work callback (second-to-last parameter) NULL, this returns -1 and after_work_cb is never called
ASSERT(r == -1);
libuv work queue
uv_queue_work() is a convenience function that allows an application to run a task in a separate thread, and have a callback that is triggered when the task is done. A seemingly simple function, what makes uv_queue_work() tempting is that it allows potentially any third-party libraries to be used with the event-loop paradigm. When you use event loops, it is imperative to make sure that no function which runs periodically in the loop thread blocks when performing I/O or is a serious CPU hog, because this means the loop slows down and events are not being dealt with at full capacity.
What this means: the I/O thread should contain no blocking operations; libuv's way of handling this is to let the system deal with those blocking operations itself. That's just IOCP.
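For contrast with the NULL work_cb error case above, normal usage might look like this (the busy loop stands in for real blocking work; in this 0.x-era API after_work_cb takes only the request):
static void work_cb(uv_work_t* req)
{
    // runs on a thread-pool thread: blocking / CPU-heavy work belongs here
    volatile long n = (long)req->data;
    while (n > 0) n--; // stand-in for real blocking work
}
static void after_work_cb(uv_work_t* req)
{
    // back on the loop thread once the work is done
}

uv_work_t work_req;
work_req.data = (void*)100000000L;
r = uv_queue_work(uv_default_loop(), &work_req, work_cb, after_work_cb);
ASSERT(r == 0);
uv_run(uv_default_loop());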