The previous two sections analyzed zmq's tcp communication flow. Besides tcp, zmq supports several other transports, such as inproc, ipc, pgm, epgm and tipc. This section moves on to inproc, i.e. intra-process (inter-thread) communication.
Compared with tcp, intra-process communication is much simpler, since there is no remote connection authentication and no encoding or decoding of data. zmq simply connects a pipe between the two socket_base_t objects and passes data between threads through that pipe.
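Before digging into the implementation, here is what inproc looks like from the application side. This is a minimal sketch using libzmq's public C API (the endpoint name inproc://demo is made up); note that it deliberately calls zmq_connect before zmq_bind:

// demo.cpp: minimal inproc round trip; build with: c++ demo.cpp -lzmq
#include <zmq.h>
#include <assert.h>
#include <stdio.h>

int main ()
{
    void *ctx = zmq_ctx_new ();

    //  connect runs first: libzmq parks the connection until a bind shows up.
    void *push = zmq_socket (ctx, ZMQ_PUSH);
    int rc = zmq_connect (push, "inproc://demo");
    assert (rc == 0);

    //  bind runs second and picks up the pending connection.
    void *pull = zmq_socket (ctx, ZMQ_PULL);
    rc = zmq_bind (pull, "inproc://demo");
    assert (rc == 0);

    rc = zmq_send (push, "hello", 5, 0);
    assert (rc == 5);

    char buf [16];
    rc = zmq_recv (pull, buf, sizeof buf, 0);
    assert (rc == 5);
    printf ("%.*s\n", rc, buf);

    zmq_close (push);
    zmq_close (pull);
    zmq_ctx_term (ctx);
    return 0;
}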
As with other transports, an inproc connection is set up by the two sockets calling bind and connect respectively, and, as the sketch above already exercised, the order of the two calls does not matter. Let's see how bind and connect implement inproc, starting with the connect method:
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    // Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0))
        return -1;

    // Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol))
        return -1;

    if (protocol == "inproc") {

        // TODO: inproc connect is specific with respect to creating pipes
        // as there's no 'reconnect' functionality implemented. Once that
        // is in place we should follow generic pipe creation algorithm.

        // Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);

        // The total HWM for an inproc connection should be the sum of
        // the binder's HWM and the connector's HWM.
        int sndhwm = 0;
        if (peer.socket == NULL)
            sndhwm = options.sndhwm;
        else if (options.sndhwm != 0 && peer.options.rcvhwm != 0)
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        int rcvhwm = 0;
        if (peer.socket == NULL)
            rcvhwm = options.rcvhwm;
        else if (options.rcvhwm != 0 && peer.options.sndhwm != 0)
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        // Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket == NULL ? this : peer.socket};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : sndhwm, conflate? -1 : rcvhwm};
        bool conflates [2] = {conflate, conflate};
        int rc = pipepair (parents, new_pipes, hwms, conflates);
        errno_assert (rc == 0);

        // Attach local end of the pipe to this socket object.
        attach_pipe (new_pipes [0]);

        if (!peer.socket) {
            // The peer doesn't exist yet so we don't know whether
            // to send the identity message or not. To resolve this,
            // we always send our identity and drop it later if
            // the peer doesn't expect it.
            msg_t id;
            rc = id.init_size (options.identity_size);
            errno_assert (rc == 0);
            memcpy (id.data (), options.identity, options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = new_pipes [0]->write (&id);
            zmq_assert (written);
            new_pipes [0]->flush ();

            const endpoint_t endpoint = {this, options};
            pend_connection (std::string (addr_), endpoint, new_pipes);
        }
        else {
            // If required, send the identity of the local socket to the peer.
            if (peer.options.recv_identity) {
                msg_t id;
                rc = id.init_size (options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), options.identity, options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [0]->write (&id);
                zmq_assert (written);
                new_pipes [0]->flush ();
            }

            // If required, send the identity of the peer to the local socket.
            if (options.recv_identity) {
                msg_t id;
                rc = id.init_size (peer.options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), peer.options.identity, peer.options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [1]->write (&id);
                zmq_assert (written);
                new_pipes [1]->flush ();
            }

            // Attach remote end of the pipe to the peer socket. Note that peer's
            // seqnum was incremented in find_endpoint function. We don't need it
            // increased here.
            send_bind (peer.socket, new_pipes [1], false);
        }

        // Save last endpoint URI
        last_endpoint.assign (addr_);

        // remember inproc connections for disconnect
        inprocs.insert (inprocs_t::value_type (std::string (addr_), new_pipes [0]));

        return 0;
    }

    // Handling of the remaining transports (tcp, ipc, pgm, tipc, ...) is
    // elided from this excerpt.
}
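A side note on the HWM arithmetic near the top of the inproc branch: when the peer is already known, the effective high-water mark for each direction is the sum of the connector's limit and the binder's limit, and a value of 0 ("no limit") on either side keeps the combined limit unlimited. Condensed into a standalone helper (written here for illustration only; this exact function does not exist in libzmq):

//  Effective HWM for one direction of an inproc pipe, restating the rule
//  in connect () above. Illustrative helper, not part of libzmq.
static int combined_hwm (int local_hwm, int peer_hwm)
{
    //  In ZeroMQ an HWM of 0 means "no limit", so the sum only applies
    //  when both sides actually set a limit.
    if (local_hwm == 0 || peer_hwm == 0)
        return 0;
    return local_hwm + peer_hwm;
}

For example, with a ZMQ_SNDHWM of 1000 on the connector and a ZMQ_RCVHWM of 1000 on the binder, the pipe can hold 2000 messages in that direction before the sender blocks.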
The connect method first looks up whether the target address has already been bound. It then creates a pipepair and attaches one end to itself; if the socket_base_t being connected to already exists, it sends that socket a bind command. Along the way, whether identity messages get written into the pipes depends on whether each side expects identity information. If the target socket_base_t does not exist yet, connect calls pend_connection instead:
void zmq::ctx_t::pend_connection (const std::string &addr_,
        const endpoint_t &endpoint_, pipe_t **pipes_)
{
    const pending_connection_t pending_connection =
        {endpoint_, pipes_ [0], pipes_ [1]};

    endpoints_sync.lock ();

    endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end ()) {
        // Still no bind.
        endpoint_.socket->inc_seqnum ();
        pending_connections.insert (
            pending_connections_t::value_type (addr_, pending_connection));
    }
    else
        // Bind has happened in the mean time, connect directly
        connect_inproc_sockets (it->second.socket, it->second.options,
            pending_connection, connect_side);

    endpoints_sync.unlock ();
}
pend_connection takes the endpoints_sync lock and re-checks whether the target address has just been registered (by a bind in another thread). If it has, connect_inproc_sockets is called right away; if not, the connect request is recorded in pending_connections.
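The pattern is a classic check-then-act under a single lock. A toy restatement, with all names invented for illustration:

//  Toy version of the pattern pend_connection implements: under one lock,
//  either complete the connection immediately (the bind side is already
//  there) or park the request until the bind side arrives.
#include <map>
#include <mutex>
#include <string>
#include <utility>

struct toy_registry
{
    std::mutex sync;
    std::map<std::string, int> bound;             //  addr -> bound socket id
    std::multimap<std::string, int> pending;      //  addr -> parked connector id

    void connect (const std::string &addr, int connector)
    {
        std::lock_guard<std::mutex> guard (sync);
        std::map<std::string, int>::iterator it = bound.find (addr);
        if (it == bound.end ())
            pending.insert (std::make_pair (addr, connector)); //  still no bind: park it
        else
            wire_up (it->second, connector);      //  bind won the race: connect now
    }

    void wire_up (int binder, int connector) { /* attach pipe ends, etc. */ }
};

Either way, the connection is eventually completed by connect_inproc_sockets: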
void zmq::ctx_t::connect_inproc_sockets (zmq::socket_base_t *bind_socket_,
        options_t& bind_options, const pending_connection_t &pending_connection_,
        side side_)
{
    bind_socket_->inc_seqnum ();
    pending_connection_.bind_pipe->set_tid (bind_socket_->get_tid ());

    if (!bind_options.recv_identity) {
        msg_t msg;
        const bool ok = pending_connection_.bind_pipe->read (&msg);
        zmq_assert (ok);
        const int rc = msg.close ();
        errno_assert (rc == 0);
    }

    int sndhwm = 0;
    if (pending_connection_.endpoint.options.sndhwm != 0 && bind_options.rcvhwm != 0)
        sndhwm = pending_connection_.endpoint.options.sndhwm + bind_options.rcvhwm;
    int rcvhwm = 0;
    if (pending_connection_.endpoint.options.rcvhwm != 0 && bind_options.sndhwm != 0)
        rcvhwm = pending_connection_.endpoint.options.rcvhwm + bind_options.sndhwm;

    bool conflate = pending_connection_.endpoint.options.conflate &&
        (pending_connection_.endpoint.options.type == ZMQ_DEALER ||
         pending_connection_.endpoint.options.type == ZMQ_PULL ||
         pending_connection_.endpoint.options.type == ZMQ_PUSH ||
         pending_connection_.endpoint.options.type == ZMQ_PUB ||
         pending_connection_.endpoint.options.type == ZMQ_SUB);

    int hwms [2] = {conflate? -1 : sndhwm, conflate? -1 : rcvhwm};
    pending_connection_.connect_pipe->set_hwms (hwms [1], hwms [0]);
    pending_connection_.bind_pipe->set_hwms (hwms [0], hwms [1]);

    if (side_ == bind_side) {
        command_t cmd;
        cmd.type = command_t::bind;
        cmd.args.bind.pipe = pending_connection_.bind_pipe;
        bind_socket_->process_command (cmd);
        bind_socket_->send_inproc_connected (pending_connection_.endpoint.socket);
    }
    else
        pending_connection_.connect_pipe->send_bind (bind_socket_,
            pending_connection_.bind_pipe, false);

    if (pending_connection_.endpoint.options.recv_identity) {
        msg_t id;
        int rc = id.init_size (bind_options.identity_size);
        errno_assert (rc == 0);
        memcpy (id.data (), bind_options.identity, bind_options.identity_size);
        id.set_flags (msg_t::identity);
        bool written = pending_connection_.bind_pipe->write (&id);
        zmq_assert (written);
        pending_connection_.bind_pipe->flush ();
    }
}
The main job of connect_inproc_sockets is to attach the pipe to the socket_base_t objects. Pay attention to the identity handling here: as we saw in connect, when the target socket_base_t does not exist yet, the connector unconditionally writes an identity message into the pipe. So once the bind socket is created, if it does not expect an identity, this stale message has to be read out of the pipe and discarded first.
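As for where options.identity comes from: it is whatever the application set on the socket before connecting. A hedged sketch (the identity string and endpoint are made up; ZMQ_IDENTITY is the option's name in this era of libzmq, later renamed ZMQ_ROUTING_ID):

#include <zmq.h>
#include <assert.h>

//  Give a socket an explicit identity before connecting it, so that
//  options.identity / options.identity_size end up non-empty.
static void *make_identified_socket (void *ctx)
{
    void *req = zmq_socket (ctx, ZMQ_REQ);
    int rc = zmq_setsockopt (req, ZMQ_IDENTITY, "peer-1", 6);
    assert (rc == 0);
    rc = zmq_connect (req, "inproc://demo");
    assert (rc == 0);
    return req;
}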
Next, let's look at the bind operation:
int zmq::socket_base_t::bind (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    // Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0))
        return -1;

    // Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol))
        return -1;

    if (protocol == "inproc") {
        const endpoint_t endpoint = { this, options };
        const int rc = register_endpoint (addr_, endpoint);
        if (rc == 0) {
            connect_pending (addr_, this);
            last_endpoint.assign (addr_);
        }
        return rc;
    }

    // Handling of the remaining transports (tcp, ipc, pgm, tipc, ...) is
    // elided from this excerpt.
}
The bind operation first calls register_endpoint:
int zmq::ctx_t::register_endpoint (const char *addr_,
        const endpoint_t &endpoint_)
{
    endpoints_sync.lock ();

    const bool inserted = endpoints.insert (
        endpoints_t::value_type (std::string (addr_), endpoint_)).second;

    endpoints_sync.unlock ();

    if (!inserted) {
        errno = EADDRINUSE;
        return -1;
    }
    return 0;
}
This method is straightforward: endpoints stores all bound addresses, and binding the same address twice fails with EADDRINUSE. After that, socket_base_t calls connect_pending:
void zmq::ctx_t::connect_pending (const char *addr_, zmq::socket_base_t *bind_socket_)
{
    endpoints_sync.lock ();

    std::pair<pending_connections_t::iterator, pending_connections_t::iterator>
        pending = pending_connections.equal_range (addr_);
    for (pending_connections_t::iterator p = pending.first; p != pending.second; ++p)
        connect_inproc_sockets (bind_socket_, endpoints [addr_].options,
            p->second, bind_side);

    pending_connections.erase (pending.first, pending.second);

    endpoints_sync.unlock ();
}
connect_pending checks whether any socket_base_t previously asked to connect to the address that has just been bound; if so, it calls connect_inproc_sockets (analyzed above) for each of them. Note the method's last argument: it indicates whether the calling thread is the one running bind_socket_, which determines whether the bind command can be handled by invoking the handler directly or has to be delivered through the target's mailbox.
Once the connection is established, the threads can talk to each other through their socket_base_t objects.
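To close the loop, here is a hedged sketch of two threads talking over inproc while sharing one context (libzmq's C API plus std::thread; the endpoint name is made up):

//  threads.cpp: inter-thread messaging over inproc; build with:
//  c++ -std=c++11 threads.cpp -lzmq -pthread
#include <zmq.h>
#include <assert.h>
#include <string.h>
#include <thread>

static void worker (void *ctx)
{
    void *pull = zmq_socket (ctx, ZMQ_PULL);
    int rc = zmq_bind (pull, "inproc://workers");
    assert (rc == 0);

    char buf [16];
    rc = zmq_recv (pull, buf, sizeof buf, 0);
    assert (rc == 4 && memcmp (buf, "ping", 4) == 0);
    zmq_close (pull);
}

int main ()
{
    //  The context is shared between threads; each socket stays on its own thread.
    void *ctx = zmq_ctx_new ();
    std::thread t (worker, ctx);

    void *push = zmq_socket (ctx, ZMQ_PUSH);
    int rc = zmq_connect (push, "inproc://workers");   //  may well run before the bind
    assert (rc == 0);
    rc = zmq_send (push, "ping", 4, 0);
    assert (rc == 4);

    t.join ();
    zmq_close (push);
    zmq_ctx_term (ctx);
    return 0;
}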
As mentioned earlier, besides inproc zmq provides several other transports:
ipc: mainly for inter-process communication
pgm/epgm: multicast
tipc: communication over the TIPC protocol
These are used far less often, so they are not analyzed in detail here (honestly, I haven't looked closely at their implementations either).
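For reference, their endpoint strings look roughly like this (the addresses below are made-up examples; the authoritative formats are in the zmq_ipc, zmq_pgm and zmq_tipc man pages):

#include <zmq.h>

//  Endpoint syntax for the transports above; addresses are illustrative.
static void endpoint_examples (void *ctx)
{
    void *sock = zmq_socket (ctx, ZMQ_PUB);
    zmq_bind (sock, "ipc:///tmp/feeds");                 //  ipc: a filesystem pathname
    zmq_connect (sock, "epgm://eth0;239.192.1.1:5555");  //  pgm/epgm: interface;group:port
    zmq_bind (sock, "tipc://{5560,0,0}");                //  tipc: port name {type,lower,upper}
    zmq_close (sock);
}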