Connector is mainly used to initiate a connection, and it also supports automatic reconnection. Its main member is a single channel_:

```cpp
boost::scoped_ptr<Channel> channel_;   // the Channel associated with this Connector
```

Compared with Acceptor there is no acceptSocket_ member, because Connector creates a brand-new sockfd and connect()s it, along the chain Connector::start() --> Connector::startInLoop() --> Connector::connect():
```cpp
void Connector::connect()
{
  int sockfd = sockets::createNonblockingOrDie();   // create a non-blocking socket
  int ret = sockets::connect(sockfd, serverAddr_.getSockAddrInet());
  int savedErrno = (ret == 0) ? 0 : errno;
  switch (savedErrno)
  {
    case 0:
    case EINPROGRESS:   // for a non-blocking socket, EINPROGRESS means the connection is still in progress
    case EINTR:
    case EISCONN:       // connection succeeded
      connecting(sockfd);
      break;
    ....
  }
}
```
--> Connector::connecting():
```cpp
void Connector::connecting(int sockfd)
{
  setState(kConnecting);
  assert(!channel_);
  // associate the Channel with sockfd
  channel_.reset(new Channel(loop_, sockfd));
  // set the writable callback: if the socket has no error, sockfd will become writable
  channel_->setWriteCallback(
      boost::bind(&Connector::handleWrite, this));   // FIXME: unsafe
  // set the error callback
  channel_->setErrorCallback(
      boost::bind(&Connector::handleError, this));   // FIXME: unsafe
  channel_->enableWriting();   // make the Poller watch the writable event
}
```
Now, as long as connect(sockfd) did not fail, sockfd becomes writable (the kernel send buffer is not full); since the poller is watching the writable event, Connector::handleWrite() is triggered:
```cpp
void Connector::handleWrite()
{
  LOG_TRACE << "Connector::handleWrite " << state_;
  if (state_ == kConnecting)
  {
    int sockfd = removeAndResetChannel();   // stop watching in the poller and reset the channel
    // a writable socket does not necessarily mean the connection was established;
    // getsockopt(sockfd, SOL_SOCKET, SO_ERROR, ...) is needed to confirm it
    int err = sockets::getSocketError(sockfd);
    ......
    else   // connection established
    {
      setState(kConnected);
      if (connect_)
      {
        newConnectionCallback_(sockfd);   // invoke the callback
      }
    }
  }
}
```
Note: handleWrite() calls removeAndResetChannel() because the connection is established at this point, so there is no need to keep watching the channel's writable event; eventually channel_.reset() runs, i.e. the channel is destroyed. The function also has to return the sockfd, so that a TcpConnection can take it over.
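For reference, removeAndResetChannel() in muduo looks roughly like the sketch below: it stops watching the fd, then defers the actual channel_.reset() via queueInLoop(), because the call is still inside Channel::handleEvent() (details may vary slightly between muduo versions):

```cpp
int Connector::removeAndResetChannel()
{
  channel_->disableAll();              // stop watching all events on this fd
  channel_->remove();                  // unregister the channel from the poller
  const int sockfd = channel_->fd();   // keep the fd so TcpConnection can take it over
  // channel_ cannot be reset right here: we are still inside Channel::handleEvent(),
  // so destruction is deferred to a later iteration of the event loop
  loop_->queueInLoop(boost::bind(&Connector::resetChannel, this));   // FIXME: unsafe
  return sockfd;
}

void Connector::resetChannel()
{
  channel_.reset();                    // destroy the channel
}
```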
After the connection succeeds, newConnectionCallback_(sockfd) is invoked; this callback is set through the following function:
```cpp
void setNewConnectionCallback(const NewConnectionCallback &cb)
{
  newConnectionCallback_ = cb;
}
```
In practice a Connector is rarely used on its own; it is a member of TcpClient:
```cpp
typedef boost::shared_ptr<Connector> ConnectorPtr;
ConnectorPtr connector_;   // used to actively initiate the connection
```
Unlike TcpServer, however, TcpClient has only a single TcpConnection member:
```cpp
TcpConnectionPtr connection_;   // the TcpConnection obtained once the Connector has connected
```
That is, one TcpClient corresponds to one TcpConnection and one Connector, whereas one TcpServer corresponds to a list of TcpConnections and one Acceptor.
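To make the correspondence concrete, the relevant members on each side look roughly as follows (a sketch based on muduo's headers; ConnectionMap is the typedef TcpServer uses for its name-to-connection map):

```cpp
// TcpServer: one Acceptor, many connections
boost::scoped_ptr<Acceptor> acceptor_;                     // passively accepts connections
typedef std::map<string, TcpConnectionPtr> ConnectionMap;
ConnectionMap connections_;                                // the list of established TcpConnections

// TcpClient: one Connector, one connection
ConnectorPtr connector_;                                   // actively initiates the connection
TcpConnectionPtr connection_;                              // the single TcpConnection
```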
In the TcpClient constructor:
```cpp
// set the callback for a successfully established connection
connector_->setNewConnectionCallback(
    boost::bind(&TcpClient::newConnection, this, _1));
```
In other words, TcpClient::newConnection() will now run:
```cpp
void TcpClient::newConnection(int sockfd)
{
  ...........
  TcpConnectionPtr conn(new TcpConnection(loop_, connName, sockfd, localAddr, peerAddr));

  conn->setConnectionCallback(connectionCallback_);
  conn->setMessageCallback(messageCallback_);
  conn->setWriteCompleteCallback(writeCompleteCallback_);
  conn->setCloseCallback(
      boost::bind(&TcpClient::removeConnection, this, _1));   // FIXME: unsafe
  {
    MutexLockGuard lock(mutex_);
    connection_ = conn;   // keep the TcpConnection
  }
  conn->connectEstablished();   // this ends up invoking connectionCallback_
}
```
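The final call, conn->connectEstablished(), is what makes the new TcpConnection live: it registers the connected fd's readable event with the EventLoop and fires connectionCallback_. In muduo it is roughly the following (a sketch; the channel_->tie() call only exists in later versions):

```cpp
void TcpConnection::connectEstablished()
{
  loop_->assertInLoopThread();
  assert(state_ == kConnecting);
  setState(kConnected);
  channel_->tie(shared_from_this());          // tie the channel to this connection's lifetime
  channel_->enableReading();                  // let the Poller watch readable events on the connected fd
  connectionCallback_(shared_from_this());    // user callback: the connection is up
}
```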
Another difference from TcpServer: a TcpServer can have multiple Reactors (mainReactor + a ThreadPool of subReactors), while a TcpClient has exactly one Reactor, i.e. one EventLoop, which handles all the events of its TcpConnection: readable events (incoming data, connection close), writable events (kernel send buffer not full), and error events. Of course we may bind several TcpClients to the same EventLoop, so that one EventLoop manages multiple TcpClients, and hence multiple TcpConnections; the event-handling flow is similar to TcpServer's and is covered in the earlier notes.
One more point: when the user calls the setXXXCallback() family of public interfaces on TcpServer/TcpClient, what ultimately gets set are the XXXCallback_ members of the TcpConnection; these callbacks are then invoked when the corresponding events happen, e.g. connection established or message arrived.
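In other words the setters are just one level of indirection. A minimal sketch of the forwarding path for the message callback (names follow muduo; simplified and abridged):

```cpp
// 1. User code stores the callback in TcpClient (TcpServer is analogous);
//    the setter in TcpClient.h is just an assignment:
void setMessageCallback(const MessageCallback &cb)
{ messageCallback_ = cb; }

// 2. TcpClient::newConnection() copies it into the freshly created TcpConnection:
//      conn->setMessageCallback(messageCallback_);

// 3. TcpConnection::handleRead() finally invokes it when data arrives on the connected fd:
//      messageCallback_(shared_from_this(), &inputBuffer_, receiveTime);
```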
Test code:
First start the echo server, e.g. ./reactor_test11:
```cpp
#include <muduo/net/TcpServer.h>
#include <muduo/net/EventLoop.h>
#include <muduo/net/InetAddress.h>

#include <boost/bind.hpp>

#include <stdio.h>
#include <unistd.h>   // getpid()

using namespace muduo;
using namespace muduo::net;

class TestServer
{
 public:
  TestServer(EventLoop *loop, const InetAddress &listenAddr)
    : loop_(loop),
      server_(loop, listenAddr, "TestServer")
  {
    server_.setConnectionCallback(
        boost::bind(&TestServer::onConnection, this, _1));
    server_.setMessageCallback(
        boost::bind(&TestServer::onMessage, this, _1, _2, _3));
  }

  void start()
  {
    server_.start();
  }

 private:
  void onConnection(const TcpConnectionPtr &conn)
  {
    if (conn->connected())
    {
      printf("onConnection(): new connection [%s] from %s\n",
             conn->name().c_str(),
             conn->peerAddress().toIpPort().c_str());
    }
    else
    {
      printf("onConnection(): connection [%s] is down\n",
             conn->name().c_str());
    }
  }

  void onMessage(const TcpConnectionPtr &conn, Buffer *buf, Timestamp receiveTime)
  {
    string msg(buf->retrieveAllAsString());
    printf("onMessage(): received %zd bytes from connection [%s] at %s\n",
           msg.size(),
           conn->name().c_str(),
           receiveTime.toFormattedString().c_str());
    conn->send(msg);   // echo the message back
  }

  EventLoop *loop_;
  TcpServer server_;
};

int main()
{
  printf("main(): pid = %d\n", getpid());

  InetAddress listenAddr(8888);
  EventLoop loop;

  TestServer server(&loop, listenAddr);
  server.start();

  loop.loop();
}
```
Then run ./tcpclient_test and type the following two lines:
aaaaaaaaaaa
XXXXXXXXXXXXXXXX
```cpp
#include <muduo/net/Channel.h>
#include <muduo/net/TcpClient.h>
#include <muduo/base/Logging.h>
#include <muduo/net/EventLoop.h>
#include <muduo/net/InetAddress.h>

#include <boost/bind.hpp>

#include <stdio.h>
#include <string.h>   // strlen()
#include <unistd.h>   // getpid()

using namespace muduo;
using namespace muduo::net;

class TestClient
{
 public:
  TestClient(EventLoop *loop, const InetAddress &listenAddr)
    : loop_(loop),
      client_(loop, listenAddr, "TestClient"),
      stdinChannel_(loop, 0)
  {
    client_.setConnectionCallback(
        boost::bind(&TestClient::onConnection, this, _1));
    client_.setMessageCallback(
        boost::bind(&TestClient::onMessage, this, _1, _2, _3));
    //client_.enableRetry();

    // when the standard-input buffer has data, TestClient::handleRead is called back
    stdinChannel_.setReadCallback(
        boost::bind(&TestClient::handleRead, this));
    stdinChannel_.enableReading();   // watch the readable event on stdin
  }

  void connect()
  {
    client_.connect();
  }

 private:
  void onConnection(const TcpConnectionPtr &conn)
  {
    if (conn->connected())
    {
      printf("onConnection(): new connection [%s] from %s\n",
             conn->name().c_str(),
             conn->peerAddress().toIpPort().c_str());
    }
    else
    {
      printf("onConnection(): connection [%s] is down\n",
             conn->name().c_str());
    }
  }

  void onMessage(const TcpConnectionPtr &conn, Buffer *buf, Timestamp time)
  {
    string msg(buf->retrieveAllAsString());
    printf("onMessage(): recv a message [%s]\n", msg.c_str());
    LOG_TRACE << conn->name() << " recv " << msg.size()
              << " bytes at " << time.toFormattedString();
  }

  // called back when the standard-input buffer has data
  void handleRead()
  {
    char buf[1024] = {0};
    fgets(buf, 1024, stdin);
    buf[strlen(buf) - 1] = '\0';   // strip the trailing '\n'
    client_.connection()->send(buf);
  }

  EventLoop *loop_;
  TcpClient client_;
  Channel stdinChannel_;   // Channel for standard input
};

int main(int argc, char *argv[])
{
  LOG_INFO << "pid = " << getpid() << ", tid = " << CurrentThread::tid();

  EventLoop loop;
  InetAddress serverAddr("127.0.0.1", 8888);

  TestClient client(&loop, serverAddr);
  client.connect();

  loop.loop();
}
```
Server-side output:
simba@ubuntu:~/Documents/build/debug/bin$ ./reactor_test11
20131110 07:57:14.970756Z 3400 TRACE IgnoreSigPipe Ignore SIGPIPE - EventLoop.cc:51
main(): pid = 3400
20131110 07:57:14.986047Z 3400 TRACE updateChannel fd = 4 events = 3 - EPollPoller.cc:104
20131110 07:57:14.986501Z 3400 TRACE EventLoop EventLoop created 0xBFADD094 in thread 3400 - EventLoop.cc:76
20131110 07:57:14.986822Z 3400 TRACE updateChannel fd = 5 events = 3 - EPollPoller.cc:104
20131110 07:57:14.987696Z 3400 TRACE updateChannel fd = 6 events = 3 - EPollPoller.cc:104
20131110 07:57:14.988252Z 3400 TRACE loop EventLoop 0xBFADD094 start looping - EventLoop.cc:108
20131110 07:57:17.022285Z 3400 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:17.022988Z 3400 TRACE printActiveChannels {6: IN } - EventLoop.cc:271
20131110 07:57:17.023190Z 3400 INFO TcpServer::newConnection [TestServer] - new connection [TestServer:0.0.0.0:8888#1] from 127.0.0.1:54917 - TcpServer.cc:93
20131110 07:57:17.023348Z 3400 DEBUG TcpConnection TcpConnection::ctor[TestServer:0.0.0.0:8888#1] at 0x84417E0 fd=8 - TcpConnection.cc:65
20131110 07:57:17.023359Z 3400 TRACE newConnection [1] usecount=1 - TcpServer.cc:111
20131110 07:57:17.023387Z 3400 TRACE newConnection [2] usecount=2 - TcpServer.cc:113
20131110 07:57:17.023417Z 3400 TRACE connectEstablished [3] usecount=6 - TcpConnection.cc:238
20131110 07:57:17.023424Z 3400 TRACE updateChannel fd = 8 events = 3 - EPollPoller.cc:104
onConnection(): new connection [TestServer:0.0.0.0:8888#1] from 127.0.0.1:54917
20131110 07:57:17.023464Z 3400 TRACE connectEstablished [4] usecount=6 - TcpConnection.cc:243
20131110 07:57:17.023469Z 3400 TRACE newConnection [5] usecount=2 - TcpServer.cc:123
20131110 07:57:19.704918Z 3400 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:19.704958Z 3400 TRACE printActiveChannels {8: IN } - EventLoop.cc:271
20131110 07:57:19.704969Z 3400 TRACE handleEvent [6] usecount=2 - Channel.cc:67
onMessage(): received 11 bytes from connection [TestServer:0.0.0.0:8888#1] at 20131110 07:57:19.704916
20131110 07:57:19.705084Z 3400 TRACE handleEvent [12] usecount=2 - Channel.cc:69
20131110 07:57:22.728687Z 3400 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:22.728725Z 3400 TRACE printActiveChannels {8: IN} - EventLoop.cc:271
20131110 07:57:22.728735Z 3400 TRACE handleEvent [6] usecount=2 - Channel.cc:67
onMessage(): received 16 bytes from connection [TestServer:0.0.0.0:8888#1]at 20131110 07:57:22.728685
20131110 07:57:22.728786Z 3400 TRACE handleEvent [12] usecount=2 - Channel.cc:69
20131110 07:57:32.739020Z 3400 TRACE poll nothing happended - EPollPoller.cc:74
^C
In this output, fd = 6 is the listening socket and fd = 8 is the accepted (connected) socket. Once the connection is established, onConnection() is called; since the client sends two strings, fd = 8 fires two readable events and onMessage() is called twice.
Client-side output:
simba@ubuntu:~/Documents/build/debug/bin$ ./tcpclient_test
20131110 07:57:16.999262Z 3401 TRACE IgnoreSigPipe Ignore SIGPIPE - EventLoop.cc:51
20131110 07:57:17.001679Z 3401 INFO pid = 3401, tid = 3401 - TcpClient_test.cc:77
20131110 07:57:17.002535Z 3401 TRACE updateChannel fd = 4 events = 3 - EPollPoller.cc:104
20131110 07:57:17.003035Z 3401 TRACE EventLoop EventLoop created 0xBFE52018 in thread 3401 - EventLoop.cc:76
20131110 07:57:17.003367Z 3401 TRACE updateChannel fd = 5 events = 3 - EPollPoller.cc:104
20131110 07:57:17.003846Z 3401 DEBUG Connector ctor[0x9A946D0] - Connector.cc:33
20131110 07:57:17.004215Z 3401 INFO TcpClient::TcpClient[TestClient] - connector 0x9A946D0 - TcpClient.cc:72
20131110 07:57:17.004569Z 3401 TRACE updateChannel fd = 0 events = 3 - EPollPoller.cc:104
20131110 07:57:17.005017Z 3401 INFO TcpClient::connect[TestClient] - connecting to 127.0.0.1:8888 - TcpClient.cc:106
20131110 07:57:17.024071Z 3401 TRACE updateChannel fd = 6 events = 4 - EPollPoller.cc:104
20131110 07:57:17.024375Z 3401 TRACE loop EventLoop 0xBFE52018 start looping - EventLoop.cc:108
20131110 07:57:17.024561Z 3401 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:17.024980Z 3401 TRACE printActiveChannels {6: OUT} - EventLoop.cc:271
20131110 07:57:17.025181Z 3401 TRACE handleWrite Connector::handleWrite 1 - Connector.cc:169
20131110 07:57:17.025326Z 3401 TRACE updateChannel fd = 6 events = 0 - EPollPoller.cc:104
20131110 07:57:17.025509Z 3401 TRACE removeChannel fd = 6- EPollPoller.cc:147
20131110 07:57:17.025804Z 3401 DEBUG TcpConnection TcpConnection::ctor[TestClient:127.0.0.1:8888#1] at 0x9A94808 fd=6 - TcpConnection.cc:65
20131110 07:57:17.026012Z 3401 TRACE connectEstablished [3] usecount=3 - TcpConnection.cc:238
20131110 07:57:17.026183Z 3401 TRACE updateChannel fd = 6 events = 3 - EPollPoller.cc:104
onConnection(): new connection [TestClient:127.0.0.1:8888#1] from 127.0.0.1:8888
20131110 07:57:17.026506Z 3401 TRACE connectEstablished [4] usecount=3 - TcpConnection.cc:243
aaaaaaaaaaa
20131110 07:57:19.704702Z 3401 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:19.704765Z 3401 TRACE printActiveChannels {0: IN } - EventLoop.cc:271
20131110 07:57:19.705370Z 3401 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:19.705408Z 3401 TRACE printActiveChannels {6: IN } - EventLoop.cc:271
20131110 07:57:19.705427Z 3401 TRACE handleEvent [6] usecount=2 - Channel.cc:67
onMessage(): recv a message [aaaaaaaaaaa]
20131110 07:57:19.705520Z 3401 TRACE onMessage TestClient:127.0.0.1:8888#1 recv 11 bytes at 20131110 07:57:19.705368 - TcpClient_test.cc:58
20131110 07:57:19.705538Z 3401 TRACE handleEvent [12] usecount=2 - Channel.cc:69
XXXXXXXXXXXXXXXX
20131110 07:57:22.728548Z 3401 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:22.728616Z 3401 TRACE printActiveChannels {0: IN} - EventLoop.cc:271
20131110 07:57:22.729010Z 3401 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:22.729035Z 3401 TRACE printActiveChannels {6: IN } - EventLoop.cc:271
20131110 07:57:22.729045Z 3401 TRACE handleEvent [6] usecount=2 - Channel.cc:67
onMessage(): recv a message [XXXXXXXXXXXXXXXX]
20131110 07:57:22.729070Z 3401 TRACE onMessage TestClient:127.0.0.1:8888#1 recv 16 bytes at 20131110 07:57:22.729009 - TcpClient_test.cc:58
20131110 07:57:22.729093Z 3401 TRACE handleEvent [12] usecount=2 - Channel.cc:69
20131110 07:57:32.739100Z 3401 TRACE poll nothing happended - EPollPoller.cc:74
20131110 07:57:36.887794Z 3401 TRACE poll 1 events happended - EPollPoller.cc:65
20131110 07:57:36.887848Z 3401 TRACE printActiveChannels {6: IN } - EventLoop.cc:271
20131110 07:57:36.887860Z 3401 TRACE handleEvent [6] usecount=2 - Channel.cc:67
20131110 07:57:36.887882Z 3401 TRACE handleClose fd = 6 state = 2 - TcpConnection.cc:369
20131110 07:57:36.887892Z 3401 TRACE updateChannel fd = 6 events = 0 - EPollPoller.cc:104
onConnection(): connection [TestClient:127.0.0.1:8888#1] is down
20131110 07:57:36.887948Z 3401 TRACE handleClose [7] usecount=3 - TcpConnection.cc:377
20131110 07:57:36.887966Z 3401 TRACE handleClose [11] usecount=3 - TcpConnection.cc:380
20131110 07:57:36.887984Z 3401 TRACE handleEvent [12] usecount=2 - Channel.cc:69
20131110 07:57:36.887994Z 3401 TRACE removeChannel fd = 6- EPollPoller.cc:147
20131110 07:57:36.888005Z 3401 DEBUG ~TcpConnection TcpConnection::dtor[TestClient:127.0.0.1:8888#1] at 0x9A94808 fd=6 - TcpConnection.cc:72
20131110 07:57:46.894605Z 3401 TRACE poll nothing happended - EPollPoller.cc:74
fd = 0 is standard input and fd = 6 is the client's connected socket. Right after the connection succeeds, a writable event fires on fd = 6, but the connector's channel is immediately removed from the poller and destroyed, and a TcpConnection is constructed instead. Each line typed on the command line triggers a readable event on standard input; when the server echoes the data back, fd = 6 fires a readable event and onMessage() is called; this repeats twice. Then we Ctrl+C the server: the client notices the connection is down, destroys the TcpConnection and closes the socket along the way. The event loop keeps running, because, as noted above, one EventLoop may have several TcpClients bound to it.
As a small example, we can let an EventLoopThreadPool start two IO threads and have each IO thread manage four TcpClients. In the program below, RecvFileClient is a class that wraps TcpClient (a minimal sketch of such a wrapper is given after the listing):
```cpp
int main(int argc, char* argv[])
{
  LOG_INFO << "pid = " << getpid();

  EventLoop loop;
  g_loop = &loop;

  // use two IO threads to initiate multiple connections
  EventLoopThreadPool loopPool(&loop);
  loopPool.setThreadNum(2);
  loopPool.start();

  boost::ptr_vector<RecvFileClient> clients(8);

  InetAddress serverAddr("127.0.0.1", 2021);

  for (int i = 0; i < 8; ++i)
  {
    char buf[32];
    snprintf(buf, sizeof buf, "%d", i+1);
    clients.push_back(new RecvFileClient(loopPool.getNextLoop(), serverAddr, buf));
    clients[i].connect();
    usleep(200);
  }

  loop.loop();
  usleep(20000);
}
```
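RecvFileClient itself is not shown above. Based on how main() uses it, a minimal wrapper might look like the sketch below; this is hypothetical and for illustration only, with the constructor signature inferred from the call RecvFileClient(loopPool.getNextLoop(), serverAddr, buf) and the same muduo/boost includes assumed as in the TestClient example:

```cpp
class RecvFileClient
{
 public:
  RecvFileClient(EventLoop *loop, const InetAddress &serverAddr, const string &id)
    : client_(loop, serverAddr, "RecvFileClient-" + id)
  {
    client_.setConnectionCallback(
        boost::bind(&RecvFileClient::onConnection, this, _1));
    client_.setMessageCallback(
        boost::bind(&RecvFileClient::onMessage, this, _1, _2, _3));
  }

  void connect() { client_.connect(); }

 private:
  void onConnection(const TcpConnectionPtr &conn)
  {
    LOG_INFO << conn->name() << (conn->connected() ? " is up" : " is down");
  }

  void onMessage(const TcpConnectionPtr &conn, Buffer *buf, Timestamp)
  {
    buf->retrieveAll();   // a real client would write the received bytes to a file
  }

  TcpClient client_;
};
```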
References:
UNP (Unix Network Programming)
muduo manual.pdf
《Linux 多线程服务器编程：使用 muduo C++ 网络库》