linux c++ 非阻塞tcp socket client简单实现

需求还需上报下数据,服务本身就是单进程线程安全的,不能用阻塞socket,通过getsockopt() TCP_INFO弄了个简单判断连接存活的方法,数据不是很重要,可靠性要求不高,也可以考虑udp。可以再优化一下重连间隔周期:本样例中 send 失败就立刻重连,频率过高。

src:

#include <arpa/inet.h>
#include <errno.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <iostream>
#include <queue>
#include <string>

using namespace std;

// Minimal non-blocking TCP client for a single-threaded service.
// setup() resolves and starts a connect; Send() checks liveness via
// getsockopt(TCP_INFO) and reconnects when the link is dead.  Data is
// best-effort (low reliability requirements per the notes above).
class TCPClient
{

// Coarse connection state derived from getsockopt(TCP_INFO).
enum class status: uint32_t {
	alive = 0,       // TCP_ESTABLISHED
	connecting = 1,  // TCP_SYN_SENT / TCP_SYN_RECV
	disconnect = 2,  // any other TCP state
	unknown = 3      // currently unused
};

public:
	TCPClient();
	// Query the kernel for the socket's current TCP state.
	status get_tcp_status();
	// Start a non-blocking connect() to the endpoint stored in `server`.
	// NOTE(review): shares its name with status::connecting — easy to confuse.
	bool connecting();
	// Resolve `address` and begin connecting to address:port.
	bool setup(std::string address, int port);
	// Best-effort send of a NUL-terminated payload.
	bool Send(char* data);
	// Receive up to `size` bytes from the socket.
	string receive(int size = 4096);
	// Read a single '\n'-terminated line, byte by byte.
	string read();
	// Close the underlying socket.
	void exit();

private:
	int sock;                    // socket fd; -1 when no socket exists
	std::string address;         // intended target host — NOTE(review): shadowed by setup()'s parameter, never assigned in the visible code
	int port;                    // intended target port — same shadowing issue as `address`
	struct sockaddr_in server;   // resolved endpoint used by connect()
	status connection_status;    // NOTE(review): never written by the visible code
};

// TCPClient that buffers payloads that failed to send and retries them
// after the next successful send.
class QualityClient : public TCPClient {
public:
	QualityClient();
	~QualityClient();

	// Send `data`, queueing the pointer for a later retry on failure.
	// The queue stores raw pointers — the caller must keep the pointed-to
	// bytes alive until delivered (string literals / static buffers are safe).
	bool pub_quality(char* data);

private:
	std::queue<char*> send_buffer;  // pending payload pointers (not owned)
	int capacity;                   // max queued entries (set to 256 in the ctor)
	int size;                       // NOTE(review): never updated in the visible code
};

// Lazy singleton holder for the process-wide QualityClient.
// Not thread-safe: the mutex / double-checked-locking code is commented
// out (the surrounding notes say the service is single-threaded).
class QualityClientInstance
{
public:
    // Get the single shared instance (created on first call).
    static QualityClient *GetInstance();

    // Destroy the instance; call once at process exit.
    static void ReleaseInstance();

private:
    // Constructor and destructor are private: no external construction.
    QualityClientInstance();
    ~QualityClientInstance();

    // Copy constructor and assignment are private: no copying.
    QualityClientInstance(const QualityClientInstance &signal);
    const QualityClientInstance &operator=(const QualityClientInstance &signal);

private:
    // The unique instance pointer.
    static QualityClient* instance;
    //static std::mutex m_Mutex;
};

// Construct an idle client: no socket yet (-1), no remembered endpoint.
// setup() establishes the real state.
TCPClient::TCPClient()
	: sock(-1), address(""), port(0)
{
}

/**
 * Resolve `address`, remember the endpoint for later reconnects, and
 * start a non-blocking connect to address:port.
 *
 * @param address  dotted-quad IPv4 string or a hostname to resolve.
 * @param port     TCP port on the peer.
 * @return result of connecting(): true when the connect succeeded or is
 *         in progress, false on a hard failure.
 */
bool TCPClient::setup(string address , int port)
{
	// Lazily create the socket; non-blocking so connect()/send() never
	// stall the single-threaded service.
  	if(sock == -1)
	{
		sock = socket(AF_INET , SOCK_STREAM | SOCK_NONBLOCK , IPPROTO_TCP);
		if (sock == -1)
		{
      		cout << "Could not create socket" << endl;
			return false;
    	}
    }

	// The parameters shadow the members of the same name; the old code
	// never stored them, so reconnect paths had to hard-code the endpoint.
	this->address = address;
	this->port = port;

	// Try to parse as a numeric IPv4 address first; inet_pton reports
	// success explicitly (the old inet_addr()==-1 check conflated the
	// valid address 255.255.255.255 with failure).
  	if(inet_pton(AF_INET, address.c_str(), &server.sin_addr) != 1)
  	{
		// Not numeric: fall back to name resolution.
		struct hostent *he = gethostbyname(address.c_str());
		if (he == NULL)
		{
			herror("gethostbyname");
			cout<<"Failed to resolve hostname\n";
			return false;
		}
		// Use the first address in the resolver's list.
		struct in_addr **addr_list = (struct in_addr **) he->h_addr_list;
		if (addr_list[0] != NULL)
		{
			server.sin_addr = *addr_list[0];
		}
  	}

  	server.sin_family = AF_INET;
  	server.sin_port = htons( port );

	return connecting();
}

/**
 * Issue a non-blocking connect() to the endpoint stored in `server`.
 *
 * @return true when the connection is established or in progress (the
 *         caller observes the final outcome via get_tcp_status()),
 *         false on an error.  The old code returned false even on
 *         success.  On unrecoverable errors the socket is closed AND
 *         reset to -1 so the next setup() creates a fresh fd instead
 *         of reusing a closed one.
 */
bool TCPClient::connecting() {
	int ret = connect(sock, (struct sockaddr *)&server, sizeof(server));
	int savedErrno = ret == 0 ? 0 : errno;

    std::cout << "connect... : " << ret << ":" << savedErrno <<  std::endl;

	switch(savedErrno)
	{
		// Success, or the handshake is underway in the kernel.
		case 0:
		case EINPROGRESS:      //Operation now in progress
		case EINTR:            //Interrupted system call
		case EISCONN:          //Transport endpoint is already connected
		std::cout << "connecting..." << std::endl;
		return true;

		// Transient errors: keep the socket; a later retry may succeed.
		case EAGAIN:
		case EADDRINUSE:
		case EADDRNOTAVAIL:
		case ECONNREFUSED:
		case ENETUNREACH:
		std::cout << "reSave Error. " << savedErrno;
		break;

		// Hard failures: the fd is unusable — drop it so setup() recreates it.
		case EACCES:
		case EPERM:
		case EAFNOSUPPORT:
		case EALREADY:
		case EBADF:
		case EFAULT:
		case ENOTSOCK:
		std::cout << "connect error in Connector::startInLoop " << savedErrno;
		::close(sock);
		sock = -1;
		break;

		default:
		std::cout << "Unexpected error in Connector::startInLoop " << savedErrno;
		::close(sock);
		sock = -1;
		break;
	}

  	return false;
}


/**
 * Classify the socket's kernel-side TCP state via getsockopt(TCP_INFO).
 *
 * @return alive when ESTABLISHED, connecting during the handshake,
 *         disconnect otherwise (including "no socket" and query failure).
 */
TCPClient::status TCPClient::get_tcp_status()
{
	// No socket at all: definitely not connected.
	if (sock < 0) {
		return status::disconnect;
	}

	struct tcp_info info;
	socklen_t len = sizeof(info);

	// The old code ignored this return value and then read an
	// uninitialized `info` when the call failed (e.g. bad fd).
	if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &info, &len) != 0) {
		return status::disconnect;
	}

	if(info.tcpi_state == TCP_ESTABLISHED) {
		return status::alive;
	} else if (info.tcpi_state == TCP_SYN_SENT || info.tcpi_state == TCP_SYN_RECV) {
		return status::connecting;
	} else {
		printf("connection error : info.tcpi_state = %d != %d\n",info.tcpi_state, TCP_ESTABLISHED);
		return status::disconnect;
	}
}

/*bool Setsocketkeepalive(int sockfd)
{
    int keepAlive=1;//开启keepalive属性
    int keepIdle=10;//如该连接在10秒内没有任何数据往来,则进行探测
    int keepInterval=2;//探测时发包的时间间隔为2秒
    int keepCount=3;//探测尝试的次数。如果第1次探测包就收到响应了,则后2次的不再发送

    if(setsockopt(sockfd,SOL_SOCKET,SO_KEEPALIVE,(void *)&keepAlive,sizeof(keepAlive))!=0)//若无错误发生,setsockopt()返回值为0
    {
		return false;
    }
    if(setsockopt(sockfd, SOL_TCP, TCP_KEEPIDLE,(void *)&keepIdle, sizeof(keepIdle))!=0)
    {
		return false;
    }
    if(setsockopt(sockfd,SOL_TCP, TCP_KEEPINTVL,(void *)&keepInterval,sizeof(keepInterval))!=0)
    {
		return false;
    }
    if(setsockopt(sockfd,SOL_TCP, TCP_KEEPCNT,(void *)&keepCount,sizeof(keepCount))!=0)
    {
		return false;
    }
	return true;
}*/

bool TCPClient::Send(char* data)
{
	//check_tcp_alive(sock);
	auto stat = get_tcp_status();
	if( stat == status::alive)
	{
		int ret = send(sock , data , strlen(data) , 0);
		cout << "Send ret : " << ret << endl;
		if( ret < 0 )
		{
			cout << "Send failed : " << data << endl;
			return false;
		} else if (ret == 0) {
			cout << "connection closed : " << data << endl;
			abort();
		}
	} else if ( stat == status::connecting) {
		return false;
	} else {
		close(sock);
		close(sock);
		sock = -1;
		setup("192.168.6.45", 12358);
		return false;
	}

	return true;
}

/**
 * Read at most `size` bytes from the socket.
 *
 * @param size  maximum number of bytes to read (default 4096).
 * @return the bytes actually received; empty string on failure, on a
 *         non-positive `size`, or when the peer has closed (recv == 0).
 *         The old code returned nullptr — undefined behaviour when
 *         constructing the std::string return value.
 */
string TCPClient::receive(int size)
{
	if (size <= 0) {
		return string();
	}

	// Heap-backed buffer: replaces the non-standard variable-length
	// array and avoids large stack allocations.
	string buffer(size, '\0');

	int ret = recv(sock, &buffer[0], size, 0);
	if (ret < 0)
	{
		cout << "receive failed!" << endl;
		return string();
	}

	// Keep exactly what was received instead of blindly truncating at
	// size-1 as the old code did.
	buffer.resize(ret);
	return buffer;
}

/**
 * Read one line from the socket, a byte at a time, until '\n'.
 *
 * @return the line including its trailing '\n' (matching the original
 *         behaviour); empty string on a recv error.  If the peer closes
 *         before a newline arrives (recv == 0) the partial line is
 *         returned — the old code spun forever in that case.  Returning
 *         nullptr (as before) was UB when constructing std::string.
 */
string TCPClient::read()
{
	char ch = 0;
	string reply;
	while (ch != '\n') {
		int ret = recv(sock, &ch, 1, 0);
		if (ret < 0)
		{
			cout << "receive failed!" << endl;
			return string();
		}
		if (ret == 0)
		{
			// Peer closed the connection; stop instead of looping forever.
			break;
		}
		reply += ch;
	}
	return reply;
}

// Close the socket (if open) and reset the fd to -1 so a later setup()
// creates a fresh descriptor instead of reusing a stale, closed one.
void TCPClient::exit()
{
    if (sock != -1) {
        close( sock );
        sock = -1;
    }
}

// Bounded retry buffer: hold at most 256 pending payload pointers.
QualityClient::QualityClient()
	: capacity(256), size(0)
{
}

QualityClient::~QualityClient() = default;  // nothing to release; queue cleans itself up

/**
 * Publish one quality sample.  On failure the payload pointer is queued
 * (oldest entry dropped once `capacity` is reached); after the next
 * successful send the backlog is flushed oldest-first.
 *
 * The queue stores raw pointers: the caller must keep the bytes alive
 * until delivered (string literals / static buffers are safe).
 *
 * @return true when `data` itself was sent.  The old code was declared
 *         bool but fell off the end — undefined behaviour.
 */
bool QualityClient::pub_quality(char* data) {
	bool success = Send(data);
	if (!success) {
		// Bounded buffer: evict the oldest entry to make room.
		if (send_buffer.size() >= static_cast<std::size_t>(capacity)) {
			send_buffer.pop();
		}
		send_buffer.push(data);
	}

	// Drain the backlog in FIFO order.  The old loop sent back() (the
	// newest entry) while popping front(), so it re-sent the newest
	// payload repeatedly and silently discarded the oldest unsent ones.
	while (success && !send_buffer.empty())
	{
		if (Send(send_buffer.front())) {
			send_buffer.pop();
		} else {
			break;  // link dropped again; keep the rest for later
		}
	}

	return success;
}


// Definition and initialization of the static member variable.
QualityClient *QualityClientInstance::instance = nullptr;
//std::mutex QualityClientInstance::m_Mutex;

// NOTE: return the pointer by value, not a reference to it — otherwise
// callers could overwrite the stored instance from outside.
QualityClient * QualityClientInstance::GetInstance()
{

    // The commented-out code below is a double-checked lock: only take
    // the mutex when the pointer is still null, so the (non-trivial)
    // locking cost is paid only during first-time initialization.
    // It is disabled because the service is single-threaded.
    //if (m_SingleInstance == nullptr) 
    //{
        //std::unique_lock lock(m_Mutex); // lock
	if (instance == nullptr)
	{
		instance = new QualityClient();
	}
    //}

    return instance;
}

// Tear down the singleton; intended to run once at process exit.
void QualityClientInstance::ReleaseInstance()
{
    //std::unique_lock lock(m_Mutex); // lock (disabled: single-threaded)
    if (instance != nullptr)
    {
        delete instance;
        instance = nullptr;
    }
}


// Demo driver: connect to the test server and publish a sample once a
// second, forever.  Send()/pub_quality() handle reconnection internally.
int main() {
    std::cout << "start" << std::endl;

	QualityClient* tcp = QualityClientInstance::GetInstance();
	tcp->setup("192.168.6.45", 12358);

	// String literals are const char*; pub_quality() takes char*, and the
	// const-dropping conversion is ill-formed since C++11 — use a
	// writable buffer (it also outlives any queued retry pointer).
	char payload[] = "haapy";

	while(1)
	{
		tcp->pub_quality(payload);

        sleep(1);
	}

	// Unreachable with the infinite loop above; kept as documentation of
	// the intended shutdown path.
	QualityClientInstance::ReleaseInstance();
	return 0;
}

测试服务:

package main

import(

"log"

"fmt"

"net"

//"time"

)

// connHandlerTcp serves a single client connection: it reads the stream
// in 1KB chunks and logs every payload until the peer closes or a read
// error occurs.
func connHandlerTcp(c net.Conn) {
    // Guard against a nil connection being handed in.
    if c == nil {
        log.Panic("无效的 socket 连接")
    }
    // Always release the connection when this handler returns
    // (the old code could leak it on some exit paths).
    defer c.Close()

    buf := make([]byte, 1024)
    for {
        cnt, err := c.Read(buf)
        // Read may return data AND an error in the same call; the old
        // order of checks silently dropped that final chunk.
        if cnt > 0 {
            fmt.Println("get msg : ", cnt, ":", string(buf[0:cnt]))
        }
        // Any error (including io.EOF) — or a zero-byte read, which the
        // original treated as a close — ends the session.
        if err != nil || cnt == 0 {
            fmt.Println("connection close")
            return
        }
    }
}

// main runs a trivial TCP echo-less receiver on 0.0.0.0:12358, spawning
// one goroutine per accepted client.
func main(){

	listen, err := net.Listen("tcp", "0.0.0.0:12358")

	if err!=nil{
		// A failed listen is fatal.  panic never returns, so the old
		// `return` that followed it was unreachable and has been dropped.
		panic(err)
	}
	defer listen.Close() // close the listener when main exits
	
	fmt.Println("listening success:", listen.Addr())
	
	// Accept clients in a loop.
	fmt.Println("等待客户端来连接..")
	for{
		conn, err := listen.Accept()
		if err!=nil{
			// A single transient Accept failure should not kill the whole
			// server (the old code panicked here); log it and keep serving.
			log.Println("accept error:", err)
			continue
		}
		fmt.Printf("客户端%s已连接..", conn.RemoteAddr().String())
		// One goroutine per client connection.
		go connHandlerTcp(conn)
	}
}

你可能感兴趣的:(Go,后端笔记,网络编程,linux,c++,tcp/ip)