(感谢chasenzhang5指导)
编译环境:Ubuntu 18.04,Qt5.11.2
有关技术:C++,epoll机制,线程池,Reactor模式
C10K_server.pro
TEMPLATE = app
CONFIG += console c++11
CONFIG -= app_bundle
CONFIG -= qt
LIBS += -lpthread
SOURCES += \
main.cpp \
threadpool.cpp \
wrap.cpp \
systeminfo.cpp \
queueoperation.cpp \
savedata.cpp \
buffer.cpp \
epoll_serv.cpp \
my_thread.cpp
HEADERS += \
threadpool.h \
commoninc.h \
wrap.h \
systeminfo.h \
queueoperation.h \
savedata.h \
buffer.h \
epoll_serv.h \
my_thread.h
commoninc.h
/******************
Common header for the server sources.
NOTE(review): the original header names were lost in extraction (bare
"#include" lines) and the closing #endif was missing. The list below is
reconstructed from what the server sources actually use (iostream/iomanip,
queue/map/vector/string, pthreads, epoll, sockets, sysinfo, fcntl) — verify
against the original repository.
******************/
#ifndef COMMONINC_H
#define COMMONINC_H
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <ctime>
#include <iostream>
#include <iomanip>
#include <string>
#include <queue>
#include <map>
#include <vector>
#include <pthread.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/sysinfo.h>
#endif // COMMONINC_H
main.cpp
#include "commoninc.h"
#include "my_thread.h"
#include "queueoperation.h"
#include "epoll_serv.h"
#include "threadpool.h"
#include "wrap.h"
using namespace std;
// Protects the shared event queue that main (producer) and the sub-reactor
// thread (consumer) exchange epoll events through.
pthread_mutex_t count_mutex_main = PTHREAD_MUTEX_INITIALIZER;
// Entry point: the main Reactor. Polls epoll for ready fds and pushes them
// onto a shared queue; worker threads drain the queue.
int main()
{
cout << "Start main Reactor!" << endl;
Epoll_serv epollSer(8000, 10000); //main thread: waits for active fds (port 8000, up to 10000 clients)
MyThread::thread_subReactorRun(); //thread 1: handles I/O requests (accept/read)
MyThread::thread_monitorRun(); //thread 2: communicates with the monitor
//thread pool that processes the queued payload data
ThreadPool threadpool(5, MyThread::pthread_dealData, (void *)"start deal data");
while(1)
{
int nfds = epollSer.epollNfds();
for(int i = 0;i < nfds;i++)
{
//pushing onto the shared queue requires the lock
pthread_mutex_lock(&count_mutex_main);
QueueOperation::queEvsPush(epollSer.getEventsIndex(i));
pthread_mutex_unlock(&count_mutex_main);
}
usleep(100);
}
// NOTE(review): unreachable — the while(1) above never exits, so the mutex is
// never destroyed (harmless for a process-lifetime mutex).
pthread_mutex_destroy(&count_mutex_main);
return 0;
}
buffer.h
/***************
Message packing/parsing helpers. All messages share one frame layout:
byte[0] = total length, byte[1] = type ('E'/'D'/'C'), then '|'-separated
ASCII fields, terminated by '$'.
***************/
#ifndef BUFFER_H
#define BUFFER_H
#include "savedata.h"
//build the reply sent back to a newly connected client (type 'E')
void returnCliData(char *p, int len, int fd);
//this host's system stats -> buff (type 'D')
void sysInfoToBuff(char *info, int len);
//stats of the client being watched -> buff (appended at offset info[0])
void listenCliDataToBuff(char *info, int len, SaveData::s_sysInfo &saveData);
//cluster-wide load summary -> buff (appended at offset info[0])
void wholeRate2Buff(char *info, int len, int rateCpu, int rateMem);
//cluster max connection count -> buff (appended at offset info[0])
void fdMinMax(char *info, int len, int fdMax);
//parse a command message payload into dst
void deCode_Command(const char *str, char *dst, int len);
//parse an ordinary data message payload into sysData_t
void deCode_Data(const char *str, SaveData::s_sysInfo &sysData_t);
//validate framing; returns the type byte, or -1 on a malformed message
char checkData(const char *str);
#endif // BUFFER_H
buffer.cpp
#include "buffer.h"
#include "systeminfo.h"
// Builds the greeting sent to a client right after it connects:
// [0]=total length, [1]='E', then the client's fd in ASCII, '|', '$'.
// BUG FIX: the original wrote the digits/terminators first and only then
// checked `j > len`, i.e. the overflow had already happened; the check is
// now performed before anything is written.
void returnCliData(char *p, int len, int fd)
{
    char buf[10] = {0};
    // render the fd as decimal text; n is the digit count on success
    int n = snprintf(buf, sizeof(buf), "%d", fd);
    // required space: 2-byte header + digits + '|' + '$'
    if (n < 0 || 2 + n + 2 > len)
    {
        perror("return data error!");
        return;
    }
    int j = 2; // payload starts after the header
    for (int i = 0; i < n; i++)
    {
        p[j++] = buf[i];
    }
    p[j++] = '|';
    p[j++] = '$'; // end-of-message marker
    p[0] = j;     // total message length
    p[1] = 'E';   // message type
}
// Packs this host's CPU/memory stats into `info` as
// "<len>D<cpu>|<memUsed>|<memTotal>|$". `len` is the capacity of `info`.
// BUG FIX: the original copy loops ran `for(i = 0; i < len; i++)`, copying
// `len` bytes out of a 10-byte scratch buffer — an out-of-bounds read AND a
// write far past the intended fields. Only the characters sprintf produced
// are copied now, bounded by the destination capacity.
void sysInfoToBuff(char *info, int len)
{
    char buf[10] = {0};
    int n = 0;
    int j = 2; // bytes 0/1 are reserved for length and type
    SystemInfo::captureCpuRute();
    SystemInfo::captureMemory();
    n = sprintf(buf, "%d", SystemInfo::getCpuRate());
    for (int i = 0; i < n && j < len; i++)
        info[j++] = buf[i];
    info[j++] = '|';
    n = sprintf(buf, "%d", SystemInfo::getMemoryUse());
    for (int i = 0; i < n && j < len; i++)
        info[j++] = buf[i];
    info[j++] = '|';
    n = sprintf(buf, "%d", SystemInfo::getMemoryTotal());
    for (int i = 0; i < n && j < len; i++)
        info[j++] = buf[i];
    info[j++] = '|';
    info[j++] = '$'; // end marker
    info[0] = j;     // total length
    info[1] = 'D';   // type: data
}
// Appends the watched client's CPU/memory stats to the partially-built
// monitor message. info[0] holds the current write offset on entry and the
// new total length on exit.
// FIX: the original only detected overflow AFTER writing past the end of
// `info`; every digit copy is now additionally bounded by `len`.
void listenCliDataToBuff(char *info, int len, SaveData::s_sysInfo &saveData)
{
    char buf[10] = {0};
    int j = info[0]; // continue where the previous field stopped
    sprintf(buf, "%d", saveData.m_cpuRate);
    for (int i = 0; i < 10 && buf[i] != '\0' && j < len; i++)
        info[j++] = buf[i];
    info[j++] = '|';
    sprintf(buf, "%d", saveData.m_memoryUse);
    for (int i = 0; i < 10 && buf[i] != '\0' && j < len; i++)
        info[j++] = buf[i];
    info[j++] = '|';
    sprintf(buf, "%d", saveData.m_memoryTotal);
    for (int i = 0; i < 10 && buf[i] != '\0' && j < len; i++)
        info[j++] = buf[i];
    info[j++] = '|';
    info[j++] = '$';
    info[0] = j;
    // report (residual) overflow of the separators/terminator
    if (j > len)
    {
        perror("data buf out of range!");
    }
}
// Appends the cluster-wide load summary ("<cpu%>|<mem%>|$") to the monitor
// message. info[0] holds the current write offset on entry and the new
// total on exit.
// BUG FIX: the second field was serialized from `rateCpu` instead of
// `rateMem`, so the monitor never saw the memory figure.
void wholeRate2Buff(char *info, int len, int rateCpu, int rateMem)
{
    char buf[10] = {0};
    int j = info[0];
    sprintf(buf, "%d", rateCpu);
    for (int i = 0; i < 10 && buf[i] != '\0' && j < len; i++)
        info[j++] = buf[i];
    info[j++] = '|';
    sprintf(buf, "%d", rateMem); // FIX: was rateCpu
    for (int i = 0; i < 10 && buf[i] != '\0' && j < len; i++)
        info[j++] = buf[i];
    info[j++] = '|';
    info[j++] = '$';
    info[0] = j;
    // length check
    if (j > len)
    {
        perror("info buff out of range!");
    }
}
// Appends the cluster's connection count ("<fdMax>|$") to the partially
// built monitor message. info[0] is the current write offset on entry and
// the new total length on exit.
void fdMinMax(char *info, int len, int fdMax)
{
    char digits[10] = {0};
    int pos = info[0];
    sprintf(digits, "%d", fdMax);
    for (const char *d = digits; d < digits + 10 && *d != '\0'; ++d)
    {
        info[pos++] = *d;
    }
    info[pos++] = '|';
    info[pos++] = '$';
    info[0] = pos;
    // flag overflow of the fixed-size message buffer
    if (pos > len)
    {
        perror("info buff out of range!");
    }
}
// Copies a command payload out of `str` into `dst`, which has capacity
// `len`; `dst` is always NUL-terminated.
// BUG FIX: the original copied a hard-coded 20 characters regardless of
// `len` — with len < 21 it could overflow `dst`, and with exactly 20 chars
// copied it left `dst` unterminated. The copy is now bounded by len-1.
void deCode_Command(const char *str, char *dst, int len)
{
    memset(dst, 0, len);
    int j = 0;
    for (int i = 0; i < len - 1 && str[i] != '\0'; i++)
    {
        dst[j++] = str[i];
    }
}
// Parses a data payload "cpu|memUse|memTotal|$" into sysData_t.
// BUG FIX: the digit test was `str[i] < '9'`, silently dropping every '9'
// from parsed numbers — it is now `<= '9'`. The field index is also bounded
// so malformed input with many '|' cannot overflow tmp[].
void deCode_Data(const char *str, SaveData::s_sysInfo &sysData_t)
{
    int tmp[10] = {0}; // parsed fields
    int temp = 0;
    int j = 0;
    // decode until the '$' terminator
    for (int i = 0; str[i] != '$'; i++)
    {
        if (str[i] >= '0' && str[i] <= '9') // FIX: was < '9'
        {
            temp = temp * 10 + str[i] - '0';
        }
        else if (str[i] == '|' && j < 10) // field separator
        {
            tmp[j++] = temp;
            temp = 0;
        }
    }
    // exactly three fields expected: cpu | memUse | memTotal
    if (j == 3)
    {
        sysData_t.m_cpuRate = tmp[0];
        sysData_t.m_memoryUse = tmp[1];
        sysData_t.m_memoryTotal = tmp[2];
    }
    else
        printf("receive data error!\n");
}
// Validates a framed message: byte 0 carries the declared total length and
// the byte at that length - 1 must be the '$' terminator.
// Returns the type byte (str[1]) on success, -1 on a malformed frame.
char checkData(const char *str)
{
    const int declaredLen = str[0];
    if (str[declaredLen - 1] != '$')
    {
        perror("receive data error!\n");
        return -1;
    }
    // frame looks sane: hand back the message type
    return str[1];
}
systeminfo.h
/*******************
System information sampling (server side): CPU utilisation and memory
figures read from /proc/stat and sysinfo(2), cached in static members.
*******************/
#ifndef SYSTEMINFO_H
#define SYSTEMINFO_H
#include "commoninc.h"
class SystemInfo
{
public:
typedef struct cpuInfo
{
char name[8];
//all counters below are cumulative jiffies since boot
unsigned long user; //user-mode time, excluding negative-nice processes
unsigned long nice; //time spent by negative-nice processes
unsigned long system; //kernel time
unsigned long idle; //idle time other than disk I/O wait
unsigned long iowait; //disk I/O wait time
unsigned long irq; //hard-interrupt time
unsigned long softirq; //soft-interrupt time
}cpu_info;
static void captureCpuRute(); //sample CPU utilisation into m_cpuRate
static void captureMemory(); //sample memory figures
static int getCpuRate(); //last sampled CPU utilisation
static int getMemoryUse(); //last sampled memory in use
static int getMemoryTotal(); //last sampled total memory
private:
static int m_cpuRate; //0-100
static int m_memoryTotal; //total memory
static int m_memoryUse; //memory in use
static unsigned long getCpuTotalTime(cpuInfo *m_cpuInfo); //sum of all CPU time slices
static void displayCpuInfo(cpuInfo *m_cpuInfo); //print a cpuInfo record
static int getCpuInfo(cpuInfo *m_cpuInfo); //read /proc/stat into a cpuInfo
static unsigned long getCpuUsageRate(); //compute CPU utilisation
};
#endif // SYSTEMINFO_H
systeminfo.cpp
#include "systeminfo.h"
// Definitions of the cached sample values (class-wide, single instances).
int SystemInfo::m_cpuRate = 0;
int SystemInfo::m_memoryTotal = 0;
int SystemInfo::m_memoryUse = 0;
// Total jiffies across every accounting category since boot.
unsigned long SystemInfo::getCpuTotalTime(cpuInfo *m_cpuInfo)
{
    unsigned long total = 0;
    total += m_cpuInfo->user;
    total += m_cpuInfo->nice;
    total += m_cpuInfo->system;
    total += m_cpuInfo->idle;
    total += m_cpuInfo->iowait;
    total += m_cpuInfo->irq;
    total += m_cpuInfo->softirq;
    return total;
}
// Reads the aggregate "cpu" line from /proc/stat into *m_cpuInfo.
// Returns 0 on success, -1 on failure.
// BUG FIX: the original overwrote the caller's pointer with a locally
// new'd struct, parsed into that temporary and then deleted it — the
// caller's struct was never filled (so getCpuUsageRate only ever saw
// zeros). It also never checked fopen/fgets.
int SystemInfo::getCpuInfo(cpuInfo *m_cpuInfo)
{
    if (m_cpuInfo == nullptr)
    {
        return -1;
    }
    FILE *fd = fopen("/proc/stat", "r");
    if (fd == nullptr)
    {
        perror("GetCpuInfo: fopen /proc/stat error");
        return -1;
    }
    char buf[1024];
    memset(buf, '\0', sizeof(buf));
    if (fgets(buf, sizeof(buf), fd) != nullptr)
    {
        sscanf(buf, "%s %lu %lu %lu %lu %lu %lu %lu", m_cpuInfo->name, &m_cpuInfo->user,
               &m_cpuInfo->nice, &m_cpuInfo->system, &m_cpuInfo->idle, &m_cpuInfo->iowait,
               &m_cpuInfo->irq, &m_cpuInfo->softirq);
    }
    fclose(fd);
    return 0;
}
// CPU utilisation (0-100) measured over a short sampling window.
// BUG FIX: the original took its two /proc/stat snapshots back-to-back,
// so the jiffy delta was (almost always) zero and the function reported 0%.
// A 100 ms window between the snapshots yields a meaningful delta.
unsigned long SystemInfo::getCpuUsageRate()
{
    unsigned long totalTime, idleTime;
    unsigned long cpu_use = 0;
    cpuInfo old_CPU_Time, new_CPU_Time;
    memset(&old_CPU_Time, 0, sizeof(cpuInfo));
    memset(&new_CPU_Time, 0, sizeof(cpuInfo));
    getCpuInfo(&old_CPU_Time);
    usleep(100 * 1000); // 100 ms sampling window between the two snapshots
    getCpuInfo(&new_CPU_Time);
    totalTime = getCpuTotalTime(&new_CPU_Time) - getCpuTotalTime(&old_CPU_Time);
    idleTime = new_CPU_Time.idle - old_CPU_Time.idle;
    if (totalTime != 0)
    {
        cpu_use = 100 * (totalTime - idleTime) / totalTime;
    }
    return cpu_use;
}
// Samples CPU utilisation and caches it in m_cpuRate.
// FIX: the template argument of static_cast was lost in the original
// (`static_cast(...)` does not compile); restored the intended <int>.
void SystemInfo::captureCpuRute()
{
    m_cpuRate = static_cast<int>(getCpuUsageRate());
}
// Samples memory figures via sysinfo(2) and caches them in MB.
// FIX: the <int> template arguments of static_cast were lost in the
// original text; restored.
void SystemInfo::captureMemory()
{
    struct sysinfo info; // kernel-reported memory/load statistics
    int ret = sysinfo(&info);
    if (ret == 0)
    {
        // totalram: total usable main memory; freeram: available memory
        m_memoryTotal = static_cast<int>(info.totalram / 1024 / 1024);
        m_memoryUse = static_cast<int>((info.totalram - info.freeram) / 1024 / 1024);
    }
    else
    {
        perror("GetMemInfo: sysinfo() error");
    }
}
// Accessors for the cached samples (call captureCpuRute/captureMemory first).
int SystemInfo::getCpuRate() //last sampled CPU utilisation (0-100)
{
return m_cpuRate;
}
int SystemInfo::getMemoryUse() //last sampled memory in use (MB)
{
return m_memoryUse;
}
int SystemInfo::getMemoryTotal() //last sampled total memory (MB)
{
return m_memoryTotal;
}
// Debug helper: prints one raw /proc/stat cpu record.
void SystemInfo::displayCpuInfo(cpuInfo *m_cpuInfo)
{
printf("%s %lu %lu %lu %lu %lu %lu %lu\n", m_cpuInfo->name, m_cpuInfo->user, m_cpuInfo->nice, m_cpuInfo->system,
m_cpuInfo->idle, m_cpuInfo->iowait, m_cpuInfo->irq, m_cpuInfo->softirq);
}
queueoperation.h
/***********************
Shared FIFO queues that decouple the main reactor (producer) from the
worker threads (consumers). The queues themselves are NOT synchronised;
callers must hold the appropriate mutex around push/empty/pop sequences.
FIX: the template arguments were lost in the original text
("queuequeEvs;" etc.); restored to the element types implied by the
member functions below.
***********************/
#ifndef QUEUEOPERATION_H
#define QUEUEOPERATION_H
#include "commoninc.h"
using namespace std;
class QueueOperation
{
public:
    static queue<struct epoll_event> queEvs; // epoll events awaiting dispatch
    static queue<string> queStr;             // received payloads
    static queue<int> queEvFd;               // fd paired with each payload
    static void queEvsPush(struct epoll_event evs);
    static void queStrPush(string str);
    static void queEvFdPush(int fd);
    static bool queEvsIsEmpty();
    static bool queStrIsEmpty();
    static bool queEvFdIsEmpty();
    static epoll_event queEvsPop();
    static string queStrPop();
    static int queEvFdPop();
};
#endif // QUEUEOPERATION_H
queueoperation.cpp
#include "queueoperation.h"
queueQueueOperation::queEvs;
queueQueueOperation::queStr;
queueQueueOperation::queEvFd;
// Thin wrappers over std::queue. None of these lock; the caller holds the
// relevant mutex around any push/empty/pop sequence.
void QueueOperation::queEvsPush(struct epoll_event evs)
{
queEvs.push(evs); //insert at the tail of the queue
}
void QueueOperation::queStrPush(string str)
{
queStr.push(str);
}
void QueueOperation::queEvFdPush(int fd)
{
queEvFd.push(fd);
}
bool QueueOperation::queEvsIsEmpty()
{
return queEvs.empty();
}
bool QueueOperation::queStrIsEmpty()
{
return queStr.empty();
}
bool QueueOperation::queEvFdIsEmpty()
{
return queEvFd.empty();
}
// Pop operations: undefined on an empty queue — check *IsEmpty() first.
epoll_event QueueOperation::queEvsPop()
{
struct epoll_event ev = queEvs.front(); //copy of the head element
queEvs.pop(); //remove the head element
return ev;
}
string QueueOperation::queStrPop()
{
string str = queStr.front();
queStr.pop();
return str;
}
int QueueOperation::queEvFdPop()
{
int evfd = queEvFd.front();
queEvFd.pop();
return evfd;
}
savedata.h
/*****************
Data packaging and storage: the global per-client registry, keyed by the
client's socket fd.
FIX: the template arguments of the map were lost in the original text
("static map m_saveData;"); restored as map<int, s_saveData> — the code
indexes it with the connection fd (e.g. m_saveData[connfd]).
*****************/
#ifndef SAVEDATA_H
#define SAVEDATA_H
#include "commoninc.h"
using namespace std;
class SaveData
{
public:
    typedef struct s_sysInfo
    {
        int m_cpuRate;     // 0-100
        int m_memoryTotal; // total memory (MB)
        int m_memoryUse;   // memory in use (MB)
    } s_sysInfo;
    typedef struct s_cliData
    {
        int fd;      // client socket fd
        char ip[16]; // client IP address (dotted quad)
        int port;    // client port
    } s_cliData;
    typedef struct s_saveData
    {
        s_sysInfo s_sysinfo;
        s_cliData s_clidata;
    } s_saveData;
    // global table: fd -> last reported stats + connection info
    static map<int, s_saveData> m_saveData;
};
#endif // SAVEDATA_H
savedata.cpp
#include "savedata.h"
// Definition of the global client table (template arguments restored;
// the original text read "map SaveData::m_saveData;").
map<int, SaveData::s_saveData> SaveData::m_saveData;
threadpool.h
/*******************
Thread pool creation and operation.
*******************/
#ifndef THREADPOOL_H
#define THREADPOOL_H
#include "commoninc.h"
//pool synchronisation state
typedef struct condition
{
pthread_mutex_t p_mutex; //mutex guarding the pool
pthread_cond_t p_cond; //condition variable for task arrival/shutdown
}condition_t;
//one unit of work queued on the pool
typedef struct task
{
void *(*run)(void *args); //function pointer: the task to execute
void *arg; //argument passed to run
struct task *next; //next task in the queue
}task_t;
//thread-pool state
typedef struct threadpool
{
condition_t ready; //synchronisation state
task_t *first_task; //head of the task queue
task_t *last_task; //tail of the task queue
int max_threads; //maximum number of pool threads
int idle_threads; //threads currently waiting for work
int had_threads; //threads currently alive
int shutdown; //set when the pool is being destroyed
}threadpool_t;
class ThreadPool
{
public:
ThreadPool(int threadNum, void *(*run)(void *args), void *arg);
~ThreadPool();
//initialise an empty pool capped at `threads` threads
static void threadPool_init(threadpool_t *pool, int threads);
//enqueue one task on the pool
static void threadPool_add_task(threadpool_t *pool, void *(*run)(void *arg), void *arg);
//drain and destroy the pool
static void threadPool_destroy();
private:
static threadpool_t m_pool;
//initialise the mutex/condvar pair
static int condition_init(condition_t *cond);
//lock the pool mutex
static int condition_lock(condition_t *cond);
//unlock the pool mutex
static int condition_unlock(condition_t *cond);
//wait on the condvar (caller must hold the mutex)
static int condition_wait(condition_t *cond);
//wait on the condvar with an absolute deadline
static int condition_timedwait(condition_t *cond, const struct timespec *fixedTime);
//wake one waiting thread
static int condition_signal(condition_t *cond);
//wake all waiting threads
static int condition_broadcast(condition_t *cond);
//destroy the mutex/condvar pair
static int condition_destroy(condition_t *cond);
//entry point executed by every pool thread
static void *thread_routine(void *arg);
};
#endif // THREADPOOL_H
threadpool.cpp
#include "threadpool.h"
// The single class-wide pool instance.
threadpool_t ThreadPool::m_pool;
// Initialises the pool and immediately queues `threadNum` copies of `run`.
// NOTE(review): every queued task is the same long-running worker function,
// so this effectively starts threadNum permanent workers — confirm intended.
ThreadPool::ThreadPool(int threadNum, void *(*run)(void *arg), void *arg)
{
//initialise the pool with at most threadNum threads
threadPool_init(&m_pool, threadNum);
for(int i = 0;i < threadNum;i++)
{
threadPool_add_task(&m_pool, run, arg);
}
}
ThreadPool::~ThreadPool()
{
threadPool_destroy();
}
// Puts a freshly-created pool into its initial, empty state: no queued
// tasks, no threads, shutdown flag clear, cap set to `threads`.
void ThreadPool::threadPool_init(threadpool_t *pool, int threads)
{
    condition_init(&pool->ready);
    pool->first_task = nullptr;
    pool->last_task = nullptr;
    pool->max_threads = threads;
    pool->had_threads = 0;
    pool->idle_threads = 0;
    pool->shutdown = 0;
}
// Enqueues one task; wakes an idle worker if any, otherwise spawns a new
// thread while below max_threads.
// FIX: restored the <task_t *> template argument on static_cast (the
// original read `static_cast(malloc(...))`, which does not compile), and
// added a malloc failure check.
void ThreadPool::threadPool_add_task(threadpool_t *pool, void *(*run)(void *arg), void *arg)
{
    task_t *newtask = static_cast<task_t *>(malloc(sizeof(task_t)));
    if (newtask == nullptr)
    {
        perror("threadPool_add_task: malloc error");
        return;
    }
    newtask->run = run;
    newtask->arg = arg;
    newtask->next = nullptr; // new tasks join the tail of the queue
    // the pool state is shared across threads: lock before touching it
    condition_lock(&pool->ready);
    if (pool->first_task == nullptr) // first task in an empty queue
    {
        pool->first_task = newtask;
    }
    else
    {
        pool->last_task->next = newtask;
    }
    pool->last_task = newtask; // tail now points at the new task
    // wake an idle worker if one is waiting...
    if (pool->idle_threads > 0)
    {
        condition_signal(&pool->ready);
    }
    // ...otherwise create a new thread while below the cap
    else if (pool->had_threads < pool->max_threads)
    {
        pthread_t pid;
        pthread_create(&pid, nullptr, thread_routine, pool);
        pool->had_threads++;
    }
    condition_unlock(&pool->ready);
}
// Drains and destroys the pool: sets the shutdown flag, wakes every waiting
// worker, then blocks until the last worker signals that it has exited.
void ThreadPool::threadPool_destroy()
{
//already shut down: nothing to do
if(m_pool.shutdown)
{
return;
}
//lock the shared pool state
condition_lock(&m_pool.ready);
//mark the pool as shutting down
m_pool.shutdown = 1;
//only wait if any threads are alive
if(m_pool.had_threads > 0)
{
//wake all idle workers so they observe the shutdown flag
if(m_pool.idle_threads > 0)
{
condition_broadcast(&m_pool.ready);
}
//busy workers finish their task first; the last one out signals us
//(see thread_routine: it signals when had_threads reaches 0)
while(m_pool.had_threads)
{
condition_wait(&m_pool.ready);
}
}
condition_unlock(&m_pool.ready);
condition_destroy(&m_pool.ready);
}
// Initialises the mutex/condvar pair. Returns 0 on success or the pthread
// error code on failure.
// BUG FIX: the original tested `if (status == pthread_mutex_init(...))`
// with status == 0 — i.e. it returned early on SUCCESS, leaving the
// condition variable uninitialised, and fell through on failure. Both
// primitives are now always initialised and real failures are propagated.
int ThreadPool::condition_init(condition_t *cond)
{
    int status = pthread_mutex_init(&cond->p_mutex, nullptr);
    if (status != 0)
    {
        return status;
    }
    status = pthread_cond_init(&cond->p_cond, nullptr);
    if (status != 0)
    {
        pthread_mutex_destroy(&cond->p_mutex); // don't leak the mutex
        return status;
    }
    return 0;
}
//lock the pool mutex
int ThreadPool::condition_lock(condition_t *cond)
{
return pthread_mutex_lock(&cond->p_mutex);
}
//unlock the pool mutex
int ThreadPool::condition_unlock(condition_t *cond)
{
return pthread_mutex_unlock(&cond->p_mutex);
}
//wait on the condvar (caller must already hold the mutex)
int ThreadPool::condition_wait(condition_t *cond)
{
return pthread_cond_wait(&cond->p_cond, &cond->p_mutex);
}
//wait on the condvar until the absolute deadline `fixedTime`
int ThreadPool::condition_timedwait(condition_t *cond, const struct timespec *fixedTime)
{
return pthread_cond_timedwait(&cond->p_cond, &cond->p_mutex, fixedTime);
}
//wake one waiting thread
int ThreadPool::condition_signal(condition_t *cond)
{
return pthread_cond_signal(&cond->p_cond);
}
//wake all waiting threads
int ThreadPool::condition_broadcast(condition_t *cond)
{
return pthread_cond_broadcast(&cond->p_cond);
}
//destroy the pair; returns the first pthread error code encountered
int ThreadPool::condition_destroy(condition_t *cond)
{
int status;
if((status = pthread_mutex_destroy(&cond->p_mutex)))
return status;
if((status = pthread_cond_destroy(&cond->p_cond)))
return status;
return 0;
}
// Worker loop run by every pool thread: waits (up to 4 s) for a task, runs
// it with the pool unlocked, and exits on shutdown or idle timeout.
// FIX: the original printed pthread_self() via "%d" with a garbled
// `static_cast(...)` — pthread_t is not an int and the format mismatch is
// undefined behaviour; it is now printed as an unsigned long with %lu.
void *ThreadPool::thread_routine(void *arg)
{
    struct timespec fixedTime;
    int timeout;
    printf("thread %lu is starting\n", static_cast<unsigned long>(pthread_self()));
    threadpool_t *pool = (threadpool_t *)arg;
    while (1)
    {
        timeout = 0;
        // pool state is shared: lock before inspecting it
        condition_lock(&pool->ready);
        pool->idle_threads++;
        // wait until a task arrives or shutdown is requested
        while (pool->first_task == nullptr && !pool->shutdown)
        {
            printf("thread %lu is waiting\n", static_cast<unsigned long>(pthread_self()));
            // absolute deadline: now + 4 s idle timeout
            clock_gettime(CLOCK_REALTIME, &fixedTime);
            fixedTime.tv_sec += 4;
            int status;
            status = condition_timedwait(&pool->ready, &fixedTime);
            if (status == ETIMEDOUT)
            {
                printf("thread %lu wait timeout\n", static_cast<unsigned long>(pthread_self()));
                timeout = 1;
                break;
            }
        }
        pool->idle_threads--;
        if (pool->first_task != nullptr)
        {
            // dequeue the head task
            task_t *t = pool->first_task;
            pool->first_task = t->next;
            // run the task with the pool unlocked so others can proceed
            condition_unlock(&pool->ready);
            t->run(t->arg);
            // task storage was malloc'd in threadPool_add_task
            free(t);
            condition_lock(&pool->ready);
        }
        // shutdown requested and queue drained: leave the pool
        if (pool->shutdown && pool->first_task == nullptr)
        {
            pool->had_threads--;
            // last thread out notifies the waiter in threadPool_destroy
            if (pool->had_threads == 0)
            {
                condition_signal(&pool->ready);
            }
            condition_unlock(&pool->ready);
            break;
        }
        // idle timeout: retire this thread
        if (timeout == 1)
        {
            pool->had_threads--;
            condition_unlock(&pool->ready);
            break;
        }
        condition_unlock(&pool->ready);
    }
    printf("thread %lu is exiting\n", static_cast<unsigned long>(pthread_self()));
    return nullptr;
}
epoll_serv.h
#ifndef EPOLL_SERV_H
#define EPOLL_SERV_H
#include "commoninc.h"
#include "queueoperation.h"
#include "my_thread.h"
using namespace std;
// Owns the epoll instance and listening socket of the main Reactor.
// FIX: the element types of the vectors were lost in the original text
// ("vector m_events;" / "static vector m_buff;"); restored below from how
// they are used (epoll_wait output array / byte receive buffer).
class Epoll_serv
{
public:
    Epoll_serv(const int servPort, const int cliNum);
    ~Epoll_serv();
    int epollNfds(); // number of ready events from one epoll_wait pass
    struct epoll_event getEventsIndex(int index);
    int getListenFd();
    int getEpFd();
    static void handleAccept(struct epoll_event *evs); // accept a new connection
    static void handleRead(struct epoll_event *evs);   // read client data
private:
    static int m_epfd;
    static int m_listenfd;
    int m_cliNum;
    struct epoll_event m_ev;
    vector<struct epoll_event> m_events; // epoll_wait output array
    static vector<char> m_buff;          // shared receive buffer
    static pthread_mutex_t counter_mutex_map;
    static void setnonblocking(int sock);
    static void update_events(int epfd, int connfd, int events, int op);
};
#endif // EPOLL_SERV_H
epoll_serv.cpp
#include "epoll_serv.h"
#include "wrap.h"
#include "buffer.h"
// Class-wide epoll instance and listening socket.
// NOTE(review): these initialisers run before main(), during static
// initialisation — Socket()/epoll_create failures here cannot be handled.
int Epoll_serv::m_epfd = epoll_create(10000+1); // m_cliNum + 1
int Epoll_serv::m_listenfd = Socket(AF_INET, SOCK_STREAM, 0);
// FIX: element type restored (the original read "vector Epoll_serv::m_buff(100);").
vector<char> Epoll_serv::m_buff(100);
// statically initialised mutex guarding the global client table/queues
pthread_mutex_t Epoll_serv::counter_mutex_map = PTHREAD_MUTEX_INITIALIZER;
// Builds the listening socket, binds it to servPort and registers it with
// the static epoll instance. cliNum caps the number of tracked clients.
// BUG FIXES: (1) the original only reserve()d capacity for m_events, so
// epoll_wait() in epollNfds() wrote past the vector's zero size (UB);
// resize() gives it real elements. (2) servaddr was never zeroed, leaving
// sin_zero uninitialised. (3) bind/listen results were ignored.
Epoll_serv::Epoll_serv(const int servPort, const int cliNum)
{
    m_cliNum = cliNum;
    m_events.resize(m_cliNum); // FIX: was reserve() — size must be non-zero
    // zero the shared receive buffer (constructed with size 100)
    for (size_t i = 0; i < m_buff.size(); i++)
    {
        m_buff[i] = 0;
    }
    // server address: any local interface, given port
    struct sockaddr_in servaddr;
    memset(&servaddr, 0, sizeof(servaddr)); // FIX: sin_zero was uninitialised
    servaddr.sin_family = AF_INET;
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
    servaddr.sin_port = htons(servPort);
    // SO_REUSEADDR: allow quick restart of the server on the same port even
    // while old connections linger in TIME_WAIT (otherwise bind() fails).
    int opt = 1;
    setsockopt(m_listenfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
    if (bind(m_listenfd, (struct sockaddr *)&servaddr, sizeof(servaddr)) < 0)
    {
        perror("bind error");
    }
    if (listen(m_listenfd, m_cliNum) < 0)
    {
        perror("listen error");
    }
    // watch the listening socket for readability, edge-triggered
    m_ev.data.fd = m_listenfd;
    m_ev.events = EPOLLET | EPOLLIN;
    epoll_ctl(m_epfd, EPOLL_CTL_ADD, m_listenfd, &m_ev);
}
Epoll_serv::~Epoll_serv()
{
// NOTE(review): m_epfd, m_listenfd and counter_mutex_map are static
// (class-wide), so destroying one instance tears them down for all — fine
// here because a single instance lives for the whole process.
pthread_mutex_destroy(&counter_mutex_map);
close(m_epfd);
close(m_listenfd);
}
// Switches `sock` to non-blocking mode while preserving its other status
// flags. Exits the process on fcntl failure (original error policy).
void Epoll_serv::setnonblocking(int sock)
{
    const int flags = fcntl(sock, F_GETFL); // current file status flags
    if (flags < 0)
    {
        perror("fcntl(sock, GETFL)");
        exit(1);
    }
    // F_SETFL can change O_APPEND, O_NONBLOCK, O_SYNC and O_ASYNC
    if (fcntl(sock, F_SETFL, flags | O_NONBLOCK) < 0)
    {
        perror("fcntl(sock,SETFL,opts)");
        exit(1);
    }
}
// Applies `op` (EPOLL_CTL_ADD/MOD/DEL) for `connfd` on `epfd` with the
// given event mask.
void Epoll_serv::update_events(int epfd, int connfd, int events, int op)
{
    struct epoll_event ev = {};
    ev.events = events;
    ev.data.fd = connfd;
    const int rc = epoll_ctl(epfd, op, connfd, &ev);
    if (rc != 0)
    {
        perror("epoll_ctl failed");
    }
}
int Epoll_serv::epollNfds() //returns the number of ready events
{
// NOTE(review): epoll_wait writes up to m_cliNum entries into m_events —
// the constructor must give the vector real SIZE (not just capacity) for
// this to be safe; confirm it uses resize(), not reserve().
int nfds = epoll_wait(m_epfd, m_events.data(), m_cliNum, 10); //wait up to 10 ms for events
return nfds;
}
// Returns the index-th ready event from the last epoll_wait pass
// (valid for 0 <= index < epollNfds()'s return value).
struct epoll_event Epoll_serv::getEventsIndex(int index)
{
return m_events[index];
}
int Epoll_serv::getListenFd()
{
return m_listenfd;
}
int Epoll_serv::getEpFd()
{
return m_epfd;
}
// Accepts a pending connection on the listening socket, records the client
// in the global table and registers the new fd with epoll (edge-triggered),
// then sends the client a greeting containing its fd.
// FIX: the client's port was computed but never stored in the table entry.
void Epoll_serv::handleAccept(struct epoll_event *ev)
{
    if (ev->data.fd == m_listenfd) // only the listening fd accepts
    {
        char return_data[15] = {0};
        struct sockaddr_in cliaddr;
        socklen_t cliaddr_len = sizeof(cliaddr);
        int connfd = Accept(m_listenfd, (struct sockaddr *)&cliaddr, &cliaddr_len);
        if (connfd < 0)
        {
            perror("connfd < 0");
            exit(1);
        }
        // client sockets are non-blocking (required for edge-triggered epoll)
        setnonblocking(connfd);
        const char *cli_ip = inet_ntoa(cliaddr.sin_addr);
        int cli_port = ntohs(cliaddr.sin_port);
        printf("fd = %d, connect from IP:%s, port:%d\n", connfd, cli_ip, cli_port);
        // record the client in the shared global table
        pthread_mutex_lock(&counter_mutex_map);
        SaveData::s_cliData cliData_t;
        cliData_t.fd = connfd;
        strcpy(cliData_t.ip, cli_ip); // inet_ntoa yields at most 15 chars + NUL, fits ip[16]
        cliData_t.port = cli_port;    // FIX: was never set
        SaveData::m_saveData[connfd].s_clidata = cliData_t;
        pthread_mutex_unlock(&counter_mutex_map);
        // watch the new fd for reads, edge-triggered
        update_events(m_epfd, connfd, EPOLLIN | EPOLLET, EPOLL_CTL_ADD);
        // greet the client with its assigned fd
        returnCliData(return_data, 15, connfd);
        write(connfd, return_data, 10);
    }
}
// Handles a readable/error event on a client socket: reads one buffer's
// worth of data and queues (payload, fd) for the worker threads.
// BUG FIXES: (1) the original wrote `if((n = read(...) < 0))` — operator
// precedence assigned the COMPARISON RESULT to n, so n was 0/1 and real
// payloads were misclassified as disconnects; the assignment is now
// parenthesised. (2) after closing an EPOLLRDHUP'd fd the original fell
// through and read() the closed fd; it now returns.
void Epoll_serv::handleRead(struct epoll_event *ev)
{
    if (ev->data.fd != m_listenfd && ev->events & (EPOLLIN | EPOLLERR | EPOLLRDHUP))
    {
        int sockfd = ev->data.fd;
        if (sockfd < 0)
        {
            return;
        }
        if (ev->events & EPOLLRDHUP) // peer closed its end
        {
            close(sockfd);
            printf("client is EPOLLRDHUP!\n");
            return; // FIX: do not read a closed fd
        }
        int n = 0;
        memset(m_buff.data(), 0, 100);
        if ((n = read(sockfd, m_buff.data(), 100)) < 0) // read error
        {
            // if this was the monitor connection, forget it
            if (sockfd == MyThread::getMonitorFd())
            {
                MyThread::setMonitorFd(-1);
                MyThread::setCliToMonitorFd(-1);
            }
            close(sockfd);
            printf("reading error!\n");
        }
        else if (n == 0) // orderly shutdown by the client
        {
            if (sockfd == MyThread::getMonitorFd())
            {
                MyThread::setMonitorFd(-1);
                MyThread::setCliToMonitorFd(-1);
            }
            close(sockfd);
            printf("client close to client!\n");
        }
        else // data received: enqueue payload and its fd for the workers
        {
            pthread_mutex_lock(&counter_mutex_map);
            QueueOperation::queStrPush(m_buff.data());
            QueueOperation::queEvFdPush(sockfd);
            pthread_mutex_unlock(&counter_mutex_map);
        }
    }
}
my_thread.h
#ifndef MY_THREAD_H
#define MY_THREAD_H
#include "commoninc.h"
#include "queueoperation.h"
#include "savedata.h"
using namespace std;
// Owns the long-running service threads (sub-reactor, data workers,
// monitor) and the state they share.
class MyThread
{
public:
MyThread();
~MyThread();
static void setMonitorFd(int value); //set the monitor's fd (-1 = none)
static int getMonitorFd();
static void setCliToMonitorFd(int value); //set the fd of the client the monitor watches
static int getCliToMonitorFd();
static void thread_subReactorRun(); //start the sub-reactor thread
static void thread_dealDataRun(); //start the data-processing thread
static void thread_monitorRun(); //start the monitor thread
static void *pthread_subReactor(void *arg); //sub-reactor loop
static void *pthread_dealData(void *arg); //data-processing loop
static void *pthread_monitor(void *arg); //monitor loop
private:
static char m_toMonitorData[100]; //message buffer sent to the monitor
static int m_monitorFd; //monitor fd, -1 when disconnected
static int m_cliToMoniFd; //fd of the client being watched, -1 when none
static char m_commandData[20]; //incoming command buffer
static int c_cpuScale;//cluster CPU summary
static int c_memScale; //cluster memory summary
//thread identifiers
static pthread_t mtid_subReactor;
static pthread_t mtid_dealData;
static pthread_t mtid_minotor;
static pthread_mutex_t counter_mutex_map; //guards the global client table
static pthread_mutex_t counter_mutex_que; //guards the payload queues
static void dataEvent_Command();
static void dataEvent_Data(int str_fd, SaveData::s_sysInfo &sysData_t);
};
#endif // MY_THREAD_H
my_thread.cpp
#include "my_thread.h"
#include "epoll_serv.h"
#include "buffer.h"
// Static member definitions: thread ids, locks and shared buffers/state.
pthread_t MyThread::mtid_dealData;
pthread_t MyThread::mtid_minotor;
pthread_t MyThread::mtid_subReactor;
pthread_mutex_t MyThread::counter_mutex_map = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t MyThread::counter_mutex_que = PTHREAD_MUTEX_INITIALIZER;
char MyThread::m_toMonitorData[100];
int MyThread::m_monitorFd = -1; // -1 = no monitor connected
int MyThread::m_cliToMoniFd = -1; // -1 = no client selected
char MyThread::m_commandData[20];
int MyThread::c_cpuScale = 0;
int MyThread::c_memScale = 0;
// Clears the shared message buffers.
MyThread::MyThread()
{
memset(m_toMonitorData, 0, 100);
memset(m_commandData, 0, 20);
}
MyThread::~MyThread()
{
pthread_mutex_destroy(&counter_mutex_map);
pthread_mutex_destroy(&counter_mutex_que);
}
void MyThread::setMonitorFd(int value) //set the monitor fd (-1 = none)
{
m_monitorFd = value;
}
int MyThread::getMonitorFd()
{
return m_monitorFd;
}
void MyThread::setCliToMonitorFd(int value) //set the watched client's fd (-1 = none)
{
m_cliToMoniFd = value;
}
int MyThread::getCliToMonitorFd()
{
return m_cliToMoniFd;
}
// Thread launchers. NOTE(review): pthread_create return values are ignored.
void MyThread::thread_subReactorRun() //start the sub-reactor thread
{
pthread_create(&mtid_subReactor, nullptr, pthread_subReactor, (void *)"start subReadctor.");
}
void MyThread::thread_dealDataRun() //start the data-processing thread
{
pthread_create(&mtid_dealData, nullptr, pthread_dealData, (void *)"start dealData.");
}
void MyThread::thread_monitorRun() //start the monitor thread
{
pthread_create(&mtid_minotor, nullptr, pthread_monitor, (void *)"start monitor.");
}
// Sub-reactor loop: drains the event queue fed by main() and dispatches
// each event to accept/read handling.
// NOTE(review): main() pushes onto queEvs under count_mutex_main, but this
// consumer pops WITHOUT any lock — the empty-check/pop pair races with the
// producer. The fix needs the producer's mutex shared with this file;
// confirm and synchronise.
void *MyThread::pthread_subReactor(void *arg) //reactor loop
{
cout << (char *)arg << endl;
while(1)
{
if(!QueueOperation::queEvsIsEmpty())
{
struct epoll_event events = QueueOperation::queEvsPop();
Epoll_serv::handleAccept(&events);
Epoll_serv::handleRead(&events);
}
else
usleep(10);
}
}
// Worker loop (run by each thread-pool thread): pops one (payload, fd)
// pair from the shared queues and dispatches it by message type.
// BUG FIX: the original called usleep(10) in the empty-queue branch while
// STILL HOLDING counter_mutex_que, stalling every other worker for the
// duration of the sleep; the sleep now happens after unlocking.
void *MyThread::pthread_dealData(void *arg)
{
    cout << (char *)arg << " " << endl;
    while (1)
    {
        bool deal_flag = false;
        string str;
        int str_fd = 0;
        // workers compete for queue items: take the lock for the pop
        pthread_mutex_lock(&counter_mutex_que);
        if (!QueueOperation::queStrIsEmpty())
        {
            str = QueueOperation::queStrPop();
            str_fd = QueueOperation::queEvFdPop();
            deal_flag = true;
        }
        pthread_mutex_unlock(&counter_mutex_que);
        if (deal_flag)
        {
            switch (checkData(str.data()))
            {
            case 'C': // command from the monitor
            {
                deCode_Command(str.data() + 2, (char *)&m_commandData, 20);
                dataEvent_Command();
                break;
            }
            case 'D': // periodic stats from a client
            {
                SaveData::s_sysInfo sysInfo_t;
                deCode_Data(str.data() + 2, sysInfo_t); // parse payload
                dataEvent_Data(str_fd, sysInfo_t);      // store in global table
                break;
            }
            default:
            {
                perror("checkData error!");
                break;
            }
            }
        }
        else
        {
            usleep(10); // FIX: sleep with the queue lock RELEASED
        }
    }
}
void *MyThread::pthread_monitor(void *arg) //监视器事务
{
cout << (char *)arg << endl;
while(1)
{
if(m_monitorFd != 1)
{
memset(m_toMonitorData, 0, 100);
sysInfoToBuff(m_toMonitorData, 100);
pthread_mutex_lock(&counter_mutex_map);
m_toMonitorData[0]--; //删除$
if(m_cliToMoniFd != -1)
{
listenCliDataToBuff(m_toMonitorData, 100, SaveData::m_saveData[m_cliToMoniFd].s_sysinfo);
}
else
{
SaveData::s_sysInfo sysInfo_t;
sysInfo_t.m_cpuRate = 0;
sysInfo_t.m_memoryUse = 0;
sysInfo_t.m_memoryTotal = 0;
listenCliDataToBuff(m_toMonitorData, 100, sysInfo_t);
}
m_toMonitorData[0]--;
wholeRate2Buff(m_toMonitorData, 100, c_cpuScale, c_memScale);
m_toMonitorData[0]--;
int fdMax = SaveData::m_saveData.size();
fdMinMax(m_toMonitorData, 100, fdMax);
pthread_mutex_unlock(&counter_mutex_map);
write(m_monitorFd, m_toMonitorData, strlen(m_toMonitorData));
}
else
sleep(1);
//全局表读取
pthread_mutex_lock(&counter_mutex_map);
int i = 0;
c_cpuScale = 0;
c_memScale = 0;
for(map::iterator iter = SaveData::m_saveData.begin();
iter != SaveData::m_saveData.end();iter++,i++)
{
//begin()和end()返回只读迭代器,该迭代器指向map中的第一/最后一对
SaveData::s_saveData value = iter->second;
if(value.s_sysinfo.m_cpuRate > 70)
{
c_cpuScale++;
}
if(value.s_sysinfo.m_memoryUse > 6000)
{
c_memScale++;
}
}
if(i != 0)
{
c_cpuScale = c_cpuScale * 100 / i;
c_memScale = c_memScale * 1 / i;
}
cout.setf(ios::left);
cout << setw(8) << "cliNum:" << setw(6) << SaveData::m_saveData.size()
<< ",moniListenFd:" << setw(4) << m_cliToMoniFd << endl;
pthread_mutex_unlock(&counter_mutex_map);
}
}
void MyThread::dataEvent_Command()
{
int temp = 0;
int type = 0;
if (m_commandData[0] >= '0' && m_commandData[0] <= '9')
type = 1;
else type = 2;
if (type == 1)//获取某个客户端数据
{
for (int i = 0; m_commandData[i] != '|'; i++) //'|' is end
{
temp = temp * 10 + m_commandData[i] - '0';
}
pthread_mutex_lock(&counter_mutex_map);
map::iterator iterMin = SaveData::m_saveData.begin();
int minFd = iterMin->first;
//设置需要获取数据的客户端fd
map::iterator iter =
SaveData::m_saveData.find(temp + minFd);
if (iter != SaveData::m_saveData.end())
{
m_cliToMoniFd = temp + minFd;
//printf("m_cliToMoniFd = %d\n", m_cliToMoniFd);
}
pthread_mutex_unlock(&counter_mutex_map);
temp = 0;
}
if (type == 2)//执行命令,将'|'改为'\0'即可
{
for (int i = 0; ; i++) //'|' is end
{
if (m_commandData[i] == '|')
{
m_commandData[i] = '\0';
break;
}
}
//执行相关命令
//......
}
}
// Stores a client's freshly parsed stats in the global table under its fd.
void MyThread::dataEvent_Data(int str_fd, SaveData::s_sysInfo &sysData_t)
{
//the global table is shared: write under the lock
pthread_mutex_lock(&counter_mutex_map);
SaveData::m_saveData[str_fd].s_sysinfo = sysData_t;
pthread_mutex_unlock(&counter_mutex_map);
}
C10K_client.pro
TEMPLATE = app
CONFIG += console c++11
CONFIG -= app_bundle
CONFIG -= qt
LIBS += -lpthread
SOURCES += \
main.cpp \
wrap.cpp \
sysinfo.cpp \
buff.cpp \
connection.cpp
HEADERS += \
wrap.h \
sysinfo.h \
buff.h \
commoninc.h \
connection.h
commoninc.h
#ifndef COMMONINC_H
#define COMMONINC_H
// NOTE(review): the original header names were lost in extraction (bare
// "#include" lines). The list below is reconstructed from what the client
// sources actually use (stdio/iostream, vector/string, sockets, pthreads,
// sysinfo) — verify against the original repository.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <ctime>
#include <iostream>
#include <string>
#include <vector>
#include <pthread.h>
#include <unistd.h>
#include <fcntl.h>
#include <strings.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/sysinfo.h>
#endif // COMMONINC_H
buff.h
#ifndef BUFF_H
#define BUFF_H
//packs one stats message into `data` (capacity `len`);
//k == 0 uses real system stats, k != 0 uses simulated values derived from k
void data2Buff(char *data, int len, int k);
#endif // BUFF_H
buff.cpp
#include "buff.h"
#include "sysinfo.h"
// Local helper: appends the decimal form of `value` plus a '|' separator
// at data[j]; returns the new write offset.
static int appendField(char *data, int j, int value)
{
    char digits[12] = {0};
    int n = sprintf(digits, "%d", value);
    for (int i = 0; i < n; i++)
    {
        data[j++] = digits[i];
    }
    data[j++] = '|';
    return j;
}
// Packs one stats message into `data`:
// [0]=total length, [1]='D', "<cpu>|<memUse>|<memTotal>|$".
// k == 0 sends this host's real stats; k != 0 sends simulated values
// derived from k. `len` is the capacity of `data`.
// FIX: restored the <char> template argument on static_cast (lost in the
// original as `static_cast(j)`) and factored the six identical
// sprintf/copy sequences into one helper.
void data2Buff(char *data, int len, int k)
{
    int j = 2; // bytes 0/1 hold length and type
    if (k == 0) // real data
    {
        SysInfo::captureMemory();
        SysInfo::captureCpuRate(); // note: CPU sampling sleeps ~1 s
        j = appendField(data, j, SysInfo::getCpuRate());
        j = appendField(data, j, SysInfo::getMemoryUse());
        j = appendField(data, j, SysInfo::getMemoryTotal());
    }
    else // simulated data keyed off k
    {
        j = appendField(data, j, 35 + (k % 35));
        j = appendField(data, j, 4000 + (k % 3000));
        j = appendField(data, j, 8000);
    }
    data[j++] = '$'; // end marker
    data[0] = static_cast<char>(j);
    data[1] = 'D';
    // length check
    if (j > len)
    {
        perror("data buff out of range!\n");
    }
}
connection.h
#ifndef CONNECTION_H
#define CONNECTION_H
#include "commoninc.h"
#include "sysinfo.h"
#include "buff.h"
#include "wrap.h"
using namespace std;
//m_buff is a plain array
#define MAXLINE 100
// Simulates many clients: opens cliNum connections to the server and sends
// periodic stats messages on each of them.
// FIX: the element type of the fd vector was lost in the original text
// ("vectorm_sockFdArray;"); restored as vector<int>.
class Connection
{
public:
    Connection(const char *servIP, const int port);
    ~Connection();
    void connectServer(int cliNum); // open cliNum client connections
    void connectLoop();             // send messages in a loop
private:
    struct sockaddr_in m_servaddr; // server address
    vector<int> m_sockFdArray;     // client socket fds
    char m_buff[MAXLINE];          // send buffer
    int m_cliNum;                  // number of simulated clients
};
#endif // CONNECTION_H
connection.cpp
#include "connection.h"
// Prepares the IPv4 server address from the given IP string and port.
// No sockets are opened here; connectServer() does that.
Connection::Connection(const char *servIP, const int port)
{
    memset(&m_servaddr, 0, sizeof(m_servaddr));
    m_servaddr.sin_family = AF_INET;
    m_servaddr.sin_port = htons(port);
    inet_pton(AF_INET, servIP, &m_servaddr.sin_addr);
    m_cliNum = 0;
    m_sockFdArray.clear();
}
// Closes every client socket that was successfully opened.
// NOTE(review): indexing [0, m_cliNum) is only valid if connectServer()
// gave the vector real size for m_cliNum elements — confirm it resizes,
// not merely reserves.
Connection::~Connection()
{
//close fd
for(int i = 0;i < m_cliNum;i++)
{
if(m_sockFdArray[i] != 0)
{
close(m_sockFdArray[i]);
}
}
}
// Opens cliNum connections to the server, recording each socket fd and
// printing the server's greeting for each.
// BUG FIX: the original reserve()d capacity and then wrote through
// operator[] on a size-0 vector (undefined behaviour); resize() creates
// the elements (zero-initialised) before they are assigned. Also restored
// the template argument on reinterpret_cast (lost in the original).
void Connection::connectServer(int cliNum)
{
    m_cliNum = cliNum;
    m_sockFdArray.resize(static_cast<size_t>(m_cliNum), 0); // FIX: was reserve + [] writes
    // simulate m_cliNum clients
    int sockfd = 0;
    for (int i = 0; i < m_cliNum; i++)
    {
        sockfd = Socket(AF_INET, SOCK_STREAM, 0);
        Connect(sockfd, reinterpret_cast<struct sockaddr *>(&m_servaddr), sizeof(m_servaddr));
        m_sockFdArray[i] = sockfd;
        printf("i = %d\n", i);
        // the server replies with a short greeting: [0]=length, then text
        char receive_serv[15] = {0};
        Read(sockfd, receive_serv, 10);
        printf("received = %d\n", receive_serv[0]);
        printf("%s\n", receive_serv + 1);
    }
    printf("********************\n");
}
// Endless send loop: builds one stats message per client and writes it.
// NOTE(review): it prints "sleep!" but never actually sleeps, so this
// busy-loops at full speed between passes (the k==0 client is throttled
// only by captureCpuRate's internal sleep) — confirm whether a sleep()
// was intended here.
void Connection::connectLoop()
{
while(1)
{
for(int i = 0;i < m_cliNum;i++)
{
memset(m_buff, 0, MAXLINE);
data2Buff(m_buff, MAXLINE, i);
Write(m_sockFdArray[i], m_buff, sizeof(m_buff));
}
printf("sleep!\n");
}
}
sysinfo.h
#ifndef SYSINFO_H
#define SYSINFO_H
#include "commoninc.h"
// Client-side system information sampling (CPU via /proc/stat, memory via
// sysinfo(2)); results cached in static members.
class SysInfo
{
public:
typedef struct cpu_info
{
char name[8];
unsigned long user;
unsigned long nice;
unsigned long system;
unsigned long idle; //idle/wait time
unsigned long irq; //hard-interrupt time
unsigned long iowait;
unsigned long softirq; //soft-interrupt time
}CPUInfo;
static void captureCpuRate(); //sample CPU utilisation (sleeps ~1 s)
static void captureMemory(); //sample memory figures
static int getCpuRate(); //last sampled CPU utilisation
static int getMemoryUse(); //last sampled memory in use
static int getMemoryTotal(); //last sampled total memory
private:
static int m_cpuRate;
static int m_memoryUse;
static int m_memoryTotal;
static unsigned long getCpuTotalTime(CPUInfo *m_cpuinfo);
static void displayCpuInfo(CPUInfo *m_cpuinfo);
static int getCpuInfo(CPUInfo *m_cpuinfo);
static unsigned long getCpuUsageRate();
};
#endif // SYSINFO_H
sysinfo.cpp
#include "sysinfo.h"
// Definitions of SysInfo's static state: written by the capture*()
// methods, served by the get*() accessors.
int SysInfo::m_cpuRate = 0;
int SysInfo::m_memoryTotal = 0;
int SysInfo::m_memoryUse = 0;
// Sum every tick counter in the snapshot to get the total CPU time.
unsigned long SysInfo::getCpuTotalTime(CPUInfo *m_cpuinfo)
{
    unsigned long total = m_cpuinfo->user;
    total += m_cpuinfo->system;
    total += m_cpuinfo->nice;
    total += m_cpuinfo->idle;
    total += m_cpuinfo->iowait;
    total += m_cpuinfo->irq;
    total += m_cpuinfo->softirq;
    return total;
}
// Debug helper: print one /proc/stat snapshot on a single line, in the
// same field order as the file itself.
void SysInfo::displayCpuInfo(CPUInfo *m_cpuinfo)
{
    printf("%s %lu %lu %lu", m_cpuinfo->name, m_cpuinfo->user,
           m_cpuinfo->nice, m_cpuinfo->system);
    printf(" %lu %lu %lu %lu\n", m_cpuinfo->idle, m_cpuinfo->iowait,
           m_cpuinfo->irq, m_cpuinfo->softirq);
}
// Read the aggregate "cpu" line from /proc/stat into *m_cpuinfo.
// Returns 0 on success, -1 if /proc/stat cannot be opened or read.
// FIX: the original never checked fopen()'s result — on failure it
// dereferenced a NULL FILE* in fgets(); fgets() failure was also ignored,
// leaving buf uninitialized for sscanf().
int SysInfo::getCpuInfo(CPUInfo *m_cpuinfo)
{
    FILE *fp = fopen("/proc/stat","r");
    if (fp == NULL)
    {
        perror("open /proc/stat error");
        return -1;
    }
    char buf[1024];
    if (fgets(buf, sizeof(buf), fp) == NULL)
    {
        fclose(fp);
        return -1;
    }
    sscanf(buf,"%s %lu %lu %lu %lu %lu %lu %lu",
           m_cpuinfo->name, &m_cpuinfo->user, &m_cpuinfo->nice, &m_cpuinfo->system,
           &m_cpuinfo->idle, &m_cpuinfo->iowait, &m_cpuinfo->irq, &m_cpuinfo->softirq);
    fclose(fp);
    return 0;
}
// Sample /proc/stat twice, one second apart, and derive the CPU busy
// percentage over that window. Returns the usage in percent (0..100);
// 0 if the interval shows no elapsed ticks.
unsigned long SysInfo::getCpuUsageRate()
{
    CPUInfo cpuInfo1;
    CPUInfo cpuInfo2;
    memset(&cpuInfo1, 0, sizeof(cpuInfo1));
    memset(&cpuInfo2, 0, sizeof(cpuInfo2));
    getCpuInfo(&cpuInfo1);
    sleep(1);
    getCpuInfo(&cpuInfo2);
    // BUG FIX: the original divided by the *absolute* total of the first
    // snapshot instead of the ticks elapsed between the two snapshots,
    // so the reported usage tended toward 100% regardless of actual load.
    unsigned long totalDiff = getCpuTotalTime(&cpuInfo2) - getCpuTotalTime(&cpuInfo1);
    unsigned long idleDiff = cpuInfo2.idle - cpuInfo1.idle;
    if(totalDiff == 0)
    {
        return 0;
    }
    unsigned long usage = (totalDiff - idleDiff)*100 / totalDiff;
    printf("CPU use:%lu\n", usage);
    return usage;
}
// Sample the CPU usage (blocks ~1 second inside getCpuUsageRate) and
// cache the result in m_cpuRate for getCpuRate().
void SysInfo::captureCpuRate()
{
    // FIX: restore the <int> template argument lost in the original —
    // `static_cast(...)` with no type does not compile.
    m_cpuRate = static_cast<int>(getCpuUsageRate());
}
// Sample memory statistics via sysinfo(2) and cache total and in-use
// amounts (in MB) for the getters.
// NOTE(review): totalram/freeram are in units of info.mem_unit; this code
// treats them as bytes, which is only correct when mem_unit == 1 — confirm.
void SysInfo::captureMemory()
{
    struct sysinfo info;
    // FIX: check the syscall result instead of reading an uninitialized
    // struct on failure; also restore the <int> template arguments lost
    // in the original (`static_cast(...)` does not compile).
    if (sysinfo(&info) != 0)
    {
        perror("sysinfo error");
        return;
    }
    m_memoryTotal = static_cast<int>(info.totalram/1024/1024);
    m_memoryUse = static_cast<int>((info.totalram - info.freeram)/1024/1024);
}
// Return the CPU usage percentage cached by captureCpuRate().
int SysInfo::getCpuRate()
{
return m_cpuRate;
}
// Return the in-use memory amount (MB) cached by captureMemory().
int SysInfo::getMemoryUse()
{
return m_memoryUse;
}
// Return the total memory amount (MB) cached by captureMemory().
int SysInfo::getMemoryTotal()
{
return m_memoryTotal;
}
main.cpp
#include
#include "connection.h"
using namespace std;
// Load-test entry point: open 10000 connections to the local server on
// port 8000, then keep sending data on every connection forever
// (connectLoop never returns).
int main()
{
Connection connection("127.0.0.1", 8000);
connection.connectServer(10000);
connection.connectLoop();
return 0;
}
wrap.h
#ifndef WRAP_H
#define WRAP_H
#include "commoninc.h"
// Thin wrappers around POSIX socket/IO calls: each retries on EINTR where
// that makes sense; the Socket/Bind/Connect/Listen/Accept/Close wrappers
// terminate the process via perr_exit() on any other failure.
// Declared extern "C" so C and C++ translation units can share them.
#ifdef __cplusplus
extern "C"
{
#endif
// Print the errno message for s and exit(1).
extern void perr_exit(const char *s);
extern int Accept(int fd, struct sockaddr *sa, socklen_t *salenptr);
extern void Bind(int fd, const struct sockaddr *sa, socklen_t salen);
extern void Connect(int fd, const struct sockaddr *sa, socklen_t salen);
extern void Listen(int fd, int backlog);
extern int Socket(int family, int type, int protocol);
// Read/Write retry on EINTR and return -1 on other errors (no exit).
extern ssize_t Read(int fd, void *ptr, size_t nbytes);
extern ssize_t Write(int fd, const void *ptr, size_t nbytes);
extern void Close(int fd);
// Readline buffers internally via a static helper; see wrap.cpp.
extern ssize_t Readline(int fd, void *vptr, size_t maxlen);
extern ssize_t Readn(int fd, void *vptr, size_t n);
extern ssize_t Writen(int fd, const void *vptr, size_t n);
//static ssize_t my_read(int fd, char *ptr);
#ifdef __cplusplus
}
#endif
#endif // WRAP_H
wrap.cpp
#include "wrap.h"
#ifdef __cplusplus
extern "C"
{
#endif
//static ssize_t my_read(int fd, char *ptr);
// Report the current errno with prefix s, then abort the whole process.
void perr_exit(const char *s)
{
    perror(s);
    exit(1);
}
// accept() wrapper. Retries when the call was interrupted by a signal
// (EINTR) or the pending connection was aborted by the peer
// (ECONNABORTED); any other failure terminates the process.
int Accept(int fd, struct sockaddr *sa, socklen_t *salenptr)
{
    int connfd;
    for (;;)
    {
        connfd = accept(fd, sa, salenptr);
        if (connfd >= 0)
            break;
        if (errno != ECONNABORTED && errno != EINTR)
            perr_exit("accept error");
    }
    return connfd;
}
// bind() wrapper: terminates the process on failure.
void Bind(int fd, const struct sockaddr *sa, socklen_t salen)
{
    int rc = bind(fd, sa, salen);
    if (rc < 0)
        perr_exit("bind error");
}
// connect() wrapper: terminates the process on failure.
void Connect(int fd, const struct sockaddr *sa, socklen_t salen)
{
    int rc = connect(fd, sa, salen);
    if (rc < 0)
        perr_exit("connect error");
}
// listen() wrapper: terminates the process on failure.
void Listen(int fd, int backlog)
{
    int rc = listen(fd, backlog);
    if (rc < 0)
        perr_exit("listen error");
}
// socket() wrapper: terminates the process on failure, otherwise
// returns the new descriptor.
int Socket(int family, int type, int protocol)
{
    int sockfd = socket(family, type, protocol);
    if (sockfd < 0)
        perr_exit("socket error");
    return sockfd;
}
// read() wrapper: transparently restarts the call when interrupted by a
// signal (EINTR); any other error is passed through as -1.
ssize_t Read(int fd, void *ptr, size_t nbytes)
{
    ssize_t nread;
    do
    {
        nread = read(fd, ptr, nbytes);
    } while (nread == -1 && errno == EINTR);
    return nread;
}
// write() wrapper: transparently restarts the call when interrupted by a
// signal (EINTR); any other error is passed through as -1.
ssize_t Write(int fd, const void *ptr, size_t nbytes)
{
    ssize_t nwritten;
    do
    {
        nwritten = write(fd, ptr, nbytes);
    } while (nwritten == -1 && errno == EINTR);
    return nwritten;
}
// close() wrapper: terminates the process on failure.
void Close(int fd)
{
    int rc = close(fd);
    if (rc == -1)
        perr_exit("close error");
}
// Read exactly n bytes from fd unless EOF arrives first. EINTR is
// retried; any other error returns -1. Returns the number of bytes
// actually stored (n, or fewer on early EOF).
ssize_t Readn(int fd, void *vptr, size_t n)
{
    char *dst = (char *)vptr;
    size_t remaining = n;
    while (remaining > 0)
    {
        ssize_t got = read(fd, dst, remaining);
        if (got < 0)
        {
            if (errno != EINTR)
                return -1;
            got = 0; // interrupted before any data: retry
        }
        else if (got == 0)
        {
            break; // EOF
        }
        remaining -= got;
        dst += got;
    }
    return n - remaining;
}
// Write exactly n bytes to fd, restarting after EINTR.
// Returns n on success, -1 on any other error (including write()
// returning 0, which is treated as an error for a byte stream).
ssize_t Writen(int fd, const void *vptr, size_t n)
{
    // FIX: use a const-correct cast — the original C-style `(char *)vptr`
    // silently cast away const from the const void* parameter.
    const char *ptr = static_cast<const char *>(vptr);
    size_t nleft = n;
    while (nleft > 0)
    {
        ssize_t nwritten = write(fd, ptr, nleft);
        if (nwritten <= 0)
        {
            if (nwritten < 0 && errno == EINTR)
                nwritten = 0; // interrupted before any byte was written: retry
            else
                return -1;
        }
        nleft -= nwritten;
        ptr += nwritten;
    }
    return n;
}
// Buffered single-character read: fills a 100-byte static buffer from fd
// when empty and hands out one character per call.
// Returns 1 on success, 0 on EOF, -1 on error (EINTR is retried).
// NOTE(review): the buffer, count and cursor are function-static, so
// leftover data is shared across ALL fds — interleaving Readline() calls
// on two different fds would mix their streams. Confirm only one fd is
// ever read through this helper at a time.
static ssize_t my_read(int fd, char *ptr)
{
static int read_cnt = 0;
static char *read_ptr;
static char read_buf[100];
if (read_cnt <= 0)
{
again:
if ( (read_cnt = read(fd, read_buf, sizeof(read_buf))) < 0)
{
if (errno == EINTR)
{
goto again;
}
return -1;
}
else if (read_cnt == 0)
{
return 0;
}
read_ptr = read_buf;
}
read_cnt--;
*ptr = *read_ptr++;
return 1;
}
// Read one line (up to and including '\n') from fd into vptr and
// NUL-terminate it. At most maxlen-1 characters are stored. Returns the
// number of characters stored (excluding the NUL), 0 on immediate EOF,
// -1 on error. Uses the static-state my_read() helper above.
// NOTE(review): `n < maxlen` compares signed ssize_t against unsigned
// size_t, and maxlen == 0 would still write the terminating NUL byte —
// confirm callers always pass maxlen >= 1.
ssize_t Readline(int fd, void *vptr, size_t maxlen)
{
ssize_t n, rc;
char c, *ptr;
ptr = (char *)vptr;
for (n = 1; n < maxlen; n++)
{
if ( (rc = my_read(fd, &c)) == 1)
{
*ptr++ = c;
if (c == '\n')
break;
}
else if (rc == 0)
{
// EOF before any newline: terminate what we have.
*ptr = 0;
return n - 1;
}
else
return -1;
}
*ptr = 0;
return n;
}
#ifdef __cplusplus
}
#endif