This example implements distributed load testing of single GET and POST requests.
The GET path has been verified; the POST path has not been tested yet.
When sizing the thread pool, keep the local machine's resources in mind: allocating too many threads per node can easily crash it.
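As a rough sizing guide, each slave node runs total_count / slave request threads, both values taken from conf/master.conf (listed in full near the end of this post). With the sample values used there:

total_count: 50
slave: 2

each of the two slave nodes therefore launches 50 / 2 = 25 request threads, a load a typical development machine can handle comfortably.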
Remaining issues:
- Parts of the code need refactoring to make the logic clearer.
- Add a proper logging system and remove the std::cerr output.
- Support multi-request scenarios, i.e. passing parameters between requests. For now this is intentionally left unsupported.
The project directory structure is as follows:
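perf_tool/    (layout assembled from the file paths used in the listings below)
  CMakeLists.txt
  chat_client.h
  master.cpp
  slave.cpp
  server.cpp
  utils/
    cfg_reader.h, cfg_reader.cpp
    chat_message.h
    const.h
    guid_gen.h, guid_gen.cpp
    parse_msg.h, parse_msg.cpp
  tasks/
    request.h
    task.h
    task_launcher.h, task_launcher.cpp
    task_reader.h, task_reader.cpp
    task_summary.h, task_summary.cpp
    url_parse.h, url_parse.cpp
  test/
    CMakeLists.txt
    cfg_test.cpp
    guid_test.cpp
    tasks_test.cpp
  conf/
    server.conf
    master.conf
    slave.conf
  cases/
    single_test.json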
The source code is listed below.
CMakeLists.txt
cmake_minimum_required(VERSION 2.6)
if(APPLE)
message(STATUS "This is Apple, do nothing.")
elseif(UNIX)
message(STATUS "This is linux, set CMAKE_PREFIX_PATH.")
set(CMAKE_PREFIX_PATH /vcpkg/ports/cppwork/vcpkg_installed/x64-linux/share)
endif(APPLE)
project(perf_tool)
add_definitions(-std=c++14)
add_definitions(-g)
set(BOOST_DIR /usr/local/Cellar/boost/1.76.0)
find_package(Boost REQUIRED COMPONENTS
system
filesystem
serialization
program_options
coroutine
thread
)
find_package(crossguid REQUIRED)
find_package(ZLIB)
if(APPLE)
MESSAGE(STATUS "This is APPLE, set INCLUDE_DIRS")
set(INCLUDE_DIRS ${Boost_INCLUDE_DIRS} ${CMAKE_CURRENT_SOURCE_DIR}/../../include /usr/local/include ${CMAKE_CURRENT_SOURCE_DIR}/../../ ${CMAKE_CURRENT_SOURCE_DIR}/utils)
elseif(UNIX)
MESSAGE(STATUS "This is linux, set INCLUDE_DIRS")
set(INCLUDE_DIRS ${Boost_INCLUDE_DIRS} /usr/local/include
${CMAKE_CURRENT_SOURCE_DIR}/
${CMAKE_CURRENT_SOURCE_DIR}/../../include ${CMAKE_CURRENT_SOURCE_DIR}/utils)
endif(APPLE)
if(APPLE)
MESSAGE(STATUS "This is APPLE, set LINK_DIRS")
set(LINK_DIRS /usr/local/lib /usr/local/iODBC/lib /opt/snowflake/snowflakeodbc/lib/universal)
elseif(UNIX)
MESSAGE(STATUS "This is linux, set LINK_DIRS")
set(LINK_DIRS ${Boost_INCLUDE_DIRS} /usr/local/lib /vcpkg/ports/cppwork/vcpkg_installed/x64-linux/lib)
endif(APPLE)
if(APPLE)
MESSAGE(STATUS "This is APPLE, set ODBC_LIBS")
set(ODBC_LIBS iodbc iodbcinst)
elseif(UNIX)
MESSAGE(STATUS "This is linux, set LINK_DIRS")
set(ODBC_LIBS odbc odbcinst ltdl)
endif(APPLE)
include_directories(${INCLUDE_DIRS})
LINK_DIRECTORIES(${LINK_DIRS})
set(main_file_list "${CMAKE_CURRENT_SOURCE_DIR}/server.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/master.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/slave.cpp")
file( GLOB APP_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/tasks/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/*.h ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.h ${CMAKE_CURRENT_SOURCE_DIR}/tasks/*.h ${CMAKE_CURRENT_SOURCE_DIR}/../../include/http/impl/*.cpp)
list(REMOVE_ITEM APP_SOURCES ${main_file_list})
foreach( main_file ${main_file_list} )
file(RELATIVE_PATH filename ${CMAKE_CURRENT_SOURCE_DIR} ${main_file})
string(REPLACE ".cpp" "" file ${filename})
add_executable(${file} ${APP_SOURCES} ${main_file})
target_link_libraries(${file} ${Boost_LIBRARIES} ZLIB::ZLIB crossguid pystring)
target_link_libraries(${file} dl ssl crypto pthread)
endforeach( main_file ${main_file_list} )
chat_client.h
#ifndef _FREDRIC_CHAT_CLIENT_H_
#define _FREDRIC_CHAT_CLIENT_H_
#include "chat_message.h"
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <deque>
#include <iostream>
#include <sstream>
#include <string>
#include <thread>
using boost::asio::ip::tcp;
using chat_message_queue = std::deque<chat_message>;
class chat_client {
public:
chat_client(boost::asio::io_service& io_service,
tcp::resolver::iterator endpoint_iterator)
: io_service_(io_service), socket_(io_service), work_(io_service) {
do_connect(endpoint_iterator);
}
void write(const chat_message& msg) {
// write() is called from the main thread, while all I/O runs on the
// io_service thread, so post the work to the io_service;
// that way every socket operation stays on the io_service thread.
io_service_.post([this, msg]() {
bool write_in_progress = !write_msgs_.empty();
write_msgs_.push_back(msg);
if (!write_in_progress) {
do_write();
}
});
}
void close() {
io_service_.post([this]() { socket_.close(); work_.get_io_context().stop(); });
}
protected:
void do_connect(tcp::resolver::iterator endpoint_iterator) {
boost::asio::spawn(
socket_.get_executor(),
[this, endpoint_iterator](boost::asio::yield_context yield) {
boost::system::error_code conn_ec;
boost::asio::async_connect(socket_, endpoint_iterator,
yield[conn_ec]);
if (!conn_ec) {
do_read_header_and_body();
}
});
}
void do_read_header_and_body() {
boost::asio::spawn(
socket_.get_executor(), [this](boost::asio::yield_context yield) {
while (true) {
boost::system::error_code header_ec;
boost::asio::async_read(
socket_,
boost::asio::buffer(read_msg_.data(),
chat_message::header_length),
yield[header_ec]);
if (!header_ec && read_msg_.decode_header()) {
boost::system::error_code body_ec;
// No error and decode_header() succeeded, so we know the body length; read the body.
boost::asio::async_read(
socket_,
boost::asio::buffer(read_msg_.body(),
read_msg_.body_length()),
yield[body_ec]);
bool stop = receive_msg(body_ec);
if(stop) {
close();
break;
}
} else {
// On a read failure, close the connection to the server and leave the read loop.
socket_.close();
break;
}
}
});
}
json to_json() {
std::string buffer(read_msg_.body(),
read_msg_.body() + read_msg_.body_length());
std::stringstream ss(buffer);
auto json_obj = json::parse(ss.str());
return json_obj;
}
virtual bool receive_msg(const boost::system::error_code& ec) = 0;
// Performs the actual send to the server.
void do_write() {
boost::asio::spawn(
socket_.get_executor(), [this](boost::asio::yield_context yield) {
boost::system::error_code ec;
boost::asio::async_write(
socket_,
boost::asio::buffer(write_msgs_.front().data(),
write_msgs_.front().length()),
yield[ec]);
if (!ec) {
// Keep writing until the queue is drained.
write_msgs_.pop_front();
if (!write_msgs_.empty()) {
do_write();
}
} else {
socket_.close();
}
});
}
// Note that these are references: the io_service must outlive the
// chat_client object, otherwise the dangling reference
// leads to undefined behaviour.
boost::asio::io_service& io_service_;
tcp::socket socket_;
chat_message read_msg_;
boost::asio::io_service::work work_;
chat_message_queue write_msgs_;
};
bool parse_and_send_a_message(chat_client& c,
const std::string& input_msg_str) {
chat_message msg;
auto type = 0;
std::string output;
if (parseMessage(input_msg_str, &type, output)) {
msg.setMessage(type, output.data(), output.size());
c.write(msg);
return true;
} else {
std::cerr << "Parse message error!" << std::endl;
return false;
}
}
#endif
master.cpp
#include "chat_client.h"
#include "utils/cfg_reader.h"
#include "tasks/task_reader.h"
#include "tasks/task_summary.h"
int slave_count = 0;
std::map<std::string, std::string> master_cfg {};
class master : public chat_client {
public:
master(boost::asio::io_service& io_service,
tcp::resolver::iterator endpoint_iterator)
: chat_client(io_service, endpoint_iterator) {}
private:
std::vector<std::string> task_result_strs;
bool receive_msg(const boost::system::error_code& ec) {
// On error, return true so the client shuts down.
if (ec) {
socket_.close();
return true;
}
// Check the message type to make sure this really is
// a task-result message from a slave.
if (read_msg_.type() != MT_SEND_TASK_INFO_MSG) {
return false;
}
auto json_obj = to_json();
auto task_result_s = json_obj["information"].get<std::string>();
std::cout << "slave ";
std::cout << json_obj["name"].get<std::string>();
std::cout << " says: ";
std::cout << task_result_s;
std::cout << "\n";
task_result_strs.push_back(task_result_s);
// Not all slaves have reported yet: return false and keep waiting.
++receive_slave_count;
if (receive_slave_count != slave_count) {
return false;
}
// Aggregate the results reported by all slaves.
std::cerr << "Start aggregating the performance test results" << std::endl;
task_summary t_summary{task_result_strs};
std::cerr << "Finished aggregating the performance test results" << std::endl;
std::cerr << "Performance result summary: " << std::endl;
t_summary.dump_summary_results();
close();
return true;
}
int receive_slave_count{0};
};
int main(int argc, char* argv[]) {
try {
bool read_res = cfg_reader::read_file(master_cfg_file_path, master_cfg);
if(!read_res) {
std::cerr << "读取配置文件失败!" << std::endl;
return 1;
}
slave_count = std::atoi(master_cfg["slave"].c_str());
boost::asio::io_service io_service;
tcp::resolver resolver(io_service);
auto endpoint_iterator = resolver.resolve({master_cfg["host"], master_cfg["port"]});
auto c = std::make_shared<master>(io_service, endpoint_iterator);
std::thread t([&io_service]() { io_service.run(); });
auto type = 0;
std::string master_name = master_cfg["name"];
std::string bind_name_cmd = "BindName " + master_name;
auto total_count = std::atoi(master_cfg["total_count"].c_str());
auto thread_count = total_count / slave_count;
auto stress_hold_on_time = std::atoi(master_cfg["stress_hold_on_time"].c_str());
task task_{thread_count, stress_hold_on_time};
task_reader tr {task_};
auto task_str = tr.parse_tasks(cases_file_path);
std::string launch_task_str = "LaunchTask " + task_str;
std::string msgs_[] = {bind_name_cmd, launch_task_str};
for (const auto& msg_str : msgs_) {
parse_and_send_a_message(*c, msg_str);
}
t.join();
} catch (std::exception& ex) {
std::cerr << "Exception: " << ex.what() << std::endl;
}
return 0;
}
slave.cpp
#include "chat_client.h"
#include "utils/cfg_reader.h"
#include "utils/guid_gen.h"
#include "tasks/task_launcher.h"
std::map<std::string, std::string> slave_cfg {};
class slave : public chat_client {
public:
slave(boost::asio::io_service& io_service,
tcp::resolver::iterator endpoint_iterator)
: chat_client(io_service, endpoint_iterator) {}
private:
bool receive_msg(const boost::system::error_code& ec) {
// On error, return true so the client shuts down.
if (ec) {
socket_.close();
return true;
}
// No error but an unexpected message type: keep waiting.
if (read_msg_.type() != MT_LAUNCH_TASK_MSG) {
return false;
}
// No error and the expected message: run the performance test,
// send the results back via a SendTaskInfo message,
// then return true to shut this client down.
auto json_obj = to_json();
std::cout << "master ";
std::cout << json_obj["name"].get();
std::cout << " says: ";
std::cout << json_obj["information"].get();
std::cout << "\n";
std::cerr << "开始做性能测试..." << std::endl;
auto task_info = json_obj["information"].get();
task_launcher launcher {task_info};
launcher.dump_requests();
launcher.run();
auto task_run_results_ = launcher.dump_results();
std::cerr << "结束做性能测试..." << std::endl;
chat_message msg;
auto type = 0;
std::string input("SendTaskInfo ");
input = input + task_run_results_;
std::string output;
if (parseMessage(input, &type, output)) {
msg.setMessage(type, output.data(), output.size());
write(msg);
}
close();
return true;
}
};
int main(int argc, char* argv[]) {
try {
bool read_res = cfg_reader::read_file(slave_cfg_file_path, slave_cfg);
if(!read_res) {
std::cerr << "Read slave config file failed!" << std::endl;
return 1;
}
boost::asio::io_service io_service;
tcp::resolver resolver(io_service);
auto endpoint_iterator = resolver.resolve({slave_cfg["host"], slave_cfg["port"]});
auto c = std::make_shared<slave>(io_service, endpoint_iterator);
std::string slave_name_prefix = slave_cfg["name"];
std::string guid_str = GUIDGen::gen_uuid_str();
std::string slave_name = slave_name_prefix + "_";
slave_name = slave_name + guid_str;
std::string input = "BindName " + slave_name;
parse_and_send_a_message(*c, input);
std::thread t([&io_service]() { io_service.run(); });
t.join();
} catch (std::exception& ex) {
std::cerr << "Exception: " << ex.what() << std::endl;
}
return 0;
}
server.cpp
#include "utils/chat_message.h"
#include "utils/cfg_reader.h"
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <cstdlib>
#include <deque>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <sstream>
#include <string>
using boost::asio::ip::tcp;
using chat_message_queue = std::deque<chat_message>;
class chat_session;
using chat_session_ptr = std::shared_ptr<chat_session>;
std::string master_name = "";
std::map<std::string, std::string> server_cfg {};
// The chat room keeps track of all connected sessions.
class chat_room {
public:
chat_room(boost::asio::io_service& io_service_): m_strand(io_service_) {}
void join(chat_session_ptr);
void leave(chat_session_ptr);
void deliver(const chat_message&);
void deliver_to(const chat_message&, const std::string& paticipant_name);
private:
boost::asio::io_service::strand m_strand;
std::set<chat_session_ptr> participants_;
};
class chat_session : public std::enable_shared_from_this<chat_session> {
public:
chat_session(tcp::socket socket, chat_room& room)
: socket_(std::move(socket)), room_(room){
}
void start() {
room_.join(shared_from_this());
// Read header and body inside a coroutine.
read_header_and_body();
}
void deliver(const chat_message& msg) {
bool write_in_progress = !write_msgs_.empty();
write_msgs_.push_back(msg);
// write_in_progress guards the deque used by do_write so only one write chain runs at a time.
if (!write_in_progress) {
do_write();
}
}
std::string& get_client_name() { return m_name; }
private:
void read_header_and_body() {
auto self(shared_from_this());
boost::asio::spawn(
socket_.get_executor(),
[this, self](boost::asio::yield_context yield) {
while (true) {
boost::system::error_code ec_header;
boost::asio::async_read(
socket_,
boost::asio::buffer(read_msg_.data(),
chat_message::header_length),
yield[ec_header]);
if (!ec_header && read_msg_.decode_header()) {
boost::system::error_code ec_body;
// Read the body here.
boost::asio::async_read(
socket_,
boost::asio::buffer(read_msg_.body(),
read_msg_.body_length()),
yield[ec_body]);
// The body was read successfully, no error.
// Hand the message to the handler, which decides
// which client(s) it is forwarded to.
// room_.deliver(read_msg_);
handleMessage();
} else {
room_.leave(shared_from_this());
break;
}
} else {
room_.leave(shared_from_this());
break;
}
}
});
}
json to_json() {
std::string buffer(read_msg_.body(),
read_msg_.body() + read_msg_.body_length());
std::cout << "raw message server: " << buffer << std::endl;
std::stringstream ss(buffer);
json json_obj;
try {
json_obj = json::parse(ss.str());
} catch (std::exception& ex) {
std::cerr << "解析 json对象 失败!!" << std::endl;
std::cerr << ex.what() << std::endl;
}
return json_obj;
}
// Handles a message received from a client.
void handleMessage() {
// Both master and slave send this to register their name.
if (read_msg_.type() == MT_BIND_NAME) {
auto json_obj = to_json();
m_name = json_obj["name"].get<std::string>();
std::cerr << "Bind Name: " << m_name << std::endl;
// Only the master sends the launch-task message.
} else if (read_msg_.type() == MT_LAUNCH_TASK_MSG) {
master_name = m_name;
std::cerr << "MT_LAUNCH_TASK_MSG: " << std::endl;
std::cerr << "Master name: " << master_name << std::endl;
auto json_obj = to_json();
m_chatInformation = json_obj["information"].get<std::string>();
auto rinfo = buildRoomInfo();
chat_message msg;
msg.setMessage(MT_LAUNCH_TASK_MSG, rinfo);
room_.deliver(msg);
// Every slave sends this message after finishing its performance test.
} else if (read_msg_.type() == MT_SEND_TASK_INFO_MSG) {
std::cerr << "send task info" << std::endl;
std::cerr << "Master name in task info: " << master_name
<< std::endl;
auto json_obj = to_json();
m_chatInformation = json_obj["information"].get<std::string>();
auto rinfo = buildRoomInfo();
chat_message msg;
msg.setMessage(MT_SEND_TASK_INFO_MSG, rinfo);
room_.deliver_to(msg, master_name);
} else {
// Unknown message type: do nothing.
}
}
// Builds a RoomInformation JSON payload.
std::string buildRoomInfo() const {
json msg_body;
msg_body["name"] = m_name;
msg_body["information"] = m_chatInformation;
std::string msg_body_str = msg_body.dump();
std::cout << "Room info: " << msg_body_str << std::endl;
return std::move(msg_body_str);
}
void do_write() {
auto self(shared_from_this());
boost::asio::spawn(
socket_.get_executor(),
[this, self](boost::asio::yield_context yield) {
boost::system::error_code ec;
boost::asio::async_write(
socket_,
boost::asio::buffer(write_msgs_.front().data(),
write_msgs_.front().length()),
yield[ec]);
if (!ec) {
write_msgs_.pop_front();
// If there is more queued, keep writing.
if (!write_msgs_.empty()) {
do_write();
}
} else {
room_.leave(shared_from_this());
}
});
}
tcp::socket socket_;
// The room must outlive the session,
// otherwise this reference dangles.
chat_room& room_;
chat_message read_msg_;
chat_message_queue write_msgs_;
std::string m_name; // client name
std::string m_chatInformation; // the client's latest message payload
};
void chat_room::join(chat_session_ptr participant) {
m_strand.post([this, participant] {
// No need to broadcast message history here.
participants_.insert(participant);
});
}
void chat_room::leave(chat_session_ptr participant) {
m_strand.post([this, participant]{
participants_.erase(participant);
});
}
// Broadcasts a message to all participants.
void chat_room::deliver(const chat_message& msg) {
m_strand.post([this, msg] {
// Send the message to every participant in the room.
for (auto& participant : participants_) {
participant->deliver(msg);
}
});
}
void chat_room::deliver_to(const chat_message& msg,
const std::string& paticipant_name) {
m_strand.post([this, msg, paticipant_name] {
// Deliver the message only to the participant with the given name.
for (auto& participant : participants_) {
if (participant->get_client_name() == paticipant_name) {
participant->deliver(msg);
}
}
});
}
class chat_server {
public:
chat_server(boost::asio::io_service& io_service,
const tcp::endpoint& endpoint)
: acceptor_(io_service, endpoint), socket_(boost::asio::make_strand(io_service)), room_(io_service) {
do_accept();
}
// Accepts incoming client connections.
void do_accept() {
boost::asio::spawn(
socket_.get_executor(), [this](boost::asio::yield_context yield) {
while (true) {
boost::system::error_code ec;
acceptor_.async_accept(socket_, yield[ec]);
if (!ec) {
auto session = std::make_shared<chat_session>(
std::move(socket_), room_);
session->start();
}
}
});
}
private:
tcp::acceptor acceptor_;
tcp::socket socket_;
chat_room room_;
};
int main(int argc, char* argv[]) {
try {
bool read_res = cfg_reader::read_file(server_cfg_file_path, server_cfg);
if(!read_res) {
std::cerr << "Read server config file failed! " << std::endl;
return 1;
}
boost::asio::io_service io_service;
tcp::endpoint endpoint(tcp::v4(), std::atoi(server_cfg["port"].c_str()));
chat_server server{io_service, endpoint};
io_service.run();
} catch (std::exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
}
return 0;
}
utils/cfg_reader.h
#ifndef _FREDRIC_CFG_READER_H_
#define _FREDRIC_CFG_READER_H_
#include <map>
#include <string>
struct cfg_reader {
static bool read_file(const std::string& file_name_,
std::map<std::string, std::string>& cfg_);
};
#endif
utils/cfg_reader.cpp
#include "cfg_reader.h"
#include "pystring.h"
#include <fstream>
#include <iostream>
bool parse_line(const std::string& str, std::pair<std::string, std::string>& pair_) {
auto pos = str.find_first_of(":");
// No ':' found in the line.
if(pos == std::string::npos) {
return false;
}
if(pos == 0) {
return false;
}
auto first_ = str.substr(0, pos);
auto second_ = str.substr(pos + 1);
auto first_t_ = pystring::strip(first_);
auto second_t_ = pystring::strip(second_);
pair_.first = first_t_;
pair_.second = second_t_;
return true;
}
bool cfg_reader::read_file(const std::string& file_name_, std::map<std::string, std::string>& cfg_) {
std::fstream fs {file_name_};
if(!fs.is_open()) {
std::cerr << "open file failed!" << std::endl;
return false;
}
std::string cfg_line;
while(std::getline(fs, cfg_line)) {
std::pair<std::string, std::string> pair_{};
if(parse_line(cfg_line, pair_)) {
cfg_.insert(std::move(pair_));
}
}
fs.close();
return true;
}
utils/chat_message.h
#ifndef _CHAT_MESSAGE_H_
#define _CHAT_MESSAGE_H_
#include "parse_msg.h"
#include "const.h"
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>
class chat_message {
public:
// The header occupies sizeof(Header) bytes (normally 8: two ints).
enum { header_length = sizeof(Header) };
enum { max_body_length = MAX_BODY_LEN};
chat_message() {
data_ = std::shared_ptr<char>(new char[header_length + max_body_length], std::default_delete<char[]>());
}
// The data returned here must not be modified.
const char* data() const { return data_.get(); }
char* data() { return data_.get(); }
// The total length is the header length plus the body size stored in m_header.
std::size_t length() const { return header_length + m_header.bodySize; }
// The body starts header_length bytes after data_.
const char* body() const { return data_.get() + header_length; }
char* body() { return data_.get() + header_length; }
int type() const { return m_header.type; }
std::size_t body_length() const { return m_header.bodySize; }
void setMessage(int messageType, const void* buffer, size_t bufferSize) {
// Make sure the body does not exceed the size limit.
assert(bufferSize < max_body_length);
m_header.bodySize = bufferSize;
m_header.type = messageType;
std::memcpy(body(), buffer, bufferSize);
std::memcpy(data(), &m_header, sizeof(m_header));
char* body_ = body();
std::cerr << "set message body=" << body_ << std::endl;
}
void setMessage(int messageType, const std::string& buffer) {
setMessage(messageType, buffer.data(), buffer.size());
}
bool decode_header() {
std::memcpy(&m_header, data(), header_length);
if(m_header.bodySize > max_body_length) {
std::cout <<"body size: " << m_header.bodySize << " header type:" << m_header.type << std::endl;
return false;
}
return true;
}
private:
std::shared_ptr<char> data_;
Header m_header;
};
#endif
utils/const.h
#ifndef _FREDRIC_CONST_H_
#define _FREDRIC_CONST_H_
#include <string>
const int MIN_TIME_BOUND = 180000;
const int MAX_TIME_BOUND = -1;
const int MAX_BODY_LEN = 655360;
const std::string server_cfg_file_path = "../conf/server.conf";
const std::string master_cfg_file_path = "../conf/master.conf";
const std::string slave_cfg_file_path = "../conf/slave.conf";
const std::string cases_file_path = "../cases/single_test.json";
#endif
utils/guid_gen.h
#ifndef _FREDRIC_GUID_GEN_H_
#define _FREDRIC_GUID_GEN_H_
#include <crossguid/guid.hpp>
#include <string>
struct GUIDGen {
static std::string gen_uuid_str();
static xg::Guid from_str(const std::string& src_str);
static xg::Guid gen_guid();
};
#endif
utils/guid_gen.cpp
#include "utils/guid_gen.h"
std::string GUIDGen::gen_uuid_str() {
return xg::newGuid().str();
}
xg::Guid GUIDGen::from_str(const std::string& src_str) {
xg::Guid g(src_str);
return std::move(g);
}
xg::Guid GUIDGen::gen_guid() {
return xg::newGuid();
}
utils/parse_msg.h
#ifndef _FREDRIC_PARSE_MSG_H_
#define _FREDRIC_PARSE_MSG_H_
#include "json/json.hpp"
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>
using json = nlohmann::json;
struct Header {
int bodySize; // body size in bytes
int type; // message type
};
enum MessageType {
MT_BIND_NAME = 1,
MT_LAUNCH_TASK_MSG = 2,
MT_SEND_TASK_INFO_MSG = 3,
};
bool parseMessage(const std::string& input, int* type, std::string& outbuffer);
#endif
utils/parse_msg.cpp
#include "parse_msg.h"
#include "const.h"
#include <iostream>
// Message parsing function.
// input: the raw command string
// type: out parameter receiving the message type
// outbuffer: out parameter receiving the JSON body to send
bool parseMessage(const std::string& input, int* type, std::string& outbuffer) {
auto pos = input.find_first_of(" ");
// No space found in the message.
if(pos == std::string::npos) {
return false;
}
if(pos == 0) {
return false;
}
auto command = input.substr(0, pos);
// BindName message
if(command == "BindName") {
std::string name = input.substr(pos+1);
if(name.size() > MAX_BODY_LEN) {
std::cerr << "姓名的长度大于" << MAX_BODY_LEN << "个字节!" << std::endl;
return false;
}
if(type) {
*type = MT_BIND_NAME;
}
json msg_body;
msg_body["name"] = name;
outbuffer = msg_body.dump();
return true;
// LaunchTask message
}else if(command == "LaunchTask") {
std::string task = input.substr(pos+1);
if(task.size() > MAX_BODY_LEN) {
std::cerr << "消息的长度大于" << MAX_BODY_LEN << "个字节!" << std::endl;
return false;
}
json msg_body;
msg_body["information"] = task;
outbuffer = msg_body.dump();
if(type) {
*type = MT_LAUNCH_TASK_MSG;
}
return true;
} else if(command == "SendTaskInfo") {
std::string task_res = input.substr(pos+1);
if(task_res.size() > MAX_BODY_LEN) {
std::cerr << "消息的长度大于" << MAX_BODY_LEN << "个字节!" << std::endl;
return false;
}
json msg_body;
msg_body["information"] = task_res;
outbuffer = msg_body.dump();
if(type) {
*type = MT_SEND_TASK_INFO_MSG;
}
return true;
}
// Unsupported message type: return false.
return false;
}
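To make the wire format easier to follow, here is a minimal standalone sketch (not one of the project files, purely illustrative) showing how a command string becomes a framed message, which is exactly what master.cpp and slave.cpp do before writing to the socket: parseMessage() produces the message type and the JSON body, and chat_message::setMessage() prepends the binary Header.

// framing_example.cpp -- illustrative only, not part of the project sources
#include "utils/chat_message.h"
#include "utils/parse_msg.h"
#include <iostream>

int main() {
    int type = 0;
    std::string body;
    // "BindName master" becomes type == MT_BIND_NAME and body {"name":"master"}
    if (parseMessage("BindName master", &type, body)) {
        chat_message msg;
        msg.setMessage(type, body); // writes Header{bodySize, type} followed by the JSON body
        std::cout << "type=" << msg.type()
                  << " body_length=" << msg.body_length()
                  << " total_length=" << msg.length() << std::endl;
    }
    return 0;
}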
tasks/request.h
#ifndef _FREDRIC_REQUEST_H_
#define _FREDRIC_REQUEST_H_
#include "json/json.hpp"
#include <map>
#include <string>
#include <vector>
using json = nlohmann::json;
// NOTE: the original body of this header was not preserved in the post; the
// definitions below are reconstructed from how task_launcher.* and the tests use them.
struct request {
enum class request_method { GET, POST };
std::string url;
request_method req_method;
std::string body;
std::map<std::string, std::string> headers;
};
struct req_result {
int request_time; // request duration in milliseconds
bool is_failed;
};
struct perf_result {
request req_;
int min_request_time;
int max_request_time;
int avg_request_time;
int through_out;
};
struct requests {
std::map<std::string, request> single_reqs_;
};
struct multi_requests {
std::map<std::string, std::vector<request>> multi_reqs_;
};
struct results {
std::map<std::string, perf_result> single_results_;
};
#endif
tasks/task.h
#ifndef _FREDRIC_TASK_H_
#define _FREDRIC_TASK_H_
struct task {
int thread_count{}; // number of request threads on a single node
int stress_hold_on_time{0}; // how many seconds to spread the requests over (0 = fire them all at once)
};
#endif
tasks/task_launcher.h
#ifndef _FREDRIC_TASK_LAUNCHER_H_
#define _FREDRIC_TASK_LAUNCHER_H_
#include "tasks/task.h"
#include "tasks/request.h"
struct task_launcher {
task_launcher(const std::string& task_info);
void run();
std::string dump_results();
void dump_requests();
int get_per_thread_sleep_mills(int thread_count_, int stress_hold_on_time_);
void run_a_request(request& req_value, req_result& req_res);
private:
requests reqs_;
results results_;
multi_requests multi_reqs_; // TODO: multi-request support is deferred; it needs intermediate values to be carried between requests
task task_;
};
#endif
tasks/task_launcher.cpp
#include "tasks/task_launcher.h"
#include <boost/asio/post.hpp>
#include <boost/asio/thread_pool.hpp>
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>
#include "http/http_util.h"
#include "utils/const.h"
#include "tasks/url_parse.h"
using json = nlohmann::json;
task_launcher::task_launcher(const std::string& task_info) {
auto task_info_js = json::parse(task_info);
auto thread_count_ = task_info_js["thread_count"].get<int>();
auto stress_hold_on_time_ = task_info_js["stress_hold_on_time"].get<int>();
task_.thread_count = thread_count_;
task_.stress_hold_on_time = stress_hold_on_time_;
auto requests_ = task_info_js["requests"];
if (requests_.find("single") != requests_.end()) {
auto single_reqs_js = requests_["single"];
for (auto &&req_it = single_reqs_js.begin(), end = single_reqs_js.end();
req_it != end; ++req_it) {
auto key = req_it.key();
auto value = req_it.value();
auto method_str = value["method"].get<std::string>();
request::request_method method_;
if (method_str == "GET") {
method_ = request::request_method::GET;
} else if (method_str == "POST") {
method_ = request::request_method::POST;
} else {
throw std::runtime_error("Not supported request method type!");
}
auto url = value["url"].get<std::string>();
auto body = value["body"].get<std::string>();
auto headers =
value["headers"].get<std::map<std::string, std::string>>();
request tmp_req_{url, method_, body, headers};
reqs_.single_reqs_[key] = tmp_req_;
}
}
if (requests_.find("multiple") != requests_.end()) {
// TODO: Add multiple requests parse logic
}
}
int task_launcher::get_per_thread_sleep_mills(int thread_count_,
int stress_hold_on_time_) {
if (stress_hold_on_time_ == 0) {
return 0;
}
int per_thread_sleep_time_ =
(int)(((float)(stress_hold_on_time_ * 1000) / (float)thread_count_));
return per_thread_sleep_time_;
}
void task_launcher::run_a_request(request& req_value, req_result& req_res) {
auto url = req_value.url;
auto method = req_value.req_method;
auto body = req_value.body;
auto headers = req_value.headers;
std::string res{};
url_parse parse_{url};
parse_.parse_host_and_path();
auto host = parse_.get_host();
auto path = parse_.get_path();
if(path == "") {
path = "/";
}
auto start = std::chrono::system_clock::now();
bool r_res {false};
if (method == request::request_method::GET) {
r_res = HttpUtil::get_str(host, path, headers, res);
} else if(method == request::request_method::POST) {
r_res = HttpUtil::post_and_get_str(host, path, headers, body, res);
}
auto end = std::chrono::system_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
req_res.request_time = duration;
req_res.is_failed = (!r_res);
}
void task_launcher::run() {
auto thread_count_ = (std::size_t)task_.thread_count;
auto stress_hold_on_time_ = task_.stress_hold_on_time;
std::cerr << "Stress hold on time: " << stress_hold_on_time_ << std::endl;
auto sleep_mill_secs =
get_per_thread_sleep_mills(thread_count_, stress_hold_on_time_);
std::cerr << "Sleep milliseconds: " << sleep_mill_secs << std::endl;
std::vector<req_result> req_results;
// For each configured single request, post thread_count_ jobs to a thread pool,
// optionally spacing them out by sleep_mill_secs.
for (auto&& request_item : reqs_.single_reqs_) {
auto req_name = request_item.first;
auto req_value = request_item.second;
req_results.clear();
req_results.resize(thread_count_);
boost::asio::thread_pool pool(thread_count_);
auto start = std::chrono::system_clock::now();
for (std::size_t i = 0; i < thread_count_; ++i) {
if (sleep_mill_secs > 0) {
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_mill_secs));
}
boost::asio::post(pool, [this, &req_value, &req_results, i](){
run_a_request(req_value, req_results[i]);
});
}
pool.join();
auto end = std::chrono::system_clock::now();
int min_time{MIN_TIME_BOUND};
int max_time{MAX_TIME_BOUND};
int avg_time{0};
int through_out{0};
int total_success_count {0};
for(auto&& req_res: req_results) {
if(!req_res.is_failed) {
++total_success_count;
if(min_time > req_res.request_time) {
min_time = req_res.request_time;
}
if(max_time < req_res.request_time) {
max_time = req_res.request_time;
}
avg_time += req_res.request_time;
}
}
if(total_success_count > 0) {
avg_time = (int)((float)avg_time/(float)total_success_count);
}
if(min_time == MIN_TIME_BOUND || max_time == MAX_TIME_BOUND) {
std::cerr << "All requests failed for request: [" << req_name << "]" << std::endl;
}
auto total_time_ = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
through_out = (int)((double)total_success_count/((double)total_time_/(double)1000));
perf_result p_result{req_value, min_time, max_time, avg_time, through_out};
results_.single_results_[req_name] = p_result;
}
// TODO: Handle multiple requests
for (auto& multi_req_ : multi_reqs_.multi_reqs_) {
}
}
// results object to json method
void to_json(json& j, const results& p) {
json single_res_j{};
for (auto&& item : p.single_results_) {
auto request_name = item.first;
auto perf_value = item.second;
single_res_j[request_name]["url"] = perf_value.req_.url;
if (perf_value.req_.req_method == request::request_method::GET) {
single_res_j[request_name]["method"] = "GET";
} else if (perf_value.req_.req_method ==
request::request_method::POST) {
single_res_j[request_name]["method"] = "POST";
}
single_res_j[request_name]["body"] = perf_value.req_.body;
single_res_j[request_name]["headers"] = perf_value.req_.headers;
single_res_j[request_name]["min_request_time"] =
perf_value.min_request_time;
single_res_j[request_name]["max_request_time"] =
perf_value.max_request_time;
single_res_j[request_name]["avg_request_time"] =
perf_value.avg_request_time;
single_res_j[request_name]["through_out"] = perf_value.through_out;
}
j["requests"]["single"] = single_res_j;
}
// This one will be used in client task sender.
std::string task_launcher::dump_results() {
json j = results_;
return j.dump();
}
void task_launcher::dump_requests() {
std::stringstream ss;
if (reqs_.single_reqs_.size() != 0) {
ss << "Single Requests: " << std::endl;
for (auto&& request_item : reqs_.single_reqs_) {
auto req_name = request_item.first;
auto req_value = request_item.second;
ss << "Request Name: " << req_name << std::endl;
ss << "URL: " << req_value.url << std::endl;
if (req_value.req_method == request::request_method::GET) {
ss << "Method: "
<< "GET" << std::endl;
} else if (req_value.req_method == request::request_method::POST) {
ss << "Method: "
<< "POST" << std::endl;
}
ss << "Body: " << req_value.body << std::endl;
ss << "Headers: " << std::endl;
for (auto&& head_item : req_value.headers) {
ss << " " << head_item.first << " " << head_item.second
<< std::endl;
}
ss << "Thread Count: " << task_.thread_count << std::endl;
ss << "Stress hold on time: " << task_.stress_hold_on_time << std::endl;
}
}
if (multi_reqs_.multi_reqs_.size() != 0) {
ss << "Multiple Requests: " << std::endl;
// TODO: Add multiple requests dump logic
}
std::cout << ss.str();
}
tasks/task_reader.h
#ifndef _FREDRIC_TASK_HANDLER_H_
#define _FREDRIC_TASK_HANDLER_H_
#include "tasks/task.h"
#include <string>
struct task_reader {
task_reader(task t_);
std::string parse_tasks(const std::string& task_file_path);
private:
task task_;
};
#endif
tasks/task_reader.cpp
#include "tasks/task_reader.h"
#include "json/json.hpp"
#include <fstream>
#include <iostream>
#include <sstream>
using json = nlohmann::json;
task_reader::task_reader(task t_): task_(t_) {
}
std::string task_reader::parse_tasks(const std::string& task_file_path) {
std::fstream fs(task_file_path);
if(!fs.is_open()) {
std::cerr << "Open file [" << task_file_path << "] failed" << std::endl;
return "";
}
std::stringstream buffer;
buffer << fs.rdbuf();
std::string cases_str = buffer.str();
auto case_js = json::parse(cases_str);
case_js["thread_count"] = task_.thread_count;
case_js["stress_hold_on_time"] = task_.stress_hold_on_time;
return case_js.dump();
}
tasks/task_summary.h
#ifndef _FREDRIC_TASK_SUMMARY_H_
#define _FREDRIC_TASK_SUMMARY_H_
#include "tasks/request.h"
#include <string>
#include <vector>
struct task_summary {
task_summary(const std::vector<std::string>& task_infos);
void dump_summary_results();
private:
std::vector<json> all_results_;
json summary_j {};
};
#endif
tasks/task_summary.cpp
#include "tasks/task_summary.h"
#include "utils/const.h"
#include <iostream>
task_summary::task_summary(const std::vector<std::string>& task_infos) {
json results_j {};
for(auto&& task_info: task_infos) {
auto task_info_js = json::parse(task_info);
all_results_.push_back(task_info_js);
}
auto i = 0;
int min_req_time = MIN_TIME_BOUND;
int max_req_time = MAX_TIME_BOUND;
for(auto&& task_info_js: all_results_) {
auto requests_js = task_info_js["requests"]["single"];
for(auto &&req_it = requests_js.begin(), end = requests_js.end();
req_it != end; ++req_it) {
auto req_name = req_it.key();
auto req_value = req_it.value();
if(req_value["min_request_time"].get() < min_req_time) {
min_req_time = req_value["min_request_time"].get();
results_j[req_name]["min_request_time"] = min_req_time;
}
if(req_value["max_request_time"].get() > max_req_time) {
max_req_time = req_value["max_request_time"].get();
results_j[req_name]["max_request_time"] = max_req_time;
}
// First slave report: initialise results_j with this request's values.
if(i == 0) {
results_j[req_name]["url"] = req_value["url"].get<std::string>();
results_j[req_name]["method"] = req_value["method"].get<std::string>();
results_j[req_name]["body"] = req_value["body"].get<std::string>();
results_j[req_name]["headers"] = req_value["headers"].get<std::map<std::string, std::string>>();
results_j[req_name]["avg_request_time"] = req_value["avg_request_time"].get<int>();
results_j[req_name]["through_out"] = req_value["through_out"].get<int>();
// Subsequent slave reports: accumulate into the existing entries.
} else {
auto old_avg = results_j[req_name]["avg_request_time"].get<int>();
results_j[req_name]["avg_request_time"] = old_avg + req_value["avg_request_time"].get<int>();
auto old_through_out = results_j[req_name]["through_out"].get<int>();
results_j[req_name]["through_out"] = old_through_out + req_value["through_out"].get<int>();
}
}
++i;
}
for(auto &&req_it = results_j.begin(), end = results_j.end();
req_it != end; ++req_it) {
auto req_name = req_it.key();
auto avg_req_time = results_j[req_name]["avg_request_time"].get<int>();
results_j[req_name]["avg_request_time"] = avg_req_time/i;
}
summary_j["requests"]["single"] = results_j;
}
void task_summary::dump_summary_results() {
std::string res = summary_j.dump();
std::cerr << res << std::endl;
}
tasks/url_parse.h
#ifndef _FREDRIC_URL_PARSE_H_
#define _FREDRIC_URL_PARSE_H_
#include <string>
// URL parser: splits a URL into a host part and a path part.
struct url_parse {
url_parse(const std::string& url);
void parse_host_and_path();
std::string get_host() const;
std::string get_path() const;
private:
std::string url_;
std::string host_;
std::string path_;
};
#endif
tasks/url_parse.cpp
#include "tasks/url_parse.h"
url_parse::url_parse(const std::string& url): url_(url) {
}
void url_parse::parse_host_and_path() {
auto host_index = this->url_.find("://");
host_index = host_index + 3;
auto host_and_path_str = this->url_.substr(host_index);
auto host_end_index = host_and_path_str.find_first_of('/');
if(host_end_index == std::string::npos) {
host_ = host_and_path_str;
path_ = "";
return;
}
host_ = host_and_path_str.substr(0, host_end_index);
path_ = host_and_path_str.substr(host_end_index);
}
std::string url_parse::get_host() const {
return host_;
}
std::string url_parse::get_path() const {
return path_;
}
test/CMakeLists.txt
cmake_minimum_required(VERSION 2.6)
if(APPLE)
message(STATUS "This is Apple, set boost dir.")
set(BOOST_DIR /usr/local/Cellar/boost/1.76.0)
elseif(UNIX)
message(STATUS "This is linux, set CMAKE_PREFIX_PATH.")
set(CMAKE_PREFIX_PATH /vcpkg/ports/cppwork/vcpkg_installed/x64-linux/share)
endif(APPLE)
project(yaml)
add_definitions(-std=c++14)
add_definitions(-g)
find_package(ZLIB)
find_package(OpenCV REQUIRED )
find_package(crossguid REQUIRED)
find_package(Boost REQUIRED COMPONENTS
system
filesystem
serialization
program_options
thread
coroutine
)
if(APPLE)
MESSAGE(STATUS "This is APPLE, set INCLUDE_DIRS")
set(INCLUDE_DIRS ${Boost_INCLUDE_DIRS} /usr/local/include /usr/local/iODBC/include /opt/snowflake/snowflakeodbc/include/ ${CMAKE_CURRENT_SOURCE_DIR}/../ ${CMAKE_CURRENT_SOURCE_DIR}/../../../include)
elseif(UNIX)
MESSAGE(STATUS "This is linux, set INCLUDE_DIRS")
set(INCLUDE_DIRS ${Boost_INCLUDE_DIRS} /usr/local/include ${CMAKE_CURRENT_SOURCE_DIR}/../../ ${CMAKE_CURRENT_SOURCE_DIR}/../../../include ${CMAKE_CURRENT_SOURCE_DIR}/../)
endif(APPLE)
if(APPLE)
MESSAGE(STATUS "This is APPLE, set LINK_DIRS")
set(LINK_DIRS /usr/local/lib /usr/local/iODBC/lib /opt/snowflake/snowflakeodbc/lib/universal)
elseif(UNIX)
MESSAGE(STATUS "This is linux, set LINK_DIRS")
set(LINK_DIRS ${Boost_INCLUDE_DIRS} /usr/local/lib /vcpkg/ports/cppwork/vcpkg_installed/x64-linux/lib)
endif(APPLE)
if(APPLE)
MESSAGE(STATUS "This is APPLE, set ODBC_LIBS")
set(ODBC_LIBS iodbc iodbcinst)
elseif(UNIX)
MESSAGE(STATUS "This is linux, set LINK_DIRS")
set(ODBC_LIBS odbc odbcinst ltdl)
endif(APPLE)
include_directories(${INCLUDE_DIRS})
LINK_DIRECTORIES(${LINK_DIRS})
set(test_file_list ${CMAKE_CURRENT_SOURCE_DIR}/cfg_test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/guid_test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/tasks_test.cpp)
file( GLOB APP_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/../utils/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../utils/*.h ${CMAKE_CURRENT_SOURCE_DIR}/*.h ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../../include/http/impl/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../../include/img_util/impl/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../tasks/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../tasks/*.h)
foreach( sourcefile ${test_file_list} )
file(RELATIVE_PATH filename ${CMAKE_CURRENT_SOURCE_DIR} ${sourcefile})
string(REPLACE ".cpp" "" file ${filename})
add_executable(${file} ${APP_SOURCES})
target_link_libraries(${file} ${Boost_LIBRARIES} ZLIB::ZLIB ${OpenCV_LIBS})
target_link_libraries(${file} dl ssl crypto libgtest.a libgtest_main.a pystring libyaml-cpp.a libgmock.a crossguid ${ODBC_LIBS} libnanodbc.a pthread)
endforeach( sourcefile ${test_file_list})
test/cfg_test.cpp
#include "utils/cfg_reader.h"
#include <gtest/gtest.h>
GTEST_TEST(CfgTests, CfgReaderTest) {
std::map<std::string, std::string> cfg_;
auto res = cfg_reader::read_file("../../conf/master.conf", cfg_);
ASSERT_TRUE(res);
ASSERT_EQ(6, cfg_.size());
}
test/guid_test.cpp
#include "utils/guid_gen.h"
#include <gtest/gtest.h>
GTEST_TEST(GuidTests, GenUUIDStr) {
auto uuid_str = GUIDGen::gen_uuid_str();
std::cerr << uuid_str << std::endl;
ASSERT_NE("", uuid_str);
}
test/tasks_test.cpp
#include "tasks/task_reader.h"
#include "tasks/task_launcher.h"
#include "tasks/url_parse.h"
#include <gtest/gtest.h>
std::string task_file_path = "../../cases/single_test.json";
GTEST_TEST(TasksTests, ReaderTest) {
task t {10, 5};
auto reader = task_reader(t);
auto task_res_str = reader.parse_tasks(task_file_path);
ASSERT_NE("", task_res_str);
}
GTEST_TEST(TasksTests, LauncherDumpRequests) {
auto task_info = R"({"requests":{"single":{"BingRequest":{"body":"","headers":{"Content-Type":"text/html"},"method":"GET","url":"https://cn.bing.com"}}},"stress_hold_on_time":30,"thread_count":50})";
task_launcher launcher {task_info};
launcher.dump_requests();
ASSERT_TRUE(true);
}
GTEST_TEST(TasksTests, UrlParseTest) {
url_parse parse_ {"https://cn.bing.com"};
parse_.parse_host_and_path();
std::cerr << "URL: " << parse_.get_host() << std::endl;
std::cerr << "Path: " << parse_.get_path() << std::endl;
ASSERT_EQ("cn.bing.com", parse_.get_host());
ASSERT_EQ("", parse_.get_path());
url_parse parse1_ {"https://www.baidu.com/"};
parse1_.parse_host_and_path();
std::cerr << "URL: " << parse1_.get_host() << std::endl;
std::cerr << "Path: " << parse1_.get_path() << std::endl;
ASSERT_EQ("www.baidu.com", parse1_.get_host());
ASSERT_EQ("/", parse1_.get_path());
url_parse parse2_ {"https://www.appannie.com/v2/query"};
parse2_.parse_host_and_path();
std::cerr << "URL: " << parse2_.get_host() << std::endl;
std::cerr << "Path: " << parse2_.get_path() << std::endl;
ASSERT_EQ("www.appannie.com", parse2_.get_host());
ASSERT_EQ("/v2/query", parse2_.get_path());
}
GTEST_TEST(TasksTests, GetPerThreadSleepMills) {
auto task_info = R"({"requests":{"single":{"BingRequest":{"body":"","headers":{"Content-Type":"text/html"},"method":"GET","url":"https://cn.bing.com"}}},"stress_hold_on_time":30,"thread_count":50})";
task_launcher launcher_{task_info};
auto per_mills = launcher_.get_per_thread_sleep_mills(30, 5);
ASSERT_EQ(166, per_mills);
auto per_mills_zero = launcher_.get_per_thread_sleep_mills(30, 0);
ASSERT_EQ(0, per_mills_zero);
}
GTEST_TEST(TasksTests, TestRunOneRequest) {
auto task_info = R"({"requests":{"single":{"BingRequest":{"body":"","headers":{"Content-Type":"text/html"},"method":"GET","url":"https://cn.bing.com"}}},"stress_hold_on_time":30,"thread_count":50})";
task_launcher launcher_{task_info};
request req_value {"https://cn.bing.com", request::request_method::GET, "", {{"Content-Type", "text/html"}}};
req_result req_res{0, false};
launcher_.run_a_request(req_value, req_res);
ASSERT_TRUE(!req_res.is_failed);
}
GTEST_TEST(TasksTests, TestRunAndGetResults) {
auto task_info = R"({"requests":{"single":{"BingRequest":{"body":"","headers":{"Content-Type":"text/html"},"method":"GET","url":"https://cn.bing.com"}}},"stress_hold_on_time":0,"thread_count":50})";
task_launcher launcher_{task_info};
launcher_.dump_requests();
launcher_.run();
auto results_str = launcher_.dump_results();
std::cerr << results_str << std::endl;
ASSERT_TRUE(results_str.find("single") != std::string::npos);
}
conf/master.conf
name: master
host: localhost
port: 9099
total_count: 50
slave: 2
stress_hold_on_time: 0
conf/server.conf
port: 9099
conf/slave.conf
name: node
host: localhost
port: 9099
cases/single_test.json
{
"requests": {
"single": {
"BingRequest": {
"url": "https://cn.bing.com",
"method": "GET",
"body": "",
"headers": {
"Content-Type": "text/html"
}
}
}
}
}
The program output is as follows: