同步 RPC 调用一直会阻塞直到从服务端获得一个应答。
// Synchronous gRPC service: each handler blocks until its reply is produced.
// Marked final: this is a leaf implementation class, not meant to be derived.
class GRPC_SyncServer final : public ModemManager::Service
{
public:
// Unary RPC: fills `reply` with the modem info selected by `request`.
// `override` added so the compiler verifies the signature matches the
// virtual declared in ModemManager::Service.
Status GetInfo(ServerContext* context, const ModemManagerFilterMessage* request, ModemInfoMessage* reply) override;
// Server-streaming RPC: pushes ModemInfoMessage updates through `responder`.
Status GetModemDataStream(ServerContext* context, const ModemManagerFilterMessage* request, ServerWriter<ModemInfoMessage>* responder) override;
// Builds the TLS server, registers this service and blocks in Wait().
void RunServer(void);
};
里面包含要实现的rpc功能接口。
C++11的关键字final有两个用途:一是禁止类被继承,二是禁止虚函数在派生类中被重写。
C++11中允许将类标记为final:直接在类名称后面使用关键字final,继承该类会导致编译错误。
这就是C++中用final禁止类被继承的用法。
GRPC_SyncServer继承ModemManager::Service,乍一看觉得奇怪:ModemManager已被final修饰,怎么还能继承呢?跟进源码一看,原来继承的是ModemManager的内部类Service,而Service这个类并没有用final修饰。
class ModemManager final {
public:
static constexpr char const* service_full_name() {
return "cater.ModemManager";
}
.........
class Service : public ::grpc::Service {
public:
Service();
virtual ~Service();
// sync reads
virtual ::grpc::Status GetInfo(::grpc::ServerContext* context, const ::cater::ModemManagerFilterMessage* request, ::cater::ModemInfoMessage* response);
// stream async updates
virtual ::grpc::Status GetModemDataStream(::grpc::ServerContext* context, const ::cater::ModemManagerFilterMessage* request, ::grpc::ServerWriter< ::cater::ModemInfoMessage>* writer);
};
被constexpr修饰的表达式在编译期求值,其他表达式则在运行期执行。
constexpr关键字的作用
virtual析构函数的作用
当定义一个基类并用于继承时,基类的析构函数必须声明为虚函数;否则通过基类指针删除派生类对象时,不会调用派生类的析构函数,从而造成资源泄漏。
virtual继承作用
当基类的成员函数声明为虚函数时,可以通过基类的指针或引用调用到派生类中重写的该函数(动态绑定)。
C++中Virtual的作用
全局作用域符::的作用:当局部变量与某个全局变量重名时,可在函数内用::前缀访问全局变量,以示区分。
::grpc::Status
C++中双冒号::的作用浅析
ModemManagerFilterMessage和ModemInfoMessage,是根据proto文件定义的message生成的。
针对stream流式rpc需要通过ServerWriter来进行数据更新。
/************************************************************************
**函数:GRPC_SyncServerStart
**功能:运行SyncServer服务
**参数:无
**返回:无
************************************************************************/
void GRPC_SyncServerStart()
{
GRPC_SyncServer server;
server.RunServer();
}
/************************************************************************
** Function: get_file_contents
** Purpose : read an entire file into a string
** Params  : [in] fpath: path of the file to read
** Returns : the file's contents; an empty string if the file cannot
**           be opened or is empty
************************************************************************/
std::string get_file_contents(const char *fpath)
{
std::ifstream input(fpath);
// Stream is closed automatically by the ifstream destructor (RAII);
// the extra parentheses around the first iterator avoid the
// most-vexing-parse ambiguity.
return std::string((std::istreambuf_iterator<char>(input)),
std::istreambuf_iterator<char>());
}
/************************************************************************
**函数:RunServer
**功能:创建并运行grpc服务器
**参数:无
**返回:无
************************************************************************/
void GRPC_SyncServer::RunServer(void) {
std::string server_address("localhost:50051");
GRPC_SyncServer service;
/* 构建并运行支持openssl双向验证服务器 */
auto clientcert = get_file_contents(clientcert_path);
auto servercert = get_file_contents(servercert_path);
auto serverkey = get_file_contents(serverkey_path);
grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp;
pkcp.private_key = serverkey;
pkcp.cert_chain = servercert;
grpc::SslServerCredentialsOptions ssl_opts;
ssl_opts.pem_root_certs = clientcert;
ssl_opts.pem_key_cert_pairs.push_back(pkcp);
std::shared_ptr<grpc::ServerCredentials> creds;
creds = grpc::SslServerCredentials(ssl_opts);
ServerBuilder builder;
builder.AddListeningPort(server_address, creds);
builder.RegisterService(&service);
std::unique_ptr<Server> server(builder.BuildAndStart());
std::cout << "SyncServer listening on " << server_address << std::endl;
server->Wait();
}
构建完ssl证书、绑定好域名端口,即可注册同步服务。
异步rpc服务器,能够在不阻塞当前线程的情况下启动RPC。
文档基本概述
gRPC 的异步操作使用CompletionQueue。 基本工作流如下:
gRPC 官方文档中文版_V1.0 异步基础: C++
// Asynchronous gRPC server: RPCs are driven through a CompletionQueue
// instead of blocking a thread per call; every in-flight RPC is tracked
// by one heap-allocated CallData state machine.
class GRPC_AsyncServer final {
public:
~GRPC_AsyncServer() {
// BUGFIX: server_ and cq_ are only populated by RunServer(); if the
// object is destroyed before RunServer() ran, dereferencing the null
// unique_ptrs here was undefined behavior. Guard both.
if (server_) {
server_->Shutdown();
}
// Always shutdown the completion queue after the server.
if (cq_) {
cq_->Shutdown();
}
}
// Builds the TLS server, then loops in HandleRpcs() forever.
void RunServer(void);
private:
// Class encompassing the state and logic needed to serve a request.
class CallData {
public:
// Take in the "service" instance (in this case representing an asynchronous
// server) and the completion queue "cq" used for asynchronous communication
// with the gRPC runtime.
CallData(ModemManager::AsyncService* service, ServerCompletionQueue* cq)
: service_(service), cq_(cq), responder_(&ctx_), status_(CREATE) {
// Invoke the serving logic right away.
Proceed();
}
// Advances this call's state machine (CREATE -> PROCESS -> FINISH).
void Proceed();
private:
// The means of communication with the gRPC runtime for an asynchronous
// server.
ModemManager::AsyncService* service_;
// The producer-consumer queue for asynchronous server notifications.
ServerCompletionQueue* cq_;
// Context for the rpc, allowing to tweak aspects of it such as the use
// of compression, authentication, as well as to send metadata back to the
// client.
ServerContext ctx_;
// What we get from the client.
ModemManagerFilterMessage request_;
// What we send back to the client.
ModemInfoMessage reply_;
// The means to get back to the client (server-streaming writer).
ServerAsyncWriter<ModemInfoMessage> responder_;
// A tiny state machine with the following states.
enum CallStatus { CREATE, PROCESS, FINISH };
CallStatus status_; // The current serving state.
};
// Drains the completion queue; can be run in multiple threads if needed.
void HandleRpcs();
std::unique_ptr<ServerCompletionQueue> cq_;
ModemManager::AsyncService service_;
std::unique_ptr<Server> server_;
};
创建个完成队列ServerCompletionQueue,并与rpc进行绑定。
通过CallData对象去维护每个RPC 的状态,并且使用这个对象的地址作为调用的唯一标签。
/************************************************************************
**函数:RunServer
**功能:创建并运行grpc服务器
**参数:无
**返回:无
************************************************************************/
void GRPC_AsyncServer::RunServer(void)
{
std::string server_address("localhost:50051");
auto clientcert = get_file_contents(clientcert_path); // for verifying clients
auto servercert = get_file_contents(servercert_path);
auto serverkey = get_file_contents(serverkey_path);
grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp;
pkcp.private_key = serverkey;
pkcp.cert_chain = servercert;
grpc::SslServerCredentialsOptions ssl_opts;
ssl_opts.pem_root_certs = clientcert;
ssl_opts.pem_key_cert_pairs.push_back(pkcp);
std::shared_ptr<grpc::ServerCredentials> creds;
creds = grpc::SslServerCredentials(ssl_opts);
//GRPC_SyncServer sync_service;
ServerBuilder builder;
// Listen on the given address with openssl authentication mechanism.
builder.AddListeningPort(server_address, creds);
// Register "service_" as the instance through which we'll communicate with
// clients. In this case it corresponds to an *asynchronous* service.
builder.RegisterService(&service_);
// Get hold of the completion queue used for the asynchronous communication
// with the gRPC runtime.
cq_ = builder.AddCompletionQueue();
// Finally assemble the server.
server_ = builder.BuildAndStart();
std::cout << "AsyncServer listening on " << server_address << std::endl;
// Proceed to the server's main loop.
HandleRpcs();
}
/************************************************************************
** Function: HandleRpcs
** Purpose : main loop that drains the completion queue
** Params  : none
** Returns : none (returns only once the queue has been shut down)
************************************************************************/
void GRPC_AsyncServer::HandleRpcs() {
// Spawn a new CallData instance to serve new clients.
new CallData(&service_, cq_.get());
void* tag; // uniquely identifies a request.
bool ok;
// Next() blocks until an event is available. It returns false only once
// the queue has been Shutdown() and fully drained.
// BUGFIX: the original wrapped Next() in GPR_ASSERT, which aborted the
// whole process on a clean shutdown; exit the loop instead.
while (cq_->Next(&tag, &ok)) {
// NOTE(review): ok == false means the operation did not complete
// successfully (e.g. the peer went away). Asserting here aborts the
// server on any such failure — confirm whether releasing the CallData
// would be the desired policy instead.
GPR_ASSERT(ok);
// The tag is the memory address of a CallData instance.
static_cast<CallData*>(tag)->Proceed();
}
}
/************************************************************************
**函数:Proceed
**功能:rpc服务实现
**参数:无
**返回:无
************************************************************************/
void GRPC_AsyncServer::CallData::Proceed()
{
if (status_ == CREATE) {
// Make this instance progress to the PROCESS state.
status_ = PROCESS;
// As part of the initial CREATE state, we *request* that the system
// start processing SayHello requests. In this request, "this" acts are
// the tag uniquely identifying the request (so that different CallData
// instances can serve different requests concurrently), in this case
// the memory address of this CallData instance.
service_->RequestGetModemDataStream(&ctx_, &request_, &responder_, cq_, cq_,
this);
} else if (status_ == PROCESS) {
// Spawn a new CallData instance to serve new clients while we process
// the one for this CallData. The instance will deallocate itself as
// part of its FINISH state.
new CallData(service_, cq_);
// The actual processing.
do {
if (request_.modemmanagerfilter() == cater::HARDWARE_INFO) {
char *hard_version_buf = NULL;
hard_version_buf = get_hardware_version();
if (NULL == hard_version_buf) {
FK_TRACE_ERROR("get hardware version fail\n");
break;
}
reply_.mutable_hardwareinfovalue()->set_hardwareversion(hard_version_buf);
std::cout << reply_.mutable_hardwareinfovalue()->hardwareversion() << std::endl;
}
if (request_.modemmanagerfilter() == cater::SOFTWARE_INFO) {
char *soft_version_buf = NULL;
soft_version_buf = get_software_version();
if (NULL == soft_version_buf) {
FK_TRACE_ERROR("get software version fail\n");
break;
}
reply_.mutable_softwareinfovalue()->set_softwareversion(soft_version_buf);
std::cout << reply_.mutable_softwareinfovalue()->softwareversion() << std::endl;
}
} while(0);
responder_.Write(reply_, this);
// And we are done! Let the gRPC runtime know we've finished, using the
// memory address of this instance as the uniquely identifying tag for
// the event.
status_ = FINISH;
responder_.Finish(Status::OK, this);
} else {
GPR_ASSERT(status_ == FINISH);
// Once in the FINISH state, deallocate ourselves (CallData).
delete this;
}
}