/*
Walks the deploy prototxt, adding each layer (or Plugin/PluginExt/PluginV2) to the network
while recording every layer's output tensors in the mBlobNameToTensor map.
If construction succeeds, the function returns mBlobNameToTensor; otherwise it returns nullptr.
*/
const IBlobNameToTensor* CaffeParser::parse(INetworkDefinition& network,
DataType weightType,
bool hasModel)
{
bool ok = true;
/*
The mModel used here was already set up by this overload's caller:
parse(const char*, const char*, INetworkDefinition&, DataType)
*/
/*
CaffeWeightFactory::CaffeWeightFactory
Defined in TensorRT/parsers/caffe/caffeWeightFactory/caffeWeightFactory.cpp
CaffeWeightFactory::CaffeWeightFactory(const trtcaffe::NetParameter& msg, DataType dataType, std::vector<void*>& tmpAllocs, bool isInitialized)
This class fetches, type-converts, or randomly generates the network's weights (nvinfer1::Weights).
*/
CaffeWeightFactory weights(*mModel.get(), weightType, mTmpAllocs, hasModel);
/*
class BlobNameToTensor
Defined in TensorRT/parsers/caffe/blobNameToTensor.h
Its core is the mMap dictionary, which maps a blob name to an ITensor*.
*/
mBlobNameToTensor = new (BlobNameToTensor);
// Get list of all available plugin creators
int numCreators = 0;
/*
getPluginRegistry
Declared in TensorRT/include/NvInferRuntimeCommon.h
extern "C" TENSORRTAPI nvinfer1::IPluginRegistry* getPluginRegistry();
Returns the global plugin registry.
*/
/*
IPluginRegistry::getPluginCreatorList
Declared in TensorRT/include/NvInferRuntimeCommon.h
virtual IPluginCreator* const* getPluginCreatorList(int* numCreators) const noexcept = 0;
Returns all registered plugin creators and writes their count to numCreators;
returns nullptr if there are none.
*/
/*
Walk the plugin creator list and populate mPluginRegistry,
which maps a plugin name to its nvinfer1::IPluginCreator*.
*/
nvinfer1::IPluginCreator* const* tmpList = getPluginRegistry()->getPluginCreatorList(&numCreators);
// numCreators has been filled in by this point
for (int k = 0; k < numCreators; ++k)
{
if (!tmpList[k])
{
std::cout << "Plugin Creator for plugin " << k << " is a nullptr." << std::endl;
continue;
}
/*
IPluginCreator::getPluginName
Declared in TensorRT/include/NvInferRuntimeCommon.h
virtual const char* getPluginName() const TRTNOEXCEPT = 0;
Return the plugin name
*/
std::string pluginName = tmpList[k]->getPluginName();
// Store this nvinfer1::IPluginCreator* in the mPluginRegistry map
mPluginRegistry[pluginName] = tmpList[k];
}
/*
The mDeploy used here was already set up by this overload's caller:
parse(const char*, const char*, INetworkDefinition&, DataType)
*/
// Parse mDeploy's inputs, add each one as a network input, and record the resulting ITensor in mBlobNameToTensor
for (int i = 0; i < mDeploy->input_size(); i++)
{
Dims dims;
/*
INetworkDefinition::hasImplicitBatchDimension
Declared in TensorRT/include/NvInfer.h
virtual bool hasImplicitBatchDimension() const TRTNOEXCEPT = 0;
Query whether the network was created with an implicit batch dimension.
i.e. checks whether the batch dimension is implicit (left out of the tensor shapes) rather than explicit
*/
if (network.hasImplicitBatchDimension())
{
if (mDeploy->input_shape_size())
{
dims = DimsCHW{(int) mDeploy->input_shape().Get(i).dim().Get(1), (int) mDeploy->input_shape().Get(i).dim().Get(2), (int) mDeploy->input_shape().Get(i).dim().Get(3)};
}
else
{
// Deprecated, but still used in a lot of networks
// The deprecated input_dim form is a flat list of 4 values (N, C, H, W) per input,
// so input i's C/H/W live at indices i*4+1 .. i*4+3
dims = DimsCHW{(int) mDeploy->input_dim().Get(i * 4 + 1), (int) mDeploy->input_dim().Get(i * 4 + 2), (int) mDeploy->input_dim().Get(i * 4 + 3)};
}
}
else
{
std::cout << "Warning, setting batch size to 1. Update the dimension after parsing due to using explicit batch size." << std::endl;
if (mDeploy->input_shape_size())
{
// Explicitly set the batch dimension to 1
dims = DimsNCHW{1, (int) mDeploy->input_shape().Get(i).dim().Get(1), (int) mDeploy->input_shape().Get(i).dim().Get(2), (int) mDeploy->input_shape().Get(i).dim().Get(3)};
}
else
{
// Deprecated, but still used in a lot of networks
dims = DimsNCHW{1, (int) mDeploy->input_dim().Get(i * 4 + 1), (int) mDeploy->input_dim().Get(i * 4 + 2), (int) mDeploy->input_dim().Get(i * 4 + 3)};
}
}
/*
INetworkDefinition::addInput
Declared in TensorRT/include/NvInfer.h
virtual ITensor* addInput(const char* name, DataType type, Dims dimensions) TRTNOEXCEPT = 0;
For 2D images the last three dimensions are always {C,H,W}.
For 3D images the last four dimensions are always {C,D,H,W}.
return The new tensor or nullptr if there is an error.
*/
ITensor* tensor = network.addInput(mDeploy->input().Get(i).c_str(), DataType::kFLOAT, dims);
// Record the input tensor in the mBlobNameToTensor map
(*mBlobNameToTensor)[mDeploy->input().Get(i)] = tensor;
}
// Parse the remaining (non-input) layers of mDeploy, creating ITensors for each layer's outputs and recording them in mBlobNameToTensor
for (int i = 0; i < mDeploy->layer_size() && ok; i++)
{
const trtcaffe::LayerParameter& layerMsg = mDeploy->layer(i);
/*
trtcaffe::TEST
enum Phase {
TRAIN = 0;
TEST = 1;
}
*/
// Layers explicitly marked with phase TEST are skipped
if (layerMsg.has_phase() && layerMsg.phase() == trtcaffe::TEST)
{
continue;
}
// If there is an in-place operation (a blob that is both a bottom and a top of this layer)
// and it modifies a network input, emit an error:
// check every (top, bottom) pair, and if an in-place blob is a network input, set ok to false
for (int j = 0; ok && j < layerMsg.top_size(); ++j)
{
for (int k = 0; ok && k < layerMsg.bottom_size(); ++k)
{
// The layer's k-th bottom names the same blob as its j-th top, i.e. an in-place operation
if (layerMsg.top().Get(j) == layerMsg.bottom().Get(k))
{
auto iter = mBlobNameToTensor->find(layerMsg.top().Get(j).c_str());
// The in-place blob is a network input
/*
ITensor::isNetworkInput
Defined in TensorRT/include/NvInfer.h
Checks whether this tensor is a network input.
*/
if (iter != nullptr && iter->isNetworkInput())
{
ok = false;
std::cout << "TensorRT does not support in-place operations on input tensors in a prototxt file." << std::endl;
}
}
}
}
// If there is a pluginFactory provided, use layer name matching to handle the plugin construction
/*
If this layer is handled by the user-supplied IPluginFactory, add it to the network
as a Plugin (or PluginExt) and record its output tensors in the mBlobNameToTensor map.
*/
if (mPluginFactory && mPluginFactory->isPlugin(layerMsg.name().c_str()))
{
/*
Weights
Defined in TensorRT/include/NvInferRuntime.h
An array of weights used as a layer parameter.
*/
/*
getAllWeights
Defined in TensorRT/parsers/caffe/caffeWeightFactory/caffeWeightFactory.h
std::vector<nvinfer1::Weights> CaffeWeightFactory::getAllWeights(const std::string& layerName);
Converts every blob of the given layer to DataType::kFLOAT weights, collects them in a vector, and returns it.
*/
std::vector<Weights> w = weights.getAllWeights(layerMsg.name());
/*
class IPlugin
Defined in TensorRT/include/NvInferRuntime.h
Plugin class for user-implemented layers.
Plugins are a mechanism for applications to implement custom layers. Each plugin is owned by the application, and its lifetime
must span any use of it by TensorRT
*/
/*
IPluginFactory::createPlugin
Declared in TensorRT/include/NvCaffeParser.h
virtual nvinfer1::IPlugin* createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights) TRTNOEXCEPT = 0;
Creates a plugin.
*/
/*
std::vector::empty
bool empty() const noexcept;
Test whether vector is empty
*/
IPlugin* plugin = mPluginFactory->createPlugin(layerMsg.name().c_str(), w.empty() ? nullptr : &w[0], w.size());
/*
ITensor
Defined in TensorRT/include/NvInfer.h
A tensor in a network definition.
*/
// Gather this layer's input tensors
std::vector<ITensor*> inputs;
for (int i = 0, n = layerMsg.bottom_size(); i < n; i++)
{
/*
Each input tensor of this layer must be either a network input or the output of an
earlier layer, so it is guaranteed to already be present in the mBlobNameToTensor map.
*/
inputs.push_back((*mBlobNameToTensor)[layerMsg.bottom(i)]);
}
/*
nvcaffeparser1::IPluginFactoryExt
Defined in TensorRT/include/NvCaffeParser.h
A subclass of IPluginFactory.
Plugin factory used to configure plugins with added support for TRT versioning
*/
/*
nvcaffeparser1::IPluginFactoryExt::isPluginExt
A user implemented function that determines if a layer configuration is provided by an IPluginExt.
*/
// Cast mPluginFactory from IPluginFactory* down to IPluginFactoryExt*.
// The static_cast downcast is safe here: thanks to short-circuit evaluation it only
// happens when mPluginFactoryIsExt is true, i.e. when the factory really is an IPluginFactoryExt.
bool isExt = mPluginFactoryIsExt && static_cast<IPluginFactoryExt*>(mPluginFactory)->isPluginExt(layerMsg.name().c_str());
/*
nvinfer1::ILayer
Defined in TensorRT/include/NvInfer.h
Base class for all layer classes in a network definition.
*/
/*
nvinfer1::INetworkDefinition::addPlugin
Declared in TensorRT/include/NvInfer.h
TRT_DEPRECATED virtual IPluginLayer* addPlugin(
ITensor* const* inputs, int nbInputs, IPlugin& plugin) TRTNOEXCEPT = 0;
Add a plugin layer to the network.
*/
/*
IPluginExt
Defined in TensorRT/include/NvInferRuntime.h
A subclass of IPlugin.
*/
// *static_cast<IPluginExt*>(plugin): downcast from the base IPlugin* to IPluginExt*;
// well defined only because isExt confirmed the factory created an IPluginExt.
// Add the plugin layer to the network with these inputs.
ILayer* layer = isExt ? network.addPluginExt(&inputs[0], int(inputs.size()), *static_cast<IPluginExt*>(plugin))
: network.addPlugin(&inputs[0], int(inputs.size()), *plugin);
/*
nvinfer1::ILayer::setName
定義於TensorRT/include/NvInfer.h
virtual void setName(const char* name) TRTNOEXCEPT = 0;
Set the name of a layer.
*/
layer->setName(layerMsg.name().c_str());
// The plugin's output count must match the number of tops declared in layerMsg
if (plugin->getNbOutputs() != layerMsg.top_size())
{
std::cout << "Plugin layer output count is not equal to caffe output count" << std::endl;
ok = false;
}
// Record the layer's output tensors in the mBlobNameToTensor map
for (int i = 0, n = std::min(layer->getNbOutputs(), layerMsg.top_size()); i < n; i++)
{
(*mBlobNameToTensor)[layerMsg.top(i)] = layer->getOutput(i);
}
// Note: this null check comes too late; layer has already been dereferenced by setName()/getOutput() above
if (layer == nullptr)
{
std::cout << "error parsing layer type " << layerMsg.type() << " index " << i << std::endl;
ok = false;
}
// Since this layer was handled by mPluginFactory, skip the getInferLibVersion() >= 5000 path below
continue;
}
/*
getInferLibVersion
Declared in TensorRT/include/NvInferRuntimeCommon.h
extern "C" TENSORRTAPI int getInferLibVersion();
Return the library version number.
The format is as for TENSORRT_VERSION: (TENSORRT_MAJOR * 1000) + (TENSORRT_MINOR * 100) + TENSOR_PATCH.
*/
if (getInferLibVersion() >= 5000)
{
/*
If this layer is handled by the user-supplied IPluginFactoryV2, add it to the network
as a PluginV2 and record its output tensors in the mBlobNameToTensor map.
*/
if (mPluginFactoryV2 && mPluginFactoryV2->isPluginV2(layerMsg.name().c_str()))
{
// IPluginFactory and IPluginFactoryV2 must not be used at the same time
if (mPluginFactory)
{
/*
RETURN_AND_LOG_ERROR
Defined in TensorRT/parsers/caffe/caffeMacros.h
Logs the error message and returns the given value (here nullptr).
*/
RETURN_AND_LOG_ERROR(nullptr, "Both IPluginFactory and IPluginFactoryV2 are set. If using TensorRT 5.0 or later, switch to IPluginFactoryV2");
}
std::vector<Weights> w = weights.getAllWeights(layerMsg.name());
nvinfer1::IPluginV2* plugin = mPluginFactoryV2->createPlugin(layerMsg.name().c_str(), w.empty() ? nullptr : &w[0], w.size(), mPluginNamespace.c_str());
std::vector<ITensor*> inputs;
for (int i = 0, n = layerMsg.bottom_size(); i < n; i++)
{
inputs.push_back((*mBlobNameToTensor)[layerMsg.bottom(i)]);
}
// The V2 interface no longer needs the isExt distinction
ILayer* layer = network.addPluginV2(&inputs[0], int(inputs.size()), *plugin);
layer->setName(layerMsg.name().c_str());
if (plugin->getNbOutputs() != layerMsg.top_size())
{
std::cout << "Plugin layer output count is not equal to caffe output count" << std::endl;
ok = false;
}
for (int i = 0, n = std::min(layer->getNbOutputs(), layerMsg.top_size()); i < n; i++)
{
(*mBlobNameToTensor)[layerMsg.top(i)] = layer->getOutput(i);
}
if (layer == nullptr)
{
std::cout << "error parsing layer type " << layerMsg.type() << " index " << i << std::endl;
ok = false;
}
continue;
}
// Use the TRT5 plugin creator method to check for built-in plugin support
// mPluginFactory and mPluginFactoryV2 above are user-supplied factories;
// from here on the parser falls back to TensorRT's built-in plugin creators.
/*
If this layer is one of Normalize, PriorBox, DetectionOutput, or RPROI, add it to the
network as a PluginV2, append the created IPluginV2 to the mNewPlugins vector, and
record the layer's output tensors in the mBlobNameToTensor map.
*/
std::string pluginName;
/*
struct PluginFieldCollection
{
int nbFields; //!< Number of PluginField entries
const PluginField* fields; //!< Pointer to PluginField entries
};
*/
nvinfer1::PluginFieldCollection fc;
/*
struct PluginField
Defined in TensorRT/include/NvInferRuntimeCommon.h
struct PluginField
{
const char* name{nullptr};
const void* data{nullptr};
PluginFieldType type{PluginFieldType::kUNKNOWN};
int32_t length{0};
PluginField(const char* name_ = nullptr, const void* data_ = nullptr, const PluginFieldType type_ = PluginFieldType::kUNKNOWN, int32_t length_ = 0)
: name(name_)
, data(data_)
, type(type_)
, length(length_)
{
}
};
Structure containing plugin attribute field names and associated data
This information can be parsed to decode necessary plugin metadata
A struct that records a plugin's attribute fields.
*/
std::vector<nvinfer1::PluginField> f;
if (layerMsg.type() == "Normalize")
{
pluginName = "Normalize_TRT";
f = parseNormalizeParam(layerMsg, weights, *mBlobNameToTensor);
}
else if (layerMsg.type() == "PriorBox")
{
pluginName = "PriorBox_TRT";
f = parsePriorBoxParam(layerMsg, weights, *mBlobNameToTensor);
}
else if (layerMsg.type() == "DetectionOutput")
{
pluginName = "NMS_TRT";
f = parseDetectionOutputParam(layerMsg, weights, *mBlobNameToTensor);
}
else if (layerMsg.type() == "RPROI")
{
pluginName = "RPROI_TRT";
f = parseRPROIParam(layerMsg, weights, *mBlobNameToTensor);
}
if (mPluginRegistry.find(pluginName) != mPluginRegistry.end())
{
// Set fc
fc.nbFields = f.size();
//std::vector::data(): Returns a direct pointer to the memory array used internally by the vector to store its owned elements
fc.fields = f.empty() ? nullptr : f.data();
//std::map::at() : Returns a reference to the mapped value of the element identified with key k
/*
IPluginCreator::createPlugin
Declared in TensorRT/include/NvInferRuntimeCommon.h
virtual IPluginV2* createPlugin(const char* name, const PluginFieldCollection* fc) TRTNOEXCEPT = 0;
Return a plugin object. Return nullptr in case of error.
*/
nvinfer1::IPluginV2* pluginV2 = mPluginRegistry.at(pluginName)->createPlugin(layerMsg.name().c_str(), &fc);
assert(pluginV2);
// Record the newly created IPluginV2 in the mNewPlugins vector so the parser can release it later
mNewPlugins.push_back(pluginV2);
std::vector<ITensor*> inputs;
for (int i = 0, n = layerMsg.bottom_size(); i < n; i++)
{
inputs.push_back((*mBlobNameToTensor)[layerMsg.bottom(i)]);
}
/*
nvinfer1::INetworkDefinition::addPluginV2
Defined in TensorRT/include/NvInfer.h
virtual IPluginV2Layer* addPluginV2(ITensor* const* inputs, int nbInputs, IPluginV2& plugin) TRTNOEXCEPT = 0;
Add a plugin layer to the network using the IPluginV2 interface.
*/
auto layer = network.addPluginV2(&inputs[0], int(inputs.size()), *pluginV2);
layer->setName(layerMsg.name().c_str());
if (pluginV2->getNbOutputs() != layerMsg.top_size())
{
std::cout << "Plugin layer output count is not equal to caffe output count" << std::endl;
ok = false;
}
for (int i = 0, n = std::min(layer->getNbOutputs(), layerMsg.top_size()); i < n; i++)
{
(*mBlobNameToTensor)[layerMsg.top(i)] = layer->getOutput(i);
}
if (layer == nullptr)
{
std::cout << "error parsing layer type " << layerMsg.type() << " index " << i << std::endl;
ok = false;
}
continue;
}
}
/*
Special handling for the Dropout, Input, and Flatten layer types.
*/
if (layerMsg.type() == "Dropout")
{
// Dropout is an identity operation at inference time, so the output blob simply
// aliases the input tensor; no network.addXxx call is needed and no layer is added.
(*mBlobNameToTensor)[layerMsg.top().Get(0)] = (*mBlobNameToTensor)[layerMsg.bottom().Get(0)];
continue;
}
if (layerMsg.type() == "Input")
{
/*
InputParameter
Defined in TensorRT/parsers/caffe/proto/trtcaffe.proto
message InputParameter {
repeated BlobShape shape = 1;
}
*/
const trtcaffe::InputParameter& p = layerMsg.input_param();
for (int i = 0; i < layerMsg.top_size(); i++)
{
// Shape of the i-th top
const trtcaffe::BlobShape& shape = p.shape().Get(i);
if (shape.dim_size() != 4)
{
RETURN_AND_LOG_ERROR(nullptr, "error parsing input layer, TensorRT only supports 4 dimensional input");
}
else
{
Dims d;
if (network.hasImplicitBatchDimension())
{
d = DimsCHW{(int) shape.dim().Get(1), (int) shape.dim().Get(2), (int) shape.dim().Get(3)};
}
else
{
std::cout << "Warning, setting batch size to 1. Update the dimension after parsing due to "
"using explicit batch size."
<< std::endl;
d = DimsNCHW{1, (int) shape.dim().Get(1), (int) shape.dim().Get(2), (int) shape.dim().Get(3)};
}
// Add a network input for this top and record its tensor in the mBlobNameToTensor map
ITensor* tensor = network.addInput(layerMsg.top(i).c_str(), DataType::kFLOAT, d);
(*mBlobNameToTensor)[layerMsg.top().Get(i)] = tensor;
}
}
continue;
}
if (layerMsg.type() == "Flatten")
{
// For a Flatten layer the output blob simply aliases the input tensor;
// no layer is added to the network (hence the warning below).
ITensor* tensor = (*mBlobNameToTensor)[layerMsg.bottom().Get(0)];
(*mBlobNameToTensor)[layerMsg.top().Get(0)] = tensor;
std::cout << "Warning: Flatten layer ignored. TensorRT implicitly"
" flattens input to FullyConnected layers, but in other"
" circumstances this will result in undefined behavior."
<< std::endl;
continue;
}
// Only from here on are layers handled by TensorRT's built-in layer parsers
// Use parser table to lookup the corresponding parse function to handle the rest of the layers
/*
Defined in TensorRT/parsers/caffe/caffeParser/opParsers/opParsers.h
static std::unordered_map<std::string, LayerParseFn> gParseTable;
It contains parse functions for the following layer types:
Convolution
Pooling
InnerProduct
ReLU
Softmax
SoftmaxWithLoss
LRN
Power
Eltwise
Concat
Deconvolution
Sigmoid
TanH
BatchNorm
Scale
Crop
Reduction
Reshape
Permute
ELU
BNLL
Clip
AbsVal
PReLU
It maps a layer type name to the function that parses that layer.
*/
auto v = gParseTable.find(layerMsg.type());
if (v == gParseTable.end())
{
std::cout << "could not parse layer type " << layerMsg.type() << std::endl;
ok = false;
}
else
{
/*
(*v->second) is a parse function, e.g.:
ILayer* parseInnerProduct(INetworkDefinition& network, const trtcaffe::LayerParameter& msg, CaffeWeightFactory& weightFactory, BlobNameToTensor& tensors);
Inside that function the corresponding layer is added to network.
*/
/*
mBlobNameToTensor is already declared as BlobNameToTensor*, so the
static_cast<BlobNameToTensor*>(mBlobNameToTensor) below is redundant; it would only be
needed if the member were held through the IBlobNameToTensor* interface type.
*/
ILayer* layer = (*v->second)(network, layerMsg, weights, *static_cast<BlobNameToTensor*>(mBlobNameToTensor));
if (layer == nullptr)
{
std::cout << "error parsing layer type " << layerMsg.type() << " index " << i << std::endl;
ok = false;
}
else
{
layer->setName(layerMsg.name().c_str());
// Record the layer's output in the mBlobNameToTensor map
(*mBlobNameToTensor)[layerMsg.top(0)] = layer->getOutput(0);
}
}
}
// Set each ITensor's name in mBlobNameToTensor to the blob name it is keyed under
mBlobNameToTensor->setTensorNames();
// Return the mBlobNameToTensor map if everything succeeded; otherwise return nullptr
return ok && weights.isOK() && mBlobNameToTensor->isOK() ? mBlobNameToTensor : nullptr;
}
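For context, a typical caller reaches this function through the public ICaffeParser API. A rough sketch follows; the file names and the "prob" blob name are placeholders:

#include "NvCaffeParser.h"
#include "NvInfer.h"

bool buildFromCaffe(nvinfer1::INetworkDefinition& network)
{
    nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
    const nvcaffeparser1::IBlobNameToTensor* blobs =
        parser->parse("deploy.prototxt", "net.caffemodel", network, nvinfer1::DataType::kFLOAT);
    if (!blobs)
    {
        return false; // parse() returned nullptr because something went wrong
    }
    nvinfer1::ITensor* out = blobs->find("prob"); // look a tensor up by its Caffe blob name
    if (out)
    {
        network.markOutput(*out); // outputs must be marked explicitly after parsing
    }
    // Note: the parser owns the converted weight buffers, so it must stay alive
    // until the engine has been built from this network.
    return out != nullptr;
}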
The parse function contains this line:
mBlobNameToTensor = new (BlobNameToTensor);
What is the difference between new CLASSNAME and new (CLASSNAME), i.e. what do the
parentheses around the class name achieve? Here, nothing: BlobNameToTensor is a type
name rather than an expression, so (BlobNameToTensor) is parsed as a parenthesized
type-id, not as a placement-new argument, and the statement is exactly equivalent to
new BlobNameToTensor. The parentheses only become necessary for compound type-ids
(for example pointer-to-array types) that cannot follow new unparenthesized.
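A minimal sketch (with a hypothetical Blob type) illustrating that the spellings allocate identically:

#include <iostream>

struct Blob
{
    Blob() { std::cout << "Blob()" << std::endl; }
};

int main()
{
    Blob* a = new Blob;    // default-initialized
    Blob* b = new (Blob);  // identical: "(Blob)" is a parenthesized type-id, not placement new
    Blob* c = new Blob();  // value-initialized; same effect here because Blob has a user-provided constructor
    delete a;
    delete b;
    delete c;
    return 0;
}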
The parse function contains this line:
nvinfer1::IPluginCreator* const* tmpList = getPluginRegistry()->getPluginCreatorList(&numCreators);
What is the point of the * after const? Read from right to left, IPluginCreator* const*
is "pointer to const pointer to IPluginCreator": tmpList walks an array of creator
pointers whose entries cannot be reseated through tmpList, while the IPluginCreator
objects they point to remain modifiable.
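A small illustration with a stand-in Creator type (hypothetical names):

#include <cassert>

struct Creator
{
    int id{0};
};

int main()
{
    Creator a, b;
    Creator* slots[2] = {&a, &b};

    // Pointer to const-pointer-to-Creator, mirroring IPluginCreator* const*.
    Creator* const* list = slots;

    list[0]->id = 42; // OK: the Creator objects themselves are not const
    // list[0] = &b;  // error: the pointers in the array cannot be reseated through 'list'
    assert(a.id == 42);
    return 0;
}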
The parse function also performs this cast (the relevant class definitions are reproduced first):
/*
class IPluginFactory
{
public:
virtual bool isPlugin(const char* layerName) TRTNOEXCEPT = 0;
virtual nvinfer1::IPlugin* createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights) TRTNOEXCEPT = 0;
virtual ~IPluginFactory() {}
};
*/
/*
class IPluginFactoryExt : public IPluginFactory
{
public:
virtual int getVersion() const TRTNOEXCEPT
{
return NV_TENSORRT_VERSION;
}
virtual bool isPluginExt(const char* layerName) TRTNOEXCEPT = 0;
};
*/
//nvcaffeparser1::IPluginFactory* mPluginFactory{nullptr};
/**/static_cast<IPluginFactoryExt*>(mPluginFactory)/**/;
This converts an IPluginFactory* to an IPluginFactoryExt*.
Similarly:
/*
class IPlugin
{
public:
virtual int getNbOutputs() const TRTNOEXCEPT = 0;
virtual Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRTNOEXCEPT = 0;
virtual void configure(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, int maxBatchSize) TRTNOEXCEPT = 0;
virtual int initialize() TRTNOEXCEPT = 0;
virtual void terminate() TRTNOEXCEPT = 0;
virtual size_t getWorkspaceSize(int maxBatchSize) const TRTNOEXCEPT = 0;
virtual int enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) TRTNOEXCEPT = 0;
virtual size_t getSerializationSize() TRTNOEXCEPT = 0;
virtual void serialize(void* buffer) TRTNOEXCEPT = 0;
virtual ~IPlugin() {}
};
class IPluginExt : public IPlugin
{
public:
virtual int getTensorRTVersion() const TRTNOEXCEPT
{
return NV_TENSORRT_VERSION;
}
virtual bool supportsFormat(DataType type, PluginFormat format) const TRTNOEXCEPT = 0;
virtual void configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) TRTNOEXCEPT = 0;
virtual ~IPluginExt() {}
protected:
void configure(const Dims*, int, const Dims*, int, int) _TENSORRT_FINAL TRTNOEXCEPT {}
};
*/
//IPlugin* plugin = mPluginFactory->createPlugin(layerMsg.name().c_str(), w.empty() ? nullptr : &w[0], w.size());
/**/static_cast<IPluginExt*>(plugin)/**/;
This converts an IPlugin* to an IPluginExt*.
Note that in both cases the pointer being converted is a pointer to an abstract class,
and both use static_cast to perform the downcast. static_cast does an unchecked downcast,
which is well defined only because the surrounding logic (the mPluginFactoryIsExt flag and
the isExt check) guarantees that the object's dynamic type really is the derived class;
dynamic_cast would perform the same conversion with a runtime check, at extra cost.
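A self-contained sketch of the pattern, with hypothetical Base/Derived names:

#include <iostream>

struct Base
{
    virtual void who() = 0;
    virtual ~Base() = default;
};

struct Derived : Base
{
    void who() override { std::cout << "Derived" << std::endl; }
    void extra() { std::cout << "extra" << std::endl; }
};

int main()
{
    Base* b = new Derived;    // pointer to abstract base, dynamic type Derived
    bool knownDerived = true; // plays the role of mPluginFactoryIsExt / isExt

    if (knownDerived)
    {
        // Unchecked downcast: fine only because we know b really points at a Derived.
        static_cast<Derived*>(b)->extra();
    }

    // Checked alternative: yields nullptr if the dynamic type did not match.
    if (Derived* d = dynamic_cast<Derived*>(b))
    {
        d->who();
    }

    delete b;
    return 0;
}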
The following two lines:
//std::shared_ptr<trtcaffe::NetParameter> mModel;
mModel = std::unique_ptr<trtcaffe::NetParameter>(new trtcaffe::NetParameter);
//std::shared_ptr<trtcaffe::NetParameter> mDeploy;
mDeploy = std::unique_ptr<trtcaffe::NetParameter>(new trtcaffe::NetParameter);
both assign a unique_ptr to a member declared as shared_ptr. What is the point?
std::shared_ptr provides a converting assignment that accepts a unique_ptr rvalue and
takes over its ownership, so this is just a way of handing a freshly allocated object to
the shared_ptr; the effect is the same as mModel.reset(new trtcaffe::NetParameter).
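A minimal sketch with a stand-in NetParameter type:

#include <cassert>
#include <memory>

struct NetParameter // stand-in for trtcaffe::NetParameter
{
    int layers{0};
};

int main()
{
    std::shared_ptr<NetParameter> model;

    // shared_ptr's assignment operator accepts a unique_ptr rvalue and steals its ownership.
    model = std::unique_ptr<NetParameter>(new NetParameter);
    assert(model.use_count() == 1);

    // Equivalent, more direct spellings:
    model.reset(new NetParameter);
    model = std::make_shared<NetParameter>();
    return 0;
}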
The parse function uses calls such as:
/**/mDeploy->input_size()/**/
/**/layerMsg.has_phase()/**/
These _size and has_ functions do not appear anywhere in trtcaffe.proto. Where do they
come from? They are accessors generated by the protobuf compiler: for a repeated field
foo, the generated C++ class provides foo_size(), foo(int index) and add_foo(); for an
optional field bar, it provides has_bar(), bar() and set_bar(). See Protocol Buffer
(proto2) and its C++ API.
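For example, given an abbreviated, illustrative proto2 message along the lines of the one in trtcaffe.proto, protoc generates accessors with fixed naming rules; a hedged sketch:

// message LayerParameter {
//   optional string name   = 1;
//   repeated string bottom = 3;
//   repeated string top    = 4;
//   optional Phase  phase  = 10;
// }
#include <string>
#include "trtcaffe.pb.h" // header generated by protoc from trtcaffe.proto

void inspect(const trtcaffe::LayerParameter& layerMsg)
{
    int nTops = layerMsg.top_size();             // repeated field: <field>_size()
    if (nTops > 0)
    {
        const std::string& t0 = layerMsg.top(0); // repeated field: <field>(index)
        (void) t0;
    }
    bool hasPhase = layerMsg.has_phase();        // optional field: has_<field>()
    (void) nTops;
    (void) hasPhase;
}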
The parse function also uses google::protobuf::RepeatedField::Get (and its counterpart
RepeatedPtrField::Get for message-typed fields):
dims = DimsCHW{(int) mDeploy->input_shape().Get(i).dim().Get(1), (int) mDeploy->input_shape().Get(i).dim().Get(2), (int) mDeploy->input_shape().Get(i).dim().Get(3)};
See C++ google protobuf.
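A minimal standalone use of RepeatedField, the container that backs repeated scalar proto fields:

#include <google/protobuf/repeated_field.h>

int main()
{
    google::protobuf::RepeatedField<int> dims;
    dims.Add(3);
    dims.Add(224);
    dims.Add(224);

    // Get(i) returns the i-th element; size() gives the element count.
    int channels = dims.Get(0);
    int height   = dims.Get(1);
    int width    = dims.Get(2);
    return (channels * height * width == 3 * 224 * 224) ? 0 : 1;
}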
INetworkDefinition::hasImplicitBatchDimension reports whether the network was created
with an implicit batch dimension, i.e. whether the batch size is omitted from the tensor
shapes and supplied separately at execution time; networks created with the explicit-batch
flag carry the batch dimension in their shapes instead.
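A hedged sketch (TensorRT 6/7-era API) of how the two modes come about:

#include "NvInfer.h"

nvinfer1::INetworkDefinition* makeExplicitBatchNetwork(nvinfer1::IBuilder& builder)
{
    const auto flags = 1U << static_cast<uint32_t>(
        nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    nvinfer1::INetworkDefinition* net = builder.createNetworkV2(flags);
    // net->hasImplicitBatchDimension() is false here; builder.createNetworkV2(0U)
    // would instead create a network with an implicit batch dimension.
    return net;
}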
std::vector::empty
Protocol Buffer (proto2) and C++ API
C++ google protobuf