// Information we maintain about each operand during execution that
// may change during execution.
struct RunTimeOperandInfo {
// TODO Storing the type here is redundant, as it won't change during execution.
OperandType type;
// The type and dimensions of the operand. The dimensions can
// change at runtime. We include the type because it's useful
// to pass together with the dimension to the functions implementing
// the operators.
std::vector<uint32_t> dimensions;
float scale;
int32_t zeroPoint;
// Where the operand's data is stored. Check the corresponding
// location information in the model to figure out if this points
// to memory we have allocated for a temporary operand.
uint8_t* buffer;
// The length of the buffer.
uint32_t length;
// Whether this is a temporary variable, a model input, a constant, etc.
OperandLifeTime lifetime;
// Keeps track of how many operations have yet to make use
// of this temporary variable. When the count is decremented to 0,
// we free the buffer. For non-temporary variables, this count is
// always 0.
uint32_t numberOfUsesLeft;
Shape shape() const {
return Shape{.type = type, .dimensions = dimensions, .scale = scale, .offset = zeroPoint};
}
};
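numberOfUsesLeft is what lets the executor reclaim scratch memory as early as possible. A minimal sketch of that bookkeeping (an illustrative helper, not the actual freeNoLongerUsedOperands body):
// Illustrative: called after an operation completes, with the indexes of the
// operands it consumed.
void releaseAfterUse(std::vector<RunTimeOperandInfo>& operands,
                     const std::vector<uint32_t>& consumedIndexes) {
    for (uint32_t i : consumedIndexes) {
        RunTimeOperandInfo& info = operands[i];
        // Only executor-allocated temporaries are ever freed here.
        if (info.lifetime != OperandLifeTime::TEMPORARY_VARIABLE) continue;
        if (info.numberOfUsesLeft > 0 && --info.numberOfUsesLeft == 0) {
            delete[] info.buffer;
            info.buffer = nullptr;
        }
    }
}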
// Used to keep a pointer to each of the memory pools.
//
// In the case of an "mmap_fd" pool, owns the mmap region
// returned by getBuffer() -- i.e., that region goes away
// when the RunTimePoolInfo is destroyed or is assigned to.
class RunTimePoolInfo {
public:
// If "fail" is not nullptr, and construction fails, then set *fail = true.
// If construction succeeds, leave *fail unchanged.
// getBuffer() == nullptr IFF construction fails.
explicit RunTimePoolInfo(const hidl_memory& hidlMemory, bool* fail);
explicit RunTimePoolInfo(uint8_t* buffer);
// Implement move
RunTimePoolInfo(RunTimePoolInfo&& other);
RunTimePoolInfo& operator=(RunTimePoolInfo&& other);
// Forbid copy
RunTimePoolInfo(const RunTimePoolInfo&) = delete;
RunTimePoolInfo& operator=(const RunTimePoolInfo&) = delete;
~RunTimePoolInfo() { release(); }
uint8_t* getBuffer() const { return mBuffer; }
bool update() const;
private:
void release();
void moveFrom(RunTimePoolInfo&& other);
hidl_memory mHidlMemory; // always used
uint8_t* mBuffer = nullptr; // always used
sp<IMemory> mMemory; // only used when hidlMemory.name() == "ashmem"
};
bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
const hidl_vec<hidl_memory>& pools);
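A hedged usage sketch: before execution, each memory pool referenced by the request is mapped into a RunTimePoolInfo (the wrapper function name is illustrative):
// Illustrative caller: map a request's pools, failing if any cannot be mapped.
bool mapRequestPools(const Request& request,
                     std::vector<RunTimePoolInfo>* poolInfos) {
    if (!setRunTimePoolInfosFromHidlMemories(poolInfos, request.pools)) {
        return false;  // at least one hidl_memory could not be mapped
    }
    // For a pool built directly from a hidl_memory, getBuffer() == nullptr
    // exactly when the constructor reported failure through *fail.
    return true;
}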
// This class is used to execute a model on the CPU.
class CpuExecutor {
public:
// Executes the model. The results will be stored at the locations
// specified in the constructor.
// The model must outlive the executor. We prevent it from being modified
// while this is executing.
int run(const V1_0::Model& model, const Request& request,
const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos);
int run(const V1_1::Model& model, const Request& request,
const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos);
private:
bool initializeRunTimeInfo(const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos);
// Runs one operation of the graph.
int executeOperation(const Operation& entry);
// Decrement the usage count for the operands listed. Frees the memory
// allocated for any temporary variable with a count of zero.
void freeNoLongerUsedOperands(const std::vector<uint32_t>& inputs);
// The model and the request that we'll execute. Only valid while run()
// is being executed.
const Model* mModel = nullptr;
const Request* mRequest = nullptr;
// We're copying the list of all the dimensions from the model, as
// these may be modified when we run the operations. Since we're
// making a full copy, the indexes used in the operand description
// stay valid.
// std::vector<uint32_t> mDimensions;
// Runtime information about all the operands.
std::vector<RunTimeOperandInfo> mOperands;
};
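A driver-side caller would then use the executor roughly as follows (a sketch; it assumes the pool-info vectors were built as shown above):
// Illustrative: run a model on the CPU and report the executor's result code.
int executeOnCpu(const V1_1::Model& model, const Request& request,
                 const std::vector<RunTimePoolInfo>& modelPoolInfos,
                 const std::vector<RunTimePoolInfo>& requestPoolInfos) {
    CpuExecutor executor;
    // run() walks the graph operation by operation and writes the outputs to
    // the locations described by the request; 0 (ANEURALNETWORKS_NO_ERROR)
    // signals success.
    return executor.run(model, request, modelPoolInfos, requestPoolInfos);
}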
// Class for setting reasonable OpenMP threading settings. (OpenMP is used by
// the Eigen matrix library.)
//
// Currently sets a low blocktime: the time OpenMP threads busy-wait for more
// work before going to sleep. See b/79159165, https://reviews.llvm.org/D18577.
// The default is 200ms; we set it to 20ms here, see b/109645291. This keeps the
// cores enabled throughout inference computation without too much extra power
// consumption afterwards.
//
// The OpenMP settings are thread-local (applying only to worker threads formed
// from that thread), see https://software.intel.com/en-us/node/522688 and
// http://lists.llvm.org/pipermail/openmp-dev/2016-July/001432.html. This class
// ensures that within the scope in which an object is instantiated we use the
// right settings (scopes may be nested), as long as no other library changes
// them. (Note that in current NNAPI usage only one instance is used in the
// CpuExecutor thread).
//
// TODO(mikie): consider also setting the number of threads used. Using as many
// threads as there are cores results in more variable performance: if we don't
// get all cores for our threads, the latency is doubled as we wait for one core
// to do twice the amount of work. Reality is complicated though as not all
// cores are the same. Decision to be based on benchmarking against a
// representative set of workloads and devices. I'm keeping the code here for
// reference.
class ScopedOpenmpSettings {
public:
ScopedOpenmpSettings();
~ScopedOpenmpSettings();
DISALLOW_COPY_AND_ASSIGN(ScopedOpenmpSettings);
private:
int mBlocktimeInitial;
#if NNAPI_LIMIT_CPU_THREADS
int mMaxThreadsInitial;
#endif
};
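A sketch of what the constructor and destructor do, assuming the kmp_get_blocktime()/kmp_set_blocktime() extensions exposed by the LLVM/Intel OpenMP runtime (omp.h); the thread-count branch is illustrative only:
#include <omp.h>  // kmp_* block-time extensions of the LLVM/Intel OpenMP runtime

ScopedOpenmpSettings::ScopedOpenmpSettings() {
    mBlocktimeInitial = kmp_get_blocktime();
    kmp_set_blocktime(20);  // 20 ms instead of the 200 ms default, see b/109645291
#if NNAPI_LIMIT_CPU_THREADS
    // Illustrative: remember and reduce the number of worker threads.
    mMaxThreadsInitial = omp_get_max_threads();
    omp_set_num_threads(mMaxThreadsInitial > 4 ? 4 : mMaxThreadsInitial);
#endif
}

ScopedOpenmpSettings::~ScopedOpenmpSettings() {
    kmp_set_blocktime(mBlocktimeInitial);  // restore the previous setting
#if NNAPI_LIMIT_CPU_THREADS
    omp_set_num_threads(mMaxThreadsInitial);
#endif
}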
namespace {
template <typename T>
T getScalarData(const RunTimeOperandInfo& info) {
// TODO: Check buffer is at least as long as size of data.
T* data = reinterpret_cast<T*>(info.buffer);
return data[0];
}
inline bool IsNullInput(const RunTimeOperandInfo *input) {
return input->lifetime == OperandLifeTime::NO_VALUE;
}
inline int NumInputsWithValues(const Operation &operation,
std::vector<RunTimeOperandInfo> &operands) {
const std::vector<uint32_t> &inputs = operation.inputs;
return std::count_if(inputs.begin(), inputs.end(),
[&operands](uint32_t i) {
return !IsNullInput(&operands[i]);
});
}
inline int NumOutputs(const Operation &operation) {
return operation.outputs.size();
}
inline size_t NumDimensions(const RunTimeOperandInfo *operand) {
return operand->shape().dimensions.size();
}
inline uint32_t SizeOfDimension(const RunTimeOperandInfo *operand, int i) {
return operand->shape().dimensions[i];
}
inline RunTimeOperandInfo *GetInput(const Operation &operation,
std::vector<RunTimeOperandInfo> &operands,
int index) {
return &operands[operation.inputs[index]];
}
inline RunTimeOperandInfo *GetOutput(const Operation &operation,
std::vector<RunTimeOperandInfo> &operands,
int index) {
return &operands[operation.outputs[index]];
}
} // anonymous namespace
} // namespace nn
} // namespace android
#endif // ANDROID_ML_NN_COMMON_CPU_EXECUTOR_H
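Taken together, these helpers keep the per-operation handlers inside executeOperation compact. The following is only an illustrative sketch (the operand layout and the hypothetical executeAddSketch name are assumptions, not the actual switch case):
// Illustrative sketch: reading the inputs/outputs of one operation.
int executeAddSketch(const Operation& operation,
                     std::vector<RunTimeOperandInfo>& operands) {
    const RunTimeOperandInfo* in0 = GetInput(operation, operands, 0);
    const RunTimeOperandInfo* in1 = GetInput(operation, operands, 1);
    // The fused activation code is passed as a scalar operand (index assumed).
    int32_t activation = getScalarData<int32_t>(*GetInput(operation, operands, 2));
    RunTimeOperandInfo* out = GetOutput(operation, operands, 0);
    // shape() bundles type, dimensions and quantization parameters for the kernels.
    const Shape in0Shape = in0->shape();
    const Shape outShape = out->shape();
    // A real handler would now dispatch to the float or quantized kernel based
    // on in0->type, writing the result into out->buffer.
    (void)in1; (void)activation; (void)in0Shape; (void)outShape;
    return ANEURALNETWORKS_NO_ERROR;
}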
bool convFloat32(const float* inputData, const Shape& inputShape,
const float* filterData, const Shape& filterShape,
const float* biasData, const Shape& biasShape,
int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom,
int32_t stride_width, int32_t stride_height,
int32_t activation,
float* outputData, const Shape& outputShape) {
ANDROID_NN_CONV_PARAMETERS(float)
float output_activation_min, output_activation_max;
CalculateActivationRangeFloat(activation, &output_activation_min,
&output_activation_max);
// Prevent concurrent executions that may access the scratch buffer.
std::unique_lock<std::mutex> lock(executionMutex);
// Dispatch: the actual computation is the Conv method defined by the TensorFlow Lite Delegate API.
tflite::optimized_ops::Conv(
inputData, convertShapeToDims(inputShape),
filterData, convertShapeToDims(filterShape),
biasData, convertShapeToDims(biasShape),
stride_width, stride_height, paddingWidth, paddingHeight,
output_activation_min, output_activation_max,
outputData, convertShapeToDims(outputShape),
im2colData, im2colDim);
return true;
}
Both the 32-bit float and the 8-bit quantized convolutions ultimately use the Conv method provided by the TensorFlow Lite Delegate API.
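For reference, CalculateActivationRangeFloat simply maps the fused-activation parameter onto a clamp range applied to the output; a sketch of that mapping (the kActivation* constant names are assumed from the usual NNAPI activation codes):
#include <limits>

// Illustrative sketch of the activation-to-clamp-range mapping.
void calculateActivationRangeFloatSketch(int32_t activation,
                                         float* activation_min,
                                         float* activation_max) {
    *activation_min = std::numeric_limits<float>::lowest();
    *activation_max = std::numeric_limits<float>::max();
    if (activation == kActivationRelu) {          // max(0, x)
        *activation_min = 0.f;
    } else if (activation == kActivationRelu6) {  // clamp to [0, 6]
        *activation_min = 0.f;
        *activation_max = 6.f;
    } else if (activation == kActivationRelu1) {  // clamp to [-1, 1]
        *activation_min = -1.f;
        *activation_max = 1.f;
    }  // kActivationNone: keep the full float range, i.e. no clamping
}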
enum class Select {
RANDOM, // evict random entries
LRU, // evict least-recently-used entries
DEFAULT = RANDOM,
};
enum class Capacity {
// cut back to no more than half capacity; new/replacement
// entry still might not fit
HALVE,
// cut back to whatever is necessary to fit new/replacement
// entry
FIT,
// cut back to no more than half capacity and ensure that
// there's enough space for new/replacement entry
FIT_HALVE,
DEFAULT = HALVE,
};
// Driver definition
class SampleDriver : public IDevice {
public:
SampleDriver(const char* name) : mName(name) {}
~SampleDriver() override {}
// Getters
Return<void> getCapabilities(getCapabilities_cb cb) override;
Return<void> getSupportedOperations(const V1_0::Model& model,
getSupportedOperations_cb cb) override;
Return<ErrorStatus> prepareModel(const V1_0::Model& model,
const sp<IPreparedModelCallback>& callback) override;
Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model, ExecutionPreference preference,
const sp<IPreparedModelCallback>& callback) override;
Return<DeviceStatus> getStatus() override;
// Starts and runs the driver service. Typically called from main().
// This will return only once the service shuts down.
int run();
protected:
std::string mName;
};
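run() follows the standard HIDL service bring-up. A sketch of its typical body, assuming the libhidl thread-pool helpers (configureRpcThreadpool/joinRpcThreadpool from hidl/HidlTransportSupport.h):
#include <hidl/HidlTransportSupport.h>

// Sketch of the usual service loop; the thread-pool size is illustrative.
int SampleDriver::run() {
    android::hardware::configureRpcThreadpool(4, true /*callerWillJoin*/);
    if (registerAsService(mName) != android::OK) {
        return 1;  // could not register with hwservicemanager
    }
    android::hardware::joinRpcThreadpool();
    return 1;  // joinRpcThreadpool() is not expected to return
}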
// FIXME: model definition
class SamplePreparedModel : public IPreparedModel {
public:
SamplePreparedModel(const Model& model) : mModel(model) {}
~SamplePreparedModel() override {}
bool initialize();
Return<ErrorStatus> execute(const Request& request,
const sp<IExecutionCallback>& callback) override;
private:
void asyncExecute(const Request& request, const sp<IExecutionCallback>& callback);
Model mModel;
std::vector<RunTimePoolInfo> mPoolInfos;
};
} // namespace sample_driver
} // namespace nn
} // namespace android
From this header we can see the basic entry points: querying the list of supported operations, preparing (importing) a model, and running it. Judging from the model-execution code, the prediction path is an asynchronous design, much the same as the way CL calls are issued.
Return<ErrorStatus> SamplePreparedModel::execute(const Request& request,
const sp<IExecutionCallback>& callback) {
VLOG(DRIVER) << "execute(" << SHOW_IF_DEBUG(toString(request)) << ")";
if (callback.get() == nullptr) {
LOG(ERROR) << "invalid callback passed to execute";
return ErrorStatus::INVALID_ARGUMENT;
}
if (!validateRequest(request, mModel)) {
callback->notify(ErrorStatus::INVALID_ARGUMENT);
return ErrorStatus::INVALID_ARGUMENT;
}
// This thread is intentionally detached because the sample driver service
// is expected to live forever.
std::thread([this, request, callback]{ asyncExecute(request, callback); }).detach();
return ErrorStatus::NONE;
}
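The detached thread lands in asyncExecute, which maps the request pools, runs the CpuExecutor synchronously, and reports the result through the callback. A hedged sketch of that flow (the error mapping is simplified):
// Illustrative sketch of the asynchronous execution body.
void SamplePreparedModel::asyncExecute(const Request& request,
                                       const sp<IExecutionCallback>& callback) {
    std::vector<RunTimePoolInfo> requestPoolInfos;
    if (!setRunTimePoolInfosFromHidlMemories(&requestPoolInfos, request.pools)) {
        callback->notify(ErrorStatus::GENERAL_FAILURE);
        return;
    }
    CpuExecutor executor;
    int n = executor.run(mModel, request, mPoolInfos, requestPoolInfos);
    // Translate the executor's integer result into the HAL error enum.
    ErrorStatus status = (n == ANEURALNETWORKS_NO_ERROR) ? ErrorStatus::NONE
                                                         : ErrorStatus::GENERAL_FAILURE;
    callback->notify(status);
}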
service neuralnetworks_hal_service_sample_float_fast /vendor/bin/hw/android.hardware.neuralnetworks@1.1-service-sample-float-fast
class hal
user system
group system
service neuralnetworks_hal_service_sample_float_slow /vendor/bin/hw/android.hardware.neuralnetworks@1.1-service-sample-float-slow
class hal
user system
group system