Host machine: Ubuntu 18.04
Chosen version: tensorflow-2.6.0
Download URL:
https://github.com/tensorflow/tensorflow
Click "master", switch to "Tags", and pick v2.6.0.
(Note: I downloaded and extracted the archive directly. A git clone works too; my connection was too slow to finish a clone, so I downloaded the archive instead. The result is the same either way.)
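If your network allows it, a shallow clone of just the v2.6.0 tag is equivalent (a minimal sketch):
git clone --depth 1 --branch v2.6.0 https://github.com/tensorflow/tensorflow.git tensorflow-2.6.0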
# Change into the extracted tensorflow-2.6.0 directory
cd tensorflow-2.6.0
# Run the download_dependencies.sh script to fetch the dependencies. Individual downloads may fail on a slow connection; just rerun the script a few times, as it resumes incrementally. Alternatively, when a download fails, the script prints the URL: download that file manually, then comment out the corresponding download line in the script (see the sketch below).
./tensorflow/lite/tools/make/download_dependencies.sh
The command above creates a downloads directory under the make directory, with the following dependency directories inside:
In tensorflow/lite/tools/make/downloads: ls
absl eigen fft2d fp16 googletest ruy
cpuinfo farmhash flatbuffers gemmlowp neon_2_sse
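A minimal sketch of the manual workaround, taking flatbuffers as the failing package; the URL is whatever address the script prints, and download_and_extract / FLATBUFFERS_URL are the helper and variable names as I read them in the 2.6 script, so verify against your copy before commenting anything out:
cd tensorflow/lite/tools/make/downloads
wget <URL printed by the failing step>   # the script echoes the URL it tried
tar xzf <downloaded archive>             # or unzip, depending on the archive type
mv <extracted directory> flatbuffers     # name must match the expected dependency directory
# then comment out the matching line in download_dependencies.sh:
# download_and_extract "${FLATBUFFERS_URL}" "${DOWNLOADS_DIR}/flatbuffers"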
The cross-compiler used is arm-linux-gnueabihf-gcc 7.5.0.
Location:
https://releases.linaro.org/components/toolchain/binaries/7.5-2019.12/arm-linux-gnueabihf/
gcc-linaro-7.5.0-2019.12-x86_64_arm-linux-gnueabihf.tar.xz
Here I configure it through the user's private environment variables:
vim ~/.bashrc
# Append at the end (replace <extract location> with where you unpacked the toolchain):
export PATH=$PATH:/<extract location>/gcc-linaro-7.5.0-2019.12-x86_64_arm-linux-gnueabihf/bin
# Make the configuration take effect
source ~/.bashrc
Verify by running the following directly in a terminal:
arm-linux-gnueabihf-gcc --version
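If the version banner prints, the toolchain is on PATH. A further sanity check that it emits ARM binaries (a minimal sketch; hello.c is a throwaway file):
echo 'int main(void){ return 0; }' > hello.c
arm-linux-gnueabihf-gcc hello.c -o hello
file hello   # should report something like: ELF 32-bit LSB executable, ARM, EABI5 ...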
# Working directory: tensorflow/lite/tools/make
# Run the script below
./build_rpi_lib.sh
Main purpose: generate the libtensorflow-lite.a static library, which is what the cross-compilation step links against.
Result:
# Working directory: tensorflow/lite/tools/make
cd gen/rpi_armv7l
ls
bin lib obj
cd lib
benchmark-lib.a libtensorflow-lite.a
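Optionally confirm that the archive was built for ARM before moving on (a hedged check):
ar t libtensorflow-lite.a | head   # list a few of the member object files
arm-linux-gnueabihf-objdump -a libtensorflow-lite.a | head   # members should report file format elf32-littlearm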
cd tensorflow-2.6.0/tensorflow/lite
find . -name "*.h" | tar -cf headers.tar -T -   # this step packs the lite headers into one tar archive
# Pick any other location (outside the tensorflow-2.6.0 tree) and create a new directory to gather everything we need for compilation
mkdir test
cd test
mkdir -p include/tensorflow/lite   # create the include tree
tar xvf headers.tar -C include/tensorflow/lite   # copy the archive packed above into test, then extract it here
Also move the absl, flatbuffers, gmock, gtest, and tensorflow headers into the include directory.
Example: flatbuffers (the others work the same way; see the sketch after this step):
# run in the test directory
mkdir -p include/flatbuffers
cp tensorflow-2.6.0/tensorflow/lite/tools/make/downloads/flatbuffers/include/flatbuffers/* ./include/flatbuffers/
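A minimal sketch for the remaining header trees, assuming the usual layout of the 2.6 downloads directory (adjust DL to where your source tree actually lives); run from the test directory:
DL=/path/to/tensorflow-2.6.0/tensorflow/lite/tools/make/downloads
cp -r "$DL"/absl/absl ./include/
cp -r "$DL"/googletest/googlemock/include/gmock ./include/
cp -r "$DL"/googletest/googletest/include/gtest ./include/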
Copy the label_image directory from tensorflow-2.6.0/tensorflow/lite/examples into the test directory as well.
Also copy the lib directory from tensorflow-2.6.0/tensorflow/lite/tools/make/gen/rpi_armv7l into the test directory.
The test directory then contains:
include label_image lib
cd test/label_image
Modify CMakeLists.txt (the #alter markers flag my changes):
#
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Builds the minimal Tensorflow Lite example.
cmake_minimum_required(VERSION 2.9)
project(label_image)
set(CMAKE_VERBOSE_MAKEFILE on)
#alter
set(tools /opt/gcc-linaro-7.5.0-2019.12-x86_64_arm-linux-gnueabihf)
#include_directories(../../tensorflow-2.6.3/tensorflow/lite)
include_directories(../include/flatbuffers)
include_directories(../include/tensorflow/lite)
include_directories(../include)
set(CMAKE_C_COMPILER ${tools}/bin/arm-linux-gnueabihf-gcc)
set(CMAKE_CXX_COMPILER ${tools}/bin/arm-linux-gnueabihf-g++)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "-O3 -static -pthread")
#alter
LINK_LIBRARIES("/mnt/toArm/test/lib/libtensorflow-lite.a")
add_executable(label_image
label_image.cc bitmap_helpers.cc
)
TARGET_LINK_LIBRARIES(label_image dl)
Modify label_image.cc:
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/examples/label_image/label_image.h"
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/examples/label_image/label_image.h"
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include // NOLINT(build/include_order)
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "tensorflow/lite/tools/make/downloads/absl/absl/memory/memory.h"
#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
#include "tensorflow/lite/examples/label_image/get_top_n.h"
#include "tensorflow/lite/examples/label_image/log.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
namespace tflite {
namespace label_image {
double get_us(struct timeval t) { return (t.tv_sec * 1000000 + t.tv_usec); }
using TfLiteDelegatePtr = tflite::Interpreter::TfLiteDelegatePtr;
using ProvidedDelegateList = tflite::tools::ProvidedDelegateList;
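// The resize<T>() helper in the #if 0 block below is kept from the upstream
// label_image example for reference but compiled out; the same bilinear resize is
// re-built inline in RunInference() further down.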
#if 0
template <class T>
void resize(T* out, uint8_t* in, int image_height, int image_width,
int image_channels, int wanted_height, int wanted_width,
int wanted_channels, Settings* s) {
cout<< "11111111111"<<endl
int number_of_pixels = image_height * image_width * image_channels;
std::unique_ptr<Interpreter> interpreter(new Interpreter);
int base_index = 0;
LOG(INFO)<< "11111111111"
// two inputs: input and new_sizes
interpreter->AddTensors(2, &base_index);
// one output
interpreter->AddTensors(1, &base_index);
// set input and output tensors
interpreter->SetInputs({0, 1});
interpreter->SetOutputs({2});
LOG(INFO)<< "11111111111"
// set parameters of tensors
TfLiteQuantizationParams quant;
interpreter->SetTensorParametersReadWrite(
0, kTfLiteFloat32, "input",
{1, image_height, image_width, image_channels}, quant);
interpreter->SetTensorParametersReadWrite(1, kTfLiteInt32, "new_size", {2},
quant);
LOG(INFO)<< "11111111111"
interpreter->SetTensorParametersReadWrite(
2, kTfLiteFloat32, "output",
{1, wanted_height, wanted_width, wanted_channels}, quant);
LOG(INFO)<< "11111111111"
ops::builtin::BuiltinOpResolver resolver;
const TfLiteRegistration* resize_op =
resolver.FindOp(BuiltinOperator_RESIZE_BILINEAR, 1);
auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(
malloc(sizeof(TfLiteResizeBilinearParams)));
params->align_corners = false;
params->half_pixel_centers = false;
interpreter->AddNodeWithParameters({0, 1}, {2}, nullptr, 0, params, resize_op,
nullptr);
LOG(INFO)<< "interpreter->AllocateTensors()"
interpreter->AllocateTensors();
// fill input image
// in[] are integers, cannot do memcpy() directly
auto input = interpreter->typed_tensor<float>(0);
for (int i = 0; i < number_of_pixels; i++) {
input[i] = in[i];
}
// fill new_sizes
interpreter->typed_tensor<int>(1)[0] = wanted_height;
interpreter->typed_tensor<int>(1)[1] = wanted_width;
LOG(INFO)<< "interpreter->Invoke()"
interpreter->Invoke();
auto output = interpreter->typed_tensor<float>(2);
auto output_number_of_pixels = wanted_height * wanted_width * wanted_channels;
for (int i = 0; i < output_number_of_pixels; i++) {
switch (s->input_type) {
case kTfLiteFloat32:
out[i] = (output[i] - s->input_mean) / s->input_std;
break;
case kTfLiteInt8:
out[i] = static_cast<int8_t>(output[i] - 128);
break;
case kTfLiteUInt8:
out[i] = static_cast<uint8_t>(output[i]);
break;
default:
break;
}
}
}
#endif
// Takes a file name, and loads a list of labels from it, one per line, and
// returns a vector of the strings. It pads with empty strings so the length
// of the result is a multiple of 16, because our model expects that.
TfLiteStatus ReadLabelsFile(const string& file_name,
std::vector<string>* result,
size_t* found_label_count) {
std::ifstream file(file_name);
if (!file) {
LOG(ERROR) << "Labels file " << file_name << " not found";
return kTfLiteError;
}
result->clear();
string line;
while (std::getline(file, line)) {
result->push_back(line);
}
*found_label_count = result->size();
const int padding = 16;
while (result->size() % padding) {
result->emplace_back();
}
return kTfLiteOk;
}
void PrintProfilingInfo(const profiling::ProfileEvent* e,
uint32_t subgraph_index, uint32_t op_index,
TfLiteRegistration registration) {
// output something like
// time (ms) , Node xxx, OpCode xxx, symbolic name
// 5.352, Node 5, OpCode 4, DEPTHWISE_CONV_2D
LOG(INFO) << std::fixed << std::setw(10) << std::setprecision(3)
<< (e->end_timestamp_us - e->begin_timestamp_us) / 1000.0
<< ", Subgraph " << std::setw(3) << std::setprecision(3)
<< subgraph_index << ", Node " << std::setw(3)
<< std::setprecision(3) << op_index << ", OpCode " << std::setw(3)
<< std::setprecision(3) << registration.builtin_code << ", "
<< EnumNameBuiltinOperator(
static_cast<BuiltinOperator>(registration.builtin_code));
}
/// The model used is the quantized mobilenet_v2_1.0_224_quant.tflite
void RunInference(Settings* settings)
{
std::unique_ptr<tflite::FlatBufferModel> model;
std::unique_ptr<tflite::Interpreter> interpreter;
model = tflite::FlatBufferModel::BuildFromFile(settings->model_name.c_str());
if (!model) {
LOG(ERROR) << "Failed to mmap model " << settings->model_name;
exit(-1);
}
// settings->model = model.get();
LOG(INFO) << "Loaded model " << settings->model_name;
//model->error_reporter();
//LOG(INFO) << "resolved reporter";
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder(*model, resolver)(&interpreter);
if (!interpreter) {
LOG(ERROR) << "Failed to construct interpreter";
exit(-1);
}
interpreter->SetAllowFp16PrecisionForFp32(settings->allow_fp16);
if (settings->verbose) {
LOG(INFO) << "tensors size: " << interpreter->tensors_size();
LOG(INFO) << "nodes size: " << interpreter->nodes_size();
LOG(INFO) << "inputs: " << interpreter->inputs().size();
LOG(INFO) << "input(0) name: " << interpreter->GetInputName(0);
int t_size = interpreter->tensors_size();
for (int i = 0; i < t_size; i++) {
if (interpreter->tensor(i)->name)
LOG(INFO) << i << ": " << interpreter->tensor(i)->name << ", "
<< interpreter->tensor(i)->bytes << ", "
<< interpreter->tensor(i)->type << ", "
<< interpreter->tensor(i)->params.scale << ", "
<< interpreter->tensor(i)->params.zero_point;
}
}
if (settings->number_of_threads != -1) {
interpreter->SetNumThreads(settings->number_of_threads);
}
LOG(INFO) << "load picture ";
LOG(INFO) << "load picture ";
int image_width = 224;
int image_height = 224;
int image_channels = 3;
std::vector<uint8_t> in = read_bmp(settings->input_bmp_name, &image_width,
&image_height, &image_channels, settings);
int input = interpreter->inputs()[0];
if (settings->verbose) LOG(INFO) << "input: " << input;
const std::vector<int> inputs = interpreter->inputs();
const std::vector<int> outputs = interpreter->outputs();
if (settings->verbose) {
LOG(INFO) << "number of inputs: " << inputs.size();
LOG(INFO) << "number of outputs: " << outputs.size();
}
if (interpreter->AllocateTensors() != kTfLiteOk) {
LOG(ERROR) << "Failed to allocate tensors!";
exit(-1);
}
if (settings->verbose) PrintInterpreterState(interpreter.get());
// get input dimension from the input tensor metadata
// assuming one input only
TfLiteIntArray* dims = interpreter->tensor(input)->dims;
int wanted_height = dims->data[1];
int wanted_width = dims->data[2];
int wanted_channels = dims->data[3];
settings->input_type = interpreter->tensor(input)->type;
LOG(INFO) << "wanted width, height, channels: " < interpreter1(new Interpreter);
int base_index = 0;
// two inputs: input and new_sizes
interpreter1->AddTensors(2, &base_index);
// one output
interpreter1->AddTensors(1, &base_index);
// set input and output tensors
interpreter1->SetInputs({0, 1});
interpreter1->SetOutputs({2});
// set parameters of tensors
TfLiteQuantizationParams quant;
interpreter1->SetTensorParametersReadWrite(
0, kTfLiteFloat32, "input",
{1, image_height, image_width, image_channels}, quant);
interpreter1->SetTensorParametersReadWrite(1, kTfLiteInt32, "new_size", {2},
quant);
interpreter1->SetTensorParametersReadWrite(
2, kTfLiteFloat32, "output",
{1, wanted_height, wanted_width, wanted_channels}, quant);
ops::builtin::BuiltinOpResolver resolver1;
const TfLiteRegistration* resize_op =
resolver1.FindOp(BuiltinOperator_RESIZE_BILINEAR, 1);
auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(
malloc(sizeof(TfLiteResizeBilinearParams)));
params->align_corners = false;
params->half_pixel_centers = false;
interpreter1->AddNodeWithParameters({0, 1}, {2}, nullptr, 0, params, resize_op,
nullptr);
interpreter1->AllocateTensors();
// fill input image
// in[] are integers, cannot do memcpy() directly
auto input1 = interpreter1->typed_tensor<float>(0);
for (int i = 0; i < number_of_pixels; i++) {
input1[i] = in.data()[i];
}
// fill new_sizes
interpreter1->typed_tensor<int>(1)[0] = wanted_height;
interpreter1->typed_tensor<int>(1)[1] = wanted_width;
interpreter1->Invoke();
auto outputxx = interpreter1->typed_tensor<float>(2);
auto output_number_of_pixels = wanted_height * wanted_width * wanted_channels;
LOG(INFO)<< "output_number_of_pixels@@@@@@@@@@@@@@@@@@@:" << output_number_of_pixels ;
printf("pinput11^^^^^^^^^^^^^^^^^^^^^^^");
printf("typed_tensor(0):%p\n",interpreter->typed_tensor(0));
printf("typed_tensor(171):%p\n",interpreter->typed_tensor(171));
auto input11 = interpreter->typed_tensor(171);
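// Tensor index 171 is hard-coded: for mobilenet_v2_1.0_224_quant.tflite the
// verbose dump prints "171: input", i.e. it equals interpreter->inputs()[0].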
/// Feed the resized image data into the model's input tensor
for (int i = 0; i < output_number_of_pixels; i++) {
input11[i] = static_cast<uint8_t>(outputxx[i]);
}
printf("pinput11 赋值完毕^^^^^^^^^^^^^^^^^^^^^^^");
/* switch (settings->input_type) {
case kTfLiteFloat32:
resize<float>(interpreter->typed_tensor<float>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
case kTfLiteInt8:
resize<int8_t>(interpreter->typed_tensor<int8_t>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
case kTfLiteUInt8:
resize<uint8_t>(interpreter->typed_tensor<uint8_t>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
default:
LOG(ERROR) << "cannot handle input type "
<< interpreter->tensor(input)->type << " yet";
exit(-1);
}
*/
#if 1
auto profiler = absl::make_unique<profiling::BufferedProfiler>(
    settings->max_profiling_buffer_entries);
interpreter->SetProfiler(profiler.get());
if (settings->profiling) profiler->StartProfiling();
if (settings->loop_count > 1) {
for (int i = 0; i < settings->number_of_warmup_runs; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
}
}
}
#endif
/* LOG(INFO) << "interpreter->Invoke() start ";
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
} */
struct timeval start_time, stop_time;
gettimeofday(&start_time, nullptr);
for (int i = 0; i < settings->loop_count; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
}
}
gettimeofday(&stop_time, nullptr);
LOG(INFO) << "invoked";
LOG(INFO) << "average time: "
<< (get_us(stop_time) - get_us(start_time)) /
(settings->loop_count * 1000)
<< " ms";
#if 1
if (settings->profiling) {
profiler->StopProfiling();
auto profile_events = profiler->GetProfileEvents();
for (int i = 0; i < profile_events.size(); i++) {
auto subgraph_index = profile_events[i]->extra_event_metadata;
auto op_index = profile_events[i]->event_metadata;
const auto subgraph = interpreter->subgraph(subgraph_index);
const auto node_and_registration =
subgraph->node_and_registration(op_index);
const TfLiteRegistration registration = node_and_registration->second;
PrintProfilingInfo(profile_events[i], subgraph_index, op_index,
registration);
}
}
#endif
const float threshold = 0.001f;
std::vector<std::pair<float, int>> top_results;
int output = interpreter->outputs()[0];
TfLiteIntArray* output_dims = interpreter->tensor(output)->dims;
// assume output dims to be something like (1, 1, ... ,size)
auto output_size = output_dims->data[output_dims->size - 1];
switch (interpreter->tensor(output)->type) {
case kTfLiteFloat32:
get_top_n<float>(interpreter->typed_output_tensor<float>(0), output_size,
settings->number_of_results, threshold, &top_results,
settings->input_type);
break;
case kTfLiteInt8:
get_top_n<int8_t>(interpreter->typed_output_tensor<int8_t>(0),
output_size, settings->number_of_results, threshold,
&top_results, settings->input_type);
break;
case kTfLiteUInt8:
get_top_n<uint8_t>(interpreter->typed_output_tensor<uint8_t>(0),
output_size, settings->number_of_results, threshold,
&top_results, settings->input_type);
break;
default:
LOG(ERROR) << "cannot handle output type "
<< interpreter->tensor(output)->type << " yet";
exit(-1);
}
std::vector<string> labels;
size_t label_count;
if (ReadLabelsFile(settings->labels_file_name, &labels, &label_count) !=
kTfLiteOk)
exit(-1);
for (const auto& result : top_results) {
const float confidence = result.first;
const int index = result.second;
LOG(INFO) << confidence << ": " << index << " " << labels[index];
}
}
void display_usage() {
LOG(INFO)
<< "label_image\n"
<< "--accelerated, -a: [0|1], use Android NNAPI or not\n"
<< "--allow_fp16, -f: [0|1], allow running fp32 models with fp16 or not\n"
<< "--count, -c: loop interpreter->Invoke() for certain times\n"
<< "--gl_backend, -g: [0|1]: use GL GPU Delegate on Android\n"
<< "--hexagon_delegate, -j: [0|1]: use Hexagon Delegate on Android\n"
<< "--input_mean, -b: input mean\n"
<< "--input_std, -s: input standard deviation\n"
<< "--image, -i: image_name.bmp\n"
<< "--labels, -l: labels for the model\n"
<< "--tflite_model, -m: model_name.tflite\n"
<< "--profiling, -p: [0|1], profiling or not\n"
<< "--num_results, -r: number of results to show\n"
<< "--threads, -t: number of threads\n"
<< "--verbose, -v: [0|1] print more information\n"
<< "--warmup_runs, -w: number of warmup runs\n"
<< "--xnnpack_delegate, -x [0:1]: xnnpack delegate\n";
}
int Main(int argc, char** argv) {
Settings s;
int c;
while (true) {
static struct option long_options[] = {
{"accelerated", required_argument, nullptr, 'a'},
{"allow_fp16", required_argument, nullptr, 'f'},
{"count", required_argument, nullptr, 'c'},
{"verbose", required_argument, nullptr, 'v'},
{"image", required_argument, nullptr, 'i'},
{"labels", required_argument, nullptr, 'l'},
{"tflite_model", required_argument, nullptr, 'm'},
{"profiling", required_argument, nullptr, 'p'},
{"threads", required_argument, nullptr, 't'},
{"input_mean", required_argument, nullptr, 'b'},
{"input_std", required_argument, nullptr, 's'},
{"num_results", required_argument, nullptr, 'r'},
{"max_profiling_buffer_entries", required_argument, nullptr, 'e'},
{"warmup_runs", required_argument, nullptr, 'w'},
{"gl_backend", required_argument, nullptr, 'g'},
{"hexagon_delegate", required_argument, nullptr, 'j'},
{"xnnpack_delegate", required_argument, nullptr, 'x'},
{nullptr, 0, nullptr, 0}};
/* getopt_long stores the option index here. */
int option_index = 0;
c = getopt_long(argc, argv,
"a:b:c:d:e:f:g:i:j:l:m:p:r:s:t:v:w:x:", long_options,
&option_index);
/* Detect the end of the options. */
if (c == -1) break;
switch (c) {
case 'a':
s.accel = strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'b':
s.input_mean = strtod(optarg, nullptr);
break;
case 'c':
s.loop_count =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'e':
s.max_profiling_buffer_entries =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'f':
s.allow_fp16 =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'g':
s.gl_backend =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'i':
s.input_bmp_name = optarg;
break;
case 'j':
s.hexagon_delegate =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'l':
s.labels_file_name = optarg;
break;
case 'm':
s.model_name = optarg;
break;
case 'p':
s.profiling =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'r':
s.number_of_results =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 's':
s.input_std = strtod(optarg, nullptr);
break;
case 't':
s.number_of_threads = strtol( // NOLINT(runtime/deprecated_fn)
optarg, nullptr, 10);
break;
case 'v':
s.verbose =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'w':
s.number_of_warmup_runs =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'x':
s.xnnpack_delegate =
strtol(optarg, nullptr, 10); // NOLINT(runtime/deprecated_fn)
break;
case 'h':
case '?':
/* getopt_long already printed an error message. */
display_usage();
exit(-1);
default:
exit(-1);
}
}
//delegate_providers.MergeSettingsIntoParams(s);
RunInference(&s);
return 0;
}
} // namespace label_image
} // namespace tflite
int main(int argc, char** argv) {
return tflite::label_image::Main(argc, argv);
}
cmake CMakeLists.txt
make
The generated label_image executable is the one we need.
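Before transferring it, you can sanity-check that it is a statically linked ARM binary (a hedged check):
file label_image
# expected along the lines of: ELF 32-bit LSB executable, ARM, EABI5 ..., statically linked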
The data to transfer to the dev board (4 files in total) has the following directory structure:
.
├── grace_hopper.bmp
├── label_image
├── labels_mobilenet_quant_v1_224.txt
└── model
    └── mobilenet_v2_1.0_224_quant.tflite
grace_hopper.bmp: the test image, taken from the test/label_image/testdata directory.
label_image: the executable we compiled above.
labels_mobilenet_quant_v1_224.txt: available from:
https://tensorflow.google.cn/lite/examples/image_classification/overview?hl=zh-cn
The page offers a starter image-classification download; only the label file from it is needed.
mobilenet_v2_1.0_224_quant.tflite: available from:
https://tensorflow.google.cn/lite/guide/hosted_models
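One way to push the four files to the board, assuming SSH access (the user, address, and target directory are placeholders):
scp -r grace_hopper.bmp label_image labels_mobilenet_quant_v1_224.txt model <user>@<board-ip>:~/tflite_demo/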
Log in to the dev board, change into the directory you transferred, and run the command on the first line below (in the final output, 0.717647: 653 military uniform has the highest probability, so the prediction is reasonably good):
./label_image -v 1 -m ./model/mobilenet_v2_1.0_224_quant.tflite -i ./grace_hopper.bmp -l ./labels_mobilenet_quant_v1_224.txt -t 1
INFO: Loaded model ./model/mobilenet_v2_1.0_224_quant.tflite
INFO: tensors size: 173
INFO: nodes size: 65
INFO: inputs: 1
INFO: input(0) name: input
INFO: 0: MobilenetV2/Conv/Conv2D_Fold_bias, 128, 2, 0.000265382, 0
INFO: 1: MobilenetV2/Conv/Relu6, 401408, 3, 0.0235285, 0
INFO: 2: MobilenetV2/Conv/weights_quant/FakeQuantWithMinMaxVars, 864, 3, 0.0339689, 122
INFO: 3: MobilenetV2/Conv_1/Conv2D_Fold_bias, 5120, 2, 0.000604263, 0
INFO: 4: MobilenetV2/Conv_1/Relu6, 62720, 3, 0.0235285, 0
INFO: 5: MobilenetV2/Conv_1/weights_quant/FakeQuantWithMinMaxVars, 409600, 3, 0.00516707, 125
INFO: 6: MobilenetV2/Logits/AvgPool, 1280, 3, 0.0235285, 0
INFO: 7: MobilenetV2/Logits/Conv2d_1c_1x1/BiasAdd, 1001, 3, 0.0988925, 58
INFO: 8: MobilenetV2/Logits/Conv2d_1c_1x1/Conv2D_bias, 4004, 2, 3.97886e-05, 0
INFO: 9: MobilenetV2/Logits/Conv2d_1c_1x1/weights_quant/FakeQuantWithMinMaxVars, 1281280, 3, 0.00169108, 113
INFO: 10: MobilenetV2/Logits/Squeeze_shape, 8, 2, 0, 0
INFO: 11: MobilenetV2/expanded_conv/depthwise/Relu6, 401408, 3, 0.0235285, 0
INFO: 12: MobilenetV2/expanded_conv/depthwise/depthwise_Fold_bias, 128, 2, 0.00808663, 0
INFO: 13: MobilenetV2/expanded_conv/depthwise/weights_quant/FakeQuantWithMinMaxVars, 288, 3, 0.343696, 165
INFO: 14: MobilenetV2/expanded_conv/project/Conv2D_Fold_bias, 64, 2, 0.0008793, 0
INFO: 15: MobilenetV2/expanded_conv/project/add_fold, 200704, 3, 0.354413, 129
INFO: 16: MobilenetV2/expanded_conv/project/weights_quant/FakeQuantWithMinMaxVars, 512, 3, 0.0373718, 140
INFO: 17: MobilenetV2/expanded_conv_1/depthwise/Relu6, 301056, 3, 0.0235285, 0
INFO: 18: MobilenetV2/expanded_conv_1/depthwise/depthwise_Fold_bias, 384, 2, 0.000493372, 0
INFO: 19: MobilenetV2/expanded_conv_1/depthwise/weights_quant/FakeQuantWithMinMaxVars, 864, 3, 0.0209691, 109
INFO: 20: MobilenetV2/expanded_conv_1/expand/Conv2D_Fold_bias, 384, 2, 0.00345855, 0
INFO: 21: MobilenetV2/expanded_conv_1/expand/Relu6, 1204224, 3, 0.0235285, 0
INFO: 22: MobilenetV2/expanded_conv_1/expand/weights_quant/FakeQuantWithMinMaxVars, 1536, 3, 0.00975851, 127
INFO: 23: MobilenetV2/expanded_conv_1/project/Conv2D_Fold_bias, 96, 2, 0.000530238, 0
INFO: 24: MobilenetV2/expanded_conv_1/project/add_fold, 75264, 3, 0.275834, 119
INFO: 25: MobilenetV2/expanded_conv_1/project/weights_quant/FakeQuantWithMinMaxVars, 2304, 3, 0.022536, 156
INFO: 26: MobilenetV2/expanded_conv_10/depthwise/Relu6, 75264, 3, 0.0235285, 0
INFO: 27: MobilenetV2/expanded_conv_10/depthwise/depthwise_Fold_bias, 1536, 2, 0.00073192, 0
INFO: 28: MobilenetV2/expanded_conv_10/depthwise/weights_quant/FakeQuantWithMinMaxVars, 3456, 3, 0.0311078, 143
INFO: 29: MobilenetV2/expanded_conv_10/expand/Conv2D_Fold_bias, 1536, 2, 0.00035866, 0
INFO: 30: MobilenetV2/expanded_conv_10/expand/Relu6, 75264, 3, 0.0235285, 0
INFO: 31: MobilenetV2/expanded_conv_10/expand/weights_quant/FakeQuantWithMinMaxVars, 24576, 3, 0.00162825, 131
INFO: 32: MobilenetV2/expanded_conv_10/project/Conv2D_Fold_bias, 384, 2, 0.000174965, 0
INFO: 33: MobilenetV2/expanded_conv_10/project/add_fold, 18816, 3, 0.170611, 129
INFO: 34: MobilenetV2/expanded_conv_10/project/weights_quant/FakeQuantWithMinMaxVars, 36864, 3, 0.00743631, 129
INFO: 35: MobilenetV2/expanded_conv_11/add, 18816, 3, 0.176158, 127
INFO: 36: MobilenetV2/expanded_conv_11/depthwise/Relu6, 112896, 3, 0.0235285, 0
INFO: 37: MobilenetV2/expanded_conv_11/depthwise/depthwise_Fold_bias, 2304, 2, 0.00166601, 0
INFO: 38: MobilenetV2/expanded_conv_11/depthwise/weights_quant/FakeQuantWithMinMaxVars, 5184, 3, 0.0708081, 66
INFO: 39: MobilenetV2/expanded_conv_11/expand/Conv2D_Fold_bias, 2304, 2, 0.000278264, 0
INFO: 40: MobilenetV2/expanded_conv_11/expand/Relu6, 112896, 3, 0.0235285, 0
INFO: 41: MobilenetV2/expanded_conv_11/expand/weights_quant/FakeQuantWithMinMaxVars, 55296, 3, 0.00163099, 134
INFO: 42: MobilenetV2/expanded_conv_11/project/Conv2D_Fold_bias, 384, 2, 0.000197221, 0
INFO: 43: MobilenetV2/expanded_conv_11/project/add_fold, 18816, 3, 0.123328, 127
INFO: 44: MobilenetV2/expanded_conv_11/project/weights_quant/FakeQuantWithMinMaxVars, 55296, 3, 0.00838223, 136
INFO: 45: MobilenetV2/expanded_conv_12/add, 18816, 3, 0.233401, 126
INFO: 46: MobilenetV2/expanded_conv_12/depthwise/Relu6, 112896, 3, 0.0235285, 0
INFO: 47: MobilenetV2/expanded_conv_12/depthwise/depthwise_Fold_bias, 2304, 2, 0.00175259, 0
INFO: 48: MobilenetV2/expanded_conv_12/depthwise/weights_quant/FakeQuantWithMinMaxVars, 5184, 3, 0.0744879, 159
INFO: 49: MobilenetV2/expanded_conv_12/expand/Conv2D_Fold_bias, 2304, 2, 0.000321643, 0
INFO: 50: MobilenetV2/expanded_conv_12/expand/Relu6, 112896, 3, 0.0235285, 0
INFO: 51: MobilenetV2/expanded_conv_12/expand/weights_quant/FakeQuantWithMinMaxVars, 55296, 3, 0.00182588, 138
INFO: 52: MobilenetV2/expanded_conv_12/project/Conv2D_Fold_bias, 384, 2, 0.000564274, 0
INFO: 53: MobilenetV2/expanded_conv_12/project/add_fold, 18816, 3, 0.186196, 127
INFO: 54: MobilenetV2/expanded_conv_12/project/weights_quant/FakeQuantWithMinMaxVars, 55296, 3, 0.0239826, 154
INFO: 55: MobilenetV2/expanded_conv_13/depthwise/Relu6, 28224, 3, 0.0235285, 0
INFO: 56: MobilenetV2/expanded_conv_13/depthwise/depthwise_Fold_bias, 2304, 2, 0.000358996, 0
INFO: 57: MobilenetV2/expanded_conv_13/depthwise/weights_quant/FakeQuantWithMinMaxVars, 5184, 3, 0.0152579, 92
INFO: 58: MobilenetV2/expanded_conv_13/expand/Conv2D_Fold_bias, 2304, 2, 0.000322747, 0
INFO: 59: MobilenetV2/expanded_conv_13/expand/Relu6, 112896, 3, 0.0235285, 0
INFO: 60: MobilenetV2/expanded_conv_13/expand/weights_quant/FakeQuantWithMinMaxVars, 55296, 3, 0.0013828, 123
INFO: 61: MobilenetV2/expanded_conv_13/project/Conv2D_Fold_bias, 640, 2, 0.000222296, 0
INFO: 62: MobilenetV2/expanded_conv_13/project/add_fold, 7840, 3, 0.132378, 132
INFO: 63: MobilenetV2/expanded_conv_13/project/weights_quant/FakeQuantWithMinMaxVars, 92160, 3, 0.00944795, 140
INFO: 64: MobilenetV2/expanded_conv_14/add, 7840, 3, 0.15071, 134
INFO: 65: MobilenetV2/expanded_conv_14/depthwise/Relu6, 47040, 3, 0.0235285, 0
INFO: 66: MobilenetV2/expanded_conv_14/depthwise/depthwise_Fold_bias, 3840, 2, 0.000980373, 0
INFO: 67: MobilenetV2/expanded_conv_14/depthwise/weights_quant/FakeQuantWithMinMaxVars, 8640, 3, 0.0416675, 147
INFO: 68: MobilenetV2/expanded_conv_14/expand/Conv2D_Fold_bias, 3840, 2, 0.000267696, 0
INFO: 69: MobilenetV2/expanded_conv_14/expand/Relu6, 47040, 3, 0.0235285, 0
INFO: 70: MobilenetV2/expanded_conv_14/expand/weights_quant/FakeQuantWithMinMaxVars, 153600, 3, 0.00202221, 135
INFO: 71: MobilenetV2/expanded_conv_14/project/Conv2D_Fold_bias, 640, 2, 0.000185844, 0
INFO: 72: MobilenetV2/expanded_conv_14/project/add_fold, 7840, 3, 0.100457, 129
INFO: 73: MobilenetV2/expanded_conv_14/project/weights_quant/FakeQuantWithMinMaxVars, 153600, 3, 0.0078987, 139
INFO: 74: MobilenetV2/expanded_conv_15/add, 7840, 3, 0.210051, 131
INFO: 75: MobilenetV2/expanded_conv_15/depthwise/Relu6, 47040, 3, 0.0235285, 0
INFO: 76: MobilenetV2/expanded_conv_15/depthwise/depthwise_Fold_bias, 3840, 2, 0.00100747, 0
INFO: 77: MobilenetV2/expanded_conv_15/depthwise/weights_quant/FakeQuantWithMinMaxVars, 8640, 3, 0.0428194, 102
INFO: 78: MobilenetV2/expanded_conv_15/expand/Conv2D_Fold_bias, 3840, 2, 0.000240298, 0
INFO: 79: MobilenetV2/expanded_conv_15/expand/Relu6, 47040, 3, 0.0235285, 0
INFO: 80: MobilenetV2/expanded_conv_15/expand/weights_quant/FakeQuantWithMinMaxVars, 153600, 3, 0.00159444, 127
INFO: 81: MobilenetV2/expanded_conv_15/project/Conv2D_Fold_bias, 640, 2, 0.000869944, 0
INFO: 82: MobilenetV2/expanded_conv_15/project/add_fold, 7840, 3, 0.169606, 133
INFO: 83: MobilenetV2/expanded_conv_15/project/weights_quant/FakeQuantWithMinMaxVars, 153600, 3, 0.0369741, 131
INFO: 84: MobilenetV2/expanded_conv_16/depthwise/Relu6, 47040, 3, 0.0235285, 0
INFO: 85: MobilenetV2/expanded_conv_16/depthwise/depthwise_Fold_bias, 3840, 2, 0.00387191, 0
INFO: 86: MobilenetV2/expanded_conv_16/depthwise/weights_quant/FakeQuantWithMinMaxVars, 8640, 3, 0.164563, 201
INFO: 87: MobilenetV2/expanded_conv_16/expand/Conv2D_Fold_bias, 3840, 2, 0.000429939, 0
INFO: 88: MobilenetV2/expanded_conv_16/expand/Relu6, 47040, 3, 0.0235285, 0
INFO: 89: MobilenetV2/expanded_conv_16/expand/weights_quant/FakeQuantWithMinMaxVars, 153600, 3, 0.00204683, 135
INFO: 90: MobilenetV2/expanded_conv_16/project/Conv2D_Fold_bias, 1280, 2, 0.000188446, 0
INFO: 91: MobilenetV2/expanded_conv_16/project/add_fold, 15680, 3, 0.116945, 130
INFO: 92: MobilenetV2/expanded_conv_16/project/weights_quant/FakeQuantWithMinMaxVars, 307200, 3, 0.00800929, 111
INFO: 93: MobilenetV2/expanded_conv_2/add, 75264, 3, 0.432169, 133
INFO: 94: MobilenetV2/expanded_conv_2/depthwise/Relu6, 451584, 3, 0.0235285, 0
INFO: 95: MobilenetV2/expanded_conv_2/depthwise/depthwise_Fold_bias, 576, 2, 0.00399559, 0
INFO: 96: MobilenetV2/expanded_conv_2/depthwise/weights_quant/FakeQuantWithMinMaxVars, 1296, 3, 0.169819, 52
INFO: 97: MobilenetV2/expanded_conv_2/expand/Conv2D_Fold_bias, 576, 2, 0.00100837, 0
INFO: 98: MobilenetV2/expanded_conv_2/expand/Relu6, 451584, 3, 0.0235285, 0
INFO: 99: MobilenetV2/expanded_conv_2/expand/weights_quant/FakeQuantWithMinMaxVars, 3456, 3, 0.0036557, 144
INFO: 100: MobilenetV2/expanded_conv_2/project/Conv2D_Fold_bias, 96, 2, 0.000644889, 0
INFO: 101: MobilenetV2/expanded_conv_2/project/add_fold, 75264, 3, 0.401493, 136
INFO: 102: MobilenetV2/expanded_conv_2/project/weights_quant/FakeQuantWithMinMaxVars, 3456, 3, 0.0274089, 122
INFO: 103: MobilenetV2/expanded_conv_3/depthwise/Relu6, 112896, 3, 0.0235285, 0
INFO: 104: MobilenetV2/expanded_conv_3/depthwise/depthwise_Fold_bias, 576, 2, 0.000404757, 0
INFO: 105: MobilenetV2/expanded_conv_3/depthwise/weights_quant/FakeQuantWithMinMaxVars, 1296, 3, 0.0172029, 143
INFO: 106: MobilenetV2/expanded_conv_3/expand/Conv2D_Fold_bias, 576, 2, 0.00129602, 0
INFO: 107: MobilenetV2/expanded_conv_3/expand/Relu6, 451584, 3, 0.0235285, 0
INFO: 108: MobilenetV2/expanded_conv_3/expand/weights_quant/FakeQuantWithMinMaxVars, 3456, 3, 0.00299887, 104
INFO: 109: MobilenetV2/expanded_conv_3/project/Conv2D_Fold_bias, 128, 2, 0.00039633, 0
INFO: 110: MobilenetV2/expanded_conv_3/project/add_fold, 25088, 3, 0.218362, 127
INFO: 111: MobilenetV2/expanded_conv_3/project/weights_quant/FakeQuantWithMinMaxVars, 4608, 3, 0.0168447, 111
INFO: 112: MobilenetV2/expanded_conv_4/add, 25088, 3, 0.25969, 130
INFO: 113: MobilenetV2/expanded_conv_4/depthwise/Relu6, 150528, 3, 0.0235285, 0
INFO: 114: MobilenetV2/expanded_conv_4/depthwise/depthwise_Fold_bias, 768, 2, 0.00153525, 0
INFO: 115: MobilenetV2/expanded_conv_4/depthwise/weights_quant/FakeQuantWithMinMaxVars, 1728, 3, 0.0652507, 118
INFO: 116: MobilenetV2/expanded_conv_4/expand/Conv2D_Fold_bias, 768, 2, 0.000420222, 0
INFO: 117: MobilenetV2/expanded_conv_4/expand/Relu6, 150528, 3, 0.0235285, 0
INFO: 118: MobilenetV2/expanded_conv_4/expand/weights_quant/FakeQuantWithMinMaxVars, 6144, 3, 0.00192442, 128
INFO: 119: MobilenetV2/expanded_conv_4/project/Conv2D_Fold_bias, 128, 2, 0.000448521, 0
INFO: 120: MobilenetV2/expanded_conv_4/project/add_fold, 25088, 3, 0.227942, 121
INFO: 121: MobilenetV2/expanded_conv_4/project/weights_quant/FakeQuantWithMinMaxVars, 6144, 3, 0.0190629, 146
INFO: 122: MobilenetV2/expanded_conv_5/add, 25088, 3, 0.331715, 124
INFO: 123: MobilenetV2/expanded_conv_5/depthwise/Relu6, 150528, 3, 0.0235285, 0
INFO: 124: MobilenetV2/expanded_conv_5/depthwise/depthwise_Fold_bias, 768, 2, 0.00186105, 0
INFO: 125: MobilenetV2/expanded_conv_5/depthwise/weights_quant/FakeQuantWithMinMaxVars, 1728, 3, 0.0790978, 95
INFO: 126: MobilenetV2/expanded_conv_5/expand/Conv2D_Fold_bias, 768, 2, 0.000354455, 0
INFO: 127: MobilenetV2/expanded_conv_5/expand/Relu6, 150528, 3, 0.0235285, 0
INFO: 128: MobilenetV2/expanded_conv_5/expand/weights_quant/FakeQuantWithMinMaxVars, 6144, 3, 0.00136492, 135
INFO: 129: MobilenetV2/expanded_conv_5/project/Conv2D_Fold_bias, 128, 2, 0.000430409, 0
INFO: 130: MobilenetV2/expanded_conv_5/project/add_fold, 25088, 3, 0.257749, 124
INFO: 131: MobilenetV2/expanded_conv_5/project/weights_quant/FakeQuantWithMinMaxVars, 6144, 3, 0.0182931, 128
INFO: 132: MobilenetV2/expanded_conv_6/depthwise/Relu6, 37632, 3, 0.0235285, 0
INFO: 133: MobilenetV2/expanded_conv_6/depthwise/depthwise_Fold_bias, 768, 2, 0.000237353, 0
INFO: 134: MobilenetV2/expanded_conv_6/depthwise/weights_quant/FakeQuantWithMinMaxVars, 1728, 3, 0.0100879, 127
INFO: 135: MobilenetV2/expanded_conv_6/expand/Conv2D_Fold_bias, 768, 2, 0.000635912, 0
INFO: 136: MobilenetV2/expanded_conv_6/expand/Relu6, 150528, 3, 0.0235285, 0
INFO: 137: MobilenetV2/expanded_conv_6/expand/weights_quant/FakeQuantWithMinMaxVars, 6144, 3, 0.00191704, 127
INFO: 138: MobilenetV2/expanded_conv_6/project/Conv2D_Fold_bias, 256, 2, 0.000343546, 0
INFO: 139: MobilenetV2/expanded_conv_6/project/add_fold, 12544, 3, 0.185405, 126
INFO: 140: MobilenetV2/expanded_conv_6/project/weights_quant/FakeQuantWithMinMaxVars, 12288, 3, 0.0146013, 147
INFO: 141: MobilenetV2/expanded_conv_7/add, 12544, 3, 0.18911, 122
INFO: 142: MobilenetV2/expanded_conv_7/depthwise/Relu6, 75264, 3, 0.0235285, 0
INFO: 143: MobilenetV2/expanded_conv_7/depthwise/depthwise_Fold_bias, 1536, 2, 0.00143352, 0
INFO: 144: MobilenetV2/expanded_conv_7/depthwise/weights_quant/FakeQuantWithMinMaxVars, 3456, 3, 0.0609271, 110
INFO: 145: MobilenetV2/expanded_conv_7/expand/Conv2D_Fold_bias, 1536, 2, 0.0002881, 0
INFO: 146: MobilenetV2/expanded_conv_7/expand/Relu6, 75264, 3, 0.0235285, 0
INFO: 147: MobilenetV2/expanded_conv_7/expand/weights_quant/FakeQuantWithMinMaxVars, 24576, 3, 0.00155389, 125
INFO: 148: MobilenetV2/expanded_conv_7/project/Conv2D_Fold_bias, 256, 2, 0.000394877, 0
INFO: 149: MobilenetV2/expanded_conv_7/project/add_fold, 12544, 3, 0.172635, 109
INFO: 150: MobilenetV2/expanded_conv_7/project/weights_quant/FakeQuantWithMinMaxVars, 24576, 3, 0.0167829, 124
INFO: 151: MobilenetV2/expanded_conv_8/add, 12544, 3, 0.199681, 124
INFO: 152: MobilenetV2/expanded_conv_8/depthwise/Relu6, 75264, 3, 0.0235285, 0
INFO: 153: MobilenetV2/expanded_conv_8/depthwise/depthwise_Fold_bias, 1536, 2, 0.00123308, 0
INFO: 154: MobilenetV2/expanded_conv_8/depthwise/weights_quant/FakeQuantWithMinMaxVars, 3456, 3, 0.0524078, 133
INFO: 155: MobilenetV2/expanded_conv_8/expand/Conv2D_Fold_bias, 1536, 2, 0.000278048, 0
INFO: 156: MobilenetV2/expanded_conv_8/expand/Relu6, 75264, 3, 0.0235285, 0
INFO: 157: MobilenetV2/expanded_conv_8/expand/weights_quant/FakeQuantWithMinMaxVars, 24576, 3, 0.0014703, 134
INFO: 158: MobilenetV2/expanded_conv_8/project/Conv2D_Fold_bias, 256, 2, 0.000303476, 0
INFO: 159: MobilenetV2/expanded_conv_8/project/add_fold, 12544, 3, 0.147155, 123
INFO: 160: MobilenetV2/expanded_conv_8/project/weights_quant/FakeQuantWithMinMaxVars, 24576, 3, 0.0128983, 125
INFO: 161: MobilenetV2/expanded_conv_9/add, 12544, 3, 0.220273, 120
INFO: 162: MobilenetV2/expanded_conv_9/depthwise/Relu6, 75264, 3, 0.0235285, 0
INFO: 163: MobilenetV2/expanded_conv_9/depthwise/depthwise_Fold_bias, 1536, 2, 0.000959465, 0
INFO: 164: MobilenetV2/expanded_conv_9/depthwise/weights_quant/FakeQuantWithMinMaxVars, 3456, 3, 0.0407789, 155
INFO: 165: MobilenetV2/expanded_conv_9/expand/Conv2D_Fold_bias, 1536, 2, 0.000274232, 0
INFO: 166: MobilenetV2/expanded_conv_9/expand/Relu6, 75264, 3, 0.0235285, 0
INFO: 167: MobilenetV2/expanded_conv_9/expand/weights_quant/FakeQuantWithMinMaxVars, 24576, 3, 0.00137335, 127
INFO: 168: MobilenetV2/expanded_conv_9/project/Conv2D_Fold_bias, 256, 2, 0.000460252, 0
INFO: 169: MobilenetV2/expanded_conv_9/project/add_fold, 12544, 3, 0.156276, 122
INFO: 170: MobilenetV2/expanded_conv_9/project/weights_quant/FakeQuantWithMinMaxVars, 24576, 3, 0.0195615, 144
INFO: 171: input, 150528, 3, 0.0078125, 128
INFO: 172: output, 1001, 3, 0.0988925, 58
INFO: load picture
INFO: load picture
INFO: len: 940650
INFO: width, height, channels: 517, 606, 3
INFO: input: 171
INFO: number of inputs: 1
INFO: number of outputs: 1
Interpreter has 1 subgraphs.
-----------Subgraph-0 has 174 tensors and 65 nodes------------
1 Inputs: [171] -> 150528B (0.14MB)
1 Outputs: [172] -> 1001B (0.00MB)
Tensor ID Name Type AllocType Size (Bytes/MB) Shape MemAddr-Offset
Tensor 0 MobilenetV2/Conv/Conv2... kTfLiteInt32 kTfLiteMmapRo 128 / 0.00 [32] [3477068, 3477196)
Tensor 1 MobilenetV2/Conv/Relu6 kTfLiteUInt8 kTfLiteArenaRw 401408 / 0.38 [1,112,112,32] [150528, 551936)
Tensor 2 MobilenetV2/Conv/weigh... kTfLiteUInt8 kTfLiteMmapRo 864 / 0.00 [32,3,3,3] [3476180, 3477044)
Tensor 3 MobilenetV2/Conv_1/Con... kTfLiteInt32 kTfLiteMmapRo 5120 / 0.00 [1280] [3535656, 3540776)
Tensor 4 MobilenetV2/Conv_1/Relu6 kTfLiteUInt8 kTfLiteArenaRw 62720 / 0.06 [1,7,7,1280] [150528, 213248)
Tensor 5 MobilenetV2/Conv_1/wei... kTfLiteUInt8 kTfLiteMmapRo 409600 / 0.39 [1280,1,1,320] [2696436, 3106036)
Tensor 6 MobilenetV2/Logits/Avg... kTfLiteUInt8 kTfLiteArenaRw 1280 / 0.00 [1,1,1,1280] [213248, 214528)
Tensor 7 MobilenetV2/Logits/Con... kTfLiteUInt8 kTfLiteArenaRw 1001 / 0.00 [1,1,1,1001] [150528, 151529)
Tensor 8 MobilenetV2/Logits/Con... kTfLiteInt32 kTfLiteMmapRo 4004 / 0.00 [1001] [3540788, 3544792)
Tensor 9 MobilenetV2/Logits/Con... kTfLiteUInt8 kTfLiteMmapRo 1281280 / 1.22 [1001,1,1,1280] [1275816, 2557096)
Tensor 10 MobilenetV2/Logits/Squ... kTfLiteInt32 kTfLiteMmapRo 8 / 0.00 [2] [3544804, 3544812)
Tensor 11 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 401408 / 0.38 [1,112,112,32] [551936, 953344)
Tensor 12 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 128 / 0.00 [32] [3477208, 3477336)
Tensor 13 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 288 / 0.00 [1,3,3,32] [1274980, 1275268)
Tensor 14 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 64 / 0.00 [16] [3477348, 3477412)
Tensor 15 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 200704 / 0.19 [1,112,112,16] [1354752, 1555456)
Tensor 16 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 512 / 0.00 [16,1,1,32] [1275292, 1275804)
Tensor 17 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 301056 / 0.29 [1,56,56,96] [1354752, 1655808)
Tensor 18 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 384 / 0.00 [96] [3477424, 3477808)
Tensor 19 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 864 / 0.00 [1,3,3,96] [831076, 831940)
Tensor 20 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 384 / 0.00 [96] [3475784, 3476168)
Tensor 21 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 1204224 / 1.15 [1,112,112,96] [150528, 1354752)
Tensor 22 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 1536 / 0.00 [96,1,1,16] [1273432, 1274968)
Tensor 23 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 96 / 0.00 [24] [3477820, 3477916)
Tensor 24 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,56,56,24] [1053696, 1128960)
Tensor 25 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 2304 / 0.00 [24,1,1,96] [3418160, 3420464)
Tensor 26 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,14,14,384] [225792, 301056)
Tensor 27 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1536 / 0.00 [384] [2602780, 2604316)
Tensor 28 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 3456 / 0.00 [1,3,3,384] [2628920, 2632376)
Tensor 29 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1536 / 0.00 [384] [3492664, 3494200)
Tensor 30 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,14,14,384] [150528, 225792)
Tensor 31 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 24576 / 0.02 [384,1,1,64] [1170072, 1194648)
Tensor 32 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 384 / 0.00 [96] [3494212, 3494596)
Tensor 33 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 18816 / 0.02 [1,14,14,96] [376320, 395136)
Tensor 34 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 36864 / 0.04 [96,1,1,384] [1236556, 1273420)
Tensor 35 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 18816 / 0.02 [1,14,14,96] [395136, 413952)
Tensor 36 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 112896 / 0.11 [1,14,14,576] [263424, 376320)
Tensor 37 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 2304 / 0.00 [576] [3496924, 3499228)
Tensor 38 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 5184 / 0.00 [1,3,3,576] [2660484, 2665668)
Tensor 39 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 2304 / 0.00 [576] [3494608, 3496912)
Tensor 40 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 112896 / 0.11 [1,14,14,576] [150528, 263424)
Tensor 41 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 55296 / 0.05 [576,1,1,96] [831952, 887248)
Tensor 42 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 384 / 0.00 [96] [3499240, 3499624)
Tensor 43 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 18816 / 0.02 [1,14,14,96] [150528, 169344)
Tensor 44 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 55296 / 0.05 [96,1,1,576] [3420476, 3475772)
Tensor 45 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 18816 / 0.02 [1,14,14,96] [263424, 282240)
Tensor 46 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 112896 / 0.11 [1,14,14,576] [263424, 376320)
Tensor 47 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 2304 / 0.00 [576] [3501952, 3504256)
Tensor 48 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 5184 / 0.00 [1,3,3,576] [887260, 892444)
Tensor 49 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 2304 / 0.00 [576] [3499636, 3501940)
Tensor 50 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 112896 / 0.11 [1,14,14,576] [150528, 263424)
Tensor 51 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 55296 / 0.05 [576,1,1,96] [1114764, 1170060)
Tensor 52 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 384 / 0.00 [96] [3504268, 3504652)
Tensor 53 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 18816 / 0.02 [1,14,14,96] [376320, 395136)
Tensor 54 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 55296 / 0.05 [96,1,1,576] [775768, 831064)
Tensor 55 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 28224 / 0.03 [1,7,7,576] [263424, 291648)
Tensor 56 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 2304 / 0.00 [576] [3506980, 3509284)
Tensor 57 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 5184 / 0.00 [1,3,3,576] [715260, 720444)
Tensor 58 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 2304 / 0.00 [576] [3504664, 3506968)
Tensor 59 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 112896 / 0.11 [1,14,14,576] [150528, 263424)
Tensor 60 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 55296 / 0.05 [576,1,1,96] [720460, 775756)
Tensor 61 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 640 / 0.00 [160] [3509296, 3509936)
Tensor 62 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 7840 / 0.01 [1,7,7,160] [244608, 252448)
Tensor 63 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 92160 / 0.09 [160,1,1,576] [623088, 715248)
Tensor 64 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 7840 / 0.01 [1,7,7,160] [252480, 260320)
Tensor 65 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 47040 / 0.04 [1,7,7,960] [197568, 244608)
Tensor 66 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 3840 / 0.00 [960] [3513800, 3517640)
Tensor 67 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 8640 / 0.01 [1,3,3,960] [1227904, 1236544)
Tensor 68 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 3840 / 0.00 [960] [3509948, 3513788)
Tensor 69 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 47040 / 0.04 [1,7,7,960] [150528, 197568)
Tensor 70 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 153600 / 0.15 [960,1,1,160] [469476, 623076)
Tensor 71 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 640 / 0.00 [160] [3517652, 3518292)
Tensor 72 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 7840 / 0.01 [1,7,7,160] [150528, 158368)
Tensor 73 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 153600 / 0.15 [160,1,1,960] [892456, 1046056)
Tensor 74 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 7840 / 0.01 [1,7,7,160] [197568, 205408)
Tensor 75 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 47040 / 0.04 [1,7,7,960] [197568, 244608)
Tensor 76 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 3840 / 0.00 [960] [3522156, 3525996)
Tensor 77 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 8640 / 0.01 [1,3,3,960] [2557116, 2565756)
Tensor 78 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 3840 / 0.00 [960] [3518304, 3522144)
Tensor 79 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 47040 / 0.04 [1,7,7,960] [150528, 197568)
Tensor 80 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 153600 / 0.15 [960,1,1,160] [3264524, 3418124)
Tensor 81 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 640 / 0.00 [160] [3526008, 3526648)
Tensor 82 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 7840 / 0.01 [1,7,7,160] [244608, 252448)
Tensor 83 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 153600 / 0.15 [160,1,1,960] [3110124, 3263724)
Tensor 84 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 47040 / 0.04 [1,7,7,960] [197568, 244608)
Tensor 85 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 3840 / 0.00 [960] [3530512, 3534352)
Tensor 86 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 8640 / 0.01 [1,3,3,960] [307212, 315852)
Tensor 87 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 3840 / 0.00 [960] [3526660, 3530500)
Tensor 88 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 47040 / 0.04 [1,7,7,960] [150528, 197568)
Tensor 89 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 153600 / 0.15 [960,1,1,160] [315864, 469464)
Tensor 90 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1280 / 0.00 [320] [3534364, 3535644)
Tensor 91 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 15680 / 0.01 [1,7,7,320] [244608, 260288)
Tensor 92 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 307200 / 0.29 [320,1,1,960] [0, 307200)
Tensor 93 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,56,56,24] [602112, 677376)
Tensor 94 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 451584 / 0.43 [1,56,56,144] [602112, 1053696)
Tensor 95 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 576 / 0.00 [144] [3478516, 3479092)
Tensor 96 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 1296 / 0.00 [1,3,3,144] [1223128, 1224424)
Tensor 97 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 576 / 0.00 [144] [3477928, 3478504)
Tensor 98 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 451584 / 0.43 [1,56,56,144] [150528, 602112)
Tensor 99 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 3456 / 0.00 [144,1,1,24] [1224436, 1227892)
Tensor 100 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 96 / 0.00 [24] [3479104, 3479200)
Tensor 101 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,56,56,24] [150528, 225792)
Tensor 102 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 3456 / 0.00 [24,1,1,144] [1219660, 1223116)
Tensor 103 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 112896 / 0.11 [1,28,28,144] [602112, 715008)
Tensor 104 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 576 / 0.00 [144] [3479212, 3479788)
Tensor 105 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 1296 / 0.00 [1,3,3,144] [1218352, 1219648)
Tensor 106 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 576 / 0.00 [144] [3106060, 3106636)
Tensor 107 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 451584 / 0.43 [1,56,56,144] [150528, 602112)
Tensor 108 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 3456 / 0.00 [144,1,1,24] [1214880, 1218336)
Tensor 109 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 128 / 0.00 [32] [3479800, 3479928)
Tensor 110 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 25088 / 0.02 [1,28,28,32] [451584, 476672)
Tensor 111 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 4608 / 0.00 [32,1,1,144] [1210260, 1214868)
Tensor 112 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 25088 / 0.02 [1,28,28,32] [476672, 501760)
Tensor 113 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 150528 / 0.14 [1,28,28,192] [301056, 451584)
Tensor 114 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 768 / 0.00 [192] [3480720, 3481488)
Tensor 115 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 1728 / 0.00 [1,3,3,192] [1202364, 1204092)
Tensor 116 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 768 / 0.00 [192] [3479940, 3480708)
Tensor 117 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 150528 / 0.14 [1,28,28,192] [150528, 301056)
Tensor 118 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 6144 / 0.01 [192,1,1,32] [1204104, 1210248)
Tensor 119 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 128 / 0.00 [32] [3481500, 3481628)
Tensor 120 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 25088 / 0.02 [1,28,28,32] [150528, 175616)
Tensor 121 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 6144 / 0.01 [32,1,1,192] [2665684, 2671828)
Tensor 122 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 25088 / 0.02 [1,28,28,32] [301056, 326144)
Tensor 123 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 150528 / 0.14 [1,28,28,192] [301056, 451584)
Tensor 124 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 768 / 0.00 [192] [3481640, 3482408)
Tensor 125 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 1728 / 0.00 [1,3,3,192] [1113024, 1114752)
Tensor 126 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 768 / 0.00 [192] [3263736, 3264504)
Tensor 127 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 150528 / 0.14 [1,28,28,192] [150528, 301056)
Tensor 128 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 6144 / 0.01 [192,1,1,32] [1196208, 1202352)
Tensor 129 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 128 / 0.00 [32] [3482420, 3482548)
Tensor 130 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 25088 / 0.02 [1,28,28,32] [451584, 476672)
Tensor 131 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 6144 / 0.01 [32,1,1,192] [2572016, 2578160)
Tensor 132 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 37632 / 0.04 [1,14,14,192] [301056, 338688)
Tensor 133 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 768 / 0.00 [192] [3483340, 3484108)
Tensor 134 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 1728 / 0.00 [1,3,3,192] [1098980, 1100708)
Tensor 135 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 768 / 0.00 [192] [3482560, 3483328)
Tensor 136 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 150528 / 0.14 [1,28,28,192] [150528, 301056)
Tensor 137 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 6144 / 0.01 [192,1,1,32] [2565836, 2571980)
Tensor 138 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 256 / 0.00 [64] [1098712, 1098968)
Tensor 139 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 12544 / 0.01 [1,14,14,64] [338688, 351232)
Tensor 140 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 12288 / 0.01 [64,1,1,192] [1100720, 1113008)
Tensor 141 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 12544 / 0.01 [1,14,14,64] [313600, 326144)
Tensor 142 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,14,14,384] [225792, 301056)
Tensor 143 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1536 / 0.00 [384] [1194660, 1196196)
Tensor 144 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 3456 / 0.00 [1,3,3,384] [1095244, 1098700)
Tensor 145 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1536 / 0.00 [384] [3484120, 3485656)
Tensor 146 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,14,14,384] [150528, 225792)
Tensor 147 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 24576 / 0.02 [384,1,1,64] [2671844, 2696420)
Tensor 148 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 256 / 0.00 [64] [3485668, 3485924)
Tensor 149 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 12544 / 0.01 [1,14,14,64] [301056, 313600)
Tensor 150 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 24576 / 0.02 [64,1,1,384] [1070656, 1095232)
Tensor 151 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 12544 / 0.01 [1,14,14,64] [326144, 338688)
Tensor 152 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,14,14,384] [225792, 301056)
Tensor 153 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1536 / 0.00 [384] [3487484, 3489020)
Tensor 154 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 3456 / 0.00 [1,3,3,384] [3106652, 3110108)
Tensor 155 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1536 / 0.00 [384] [3485936, 3487472)
Tensor 156 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,14,14,384] [150528, 225792)
Tensor 157 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 24576 / 0.02 [384,1,1,64] [1046068, 1070644)
Tensor 158 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 256 / 0.00 [64] [3489032, 3489288)
Tensor 159 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 12544 / 0.01 [1,14,14,64] [301056, 313600)
Tensor 160 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 24576 / 0.02 [64,1,1,384] [2604328, 2628904)
Tensor 161 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 12544 / 0.01 [1,14,14,64] [313600, 326144)
Tensor 162 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,14,14,384] [225792, 301056)
Tensor 163 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1536 / 0.00 [384] [3490848, 3492384)
Tensor 164 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 3456 / 0.00 [1,3,3,384] [2632392, 2635848)
Tensor 165 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 1536 / 0.00 [384] [3489300, 3490836)
Tensor 166 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 75264 / 0.07 [1,14,14,384] [150528, 225792)
Tensor 167 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 24576 / 0.02 [384,1,1,64] [2635868, 2660444)
Tensor 168 MobilenetV2/expanded_c... kTfLiteInt32 kTfLiteMmapRo 256 / 0.00 [64] [3492396, 3492652)
Tensor 169 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteArenaRw 12544 / 0.01 [1,14,14,64] [301056, 313600)
Tensor 170 MobilenetV2/expanded_c... kTfLiteUInt8 kTfLiteMmapRo 24576 / 0.02 [64,1,1,384] [2578188, 2602764)
Tensor 171 input kTfLiteUInt8 kTfLiteArenaRw 150528 / 0.14 [1,224,224,3] [0, 150528)
Tensor 172 output kTfLiteUInt8 kTfLiteArenaRw 1001 / 0.00 [1,1001] [151552, 152553)
Tensor 173 (nil) kTfLiteUInt8 kTfLiteArenaRw 338688 / 0.32 [1,112,112,27] [551936, 890624)
kTfLiteArenaRw Info:
Tensor 21 has the max size 1204224 bytes (1.148 MB).
This memory arena is estimated as[0xf7107440, 0xf6f73040), taking 1655808 bytes (1.579 MB).
One possible set of tensors that have non-overlapping memory spaces with each other, and they take up the whole arena:
Tensor 171 -> 21 -> 17.
kTfLiteArenaRwPersistent Info: not holding any allocation.
kTfLiteMmapRo Info:
Tensor 9 has the max size 1281280 bytes (1.222 MB).
This memory arena is estimated as[0xf7635a04, 0xf72d4318), taking 3544812 bytes (3.381 MB).
One possible set of tensors that have non-overlapping memory spaces with each other, and they take up the whole arena:
Tensor 92 -> 86 -> 89 -> 70 -> 63 -> 57 -> 60 -> 54 -> 19 -> 41 -> 48 -> 73 -> 157 -> 150 -> 144 -> 138 -> 134 -> 140 -> 125 -> 51 -> 31 -> 143 -> 128 -> 115 -> 118 -> 111 -> 108 -> 105 -> 102 -> 96 -> 99 -> 67 -> 34 -> 22 -> 13 -> 16 -> 9 -> 77 -> 137 -> 131 -> 170 -> 27 -> 160 -> 28 -> 164 -> 167 -> 38 -> 121 -> 147 -> 5 -> 106 -> 154 -> 83 -> 126 -> 80 -> 25 -> 44 -> 20 -> 2 -> 0 -> 12 -> 14 -> 18 -> 23 -> 97 -> 95 -> 100 -> 104 -> 109 -> 116 -> 114 -> 119 -> 124 -> 129 -> 135 -> 133 -> 145 -> 148 -> 155 -> 153 -> 158 -> 165 -> 163 -> 168 -> 29 -> 32 -> 39 -> 37 -> 42 -> 49 -> 47 -> 52 -> 58 -> 56 -> 61 -> 68 -> 66 -> 71 -> 78 -> 76 -> 81 -> 87 -> 85 -> 90 -> 3 -> 8 -> 10.
Node 0 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[171,2,0] -> 151520B (0.14MB)
1 Output Tensors:[1] -> 401408B (0.38MB)
1 Temporary Tensors:[173] -> 338688B (0.32MB)
Node 1 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[1,13,12] -> 401824B (0.38MB)
1 Output Tensors:[11] -> 401408B (0.38MB)
Node 2 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[11,16,14] -> 401984B (0.38MB)
1 Output Tensors:[15] -> 200704B (0.19MB)
Node 3 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[15,22,20] -> 202624B (0.19MB)
1 Output Tensors:[21] -> 1204224B (1.15MB)
Node 4 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[21,19,18] -> 1205472B (1.15MB)
1 Output Tensors:[17] -> 301056B (0.29MB)
Node 5 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[17,25,23] -> 303456B (0.29MB)
1 Output Tensors:[24] -> 75264B (0.07MB)
Node 6 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[24,99,97] -> 79296B (0.08MB)
1 Output Tensors:[98] -> 451584B (0.43MB)
Node 7 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[98,96,95] -> 453456B (0.43MB)
1 Output Tensors:[94] -> 451584B (0.43MB)
Node 8 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[94,102,100] -> 455136B (0.43MB)
1 Output Tensors:[101] -> 75264B (0.07MB)
Node 9 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[101,24] -> 150528B (0.14MB)
1 Output Tensors:[93] -> 75264B (0.07MB)
Node 10 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[93,108,106] -> 79296B (0.08MB)
1 Output Tensors:[107] -> 451584B (0.43MB)
Node 11 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[107,105,104] -> 453456B (0.43MB)
1 Output Tensors:[103] -> 112896B (0.11MB)
Node 12 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[103,111,109] -> 117632B (0.11MB)
1 Output Tensors:[110] -> 25088B (0.02MB)
Node 13 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[110,118,116] -> 32000B (0.03MB)
1 Output Tensors:[117] -> 150528B (0.14MB)
Node 14 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[117,115,114] -> 153024B (0.15MB)
1 Output Tensors:[113] -> 150528B (0.14MB)
Node 15 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[113,121,119] -> 156800B (0.15MB)
1 Output Tensors:[120] -> 25088B (0.02MB)
Node 16 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[120,110] -> 50176B (0.05MB)
1 Output Tensors:[112] -> 25088B (0.02MB)
Node 17 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[112,128,126] -> 32000B (0.03MB)
1 Output Tensors:[127] -> 150528B (0.14MB)
Node 18 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[127,125,124] -> 153024B (0.15MB)
1 Output Tensors:[123] -> 150528B (0.14MB)
Node 19 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[123,131,129] -> 156800B (0.15MB)
1 Output Tensors:[130] -> 25088B (0.02MB)
Node 20 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[130,112] -> 50176B (0.05MB)
1 Output Tensors:[122] -> 25088B (0.02MB)
Node 21 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[122,137,135] -> 32000B (0.03MB)
1 Output Tensors:[136] -> 150528B (0.14MB)
Node 22 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[136,134,133] -> 153024B (0.15MB)
1 Output Tensors:[132] -> 37632B (0.04MB)
Node 23 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[132,140,138] -> 50176B (0.05MB)
1 Output Tensors:[139] -> 12544B (0.01MB)
Node 24 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[139,147,145] -> 38656B (0.04MB)
1 Output Tensors:[146] -> 75264B (0.07MB)
Node 25 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[146,144,143] -> 80256B (0.08MB)
1 Output Tensors:[142] -> 75264B (0.07MB)
Node 26 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[142,150,148] -> 100096B (0.10MB)
1 Output Tensors:[149] -> 12544B (0.01MB)
Node 27 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[149,139] -> 25088B (0.02MB)
1 Output Tensors:[141] -> 12544B (0.01MB)
Node 28 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[141,157,155] -> 38656B (0.04MB)
1 Output Tensors:[156] -> 75264B (0.07MB)
Node 29 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[156,154,153] -> 80256B (0.08MB)
1 Output Tensors:[152] -> 75264B (0.07MB)
Node 30 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[152,160,158] -> 100096B (0.10MB)
1 Output Tensors:[159] -> 12544B (0.01MB)
Node 31 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[159,141] -> 25088B (0.02MB)
1 Output Tensors:[151] -> 12544B (0.01MB)
Node 32 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[151,167,165] -> 38656B (0.04MB)
1 Output Tensors:[166] -> 75264B (0.07MB)
Node 33 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[166,164,163] -> 80256B (0.08MB)
1 Output Tensors:[162] -> 75264B (0.07MB)
Node 34 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[162,170,168] -> 100096B (0.10MB)
1 Output Tensors:[169] -> 12544B (0.01MB)
Node 35 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[169,151] -> 25088B (0.02MB)
1 Output Tensors:[161] -> 12544B (0.01MB)
Node 36 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[161,31,29] -> 38656B (0.04MB)
1 Output Tensors:[30] -> 75264B (0.07MB)
Node 37 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[30,28,27] -> 80256B (0.08MB)
1 Output Tensors:[26] -> 75264B (0.07MB)
Node 38 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[26,34,32] -> 112512B (0.11MB)
1 Output Tensors:[33] -> 18816B (0.02MB)
Node 39 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[33,41,39] -> 76416B (0.07MB)
1 Output Tensors:[40] -> 112896B (0.11MB)
Node 40 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[40,38,37] -> 120384B (0.11MB)
1 Output Tensors:[36] -> 112896B (0.11MB)
Node 41 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[36,44,42] -> 168576B (0.16MB)
1 Output Tensors:[43] -> 18816B (0.02MB)
Node 42 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[43,33] -> 37632B (0.04MB)
1 Output Tensors:[35] -> 18816B (0.02MB)
Node 43 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[35,51,49] -> 76416B (0.07MB)
1 Output Tensors:[50] -> 112896B (0.11MB)
Node 44 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[50,48,47] -> 120384B (0.11MB)
1 Output Tensors:[46] -> 112896B (0.11MB)
Node 45 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[46,54,52] -> 168576B (0.16MB)
1 Output Tensors:[53] -> 18816B (0.02MB)
Node 46 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[53,35] -> 37632B (0.04MB)
1 Output Tensors:[45] -> 18816B (0.02MB)
Node 47 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[45,60,58] -> 76416B (0.07MB)
1 Output Tensors:[59] -> 112896B (0.11MB)
Node 48 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[59,57,56] -> 120384B (0.11MB)
1 Output Tensors:[55] -> 28224B (0.03MB)
Node 49 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[55,63,61] -> 121024B (0.12MB)
1 Output Tensors:[62] -> 7840B (0.01MB)
Node 50 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[62,70,68] -> 165280B (0.16MB)
1 Output Tensors:[69] -> 47040B (0.04MB)
Node 51 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[69,67,66] -> 59520B (0.06MB)
1 Output Tensors:[65] -> 47040B (0.04MB)
Node 52 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[65,73,71] -> 201280B (0.19MB)
1 Output Tensors:[72] -> 7840B (0.01MB)
Node 53 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[72,62] -> 15680B (0.01MB)
1 Output Tensors:[64] -> 7840B (0.01MB)
Node 54 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[64,80,78] -> 165280B (0.16MB)
1 Output Tensors:[79] -> 47040B (0.04MB)
Node 55 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[79,77,76] -> 59520B (0.06MB)
1 Output Tensors:[75] -> 47040B (0.04MB)
Node 56 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[75,83,81] -> 201280B (0.19MB)
1 Output Tensors:[82] -> 7840B (0.01MB)
Node 57 Operator Builtin Code 0 ADD (not delegated)
2 Input Tensors:[82,64] -> 15680B (0.01MB)
1 Output Tensors:[74] -> 7840B (0.01MB)
Node 58 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[74,89,87] -> 165280B (0.16MB)
1 Output Tensors:[88] -> 47040B (0.04MB)
Node 59 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
3 Input Tensors:[88,86,85] -> 59520B (0.06MB)
1 Output Tensors:[84] -> 47040B (0.04MB)
Node 60 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[84,92,90] -> 355520B (0.34MB)
1 Output Tensors:[91] -> 15680B (0.01MB)
Node 61 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[91,5,3] -> 430400B (0.41MB)
1 Output Tensors:[4] -> 62720B (0.06MB)
Node 62 Operator Builtin Code 1 AVERAGE_POOL_2D (not delegated)
1 Input Tensors:[4] -> 62720B (0.06MB)
1 Output Tensors:[6] -> 1280B (0.00MB)
Node 63 Operator Builtin Code 3 CONV_2D (not delegated)
3 Input Tensors:[6,9,8] -> 1286564B (1.23MB)
1 Output Tensors:[7] -> 1001B (0.00MB)
Node 64 Operator Builtin Code 22 RESHAPE (not delegated)
2 Input Tensors:[7,10] -> 1009B (0.00MB)
1 Output Tensors:[172] -> 1001B (0.00MB)
Execution plan as the list of 65 nodes invoked in-order: [0-64]
--------------Subgraph-0 dump has completed--------------
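The arena numbers are internally consistent: the non-overlapping chain Tensor 171 -> 21 -> 17 reported under kTfLiteArenaRw occupies 150528 + 1204224 + 301056 = 1655808 bytes, exactly the 1.579 MB estimated for the scratch arena, and the three offset ranges [0, 150528), [150528, 1354752), [1354752, 1655808) tile it without gaps. The dump format itself (the MemAddr-Offset column, per-node tensor lists) comes from an instrumented interpreter rather than stock TFLite; a rough approximation of the tensor table using only the public C++ API might look like this:

#include <cstdio>
#include "tensorflow/lite/interpreter.h"

// Walk every tensor in the interpreter and print name, type, allocation
// type, and size -- a simplified stand-in for the table dumped above.
void DumpTensorTable(const tflite::Interpreter& interpreter) {
  for (size_t i = 0; i < interpreter.tensors_size(); ++i) {
    const TfLiteTensor* t = interpreter.tensor(static_cast<int>(i));
    std::printf("Tensor %3zu %-28s type=%d alloc=%d %zu bytes\n",
                i, t->name ? t->name : "(nil)",
                static_cast<int>(t->type),
                static_cast<int>(t->allocation_type), t->bytes);
  }
}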
INFO: wanted width, height, channels: 224, 224, 3
INFO: output_number_of_pixels@@@@@@@@@@@@@@@@@@@:150528
pinput11^^^^^^^^^^^^^^^^^^^^^^^typed_tensor(0):(nil)
typed_tensor(171):0xf6f73040
pinput11 assignment finished^^^^^^^^^^^^^^^^^^^^^^^INFO: invoked
INFO: average time: 84.954 ms
INFO: 0.717647: 653 military uniform
INFO: 0.564706: 835 suit
INFO: 0.533333: 458 bow tie
INFO: 0.52549: 907 Windsor tie
INFO: 0.513726: 753 racket
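These five lines are the top-5 predicted classes for the 517x606 test image; class 653 (military uniform) ranks first, which is the expected result. For a uint8 model the example's get_top_n helper divides each raw output byte by 255.0, so 0.717647 corresponds to the raw score 183 in output tensor 172. A self-contained sketch of that selection (buffer and class count are assumptions matching this model):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Pick the k highest uint8 scores and print them normalized by 255,
// mirroring the "0.717647: 653" lines above.
void PrintTopK(const uint8_t* scores, int num_classes, int k) {
  std::vector<int> idx(num_classes);
  for (int i = 0; i < num_classes; ++i) idx[i] = i;
  std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                    [scores](int a, int b) { return scores[a] > scores[b]; });
  for (int i = 0; i < k; ++i)
    std::printf("INFO: %g: %d\n", scores[idx[i]] / 255.0, idx[i]);
}
// Usage: PrintTopK(interpreter.typed_output_tensor<uint8_t>(0), 1001, 5);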