使用deeplabV3+做语义分割自训练模型关于cropsize需要注意的地方;
python deeplab/train.py \
--logtostderr \
--train_split="train" \
--model_variant="xception_65" \
--atrous_rates=6 \
--atrous_rates=12 \
--atrous_rates=18 \
--output_stride=16 \
--decoder_output_stride=4 \
--train_crop_size=513 \
--train_crop_size=513 \
--train_batch_size=4 \
--dataset="railway" \
--training_number_of_steps=30000 \
--fine_tune_batch_norm=false \
--tf_initial_checkpoint="deeplab/datasets/railway/init_models/deeplabv3_pascal_train_aug/model.ckpt" \
--train_logdir="deeplab/datasets/railway/train" \
--dataset_dir="deeplab/datasets/railway/tfrecord"
其中有两项参数为
--train_crop_size=513 \
--train_crop_size=513 \
此时为输入源图像的随机裁剪尺寸。当然,如果你的GPU够强大,也可以不裁剪,按原图大小输入;如果你的GPU显存不够,可以减小
--train_batch_size=4
最后需要注意的是,这里的crop_size决定了eval.py中eval_crop_size的大小:若此处为(513,513),eval.py中的eval_crop_size就不能大于513,否则会报错。(注:crop_size 一般应取 output_stride × k + 1 的形式,例如 513 = 16 × 32 + 1;若输入尺寸不满足该约束,空洞卷积中的 SpaceToBatchND 可能因尺寸无法整除而报出类似下面的错误——具体待验证。)
InvalidArgumentError Traceback (most recent call last)
~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1321 try:
-> 1322 return fn(*args)
1323 except errors.OpError as e:
~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1306 return self._call_tf_sessionrun(
-> 1307 options, feed_dict, fetch_list, target_list, run_metadata)
1308
~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1408 self._session, options, feed_dict, fetch_list, target_list,
-> 1409 run_metadata)
1410 else:
InvalidArgumentError: padded_shape[1]=87 is not divisible by block_shape[1]=2
[[Node: xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32, _device="/job:localhost/replica:0/task:0/device:GPU:0"](xception_65/exit_flow/block1/unit_1/xception_module/ArithmeticOptimizer/AddOpsRewrite_add-0-0-TransposeNCHWToNHWC-LayoutOptimizer, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/block_shape, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/paddings)]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
in
28 image_path = '/home/user07/tensorflow/models/research/deeplab/datasets/railway/image/666.jpg'
29 original_im = Image.open(image_path)
---> 30 resized_im, seg_map = MODEL.run(original_im)
31
32 cv2.imwrite("test.jpg",seg_map)
in run(self, image)
50 batch_seg_map = self.sess.run(
51 self.OUTPUT_TENSOR_NAME,
---> 52 feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
53 seg_map = batch_seg_map[0]
54 return resized_image, seg_map
~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
898 try:
899 result = self._run(None, fetches, feed_dict, options_ptr,
--> 900 run_metadata_ptr)
901 if run_metadata:
902 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1133 if final_fetches or final_targets or (handle and feed_dict_tensor):
1134 results = self._do_run(handle, final_targets, final_fetches,
-> 1135 feed_dict_tensor, options, run_metadata)
1136 else:
1137 results = []
~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1314 if handle is None:
1315 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1316 run_metadata)
1317 else:
1318 return self._do_call(_prun_fn, handle, feeds, fetches)
~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1333 except KeyError:
1334 pass
-> 1335 raise type(e)(node_def, op, message)
1336
1337 def _extend_graph(self):
InvalidArgumentError: padded_shape[1]=87 is not divisible by block_shape[1]=2
[[Node: xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32, _device="/job:localhost/replica:0/task:0/device:GPU:0"](xception_65/exit_flow/block1/unit_1/xception_module/ArithmeticOptimizer/AddOpsRewrite_add-0-0-TransposeNCHWToNHWC-LayoutOptimizer, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/block_shape, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/paddings)]]
Caused by op 'xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND', defined at:
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in
app.launch_new_instance()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 505, in start
self.io_loop.start()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 148, in start
self.asyncio_loop.run_forever()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/base_events.py", line 427, in run_forever
self._run_once()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/base_events.py", line 1440, in _run_once
handle._run()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/ioloop.py", line 690, in
lambda f: self._run_callback(functools.partial(callback, future))
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/ioloop.py", line 743, in _run_callback
ret = callback()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py", line 781, in inner
self.run()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py", line 742, in run
yielded = self.gen.send(value)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 370, in dispatch_queue
yield self.process_one()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py", line 225, in wrapper
runner = Runner(result, future, yielded)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py", line 708, in __init__
self.run()
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py", line 742, in run
yielded = self.gen.send(value)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 357, in process_one
yield gen.maybe_future(dispatch(*args))
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 267, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 534, in execute_request
user_expressions, allow_stdin,
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 294, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2848, in run_cell
raw_cell, store_history, silent, shell_futures)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2874, in _run_cell
return runner(coro)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/async_helpers.py", line 67, in _pseudo_sync_runner
coro.send(None)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3049, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3214, in run_ast_nodes
if (yield from self.run_code(code, result)):
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3296, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "", line 27, in
MODEL = DeepLabModel(model_path)
File "", line 31, in __init__
tf.import_graph_def(graph_def, name='')
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py", line 432, in new_func
return func(*args, **kwargs)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/importer.py", line 442, in import_graph_def
_ProcessNewOps(graph)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/importer.py", line 234, in _ProcessNewOps
for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3563, in _add_new_tf_operations
for c_op in c_api_util.new_tf_operations(self)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3563, in
for c_op in c_api_util.new_tf_operations(self)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3450, in _create_op_from_tf_operation
ret = Operation(c_op, self)
File "/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1740, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): padded_shape[1]=87 is not divisible by block_shape[1]=2
[[Node: xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32, _device="/job:localhost/replica:0/task:0/device:GPU:0"](xception_65/exit_flow/block1/unit_1/xception_module/ArithmeticOptimizer/AddOpsRewrite_add-0-0-TransposeNCHWToNHWC-LayoutOptimizer, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/block_shape, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/paddings)]]
可以这样理解,eval.py中的cropsize须与train.py中的保持一致(待验证)
已经证明的是,在使用deeplab_demo.py测试单张图像之前,需要将训练后的模型文件导出,这里将命令写到export_model.sh脚本中去:
python deeplab/export_model.py --logtostderr --checkpoint_path="/home/user07/tensorflow/models/research/deeplab/datasets/railway/new_train/model.ckpt-$1" --export_path="/home/user07/tensorflow/models/research/deeplab/datasets/railway/export/frozen_inference_graph-$1.pb" --model_variant="xception_65" --atrous_rates=6 --atrous_rates=12 --atrous_rates=18 --output_stride=16 --decoder_output_stride=4 --num_classes=4 --crop_size=961 --crop_size=1281 --inference_scales=1.0
直接运行:
sh export_model.sh 30000
后面的数字为训练目录下的迭代次数
然后会在export目录下生成frozen_inference_graph-30000.pb文件,最后将其压缩成tar.gz文件就可以在deeplab_demo.ipynb中使用了
tar zcvf frozen_inference_graph-30000.tar.gz frozen_inference_graph-30000.pb
修改后的deeplab_demo.ipynb:
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "KFPcBuVFw61h"
},
"source": [
"# DeepLab Demo\n",
"\n",
"This demo will demonstrate the steps to run deeplab semantic segmentation model on sample input images."
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"cellView": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
},
"colab_type": "code",
"id": "kAbdmRmvq0Je"
},
"outputs": [],
"source": [
"#@title Imports\n",
"\n",
"import os\n",
"from io import BytesIO\n",
"import tarfile\n",
"import tempfile\n",
"from six.moves import urllib\n",
"\n",
"from matplotlib import gridspec\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"from PIL import Image\n",
"import cv2\n",
"\n",
"import tensorflow as tf\n",
"os.environ['CUDA_VISIBLE_DEVICES']='2'\n"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"cellView": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
},
"colab_type": "code",
"id": "vN0kU6NJ1Ye5"
},
"outputs": [],
"source": [
"#@title Helper methods\n",
"\n",
"\n",
"class DeepLabModel(object):\n",
" \"\"\"Class to load deeplab model and run inference.\"\"\"\n",
"\n",
" INPUT_TENSOR_NAME = 'ImageTensor:0'\n",
" OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'\n",
" INPUT_SIZE = 1280\n",
" FROZEN_GRAPH_NAME = 'frozen_inference_graph'\n",
"\n",
" def __init__(self, tarball_path):\n",
" \"\"\"Creates and loads pretrained deeplab model.\"\"\"\n",
" self.graph = tf.Graph()\n",
"\n",
" graph_def = None\n",
" # Extract frozen graph from tar archive.\n",
" tar_file = tarfile.open(tarball_path)\n",
" for tar_info in tar_file.getmembers():\n",
" if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):\n",
" file_handle = tar_file.extractfile(tar_info)\n",
" graph_def = tf.GraphDef.FromString(file_handle.read())\n",
" break\n",
"\n",
" tar_file.close()\n",
"\n",
" if graph_def is None:\n",
" raise RuntimeError('Cannot find inference graph in tar archive.')\n",
"\n",
" with self.graph.as_default():\n",
" tf.import_graph_def(graph_def, name='')\n",
"\n",
" self.sess = tf.Session(graph=self.graph)\n",
"\n",
" def run(self, image):\n",
" \"\"\"Runs inference on a single image.\n",
"\n",
" Args:\n",
" image: A PIL.Image object, raw input image.\n",
"\n",
" Returns:\n",
" resized_image: RGB image resized from original input image.\n",
" seg_map: Segmentation map of `resized_image`.\n",
" \"\"\"\n",
" width, height = image.size\n",
" resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n",
" target_size = (int(resize_ratio * width), int(resize_ratio * height))\n",
" #target_size = (width, height)\n",
" resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n",
" batch_seg_map = self.sess.run(\n",
" self.OUTPUT_TENSOR_NAME,\n",
" feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n",
" seg_map = batch_seg_map[0]\n",
" return resized_image, seg_map\n",
"\n",
"\n",
"def create_pascal_label_colormap():\n",
" \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n",
"\n",
" Returns:\n",
" A Colormap for visualizing segmentation results.\n",
" \"\"\"\n",
" colormap = np.zeros((256, 3), dtype=int)\n",
" ind = np.arange(256, dtype=int)\n",
"\n",
" for shift in reversed(range(8)):\n",
" for channel in range(3):\n",
" colormap[:, channel] |= ((ind >> channel) & 1) << shift\n",
" ind >>= 3\n",
"\n",
" return colormap\n",
"\n",
"\n",
"def label_to_color_image(label):\n",
" \"\"\"Adds color defined by the dataset colormap to the label.\n",
"\n",
" Args:\n",
" label: A 2D array with integer type, storing the segmentation label.\n",
"\n",
" Returns:\n",
" result: A 2D array with floating type. The element of the array\n",
" is the color indexed by the corresponding element in the input label\n",
" to the PASCAL color map.\n",
"\n",
" Raises:\n",
" ValueError: If label is not of rank 2 or its value is larger than color\n",
" map maximum entry.\n",
" \"\"\"\n",
" if label.ndim != 2:\n",
" raise ValueError('Expect 2-D input label')\n",
"\n",
" colormap = create_pascal_label_colormap()\n",
"\n",
" if np.max(label) >= len(colormap):\n",
" raise ValueError('label value too large.')\n",
"\n",
" return colormap[label]\n",
"\n",
"\n",
"def vis_segmentation(image, seg_map):\n",
" \"\"\"Visualizes input image, segmentation map and overlay view.\"\"\"\n",
" plt.figure(figsize=(15, 5))\n",
" grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n",
"\n",
" plt.subplot(grid_spec[0])\n",
" plt.imshow(image)\n",
" plt.axis('off')\n",
" plt.title('input image')\n",
"\n",
" plt.subplot(grid_spec[1])\n",
" seg_image = label_to_color_image(seg_map).astype(np.uint8)\n",
" plt.imshow(seg_image)\n",
" plt.axis('off')\n",
" plt.title('segmentation map')\n",
"\n",
" plt.subplot(grid_spec[2])\n",
" plt.imshow(image)\n",
" plt.imshow(seg_image, alpha=0.7)\n",
" plt.axis('off')\n",
" plt.title('segmentation overlay')\n",
"\n",
" unique_labels = np.unique(seg_map)\n",
" ax = plt.subplot(grid_spec[3])\n",
" plt.imshow(\n",
" FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n",
" ax.yaxis.tick_right()\n",
" plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n",
" plt.xticks([], [])\n",
" ax.tick_params(width=0.0)\n",
" plt.grid('off')\n",
" plt.show()\n",
"\n",
"\n",
"# LABEL_NAMES = np.asarray([\n",
"# 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\n",
"# 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n",
"# 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'\n",
"# ])\n",
"LABEL_NAMES = np.asarray([\n",
" 'background', 'road_day', 'road_night' \n",
"])\n",
"\n",
"FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)\n",
"FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
},
"colab_type": "code",
"id": "c4oXKmnjw6i_",
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"model loaded successfully!\n"
]
}
],
"source": [
"#@title Select and download models {display-mode: \"form\"}\n",
"\n",
"# MODEL_NAME = 'mobilenetv2_coco_voctrainaug' # @param ['mobilenetv2_coco_voctrainaug', 'mobilenetv2_coco_voctrainval', 'xception_coco_voctrainaug', 'xception_coco_voctrainval']\n",
"\n",
"# _DOWNLOAD_URL_PREFIX = 'http://download.tensorflow.org/models/'\n",
"# _MODEL_URLS = {\n",
"# 'mobilenetv2_coco_voctrainaug':\n",
"# 'deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz',\n",
"# 'mobilenetv2_coco_voctrainval':\n",
"# 'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz',\n",
"# 'xception_coco_voctrainaug':\n",
"# 'deeplabv3_pascal_train_aug_2018_01_04.tar.gz',\n",
"# 'xception_coco_voctrainval':\n",
"# 'deeplabv3_pascal_trainval_2018_01_04.tar.gz',\n",
"# }\n",
"# _TARBALL_NAME = 'deeplab_model.tar.gz'\n",
"\n",
"# model_dir = tempfile.mkdtemp()\n",
"# tf.gfile.MakeDirs(model_dir)\n",
"\n",
"# download_path = os.path.join(model_dir, _TARBALL_NAME)\n",
"# print('downloading model, this might take a while...')\n",
"# urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME],\n",
"# download_path)\n",
"# print('download completed! loading DeepLab model...')\n",
"model_path ='/home/user07/tensorflow/models/research/deeplab/datasets/railway/export/frozen_inference_graph-30000.tar.gz'\n",
"MODEL = DeepLabModel(model_path)\n",
"print('model loaded successfully!')"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "SZst78N-4OKO"
},
"source": [
"## Run on sample images\n",
"\n",
"Select one of sample images (leave `IMAGE_URL` empty) or feed any internet image\n",
"url for inference.\n",
"\n",
"Note that we are using single scale inference in the demo for fast computation,\n",
"so the results may slightly differ from the visualizations in\n",
"[README](https://github.com/tensorflow/models/blob/master/research/deeplab/README.md),\n",
"which uses multi-scale and left-right flipped inputs."
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
},
"colab_type": "code",
"id": "edGukUHXyymr"
},
"outputs": [
{
"ename": "InvalidArgumentError",
"evalue": "padded_shape[1]=87 is not divisible by block_shape[1]=2\n\t [[Node: xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32, _device=\"/job:localhost/replica:0/task:0/device:GPU:0\"](xception_65/exit_flow/block1/unit_1/xception_module/ArithmeticOptimizer/AddOpsRewrite_add-0-0-TransposeNCHWToNHWC-LayoutOptimizer, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/block_shape, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/paddings)]]\n\nCaused by op 'xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND', defined at:\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n self.io_loop.start()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 148, in start\n self.asyncio_loop.run_forever()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/base_events.py\", line 427, in run_forever\n self._run_once()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/base_events.py\", line 1440, in _run_once\n handle._run()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/events.py\", line 145, in _run\n 
self._callback(*self._args)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 781, in inner\n self.run()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 370, in dispatch_queue\n yield self.process_one()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 225, in wrapper\n runner = Runner(result, future, yielded)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 708, in __init__\n self.run()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 357, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 267, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 534, in execute_request\n user_expressions, allow_stdin,\n File 
\"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2848, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2874, in _run_cell\n return runner(coro)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/async_helpers.py\", line 67, in _pseudo_sync_runner\n coro.send(None)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3049, in run_cell_async\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3214, in run_ast_nodes\n if (yield from self.run_code(code, result)):\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3296, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 27, in \n MODEL = DeepLabModel(model_path)\n File \"\", line 31, in __init__\n tf.import_graph_def(graph_def, name='')\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 432, in new_func\n return func(*args, **kwargs)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in 
import_graph_def\n _ProcessNewOps(graph)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3563, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3563, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3450, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1740, in __init__\n self._traceback = self._graph._extract_stack() # pylint: disable=protected-access\n\nInvalidArgumentError (see above for traceback): padded_shape[1]=87 is not divisible by block_shape[1]=2\n\t [[Node: xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32, _device=\"/job:localhost/replica:0/task:0/device:GPU:0\"](xception_65/exit_flow/block1/unit_1/xception_module/ArithmeticOptimizer/AddOpsRewrite_add-0-0-TransposeNCHWToNHWC-LayoutOptimizer, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/block_shape, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/paddings)]]\n",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1321\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1322\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1323\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1306\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1307\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1308\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1408\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1409\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1410\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mInvalidArgumentError\u001b[0m: padded_shape[1]=87 is not divisible by block_shape[1]=2\n\t [[Node: xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32, _device=\"/job:localhost/replica:0/task:0/device:GPU:0\"](xception_65/exit_flow/block1/unit_1/xception_module/ArithmeticOptimizer/AddOpsRewrite_add-0-0-TransposeNCHWToNHWC-LayoutOptimizer, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/block_shape, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/paddings)]]",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0mimage_path\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'/home/user07/tensorflow/models/research/deeplab/datasets/railway/image/666.jpg'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0moriginal_im\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mImage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 30\u001b[0;31m \u001b[0mresized_im\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mseg_map\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mMODEL\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moriginal_im\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"test.jpg\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mseg_map\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, image)\u001b[0m\n\u001b[1;32m 50\u001b[0m batch_seg_map = self.sess.run(\n\u001b[1;32m 51\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOUTPUT_TENSOR_NAME\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 52\u001b[0;31m feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n\u001b[0m\u001b[1;32m 53\u001b[0m \u001b[0mseg_map\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbatch_seg_map\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresized_image\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mseg_map\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 898\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 899\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 900\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 901\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 902\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1133\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1134\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1135\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1136\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1137\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1314\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1315\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1316\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1317\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1318\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1334\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1335\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1336\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1337\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mInvalidArgumentError\u001b[0m: padded_shape[1]=87 is not divisible by block_shape[1]=2\n\t [[Node: xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32, _device=\"/job:localhost/replica:0/task:0/device:GPU:0\"](xception_65/exit_flow/block1/unit_1/xception_module/ArithmeticOptimizer/AddOpsRewrite_add-0-0-TransposeNCHWToNHWC-LayoutOptimizer, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/block_shape, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/paddings)]]\n\nCaused by op 'xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND', defined at:\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n self.io_loop.start()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 148, in start\n self.asyncio_loop.run_forever()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/base_events.py\", line 427, in run_forever\n self._run_once()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/base_events.py\", line 1440, in _run_once\n handle._run()\n File 
\"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 781, in inner\n self.run()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 370, in dispatch_queue\n yield self.process_one()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 225, in wrapper\n runner = Runner(result, future, yielded)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 708, in __init__\n self.run()\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 742, in run\n yielded = self.gen.send(value)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 357, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 267, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 534, 
in execute_request\n user_expressions, allow_stdin,\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2848, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2874, in _run_cell\n return runner(coro)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/async_helpers.py\", line 67, in _pseudo_sync_runner\n coro.send(None)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3049, in run_cell_async\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3214, in run_ast_nodes\n if (yield from self.run_code(code, result)):\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3296, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 27, in \n MODEL = DeepLabModel(model_path)\n File \"\", line 31, in __init__\n tf.import_graph_def(graph_def, name='')\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 432, in new_func\n return func(*args, **kwargs)\n File 
\"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3563, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3563, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3450, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/home/user07/anaconda3/envs/hss_tf/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1740, in __init__\n self._traceback = self._graph._extract_stack() # pylint: disable=protected-access\n\nInvalidArgumentError (see above for traceback): padded_shape[1]=87 is not divisible by block_shape[1]=2\n\t [[Node: xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32, _device=\"/job:localhost/replica:0/task:0/device:GPU:0\"](xception_65/exit_flow/block1/unit_1/xception_module/ArithmeticOptimizer/AddOpsRewrite_add-0-0-TransposeNCHWToNHWC-LayoutOptimizer, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/block_shape, xception_65/exit_flow/block2/unit_1/xception_module/separable_conv1_depthwise/depthwise/SpaceToBatchND/paddings)]]\n"
]
}
],
"source": [
"#@title Run on sample images {display-mode: \"form\"}\n",
"\n",
"# SAMPLE_IMAGE = 'image1' # @param ['image1', 'image2', 'image3']\n",
"# IMAGE_URL = '' #@param {type:\"string\"}\n",
"\n",
"# _SAMPLE_URL = ('https://github.com/tensorflow/models/blob/master/research/'\n",
"# 'deeplab/g3doc/img/%s.jpg?raw=true')\n",
"\n",
"\n",
"# def run_visualization(url):\n",
"# \"\"\"Inferences DeepLab model and visualizes result.\"\"\"\n",
"# try:\n",
"# f = urllib.request.urlopen(url)\n",
"# jpeg_str = f.read()\n",
"# original_im = Image.open(BytesIO(jpeg_str))\n",
"# except IOError:\n",
"# print('Cannot retrieve image. Please check url: ' + url)\n",
"# return\n",
"\n",
"# print('running deeplab on image %s...' % url)\n",
"# resized_im, seg_map = MODEL.run(original_im)\n",
"\n",
"# vis_segmentation(resized_im, seg_map)\n",
"\n",
"\n",
"# image_url = IMAGE_URL or _SAMPLE_URL % SAMPLE_IMAGE\n",
"# run_visualization(image_url)\n",
"image_path = '/home/user07/tensorflow/models/research/deeplab/datasets/railway/image/666.jpg'\n",
"original_im = Image.open(image_path)\n",
"resized_im, seg_map = MODEL.run(original_im)\n",
"\n",
"cv2.imwrite(\"test.jpg\",seg_map)\n",
"vis_segmentation(resized_im, seg_map)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
},
"colab_type": "code",
"id": "7XrFNGsxzSIB"
},
"outputs": [],
"source": []
}
],
"metadata": {
"colab": {
"collapsed_sections": [],
"default_view": {},
"name": "DeepLab Demo.ipynb",
"provenance": [],
"version": "0.3.2",
"views": {}
},
"kernelspec": {
"display_name": "Python [conda env:hss_tf] *",
"language": "python",
"name": "conda-env-hss_tf-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 1
}