import tflite2onnx

# Convert a TFLite model file into an ONNX model file.
src_tflite = "path/to/the/tflitemodel"
dst_onnx = "path/where/you/want/to/save/your/model"  # e.g. modelname.onnx
tflite2onnx.convert(src_tflite, dst_onnx)
# Reference: https://blog.csdn.net/weixin_41581849/article/details/120516302
# python -m tf2onnx.convert --input ./pb/xxx.pb --inputs normalized_input_image_tensor:0 --outputs normalized_input_image_tensor:0,raw_outputs/class_predictions:0,raw_outputs/box_encodings:0,anchors:0 --output ./onnx/xxx.onnx --verbose --opset 12
import onnx

# Load an existing ONNX graph, rewrite one node's op type, validate, save.
model = onnx.load('linear_func_2.onnx')
nodes = model.graph.node
nodes[1].op_type = 'Sub'  # switch the second node's operation to Sub
onnx.checker.check_model(model)  # confirm the edited graph is still valid
onnx.save(model, 'linear_func_21.onnx')
# Reference: https://blog.csdn.net/qq_41870658/article/details/115701829
import tensorflow as tf
import os
from tensorflow.python.tools import freeze_graph
# `network` is your own model definition, e.g.:
# def network(input):
#     return tf.layers.max_pooling2d(input, 2, 2)
from model import network
# Pin the process to GPU #2.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# Checkpoint prefix: newer TensorFlow writes three companion files per
# checkpoint, so the path stops at the numeric step suffix.
model_path = "path to /model.ckpt-0000"
def main():
    """Rebuild the graph, restore a checkpoint and freeze it into a .pb."""
    tf.reset_default_graph()
    # Input placeholder: adjust the (H, W, C) shape to match your images.
    input_node = tf.placeholder(tf.float32, shape=(228, 304, 3))
    input_node = tf.expand_dims(input_node, 0)  # add the batch dimension
    flow = network(input_node)
    # Cast and name the output tensor 'out' so the frozen graph can be
    # queried by that name later.
    flow = tf.cast(flow, tf.uint8, 'out')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, model_path)
        # First dump the bare GraphDef ...
        tf.train.write_graph(sess.graph_def, 'output_model/pb_model', 'model.pb')
        # ... then merge graph structure and weights into one frozen .pb.
        freeze_graph.freeze_graph('output_model/pb_model/model.pb', '', False,
                                  model_path, 'out', 'save/restore_all',
                                  'save/Const:0',
                                  'output_model/pb_model/frozen_model.pb',
                                  False, "")
        print("done")


if __name__ == '__main__':
    main()
# Reference: https://blog.csdn.net/qq_42738654/article/details/120210351
def load_pb_graph(path):
    """Read a frozen .pb file and import it into a fresh tf.Graph."""
    with tf.gfile.GFile(path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as g:
        # name=None keeps the default 'import/' prefix on imported node names.
        tf.import_graph_def(graph_def, name=None)
    return g
model_filename = '111.pb'
g = load_pb_graph(model_filename)
# Original graph loaded.

new_model = tf.GraphDef()
with tf.Session(graph=g) as sess:
    for n in sess.graph_def.node:
        if n.name in ['import/input_ids', 'import/input_mask', 'import/token_type_ids']:
            # Re-create these input placeholders with an explicit int32
            # dtype and a fixed [1, 7] shape.
            nn = new_model.node.add()
            nn.op = n.op
            nn.name = n.name
            nn.attr['dtype'].CopyFrom(tf.AttrValue(type=tf.int32.as_datatype_enum))
            s = tensor_shape_pb2.TensorShapeProto()
            d1 = tensor_shape_pb2.TensorShapeProto.Dim()
            d2 = tensor_shape_pb2.TensorShapeProto.Dim()
            d1.size = 1
            d2.size = 7
            s.dim.extend([d1, d2])
            nn.attr['shape'].shape.CopyFrom(s)
            for i in n.input:
                nn.input.extend([i])
        else:
            # nn = new_model.node.add(); nn.CopyFrom(n) is far too slow;
            # appending the old node directly is much cheaper.
            new_model.node.append(n)

print('*' * 100)
# To inject the new graph into the current default Graph:
# tf.import_graph_def(new_model, name='')

# Quick inspection / sanity check.
with tf.Session() as sess:
    tf.train.write_graph(new_model, logdir='./', name='graph_def_new.pb', as_text=False)
    OPS = tf.get_default_graph().get_operations()
    for op in OPS:
        for i in op.outputs:
            print(i.name, i.shape)
    OPS = tf.get_default_graph().get_operations()
    for op in OPS:
        # For each op: input tensor names ----> op type ----> output names.
        txt = str([v.name for v in op.inputs]) + '---->' + op.type + '---->' + str([v.name for v in op.outputs])
        print(txt)
    # Once a name is known, fetch the concrete tensor like this:
    layer1 = sess.graph.get_operation_by_name('ssd300/conv5/conv5_1/Relu').outputs[0]
    layer2 = sess.graph.get_operation_by_name('ssd300/conv2/conv2_1/Relu').outputs[0]
    out_t, out_t1 = sess.run([layer1, layer2], feed_dict={X: x})
# Freezing a TensorFlow ckpt/meta checkpoint into a .pb
"""1.Get input, output , saver and graph"""#从导入图中获取需要的东西
meta_path_restore = model_dir + '/model_'+model_version+'.ckpt.meta'
model_path_restore = model_dir + '/model_'+model_version+'.ckpt'
saver_restore = tf.train.import_meta_graph(meta_path_restore) #获取导入图的saver,便于后面恢复
graph_restore = tf.get_default_graph() #此时默认图就是导入的图
#从导入图中获取需要的tensor
#1. 用collection来获取
input_x = tf.get_collection('inputs')[0]
input_is_training = tf.get_collection('is_training')[0]
output_feat_fused = tf.get_collection('feat_fused')[0]
#2. 用tensor的name来获取
input_y = graph_restore.get_tensor_by_name('label_exp:0')
print('Get tensors...')
print('inputs shape: {}'.format(input_x.get_shape().as_list()))
print('input_is_training shape: {}'.format(input_is_training.get_shape().as_list()))
print('output_feat_fused shape: {}'.format(output_feat_fused.get_shape().as_list()))
"""2.Build new variable for fine tuning"""#构造新的variables用于后面的finetuning
graph_restore.clear_collection('feat_fused') #删除以前的集合,假如finetuning后用新的代替原来的
graph_restore.clear_collection('prob')
#添加新的东西
if F_scale is not None and F_scale!=0:
print('F_scale is not None, value={}'.format(F_scale))
feat_fused = Net_normlize_scale(output_feat_fused, F_scale)
tf.add_to_collection('feat_fused',feat_fused)#重新添加到新集合
logits_fused = last_logits(feat_fused,input_is_training,7) # scope name是"final_logits"
"""3.Get acc and loss"""#构造损失
with tf.variable_scope('accuracy'):
accuracy,prediction = ...
with tf.variable_scope('loss'):
loss = ...
"""4.Build op for fine tuning"""
global_step = tf.Variable(0, trainable=False,name='global_step')
learning_rate = tf.train.exponential_decay(initial_lr,
global_step=global_step,
decay_steps=decay_steps,
staircase=True,
decay_rate=0.1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
var_list = tf.contrib.framework.get_variables('final_logits')#关键!获取指定scope下的变量
train_op = tf.train.MomentumOptimizer(learning_rate=learning_rate,
momentum=0.9).minimize(loss,global_step=global_step,var_list=var_list) #只更新指定的variables
"""5.Begin training"""
init = tf.global_variables_initializer()
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
with tf.Session(config=config) as sess:
sess.run(init)
saver_restore.restore(sess, model_path_restore) #这里saver_restore对应导入图的saver, 如果用上面新的saver的话会报错 因为多出了一些新的变量 在保存的模型中是没有那些权值的
sess.run(train_op, feed_dict)
# ...
# Key points:
# - Restore with the imported graph's saver: saver_restore.
# - Use var_list = tf.contrib.framework.get_variables(scope_name) to collect
#   the variables under a given scope, then pass that var_list to
#   optimizer.minimize().
# Appendix: how to discover tensor names and fetch specific variables
# 1. Get the output of some operation:
#    use graph.get_operations() to list every op. For example,
#    output_pool_flatten = graph_restore.get_tensor_by_name('common_conv_xxx_net/common_conv_net/flatten/Reshape:0')
#    is the flattened output at that point.
# 2. Get the value of a specific variable:
#    use GraphKeys to fetch variables —
#    tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) returns the variables
#    in that collection. For example,
#    var_logits_biases = graph_restore.get_tensor_by_name('common_conv_xxx_net/final_logits/logits/biases:0')
#    is the biases tensor at that point.
# 3. Get a collection restricted to a scope:
#    tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope='common_conv_xxx_net.final_logits')
#coding=utf-8
import tensorflow as tf
import struct
#from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.layers.python.layers import batch_norm
import numpy as np
# import matplotlib.pyplot as plt
# import pylab
import time,random
import cv2,sys,os
# Print numpy arrays in plain (non-scientific) notation.
np.set_printoptions(suppress=True)
# Human-readable class labels for the five partial-discharge categories
# (kept in Chinese: they are runtime labels).
label_num = ['噪声', '尖端', '空穴', '自由颗粒', '悬浮放电']
def print2file(context):
    """Logging helper: currently just prints; the commented lines show how
    to redirect the output into 'print.txt' instead."""
    # temp = sys.stdout
    # sys.stdout = open('print.txt', 'a')
    print(context)
    # sys.stdout.close()
    # sys.stdout = temp
def activation_print(activation):
    """Record (via print2file) which known activation function is in use."""
    known = [
        (binary_relu_unit, "binary_relu_unit"),
        (sigmoid, "sigmoid"),
        (tf.nn.sigmoid, "tf.nn.sigmoid"),
        (binary_tanh_unit, "binary_tanh_unit"),
        (tf.nn.tanh, "tf.nn.tanh"),
        (tf.nn.relu, "tf.nn.relu"),
    ]
    for candidate, label in known:
        if activation == candidate:
            print2file(label)
def add_weight_decay(weights, lambda1=0.0001):
    """Register an L2 penalty on `weights` in REGULARIZATION_LOSSES and
    return the tensor unchanged."""
    penalty = tf.multiply(tf.nn.l2_loss(weights), lambda1, name='weight_loss')
    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, penalty)
    return weights
def weight_variable(shape, name):
    """Create a Xavier-initialised weight variable named weight_<name>
    (where `name` is an integer id) with L2 weight decay attached."""
    var = tf.get_variable(name='weight_%d' % name, shape=shape,
                          initializer=tf.contrib.layers.xavier_initializer())
    return add_weight_decay(var)
    # Alternative initialisers kept from the original for reference:
    # tf.random_uniform_initializer(-1.27, 1.27)
    # tf.glorot_normal_initializer()
def bias_variable(shape):
    """Bias variable initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution, stride 1, SAME padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x, pool_flag=None):
    """2x2 max pooling with stride 2. `pool_flag` is accepted but unused."""
    # strides layout: [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def avg_pool_2x2(x, kernel=None):
    """Average pooling whose window and stride are both `kernel`
    (defaults to [1, 2, 2, 1], i.e. 2x2 pooling with stride 2)."""
    # Avoid a mutable default argument; build the default per call.
    if kernel is None:
        kernel = [1, 2, 2, 1]
    return tf.nn.avg_pool(x, ksize=kernel, strides=kernel, padding='SAME')
def batch_norm_layer(value, train=None, name='batch_norm'):
    """Batch-norm stub: normalisation is currently disabled and the input
    is returned untouched. The commented code shows the original wiring."""
    # if train is not None:
    #     return batch_norm(value, decay=0.9, updates_collections=None,
    #                       is_training=True)
    # else:
    #     return batch_norm(value, decay=0.9, updates_collections=None,
    #                       is_training=False)
    return value
#define placeholder for inputs to network
def round_through2(x):
    """Quantise to two decimal places inside [-1.27, 1.27] while keeping
    an identity gradient (straight-through estimator)."""
    clipped = tf.clip_by_value(x, -1.27, 1.27)
    snapped = tf.round(clipped * 100)
    return clipped + tf.stop_gradient(snapped / 100 - clipped)
def round_through(x):
    """tf.round in the forward pass, identity in the backward pass."""
    return x + tf.stop_gradient(tf.round(x) - x)
def hard_sigmoid(x):
    """Piecewise-linear sigmoid: clip((x + 1) / 2, 0, 1)."""
    return tf.clip_by_value((x + 1.) / 2., 0, 1)
def binary_tanh_unit(x):
    """Binarise to {-1, +1} with a straight-through gradient."""
    return 2. * round_through(hard_sigmoid(x)) - 1.
def binary_relu_unit(x):
    """Binarise to {0, 1}; the small epsilon nudges exact zeros upward."""
    shifted = x + 0.00001
    return round_through(tf.clip_by_value(shifted, 0, 1))
def approxsign(x):
    """Piecewise-quadratic, differentiable approximation of sign(x)
    on [-1, 1] (ApproxSign-style)."""
    x = tf.clip_by_value(x, -1, 1)
    positive = tf.greater(x, 0)
    # x > 0: 2x - x^2 ; x <= 0: x^2 + 2x
    return tf.where(positive, 2 * x - tf.square(x), tf.square(x) + 2 * x)
# print(images_train[0])
def sigmoid(x):
    """Sharpened sigmoid centred at 0.5 (slope scaled by 10)."""
    return tf.nn.sigmoid(10 * (x - 0.5))
def linear(x):
    """Identity activation: pass the input through unchanged."""
    return x
def clear_file(path):
    """Truncate `path` to zero bytes (creating the file if missing)."""
    open(path, 'w').close()
def write_weight(path, weight, bias=None):
    """Append `weight` (and optionally `bias`) to `path` as raw native
    float32 values, flattened in C order.

    Args:
        path: target file, opened in binary-append mode.
        weight: numpy array of weights.
        bias: optional numpy array of biases, written after the weights.
    """
    # Open the file once and stream both arrays, instead of the original's
    # two separate opens with duplicated pack loops.
    with open(path, 'ab') as f:
        for value in weight.reshape(-1):
            f.write(struct.pack('f', value))
        if bias is not None:
            for value in bias.reshape(-1):
                f.write(struct.pack('f', value))
def write_accuracy(a, path='wbfab.accuracy'):
    """Overwrite `path` with the accuracy value formatted as '%f'."""
    with open(path, 'w') as out:
        out.write("%f" % a)
# Running state for the training loop below.
count_pic = 0
# old_accuracy = read_weight()
old_time1 = time.time()  # wall-clock reference for per-epoch timing
old_accuracy = 0         # best test accuracy seen so far
def one_hot(x):
    """Map a file name whose first character is the class index ('0'-'4')
    to a length-5 one-hot numpy vector."""
    index = int(x[0])
    vec = np.zeros(5)
    vec[index] = 1
    return vec
def write_pic(name, dir1='train1/'):
    """Load the image `dir1 + name`, resize it to 90x64 and return it as a
    (64, 90, 3) numpy array."""
    img = cv2.imread(dir1 + name)
    try:
        img = cv2.resize(img, (90, 64))
    except Exception as e:
        # NOTE(review): a failed read/resize only logs the file name; the
        # reshape below will then fail on the unresized image — confirm
        # this best-effort behaviour is intended.
        print(name)
    # cv2.imshow('ss', img)
    # cv2.waitKey(0)
    return np.array(img).reshape((64, 90, 3))
def read_file(dir_name="train1/"):
a=[]
dir_path= os.listdir(dir_name)
dir_path.sort()
for i in dir_path:
if i[-1]=='g':
a.append(i)
random.shuffle(a)
return a
def input_data(infor, test_flag=0):
    """Build an (images, labels) pair of numpy arrays from a list of file
    names. test_flag=1 reads from 'test1/', otherwise from 'train1/'."""
    dir_name = 'test1/' if test_flag == 1 else 'train1/'
    images, labels = [], []
    for name in infor:
        images.append(write_pic(name, dir_name))
        labels.append(one_hot(name))
    return np.array(images), np.array(labels)
# --- Fine-tuning setup: reload a saved graph from save_mode/, bolt two new
# --- fully-connected layers on top of its dropout output, and build the
# --- training ops. Alternative activation choices kept for reference:
# activation=binary_relu_unit
# activation=sigmoid
# activation=tf.nn.sigmoid
# activation=binary_tanh_unit
# activation=tf.nn.tanh
activation=tf.nn.relu
#quantization=binary_tanh_unit
# quantization=round_through2
quantization=linear
activation_print(activation)
saverdir="save_mode/"
model_path_restore=tf.train.latest_checkpoint(saverdir)
# model_path_restore = model_dir + '/model_'+model_version+'.ckpt'
meta_path_restore = 'save_mode/train.cpkt.meta'
saver_restore = tf.train.import_meta_graph(meta_path_restore) # saver of the imported graph, used later to restore its weights
graph_restore = tf.get_default_graph() # at this point the default graph is the imported graph
# input_x = tf.get_collection('Placeholder')[0]
# input_is_training = tf.get_collection('is_training')[0]
# output_feat_fused = tf.get_collection('feat_fused')[0]
graph = tf.get_default_graph()
# Recover the original placeholders by their auto-generated names.
xs = graph.get_tensor_by_name('Placeholder:0')
# xs=tf.get_collection('Placeholder')[0]
ys=graph.get_tensor_by_name('Placeholder_1:0')
print(ys.shape)
keep_prob=graph.get_tensor_by_name('Placeholder_2:0')
train=graph.get_tensor_by_name('Placeholder_3:0')
# print("train",train.name)
pool_flag=graph.get_tensor_by_name('Placeholder_4:0')
bn_flag=tf.get_collection('Placeholder_5')
# graph_restore.clear_collection('feat_fused')
# Take the dropout output of the old network and stack two new FC layers
# (256 -> 256 -> 5 classes) on top of it.
h_fc1=graph.get_tensor_by_name('dropout/mul:0')
W_fc3=weight_variable([256,256],20)
b_fc3=bias_variable([256])
h_fc1=activation((tf.matmul(h_fc1,W_fc3)+b_fc3))
W_fc4=weight_variable([256,5],21)
b_fc4=bias_variable([5])
prediction=tf.matmul(h_fc1,W_fc4)+b_fc4
out=tf.argmax(prediction,1)
correct_prediction=tf.equal(tf.argmax(prediction,1),tf.argmax(ys,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
global_step = tf.Variable(0, trainable=False)
# decaylearning_rate=1e-4
decaylearning_rate=tf.train.exponential_decay(0.0001,
global_step=global_step,decay_steps=5200, decay_rate=0.90)
import datetime
print2file("%s"%datetime.datetime.now())
# Loss = softmax cross-entropy plus the accumulated L2 weight-decay terms.
regular_loss = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
regular_loss = tf.add_n(regular_loss)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=ys, logits=prediction))+0.5*regular_loss
train_step = tf.train.AdamOptimizer(decaylearning_rate,name='Adamk').minimize(loss,global_step=global_step)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
var_list = tf.trainable_variables()
g_list = tf.global_variables()
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
# Restore old weights with the imported graph's saver: a brand-new Saver
# would also expect the freshly created variables, which the checkpoint
# does not contain.
saver_restore.restore(sess, model_path_restore)
saver = tf.train.Saver(var_list=var_list, max_to_keep=2)
# saverdir="save_mode123/"
# kpt=tf.train.latest_checkpoint(saverdir)
# if kpt!=None:
# saver.restore(sess,kpt)
# print2file("uning old_saver")
train_infor=read_file()
test_infor=read_file('test1/')
# NOTE(review): the loop bodies below lost their indentation in this paste;
# re-indent (print inside the inner loop) before running.
OPS = tf.get_default_graph().get_operations()
for op in OPS:
for i in op.outputs:
print(i.name,i.shape)
# --- Training loop: up to 1000 epochs of mini-batch (size 10) training,
# --- evaluating on the test set each epoch and checkpointing on a new
# --- best accuracy.
# NOTE(review): the indentation of this whole loop was lost when the
# snippet was pasted; the nesting (outer epoch loop, inner batch loops)
# must be restored before this block can run.
status=1
if status==1:
for i in range(1000):
# img=cv2.imread('train1/0_cCH21538.jpg')
# img=cv2.resize(img,(90,64))
# img=np.array(img).reshape([1,64,90,3])
# acc_forone=sess.run(out,feed_dict={xs:img,keep_prob:1})
# np.array(a).reshape((64,90,3))
# print(acc_forone)
for m in range(len(train_infor)//10):
train_data,train_label=input_data(train_infor[m*10:m*10+10])
# print(train_data.shape,train_label.shape)
train_step1,loss_=sess.run([train_step,loss],feed_dict={xs:train_data,
ys:train_label,keep_prob:0.8,train:1})
accuracy_all=0
for m in range(len(test_infor)//10):
test_data,test_label=input_data(test_infor[m*10:m*10+10],1)
# print(train_data)
acc_forone=sess.run(accuracy,feed_dict={xs:test_data,
ys:test_label,keep_prob:1})
accuracy_all+=acc_forone
accuracy_all=accuracy_all/(len(test_infor)//10)
print2file(accuracy_all)
if accuracy_all==1:
break
# train_step1,W_fc21=sess.run([train_step,W_fc2],feed_dict={,ys:batch_ys,keep_prob:0.5,train:1,pool_flag:1})
time1=time.time()
learning_rate=sess.run(decaylearning_rate)
# print2file(".....................time=%f learning_rate=%f loss=%f "%(time1-old_time1,learning_rate,loss_))
old_time1=time1
# Stop once the decayed learning rate underflows to zero.
if learning_rate==0:
break
# Checkpoint whenever the test accuracy improves on the best so far.
if old_accuracy<accuracy_all:
old_accuracy=accuracy_all
saver.save(sess,"save_mode_max/train.cpkt")
print2file("---------------------------------------------accuracy_max=%f"%(accuracy_all))
# model_path_restore.save(sess,"save_mode/train.cpkt")
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
# --- Load the test image ---
img = Image.open('lena.jpg')
# Convert to greyscale, then to a float32 numpy array.
img_gray = np.array(img.convert('L'), dtype=np.float32)

# Show the colour and greyscale versions side by side.
plt.figure(figsize=(12, 6))
plt.subplot(121), plt.imshow(img, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(122), plt.imshow(img_gray, cmap=plt.cm.gray)
plt.axis('off')
plt.show()

imh, imw = img_gray.shape
# Convolution expects a 4-D tensor laid out as [batch, channel, h, w].
img_gray_t = torch.from_numpy(img_gray.reshape((1, 1, imh, imw)))
print(img_gray_t.shape)
'''
在对图像进行卷积操作后,获得两个特征映射:
1、使用图像轮廓提取卷积核获取
2、第二个特征映射使用的卷积核为随机数,卷积核大小为5x5,对图像的边缘不使用0填充,所以卷积后输出的特征映射的尺寸为508x508
'''
# --- Edge extraction by convolution ---
kersize = 5  # edge-detection kernel, reshaped to 1*1*5*5 below
ker = torch.ones(kersize, kersize, dtype=torch.float32) * -1
ker[2, 2] = 24  # centre weight balances the 24 surrounding -1 entries
ker = ker.reshape(1, 1, kersize, kersize)
print("边缘检测核:", ker)

# Two output channels, no bias: channel 0 gets the edge-detection kernel,
# channel 1 keeps its random initialisation.
conv2d = nn.Conv2d(1, 2, (kersize, kersize), bias=False)
conv2d.weight.data[0] = ker
imconv2out = conv2d(img_gray_t)
# Drop the singleton dimensions for plotting.
imconv2dout_im = imconv2out.data.squeeze()
print("卷积后尺寸:", imconv2dout_im.shape)

# Visualise both feature maps.
plt.figure(figsize=(12, 6))
plt.subplot(121), plt.imshow(imconv2dout_im[0], cmap=plt.cm.gray)
plt.axis("off")
plt.subplot(122), plt.imshow(imconv2dout_im[1], cmap=plt.cm.gray)
print("随机卷积核:", conv2d.weight.data[1])
plt.axis("off")
plt.show()