Basic APIs
Integrating the basic APIs with Keras
Using @tf.function
Custom differentiation
API list
Custom loss functions - tf.reduce_mean
Custom layers - keras.layers.Lambda and subclassing
tf.function
GraphDef
Automatic differentiation
import numpy as np
import tensorflow as tf
from tensorflow import keras

t = tf.constant([[1.,2.,3.],[4.,5.,6.]])
print(t)
print(t[:,1:])  # all columns from the second one onward
print(t[...,1])  # extract the second column as a 1-D tensor
print(t[:,1])  # same result as the previous line
# ops
print(t+10)  # element-wise addition
print(tf.square(t))  # element-wise square
print(t @ tf.transpose(t))  # matrix multiplication with the transpose
# numpy conversion
print(t.numpy())  # convert a TensorFlow tensor to a numpy array
#print(np.square(t))
np_t = np.array([[1.,2.,3],[4.,5.,6]])
print("---------------")
print(np_t)
print("---------------")
print(tf.constant(np_t))  # convert a numpy array to a TensorFlow tensor
# scalars
t = tf.constant(2.7)  # a 0-dimensional (scalar) tensor
print(t.numpy())
print(t.shape)  # shape of the tensor; () for a scalar
# strings
t = tf.constant("caf11e")
print(t)
print(tf.strings.length(t))  # byte length; tf.strings ops work on string tensors
print(tf.strings.length(t, unit="UTF8_CHAR"))  # length in UTF-8 characters
print(tf.strings.unicode_decode(t, "UTF8"))  # decode UTF-8 bytes into Unicode code points
t = tf.constant(["cafe","coffee","咖啡"])
print(tf.strings.length(t,unit="UTF8_CHAR"))
print(tf.strings.unicode_decode(t,"UTF8"))
# Ragged tensors: rows of unequal length, very common in real data
r = tf.ragged.constant([[11,19],[12,13,22],[],[21]])
print(r)
print(r[1])
print(r[1:3])
print(r.shape)
r2 = tf.ragged.constant([[1,],[2,4,1],[5,6,7],[2,3,4]])
print(r)
print(r2)
print(r2.shape)
print(tf.concat([r, r2], axis=1))  # axis=1 joins corresponding rows (requires equal row counts); axis=0 stacks the rows vertically
print(r.to_tensor())  # pad shorter rows with zeros to get a regular dense tensor
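For comparison with the axis=1 concatenation above, axis=0 simply stacks the row lists and does not require matching row counts:
print(tf.concat([r, r2], axis=0))  # 8 rows: r's 4 rows followed by r2's 4 rows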
s = tf.SparseTensor(indices=[[0,1],[1,0],[2,3]],  # indices must be in row-major sorted order, or tf.sparse.to_dense will fail
                    values=[1,2,3],
                    dense_shape=[3,4])
print(s)
print(tf.sparse.to_dense(s))  # convert the sparse tensor to a dense tensor
s2 = s * 2
print(s2)
try:
    s3 = s + 1  # sparse tensors do not support the + operator
except TypeError as ex:
    print(ex)
s4 = tf.constant([[10,20],[1,2],[5,6],[2,4]])  # multiplying a sparse tensor by a dense tensor
print(tf.sparse.sparse_dense_matmul(s,s4))
s5 = tf.SparseTensor(indices=[[0,2],[1,0],[0,3]],  # these indices are NOT in sorted order
                     values=[1,2,3],
                     dense_shape=[3,4])
print(s5)
s6 = tf.sparse.reorder(s5)  # call tf.sparse.reorder to sort the indices first
print(tf.sparse.to_dense(s6))  # then conversion to dense works
v = tf.Variable([[1.0,2.0,3.0],[4.,5.,6.]])
print(v)
print(v.value())
print(v.numpy())
v.assign(2*v)  # replace the entire variable's value
print(v.numpy())
v[0,1].assign(42)  # assign a single element
print(v.numpy())
v[1,:].assign([4.,6.,1.])  # assign an entire row
print(v.numpy())
try:
    v[0,1] = 42  # plain item assignment is not supported; use .assign instead
except TypeError as ex:
    print(ex)
def customized_mse(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_true - y_pred))
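A quick sanity check on two small tensors (the values here are illustrative):
y_true = tf.constant([1., 2., 3.])
y_pred = tf.constant([2., 2., 5.])
print(customized_mse(y_true, y_pred))  # (1 + 0 + 4) / 3 = 1.6667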
class CustomizedDenseLayer(keras.layers.Layer):
    def __init__(self, units, activation=None, **kwargs):
        super(CustomizedDenseLayer, self).__init__(**kwargs)
        self.units = units
        self.activation = keras.layers.Activation(activation=activation)
    def build(self, input_shape):
        """Create the layer's trainable parameters."""
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.units),
                                      initializer='uniform',
                                      trainable=True)
        self.bias = self.add_weight(name='bias',
                                    shape=(self.units,),
                                    initializer='zeros',
                                    trainable=True)
        super(CustomizedDenseLayer, self).build(input_shape)
    def call(self, x):
        """Forward computation: activation(x @ kernel + bias)."""
        return self.activation(x @ self.kernel + self.bias)
tf.nn.softplus: log(1 + e^x), a smoothed version of ReLU
customized_softplus = keras.layers.Lambda(lambda x: tf.nn.softplus(x))  # Lambda is a lightweight way to wrap a simple function as a layer
print(customized_softplus([-10., -5., 0., 5., 10.]))
model = keras.models.Sequential([
    CustomizedDenseLayer(30, activation='relu',
                         input_shape=x_train.shape[1:]),
    CustomizedDenseLayer(1),
    customized_softplus,
])
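To tie the custom loss and custom layers together, a minimal compile-and-fit sketch (the optimizer and epoch count are illustrative; x_train/y_train come from earlier cells):
model.compile(loss=customized_mse, optimizer='sgd')  # plug the custom loss into Keras
history = model.fit(x_train, y_train, epochs=10)  # x_train/y_train assumed from earlier notebook cells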
# z >= 0 ? scale * z : scale * alpha * tf.nn.elu(z)
def scaled_elu(z, scale=2.0, alpha=0.5):  # scale shapes the positive half; scale*alpha shapes the negative half
    is_positive = tf.greater_equal(z, 0.0)  # boolean tensor: is z >= 0?
    return scale * tf.where(is_positive, z, alpha * tf.nn.elu(z))  # tf.where picks the 2nd arg where the condition is True, the 3rd where it is False
print(scaled_elu(tf.constant(-3.)))
print(scaled_elu(tf.constant([-100.,-50,-10,-5,-1,0,1,5,10,100])))
tf_scaled_elu = tf.function(scaled_elu)  # tf.function converts a Python function into a TensorFlow graph
print(tf_scaled_elu(tf.constant(-3.)))
print(tf_scaled_elu(tf.constant([-100.,-50,-10,-5,-1,0,1,5,10,100])))
# Functionally identical, but the graph version tf_scaled_elu runs faster
%timeit tf_scaled_elu(tf.random.normal((1000,1000)))
%timeit scaled_elu(tf.random.normal((1000,1000)))  # the speedup is even more pronounced on a GPU
print(tf_scaled_elu.python_function)  # .python_function recovers the original Python function from the graph function
# 1 + 1/2 + 1/2^2 + ... + 1/2^(n-1), which converges to 2
@tf.function  # this decorator compiles the function below into a TensorFlow graph
def converge_to_2(n_iters):
    total = tf.constant(0.)
    increment = tf.constant(1.)
    for _ in range(n_iters):
        total += increment
        increment /= 2.
    return total
print(converge_to_2(20))
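As a check, the loop matches the closed form of the geometric series: after n iterations, total = 2 - 2**(1-n):
n = 20
print(2 - 2 ** (1 - n))  # 1.9999980926513672, matching converge_to_2(20)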
def display_tf_code(func):  # show the graph-building code that autograph generates for a Python function
    code = tf.autograph.to_code(func)
    from IPython.display import display, Markdown
    display(Markdown('```python\n{}```'.format(code)))
display_tf_code(scaled_elu)
Variables must be declared before (outside of) the @tf.function-decorated function.
var = tf.Variable(0.)  # declare the variable outside the function
@tf.function
def add_21():
    return var.assign_add(21)  # equivalent to var += 21
print(add_21())
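Conversely, creating the variable inside the decorated function raises an error; a minimal sketch of the failure mode (the name bad_add_21 is illustrative):
@tf.function
def bad_add_21():
    v = tf.Variable(0.)  # not allowed: tf.function forbids creating variables on each call
    return v.assign_add(21)
try:
    print(bad_add_21())
except ValueError as ex:
    print(ex)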
@tf.function(input_signature=[tf.TensorSpec([None], tf.int32, name='x')])
def cube(z):
    return tf.pow(z, 3)
try:
    print(cube(tf.constant([1., 2., 3.])))  # float32 input violates the int32 signature
except ValueError as ex:
    print(ex)
print(cube(tf.constant([1, 2, 3])))  # int32 input matches the signature
# @tf.function: Python function -> TF graph
# get_concrete_function: add an input signature -> exportable as a SavedModel
cube_func_int32 = cube.get_concrete_function(
    tf.TensorSpec([None], tf.int32))
print(cube_func_int32)
print(cube_func_int32 is cube.get_concrete_function(tf.TensorSpec([5], tf.int32)))  # True: compatible shapes share one concrete function
print(cube_func_int32 is cube.get_concrete_function(tf.constant([5, 2, 3])))  # True as well
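Following the pipeline in the comment above, a minimal sketch of exporting the signed function as a SavedModel (the module wrapper and the './signed_cube' path are illustrative):
to_export = tf.Module()
to_export.cube = cube  # attach the signed tf.function to a module
tf.saved_model.save(to_export, './signed_cube')  # path is illustrative
loaded = tf.saved_model.load('./signed_cube')
print(loaded.cube(tf.constant([2])))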
cube_func_int32.graph  # the underlying tf.Graph
cube_func_int32.graph.get_operations()  # list all operations in the graph
pow_op = cube_func_int32.graph.get_operations()[2]  # the Pow operation
print(pow_op)
print(list(pow_op.inputs))
print(list(pow_op.outputs))
cube_func_int32.graph.get_operation_by_name("x")  # look up an operation by name
cube_func_int32.graph.get_tensor_by_name("x:0")  # look up a tensor by name
cube_func_int32.graph.as_graph_def()  # serialize the graph to a GraphDef protobuf
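The outline also lists automatic differentiation; as a standalone sketch (not tied to the cells above), tf.GradientTape records operations and computes gradients:
x = tf.Variable(2.)
with tf.GradientTape() as tape:
    y = 3. * x ** 2 + 2. * x - 1.
print(tape.gradient(y, x))  # dy/dx = 6x + 2 = 14.0 at x = 2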