#1. import OpenCV
#2. call the API
#3. pause the program so the window stays open
import cv2
img = cv2.imread('image0.jpg',1) # read image: 1.filename 2.flag (0: grayscale, 1: color)
cv2.imshow('image',img) # 1.window name 2.image data
cv2.waitKey(0) # block until a key is pressed
import cv2
img = cv2.imread('image0.jpg',1)
cv2.imwrite('image1.jpg',img) # 1.filename 2.image data
# JPEG: lossy compression, quality range 0-100
import cv2
img = cv2.imread('image0.jpg',1)
cv2.imwrite('imageTest.jpg',img,[cv2.IMWRITE_JPEG_QUALITY, 50])
# PNG: lossless compression, supports an alpha (transparency) channel
import cv2
img = cv2.imread('image0.jpg',1)
cv2.imwrite('imageTest.png',img,[cv2.IMWRITE_PNG_COMPRESSION, 0])
JPEG: a smaller quality value means higher compression (and lower quality); range 0-100.
PNG: a smaller compression level means less compression (a larger file); range 0-9.
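A quick way to see the effect of the quality parameter is to write the same image at several JPEG qualities and compare the file sizes. A minimal sketch, assuming image0.jpg is in the working directory; the quality_*.jpg output names are only for illustration:
import os
import cv2
img = cv2.imread('image0.jpg',1)
for q in (10,50,90): # three JPEG quality levels
    name = 'quality_%d.jpg'%q # hypothetical output file name
    cv2.imwrite(name,img,[cv2.IMWRITE_JPEG_QUALITY,q])
    print(name,os.path.getsize(name),'bytes') # lower quality -> smaller file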
import cv2
img = cv2.imread('image0.jpg',1)
(b,g,r) = img[100,100]
print(b,g,r) # BGR values of the pixel at (100,100)
# draw a blue line from (10,100) down to (110,100)
for i in range(1,100):
    img[10+i,100] = (255,0,0)
cv2.imshow('image',img)
cv2.waitKey(0) # 0 = wait indefinitely for a key press
Analogy with Python basics: syntax, the API, and the underlying principles.
Basic data types, operators, control flow, dictionaries, arrays.
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works; call this before creating any tensors
data1 = tf.constant(2.5)
data2 = tf.Variable(10,name='var')
print(data1)
print(data2)
sess = tf.compat.v1.Session()
print(sess.run(data1))
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
data1 = tf.constant(2,dtype=tf.int32)
data2 = tf.Variable(10,name='var')
print(data1)
print(data2)
sess = tf.compat.v1.Session()
print(sess.run(data1))
# initialize the variables
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
print(sess.run(data2))
Or, importing the compat.v1 API directly:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution() # disable eager mode so that sess.run() works
data1 = tf.constant(2,dtype=tf.int32)
data2 = tf.Variable(10,name='var')
print(data1)
print(data2)
'''
# equivalent version with an explicit Session object:
sess = tf.Session()
print(sess.run(data1))
# initialize the variables
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(data2))
sess.close()
'''
init = tf.global_variables_initializer() # variable initialization
sess = tf.Session()
with sess:
    sess.run(init)
    print(sess.run(data2))
TensorFlow = tensor + graphs (computation graphs)
tensor: the data
op (operation): assignments, arithmetic, and other computations
graph: the process that connects the data and the operations
session: the execution core, the environment in which the graph is actually run
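These four pieces map onto a few lines of compat.v1 code; a minimal sketch (the constants and the add op are illustrative only, not part of the notes above):
import tensorflow.compat.v1 as tf
tf.disable_eager_execution() # build a static graph instead of executing eagerly
a = tf.constant(3) # tensor: the data
b = tf.constant(4) # tensor: the data
c = tf.add(a,b)    # op: an operation node added to the default graph
with tf.Session() as sess: # session: the environment that actually runs the graph
    print(sess.run(c))     # evaluate node c -> 7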
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
data1 = tf.constant(2,dtype=tf.int32)
data2 = tf.Variable(10,name='var')
print(data1)
print(data2)
'''
sess = tf.compat.v1.Session()
print(sess.run(data1))
# initialize the variables
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
print(sess.run(data2))
sess.close()
'''
init = tf.compat.v1.global_variables_initializer() # variable initialization
sess = tf.compat.v1.Session()
with sess:
    sess.run(init)
    print(sess.run(data2))
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
data1 = tf.constant(6)
data2 = tf.constant(2)
dataAdd = tf.add(data1,data2)
dataMul = tf.multiply(data1,data2)
dataSub = tf.subtract(data1,data2)
dataDiv = tf.divide(data1,data2)
with tf.compat.v1.Session() as sess:
    print(sess.run(dataAdd))
    print(sess.run(dataMul))
    print(sess.run(dataSub))
    print(sess.run(dataDiv))
print('end!')
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
data1 = tf.constant(6)
data2 = tf.Variable(2)
dataAdd = tf.add(data1,data2)
dataMul = tf.multiply(data1,data2)
dataSub = tf.subtract(data1,data2)
dataDiv = tf.divide(data1,data2)
init = tf.compat.v1.global_variables_initializer() # variable initialization
with tf.compat.v1.Session() as sess:
    sess.run(init)
    print(sess.run(dataAdd))
    print(sess.run(dataMul))
    print(sess.run(dataSub))
    print(sess.run(dataDiv))
print('end!')
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
data1 = tf.constant(6)
data2 = tf.Variable(2)
dataAdd = tf.add(data1,data2)
dataCopy = tf.compat.v1.assign(data2,dataAdd) # assign the value of dataAdd to data2
dataMul = tf.multiply(data1,data2)
dataSub = tf.subtract(data1,data2)
dataDiv = tf.divide(data1,data2)
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
    sess.run(init)
    print(sess.run(dataAdd))
    print(sess.run(dataMul))
    print(sess.run(dataSub))
    print(sess.run(dataDiv))
    # each run of dataCopy re-evaluates dataAdd with the current data2 and writes the result back into data2
    print('sess.run(dataCopy)',sess.run(dataCopy)) # data1=6, data2=2 -> data2 becomes 8
    print('dataCopy.eval()',dataCopy.eval()) # data1=6, data2=8 -> data2 becomes 14
    print('tf.compat.v1.get_default_session()',tf.compat.v1.get_default_session().run(dataCopy)) # data1=6, data2=14 -> data2 becomes 20
print('end!')
placeholder: a TensorFlow placeholder, used to feed external data into the graph at run time.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
data1 = tf.placeholder(tf.float32)
data2 = tf.placeholder(tf.float32)
dataAdd = tf.add(data1,data2)
with tf.Session() as sess:
    # 1. the op to run (dataAdd) 2. the data: feed_dict={placeholder1:6, placeholder2:2}
    print(sess.run(dataAdd,feed_dict={data1:6,data2:2}))
print('end!')
Analogy with arrays: an M-row, N-column matrix is written as nested brackets; each inner bracket holds the column values of one row, and the number of inner brackets is the number of rows.
For example, [[6,6]] has one row and two columns.
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
data1 = tf.constant([[6,6]])
data2 = tf.constant([[2],
[2]])
data3 = tf.constant([[3,3]])
data4 = tf.constant([[1,2],
[3,4],
[5,6]])
print(data4.shape) # shape: (rows, columns)
with tf.compat.v1.Session() as sess:
    print(sess.run(data4)) # print the whole matrix
    print(sess.run(data4[0])) # print one row
    print(sess.run(data4[:,0])) # print one column
    print(sess.run(data4[0,0])) # print a single element
Matrix multiplication: (M x k) * (k x N) = (M x N)
Matrix addition: (M x N) + (M x N) = (M x N)
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
data1 = tf.constant([[6,6]])
data2 = tf.constant([[2],
[2]])
data3 = tf.constant([[3,3]])
data4 = tf.constant([[1,2],
[3,4],
[5,6]])
matMul = tf.matmul(data1,data2)
matMul2 = tf.multiply(data1,data2)
matAdd = tf.add(data1,data3)
with tf.compat.v1.Session() as sess:
    print(sess.run(matMul))
    print(sess.run(matAdd))
    print(sess.run(matMul2)) # element-wise multiply with broadcasting: (1x2) and (2x1) -> (2x2)
    print(sess.run([matMul,matAdd])) # run several ops in one call
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
mat0 = tf.constant([[0,0,0],[0,0,0]])
mat1 = tf.zeros([2,3])
mat2 = tf.ones([3,2])
mat3 = tf.fill([2,3],15)
with tf.compat.v1.Session() as sess:
    print(sess.run(mat0))
    print(sess.run(mat1))
    print(sess.run(mat2))
    print(sess.run(mat3))
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # disable eager mode so that sess.run() works
mat1 = tf.constant([[2],[3],[4]])
mat2 = tf.zeros_like(mat1)
mat3 = tf.linspace(0.0,2.0,11)
mat4 = tf.random.uniform([2,3],-1,2)
with tf.compat.v1.Session() as sess:
    print(sess.run(mat1))
    print(sess.run(mat2))
    print(sess.run(mat3))
    print(sess.run(mat4))
# create / read / update / delete: basic NumPy array operations
import numpy as np
data1 = np.array([1,2,3,4,5])
print(data1)
data2 = np.array([[1,2],
[3,4]])
print(data2)
# shape
print(data1.shape,data2.shape)
#zero ones
print(np.zeros([2,3]),np.ones([2,2]))
# update and query elements
data2[1,0] = 5
print(data2)
print(data2[1,1])
# element-wise arithmetic with a scalar
data3 = np.ones([2,3])
print(data3*2)
print(data3/3)
print(data3+2)
print(data3-1)
# element-wise matrix + and *
data4 = np.array([[1,2,3],[4,5,6]])
print(data3+data4)
print(data3*data4)
import numpy as np
import matplotlib.pyplot as plt
x = np.array([1,2,3,4,5,6,7,8])
y = np.array([3,5,7,6,2,6,10,15])
plt.plot(x,y,'r') # line plot: 1.x 2.y 3.color
# matplotlib draws line, pie, and bar charts (a pie-chart sketch follows after this block)
x = np.array([1,2,3,4,5,6,7,8])
y = np.array([13,25,17,36,21,16,10,15])
plt.bar(x,y,0.5,alpha=1,color='b') # bar chart: 1.x 2.y 3.bar width 4.alpha (opacity) 5.color
plt.show()
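The pie chart mentioned above is not shown in these notes; a minimal sketch with plt.pie, using made-up labels and values:
import matplotlib.pyplot as plt
values = [35,25,25,15] # hypothetical share of each category
labels = ['A','B','C','D'] # hypothetical category names
plt.pie(values,labels=labels,autopct='%1.1f%%') # slices with percentage labels
plt.axis('equal') # keep the pie circular
plt.show()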
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
date = np.linspace(1,15,15)
endPrice = np.array([2511.90,2538.26,2510.68,2591.66,2732.98,2701.69,2701.29,2678.67,2726.50,2681.50,2739.17,2715.07,2823.58,2864.90,2919.08]
)
beginPrice = np.array([2438.71,2500.88,2534.95,2512.52,2594.04,2743.26,2697.47,2695.24,2678.23,2722.13,2674.93,2744.13,2717.46,2832.73,2877.40])
print(date)
plt.figure()
for i in range(0,15):
    # 1. draw one vertical bar per day, from the opening price to the closing price
    dataOne = np.zeros([2])
    dataOne[0] = i
    dataOne[1] = i
    priceOne = np.zeros([2])
    priceOne[0] = beginPrice[i]
    priceOne[1] = endPrice[i]
    if endPrice[i]>beginPrice[i]:
        plt.plot(dataOne,priceOne,'r',lw=8) # the price went up: red
    else:
        plt.plot(dataOne,priceOne,'b',lw=8) # the price went down: blue
#plt.show()
# A(15*1) * w1(1*10) + b1(1*10) = B(15*10)
# B(15*10) * w2(10*1) + b2(15*1) = C(15*1)
# 1. A is the input layer, B the hidden layer, C the output layer
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
dateNormal = np.zeros([15,1])
priceNormal = np.zeros([15,1])
for i in range(0,15):
    dateNormal[i,0] = i/14.0 # normalize the dates to [0,1]
    priceNormal[i,0] = endPrice[i]/3000.0 # normalize the prices
x = tf.placeholder(tf.float32,[None,1])
y = tf.placeholder(tf.float32,[None,1])
# B
w1 = tf.Variable(tf.random_uniform([1,10],0,1))
b1 = tf.Variable(tf.zeros([1,10]))
wb1 = tf.matmul(x,w1)+b1
layer1 = tf.nn.relu(wb1) # activation function
# C
w2 = tf.Variable(tf.random_uniform([10,1],0,1))
b2 = tf.Variable(tf.zeros([15,1]))
wb2 = tf.matmul(layer1,w2)+b2
layer2 = tf.nn.relu(wb2) # activation function
loss = tf.reduce_mean(tf.square(y-layer2)) # y: true price, layer2: predicted price
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(0,10000):
        sess.run(train_step,feed_dict={x:dateNormal,y:priceNormal})
    # with w1, w2, b1, b2 trained, feed A forward through the layers to get layer2
    pred = sess.run(layer2,feed_dict={x:dateNormal})
    predPrice = np.zeros([15,1])
    for i in range(0,15):
        predPrice[i,0]=(pred*3000)[i,0] # undo the price normalization
    plt.plot(date,predPrice,'b',lw=1)
plt.show()
# 1.load 2.info 3.resize 4.check
import cv2
img = cv2.imread('image0.jpg',1)
imgInfo = img.shape
print(imgInfo)
height = imgInfo[0]
width = imgInfo[1]
mode = imgInfo[2]
# 1. enlarge or shrink 2. proportional or non-proportional scaling
dstHeight = int(height*0.5)
dstWidth = int(width*0.5)
# interpolation methods: nearest neighbor, bilinear, pixel-area resampling, cubic (see the sketch after this block)
dst = cv2.resize(img,(dstWidth,dstHeight))
cv2.imshow('image',dst)
cv2.waitKey(0)
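cv2.resize uses bilinear interpolation by default; the method can be selected explicitly through the interpolation argument. A minimal sketch, again assuming image0.jpg:
import cv2
img = cv2.imread('image0.jpg',1)
size = (img.shape[1]//2,img.shape[0]//2) # dsize is (width,height)
nearest = cv2.resize(img,size,interpolation=cv2.INTER_NEAREST) # nearest neighbor
linear = cv2.resize(img,size,interpolation=cv2.INTER_LINEAR) # bilinear (default)
area = cv2.resize(img,size,interpolation=cv2.INTER_AREA) # pixel-area resampling, good for shrinking
cubic = cv2.resize(img,size,interpolation=cv2.INTER_CUBIC) # bicubic
cv2.imshow('nearest',nearest)
cv2.waitKey(0)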
Nearest-neighbor interpolation
Principle:
source src is 10*20, destination dst is 5*10
each dst pixel is mapped back to a src pixel: dst <- src
e.g. dst (1,2) <- src (2,4)
newX = x * (src rows / dst rows), so newX = 1 * (10/5) = 2
newY = y * (src cols / dst cols), so newY = 2 * (20/10) = 4
fractional coordinates are truncated, e.g. 12.3 -> 12
Bilinear interpolation
A1 = 20% * upper pixel + 80% * lower pixel, and similarly for A2
B1 = 30% * left pixel + 70% * right pixel, and similarly for B2
the target value is then blended from A1/A2 (or B1/B2) in the other direction
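The notes only give the weights; below is a minimal NumPy sketch of bilinear sampling at one fractional coordinate, written from the description above (the function name and test values are made up):
import numpy as np
def bilinear_sample(img, y, x):
    # sample a 2-D array at fractional coordinates (y, x) with bilinear weights
    y0, x0 = int(np.floor(y)), int(np.floor(x)) # top-left integer neighbor
    y1, x1 = min(y0+1, img.shape[0]-1), min(x0+1, img.shape[1]-1)
    wy, wx = y - y0, x - x0 # fractional parts are the blend weights
    top = (1-wx)*img[y0,x0] + wx*img[y0,x1] # blend left/right on the upper row
    bottom = (1-wx)*img[y1,x0] + wx*img[y1,x1] # blend left/right on the lower row
    return (1-wy)*top + wy*bottom # blend upper/lower rows
demo = np.array([[0.0,10.0],[20.0,30.0]])
print(bilinear_sample(demo, 0.2, 0.8)) # 20% toward the lower row, 80% toward the right column -> 12.0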
# 1. image info 2. blank destination template 3. map the x,y coordinates
import cv2
import numpy as np
img = cv2.imread('image0.jpg',1)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
mode = imgInfo[2]
dstHeight = int(height/2)
dstWidth = int(width/2)
dstImage = np.zeros((dstHeight,dstWidth,3),np.uint8) # dtype np.uint8: pixel values 0-255
for i in range(0,dstHeight): # rows correspond to the height
    for j in range(0,dstWidth): # columns correspond to the width
        iNew = int(i*(height*1.0/dstHeight)) # nearest-neighbor mapping back to the source row
        jNew = int(j*(width*1.0/dstWidth)) # ... and source column
        dstImage[i,j] = img[iNew,jNew]
cv2.imshow('dst',dstImage)
cv2.waitKey(0)
API level:
the 2*3 matrix [[1,0,100],[0,1,200]] splits into a 2*2 part and a 2*1 part:
[[1,0],[0,1]] is the 2*2 part A
[[100],[200]] is the 2*1 part B
the input coordinates (x,y) form C
A*C+B = [[1*x+0*y],[0*x+1*y]] + [[100],[200]] = [[x+100],[y+200]]
Pixel level:
every pixel is shifted, e.g. (10,20) -> (110,120)
import cv2
import numpy as np
img = cv2.imread('image0.jpg',1)
cv2.imshow('src',img)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
####
matShift = np.float32([[1,0,100],[0,1,200]]) # 2*3 translation (shift) matrix
dst = cv2.warpAffine(img,matShift,(width,height)) # apply the shift via a matrix operation: 1.image 2.matrix 3.output size (width,height)
cv2.imshow('dst',dst)
cv2.waitKey(0)
Source-level implementation:
import cv2
import numpy as np
img = cv2.imread('image0.jpg',1)
cv2.imshow('src',img)
imgInfo = img.shape
dst = np.zeros(img.shape,np.uint8)
height = imgInfo[0]
width = imgInfo[1]
for i in range(0,height):
    for j in range(0,width-100):
        dst[i,j+100]=img[i,j] # shift every pixel 100 columns to the right
cv2.imshow('image',dst)
cv2.waitKey(0)
# [[A1 A2 B1],[A3 A4 B2]]
# [[A1 A2],[A3 A4]] [[B1],[B2]]
# newX = A1*x + A2*y + B1
# newY = A3*x + A4*y + B2
# x->x*0.5 y->y*0.5
# newX = 0.5*x
import cv2
import numpy as np
img = cv2.imread('image0.jpg',1)
cv2.imshow('src',img)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
matScale = np.float32([[0.5,0,0],[0,0.5,0]]) # 2*3 scaling matrix
dst = cv2.warpAffine(img,matScale,(int(width/2),int(height/2))) # 1.image 2.matrix 3.output size (width,height)
cv2.imshow('dst',dst)
cv2.waitKey(0)
import cv2
import numpy as np
img = cv2.imread('image0.jpg',1)
cv2.imshow('src',img)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
# 3 points in src map to 3 points in dst (top-left, bottom-left, top-right)
matSrc = np.float32([[0,0],[0,height-1],[width-1,0]])
matDst = np.float32([[50,50],[300,height-200],[width-300,100]])
# build the affine matrix from the two point sets
matAffine = cv2.getAffineTransform(matSrc,matDst) #mat 1.src 2.dst
dst = cv2.warpAffine(img,matAffine,(width,height))
cv2.imshow('dst',dst)
cv2.waitKey(0)
import cv2
import numpy as np
img = cv2.imread('image0.jpg',1)
cv2.imshow('src',img)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
deep = imgInfo[2]
newImgInfo = (height*2,width,deep)
dst = np.zeros(newImgInfo,np.uint8)
for i in range(0,height):
    for j in range(0,width):
        dst[i,j]=img[i,j] # copy the original image into the top half
        # x stays the same; the mirrored row is y = 2*h - y - 1
        dst[height*2-i-1,j] = img[i,j]
for i in range(0,width):
    dst[height,i] = (0,0,255) # draw a red dividing line (BGR)
cv2.imshow('dst',dst)
cv2.waitKey(0)
import cv2
import numpy as np
img = cv2.imread('image0.jpg',1)
cv2.imshow('src',img)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
# 2*3
matRotate = cv2.getRotationMatrix2D((width*0.5,height*0.5),45,0.5) # rotation matrix: 1.center (x,y) 2.angle 3.scale
# 100*100 25
dst = cv2.warpAffine(img,matRotate,(width,height))
cv2.imshow('dst',dst)
cv2.waitKey(0)