# Simplest image crawler
import os
import urllib
import urllib.request

import urllib3
from bs4 import BeautifulSoup
# Fetch the course page, pull the image URL out of each 'img-con' div's
# inline CSS style, and download every .jpg to a local folder.
html = urllib.request.urlopen(
    'https://class.imooc.com/?c=ios&mc_marking=286b51b2a8e40915ea9023c821882e74&mc_channel=L5'
).read()
soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
# Each container div carries its image as a CSS background, not an <img> tag.
images = soup.find_all('div', 'img-con')
print(images)

image_index = 0
for image in images:
    style = image.get('style')
    if not style:
        # Skip containers without an inline style; indexing None would crash.
        continue
    # style looks like: background-image:url(//host/path/pic.jpg);
    # [21:-1] strips the 'background-image:url(' prefix and trailing ')'.
    link = 'http:' + style[21:-1]
    print('link=', link)
    if link.endswith('jpg'):
        file_save_path = 'E:/code/OpenCV/img/' + str(image_index) + '.jpg'
        image_index += 1
        urllib.request.urlretrieve(link, file_save_path)
# OpenCV preprocessing
import cv2
import numpy as np
# Detect faces in 'face.jpg' with Haar cascades, draw a rectangle around
# each face, save each face crop to '<index>.jpg', and count eyes inside
# every detected face region.
face_xml = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_xml = cv2.CascadeClassifier('haarcascade_eye.xml')

img = cv2.imread('face.jpg')
if img is None:
    # imread returns None (no exception) on a missing/unreadable file;
    # fail loudly instead of crashing inside cvtColor.
    raise FileNotFoundError('face.jpg could not be read')
cv2.imshow('src', img)

# Haar cascade detection operates on grayscale input.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# scaleFactor=1.3, minNeighbors=5
faces = face_xml.detectMultiScale(gray, 1.3, 5)
print('face=', len(faces))

for index, (x, y, w, h) in enumerate(faces):
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi_face = gray[y:y + h, x:x + w]    # grayscale crop fed to the eye detector
    roi_color = img[y:y + h, x:x + w]    # color crop saved to disk
    cv2.imwrite(str(index) + '.jpg', roi_color)
    eyes = eye_xml.detectMultiScale(roi_face)
    print('eye=', len(eyes))

cv2.imshow('dst', img)
cv2.waitKey(0)
# "Specific face" recognition
import tensorflow.compat.v1 as tf
import numpy as np
import scipy.io as sio
# Train a small CNN on the Yale 64x64 face dataset (165 images, 15
# subjects) and report accuracy on a 64-sample subset.
# loadmat accepts a path directly; no need to open/leak a file handle.
mdict = sio.loadmat('Yale_64x64.mat')
train_data = mdict['fea']    # flattened images; assumed (165, 4096) — TODO confirm
train_label = mdict['gnd']   # subject ids; assumed (165, 1), values 1..15 — TODO confirm

# BUG FIX: the original permuted data and labels independently with no
# shared seed, destroying the image/label correspondence. Reseed before
# each call so both arrays receive the SAME permutation (matching the
# technique already used for the test split below).
np.random.seed(100)
train_data = np.random.permutation(train_data)
np.random.seed(100)
train_label = np.random.permutation(train_label)

# NOTE(review): the test split is taken from the shuffled training data,
# so these 64 samples are also trained on — accuracy is optimistic.
test_data = train_data[0:64]
test_label = train_label[0:64]
np.random.seed(100)
test_data = np.random.permutation(test_data)
np.random.seed(100)
test_label = np.random.permutation(test_label)

# Reshape to NHWC and scale pixels to [0, 1].
train_data = train_data.reshape(train_data.shape[0], 64, 64, 1).astype(np.float32) / 255
train_label_new = np.zeros([165, 15])
for i in range(165):
    # One-hot encode: subject ids are 1-based, columns are 0-based.
    train_label_new[i, int(train_label[i, 0]) - 1] = 1

test_data_input = test_data.reshape(test_data.shape[0], 64, 64, 1).astype(np.float32) / 255
test_label_input = np.zeros([64, 15])
for i in range(64):
    test_label_input[i, int(test_label[i, 0]) - 1] = 1

tf.disable_v2_behavior()
data_input = tf.placeholder(tf.float32, [None, 64, 64, 1])
label_input = tf.placeholder(tf.float32, [None, 15])

# Network: conv(32 filters, 2x2) -> 2x2 max-pool -> dense(1024, relu)
# -> dense(15 logits). After pooling the feature map is 32x32x32.
layer1 = tf.layers.conv2d(inputs=data_input, filters=32, kernel_size=2,
                          strides=1, padding='SAME', activation=tf.nn.relu)
layer1_pool = tf.layers.max_pooling2d(layer1, pool_size=2, strides=2)
layer2 = tf.reshape(layer1_pool, [-1, 32 * 32 * 32])
layer2_relu = tf.layers.dense(layer2, 1024, tf.nn.relu)
output = tf.layers.dense(layer2_relu, 15)

loss = tf.losses.softmax_cross_entropy(onehot_labels=label_input, logits=output)
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# tf.metrics.accuracy returns (value_op, update_op); [1] runs the update.
accuracy = tf.metrics.accuracy(labels=tf.argmax(label_input, axis=1),
                               predictions=tf.argmax(output, axis=1))[1]

init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
    sess.run(init)
    for step in range(200):
        # Feed the full training set each step (165 samples — no batching).
        # The per-iteration np.array() copies of the original were
        # redundant; the arrays are already ndarrays and are not mutated.
        sess.run([train, loss], feed_dict={
            data_input: train_data, label_input: train_label_new})
        acc = sess.run(accuracy, feed_dict={
            data_input: test_data_input, label_input: test_label_input})
        # BUG FIX: original print('acc:%.2f', acc) printed the tuple
        # instead of applying the format; use the % operator.
        print('acc:%.2f' % acc)