可以先看原理,具体博客为:
https://blog.csdn.net/u013066730/article/details/82763115
使用Pool().map()函数实现多进程,map的第二个参数是可迭代的,这点需要注意。
#coding=utf-8
from multiprocessing import Pool
import scipy
from scipy import misc
import os
import time
import glob
from scipy import ndimage
# Wall-clock timestamp taken at import time; used at the bottom of the
# script to report the total multiprocessing run time.
start = time.time()
def get_image_paths(folder):
    """Return a list of paths to all .png files directly inside *folder*.

    Non-recursive: only the top level of *folder* is searched.
    (The old commented-out os.listdir generator variant was removed.)
    """
    return glob.glob(os.path.join(folder, '*.png'))
def create_read_img(filename):
    """Read the image at *filename*, rotate it 90 degrees, and return it.

    FIX: the original discarded the rotated image and returned None, so the
    results collected by pool.map() were useless; now the rotated array is
    returned.
    NOTE(review): scipy.misc.imread/imsave were removed in SciPy >= 1.2;
    on modern SciPy use imageio.imread / imageio.imwrite instead.
    """
    im = misc.imread(filename)          # load image from disk
    img_rote = ndimage.rotate(im, 90)   # rotate by 90 degrees
    # scipy.misc.imsave('...', img_rote)  # optionally save the result
    return img_rote
if __name__ == "__main__":
    # Directory holding the .png images to process.
    img_path = '存放图像的目录/'
    imgs = get_image_paths(img_path)
    # FIX: `print imgs` was a Python-2-only print statement (SyntaxError on
    # Python 3); the parenthesized single-argument form works on both.
    print(imgs)
    # An empty Pool() uses one worker process per CPU core.
    pool = Pool()
    # map()'s second argument must be iterable; each path goes to a worker.
    pool.map(create_read_img, imgs)
    pool.close()
    pool.join()
    # Sequential version, useful for timing comparison:
    # for i in imgs:
    #     create_read_img(i)

end = time.time()
print(end - start)  # total elapsed time since the script started
使用Pool().apply_async()传入单个参数,不同于上面的map可以传入一个list等可迭代的数据。
#coding:utf-8
from __future__ import print_function
from multiprocessing import cpu_count
from multiprocessing import Pool
import os, time, random
def long_time_task(name):
    """Simulate a long-running job: announce its start, sleep a random
    0-3 seconds, then report the elapsed time."""
    print('Run task %s (%s)...' % (name, os.getpid()))
    began = time.time()
    delay = random.random() * 3
    time.sleep(delay)
    finished = time.time()
    print('Task %s runs %0.2f seconds.' % (name, (finished - began)))
if __name__=='__main__':
    # Parent process id, for comparison with the worker pids printed above.
    print('Parent process %s.' % os.getpid())
    # Fixed-size pool of 4 worker processes.
    p = Pool(4)
    for i in range(5):
        # apply_async submits one call with a single task id; with 5 tasks
        # and 4 workers, the 5th task waits for a free worker.
        p.apply_async(long_time_task, args=(i,))
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')
使用多个进程进行图片的读取,处理,保存。这个是直接自己手动分配进程的,好处就是你对进程的分配很了解,如果有问题也能及时检查出来。并没有真正保存图片,怕一下没收住,保存图像太多了。
#coding:utf-8
from __future__ import print_function
from multiprocessing import Process, Queue, Pool, cpu_count
import os, time, random
import psutil, math
import cv2
def cpu_pool():
    """Estimate how many worker processes the machine can currently afford.

    Samples per-core CPU usage over 1 second, averages it, rounds the busy
    fraction up to a 0-10 scale, and multiplies the free fraction by the
    core count.

    Returns:
        int: suggested number of worker processes (may be 0 on a busy box).
    """
    # BUG FIX: psutil.cpu_percent(1) returns a single float, so the original
    # len(cpus_per) raised TypeError; percpu=True yields the per-core list
    # the averaging loop expected.
    cpus_per = psutil.cpu_percent(interval=1, percpu=True)
    average_cpu_per = sum(cpus_per) / len(cpus_per)
    # Busy tenths of total capacity, rounded up (0..10).
    cpu_per = math.ceil(average_cpu_per / 10.)
    # Free tenths times the core count gives the worker budget.
    cpu_use = int(math.floor((10 - cpu_per) * cpu_count() * 1.0))
    return cpu_use
def read_image(queue_image, image_paths):
    """Producer: load each image from disk and push a [basename, pixels]
    pair onto the shared queue."""
    for image_path in image_paths:
        basename = os.path.split(image_path)[-1]
        print('Put %s to queue...' % str(basename))
        loaded = cv2.imread(image_path)
        # Pair the file name with its pixel data so consumers can match them.
        queue_image.put([basename, loaded])
        # queue_image.qsize() would report the number of queued items.
def read_image2(queue_image, image_paths):
    """Second producer: identical to read_image but tags its log lines with
    a "-----------second" suffix so the two producers' output is
    distinguishable."""
    for image_path in image_paths:
        basename = os.path.split(image_path)[-1]
        print('Put %s to queue...' % str(basename + "-----------second"))
        loaded = cv2.imread(image_path)
        queue_image.put([basename, loaded])
def process_image(value):
    """Rotate *value* 90 degrees clockwise and return a placeholder result.

    BUG FIX: cv2.rotate expects a rotateCode enum as its second argument,
    not an angle in degrees; the original cv2.rotate(value, 90) passed an
    invalid code (valid codes are 0-2) and raised an error. The rotated
    image itself is discarded; only a dummy detection string is returned,
    standing in for a real per-image analysis step.
    """
    value = cv2.rotate(value, cv2.ROTATE_90_CLOCKWISE)
    return "detect_result"
def get_image(queue_image, queue_result):
    """Consumer: pull [name, image] pairs off queue_image forever, run
    process_image on each, and forward [name, result] to queue_result.

    Runs an infinite loop; the parent terminates this process explicitly.
    """
    # print('Process to read: %s' % os.getpid())
    while True:
        name, image = queue_image.get(True)  # block until an item arrives
        detection = process_image(image)
        print('Get %s from queue.' % str(detection))
        queue_result.put([name, detection])
        print('Put %s to queue...' % str(detection + "--------------------------------------result"))
def save_result(queue_result):
    """Final consumer: drain [name, result] pairs from queue_result forever.

    A real pipeline would persist results here; this demo only logs them.
    Runs an infinite loop; the parent terminates this process explicitly.
    """
    while True:
        name, detection = queue_result.get(True)
        print('Get %s from queue.' % str(detection + "---------------------------------------result"))
if __name__=='__main__':
    # Folder containing the .png images to process.
    root_image_path = r"图片文件夹"
    image_paths = [os.path.join(root_image_path, image_name) for image_name in os.listdir(root_image_path) if image_name.split(".")[-1] == "png"]
    process_num = 2  # two reader (producer) processes
    image_path_len = len(image_paths)
    first_index = image_path_len // 2
    image_path_1 = image_paths[:first_index]  # images assigned to the first producer
    image_path_2 = image_paths[first_index:]  # images assigned to the second producer
    # The parent creates the queues and hands them to each child process.
    num_len = 400  # queue capacity bound, keeps memory usage in check
    queue_image = Queue(num_len)
    queue_result = Queue(num_len)
    pw1 = Process(target=read_image, args=(queue_image, image_path_1,))
    pw2 = Process(target=read_image2, args=(queue_image, image_path_2,))
    pr1 = Process(target=get_image, args=(queue_image, queue_result,))
    pr2 = Process(target=save_result, args=(queue_result,))
    # Start the writer (producer) processes.
    pw1.start()
    pw2.start()
    # Start the reader (consumer) processes.
    pr1.start()
    pr2.start()
    # Wait for the producers to finish.
    pw1.join()
    pw2.join()
    # The consumers run infinite loops and cannot be joined; terminate them.
    pr1.terminate()
    pr2.terminate()
使用进程池,完成子进程之间的通信,和例三的方式很像,就是需要自己写子进程了。当处理一个内容时,例三大约1.8s,例四大约1.8s。速度差不多。不加多进程是2.2秒。
#coding:utf-8
from multiprocessing import Process, Queue, Pool
import os, time, random, multiprocessing, cv2
# 写数据进程执行的代码:
# Code run by the writer (producer) process:
def write(q):
    """Producer: put three sample values onto the queue, pausing a random
    sub-second interval after each to mimic slow production.

    FIX: uses print() (valid on both Python 2 and 3 for one argument)
    instead of the original Python-2-only print statement.
    """
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())
# 读数据进程执行的代码:
# Code run by the reader (consumer) process:
def read(q):
    """Consumer: drain the queue, printing each value, and return once the
    queue is empty.

    FIX: uses print() instead of the Python-2-only print statement.
    NOTE(review): the empty-check exit is racy — if the producer is slower
    than this consumer, read() can return before all values are produced.
    """
    while True:
        if not q.empty():
            value = q.get(True)
            print('Get_result %s from queue.' % value)
            time.sleep(random.random())
        else:
            break
def read_image(queue_image, image_paths):
    """Producer: load every image in *image_paths* from disk and push a
    [basename, pixels] pair onto the shared queue."""
    for path in image_paths:
        fname = os.path.split(path)[-1]
        print('Put_read %s to queue...' % str(fname))
        queue_image.put([fname, cv2.imread(path)])
def get_image(queue_image):
    """Consumer: pull [name, image] pairs off the queue and report them,
    returning once the queue stays empty.

    FIX: uses print() instead of the Python-2-only print statement. The
    double empty-check with a 0.1 s pause guards against the consumer
    outpacing the producers and quitting too early.
    """
    while True:
        value = queue_image.get(True)
        image_path = value[0]
        image = value[1]
        print('Get_result %s from queue.' % image_path)
        if queue_image.empty():
            time.sleep(0.1)  # give producers a moment to catch up
            if queue_image.empty():
                break
if __name__ == '__main__':
    start_time = time.time()
    # Folder containing the .png images to process.
    root_image_path = r"存放图片文件夹"
    image_paths = [os.path.join(root_image_path, image_name)
                   for image_name in os.listdir(root_image_path)
                   if image_name.split(".")[-1] == "png"]
    process_num = 2
    image_path_len = len(image_paths)
    first_index = image_path_len // 2
    image_path_1 = image_paths[:first_index]
    image_path_2 = image_paths[first_index:]
    # Pool workers cannot share a plain multiprocessing.Queue; a
    # Manager().Queue() proxy is required for queues passed to Pool tasks.
    manager = multiprocessing.Manager()
    queue_image = manager.Queue()
    p = Pool(3)
    # FIX: replaced the pointless `for i in range(2)` + if-chain with two
    # direct submissions; two producers, each handed half the image list.
    pw = p.apply_async(read_image, args=(queue_image, image_path_1))
    pw = p.apply_async(read_image, args=(queue_image, image_path_2))
    # One consumer draining the shared queue.
    pr = p.apply_async(get_image, args=(queue_image,))
    p.close()
    p.join()
    end_time = time.time()
    print(end_time - start_time)
    # FIX: parenthesized print; the bare Python-2 statement is a
    # SyntaxError on Python 3.
    print('所有数据都写入并且读完')