Python multiprocessing shared variables

The task: read several data files, extract a field from each record, and merge the results (essentially a word count).


Method 1 is the conventional approach: each worker processes its own data file and writes its own output file; at the end a dict is defined and the output files are walked to merge the result sets (sketched below).
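A minimal sketch of Method 1, under the same assumptions as the full listing further down (numbered input files, each line a Python literal with the title at index 8 and the user id at index 21); the function names and the partial-output directory are illustrative:

import multiprocessing
import os

def count_one_file(num, indir, partdir):
	# Method 1 worker: build a purely local dict for one input file,
	# then dump it to this worker's own partial output file. No shared state, no lock.
	titles = {}
	with open(indir + os.sep + str(num), 'r') as fin:
		for line in fin:
			data = eval(line.strip())	# same record layout as the full listing
			us_id, title = int(data[21]), data[8]
			if title.strip() == '':
				continue
			titles.setdefault(title, []).append(us_id)
	with open(partdir + os.sep + str(num), 'w') as fout:
		for title in titles:
			print >> fout, '%s\t%s' % (title, titles[title])

def merge_parts(partdir, start, end):
	# final single-process pass: walk the partial files and merge them into one dict
	merged = {}
	for num in range(start, end):
		with open(partdir + os.sep + str(num), 'r') as fin:
			for line in fin:
				title, ids = line.rstrip('\n').split('\t', 1)
				merged.setdefault(title, []).extend(eval(ids))
	return merged

def run_method1(indir, partdir, start, end):
	# same multiprocessing.Pool pattern as the full listing below
	pool = multiprocessing.Pool(processes = 16)
	for num in range(start, end):
		pool.apply_async(count_one_file, [num, indir, partdir])
	pool.close()
	pool.join()
	return merge_parts(partdir, start, end)

Each worker only ever touches its own local dict and its own output file, so nothing needs synchronizing until the cheap merge at the end.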

Method 2 defines the result dict up front, then the workers process the data in parallel and take a synchronization lock whenever they insert into the dict.


I expected Method 2 to be much faster than Method 1, since it saves all the intermediate file I/O and only locks when merging results into the dict.

After testing, however, the synchronized insert turned out to be the biggest bottleneck. (Part of the cost is presumably the Manager dict itself: every access to the shared proxy is a round-trip to the manager process, on top of the lock.)


Test: 200,000 records, extracting one field, 16 worker processes. Method 1 took 10s, Method 2 took 55s.

That gap is just too big…


So I'm giving up on lock-synchronized shared variables across workers; the performance is simply too poor.
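For completeness, there is a middle ground not benchmarked in this post: have each worker build a plain local dict and return it, then merge the partial dicts in the parent once the pool is done. That keeps multiprocessing but drops both the lock and the Manager proxy, and also skips Method 1's intermediate files. A rough sketch, with illustrative function names and the same record layout as the listing below:

import multiprocessing
import os
import traceback

def count_one_file_local(num, indir):
	# worker: build a purely local dict and return it to the parent via the pool
	titles = {}
	try:
		with open(indir + os.sep + str(num), 'r') as fin:
			for line in fin:
				data = eval(line.strip())
				us_id, title = int(data[21]), data[8]
				if title.strip() == '':
					continue
				titles.setdefault(title, []).append(us_id)
	except Exception:
		traceback.print_exc()
	return titles

def run_merge_in_parent(indir, start, end):
	pool = multiprocessing.Pool(processes = 16)
	results = []
	for num in range(start, end):
		results.append(pool.apply_async(count_one_file_local, [num, indir]))
	pool.close()
	pool.join()
	merged = {}
	for r in results:
		# r.get() is one worker's local dict, pickled back once per input file
		for title, ids in r.get().items():
			merged.setdefault(title, []).extend(ids)
	return merged

The only cross-process traffic is one pickled dict per input file, instead of one locked proxy call per record.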


The Python code for multiprocessing with a lock-synchronized shared variable (Method 2) is as follows:

#!/usr/bin/env python
#-*- coding: utf-8 -*-

import sys
reload(sys)
sys.setdefaultencoding('utf-8')

import multiprocessing
import os
import traceback
import logging

def myprocess(num,indir,titles,lock):
	# worker: read one numbered input file and push title -> [us_id] into the shared dict
	try:
		fin = open(indir + os.sep + str(num),'r')
		cnt0 = 0
		for line in fin:
			cnt0 += 1
			if cnt0 % 100 == 0:
				print num,cnt0	# progress report
			if cnt0 == 10000:
				break	# cap each file at 10,000 lines for this test run
			data = eval(line.strip())	# each line is a Python literal record
			us_id = int(data[21])
			title = data[8]
			if title.strip() == '':
				continue
			# every insert acquires the shared lock and goes through the Manager proxy
			with lock:
				if title in titles:
					titles[title].append(us_id) 
				else:
					titles[title]=[us_id]
		print num,'END'
				
	except Exception,e:
		traceback.print_exc()

def func():
	indir = '/root/data/bids/data_all'
	outdir = './titles1119'
	#if not os.path.exists(outdir):
	#	os.mkdir(outdir)
	start,end,add = 0,20,1
	manager = multiprocessing.Manager()
	lock = manager.Lock()	# lock shared across the worker processes
	titles = manager.dict()	# shared result dict (a proxy living in the manager process)
	# start a pool of 16 worker processes
	pool = multiprocessing.Pool(processes = 16)
	for num in range(start,end,add):
		pool.apply_async(myprocess,[num,indir,titles,lock])
	pool.close()
	pool.join()
	titles = dict(titles)	# copy the shared proxy into a plain local dict
	with open(outdir,'w') as fout:
		for title in titles:
			print >> fout,'%s\t%s'%(title,titles[title])
			if len(titles[title])>1:
				print titles[title]	# echo titles that occur more than once

if __name__ == '__main__':
	program = os.path.basename(sys.argv[0])
	logger = logging.getLogger(program)
	logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',filename = program+'.log',filemode='a')
	logging.root.setLevel(level=logging.INFO)
	logger.info("start running %s" % ' '.join(sys.argv))
	func()
	logger.info("end\t running %s" % ' '.join(sys.argv))



