1. multiprocessing Pool 的使用
import multiprocessing

import pandas as pd


def func(args):
    """Example worker: build and return a one-row DataFrame from *args*.

    Replace the body with real per-item work; it must return a DataFrame
    so the results can be concatenated by the caller.
    """
    # do something — here a minimal concrete example so the snippet runs
    return pd.DataFrame({"value": [args]})


if __name__ == "__main__":
    items = [1, 2, 3]  # the list of work items handed to func, one per call
    # Use the pool as a context manager so close()/join() always happen,
    # even if map() raises.
    with multiprocessing.Pool() as pool:
        # DataFrame.append was deprecated and removed in pandas 2.0;
        # pd.concat over the list of per-item frames is the supported way.
        result = pd.concat(pool.map(func, items), ignore_index=True)
2. 利用multiprocessing Pool map对 pandas groupby apply加速,一个参数版本
from multiprocessing import Pool

import pandas as pd


def processParallel(group):
    """Process one (DataFrame, name) pair produced by groupby.

    *group* is a tuple ``(df, name)`` as built by the caller's list
    comprehension. Returns the (possibly transformed) DataFrame.
    """
    df, name = group
    # do something with df — placeholder returns it unchanged
    return df


if __name__ == "__main__":
    # NOTE: the original wrapped the map in `for i in range(0, 9)`, which
    # repeated the identical computation nine times and kept only the last
    # result — removed, the single map is equivalent.
    with Pool(50) as pool:  # context manager replaces manual close()/join()
        result = pd.concat(
            pool.map(
                processParallel,
                [(group, name) for name, group in scd.groupby('col')],
            ),
            ignore_index=True,
        )
3. 利用multiprocessing Pool map对 pandas groupby apply加速,传入多个参数
注意:把需要迭代的参数放在第一个位置
from functools import partial
from multiprocessing import Pool

import pandas as pd


def processParallel(group, a, b):
    """Process one (DataFrame, name) pair with fixed extra parameters.

    *group* is a tuple ``(df, name)`` from groupby; *a* and *b* are
    constant parameters bound via functools.partial so that pool.map
    (which passes a single argument) can still be used.
    """
    df, name = group
    # do something with df, a, b — placeholder returns df unchanged
    return df


if __name__ == "__main__":
    # Bind the fixed parameters a and b; the iterated argument (group)
    # stays in the first position so pool.map can supply it.
    worker = partial(processParallel, a=1, b=2)
    # NOTE: the original repeated the map in `for i in range(0, 9)`,
    # discarding eight identical results — a single map is equivalent.
    with Pool(50) as pool:  # context manager replaces manual close()/join()
        result = pd.concat(
            pool.map(
                worker,
                [(group, name) for name, group in scd.groupby('col')],
            ),
            ignore_index=True,
        )
效果比joblib 的Parallel, delayed要好