Python program freezes when using argparse together with concurrent.futures

I am writing a Python program to process NGS sequencing data in a bash terminal on an HPC cluster. The program runs correctly in a Jupyter notebook on my Mac, both with a single process and with multiple processes. However, as soon as I use the argparse package to pass in arguments from the terminal, the program never produces the final result; instead it runs indefinitely, as if the processes never finish. I have checked, and I am almost certain this is caused by a conflict between argparse and concurrent.futures.ProcessPoolExecutor(). Can anyone suggest a way to solve this problem? Thanks, everyone!

The following code produces the freezing problem in the terminal:

#! /usr/bin/env python

import pandas as pd
import time
import concurrent.futures
import argparse


def run(args):
    start = time.perf_counter()
    input_file = args.input
    output_file = args.output
    chunk = args.chunk_size

    def cal_breaking(data):
        for index, row in data.iterrows():
            if row[1] == 0:  # mapping to the forward strand
                data.at[index, 'breaking_pos'] = int(row[5]) + int(row[3])
            elif row[1] == 16:  # mapping to the reverse strand
                data.at[index, 'breaking_pos'] = int(row[3])
            else:
                pass
        return data

    new_df = pd.DataFrame(
        columns=['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL'])
    processes = []
    for df in pd.read_csv(input_file, delimiter='\t', usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], chunksize=chunk):
        df.columns = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']
        df = df.loc[~df['CIGAR'].str.contains('S') & ~df['CIGAR'].str.contains(
            'H')]  # filter out reads whose CIGAR contains soft clips ('S') or hard clips ('H')
        df['CIGAR'] = df.iloc[:, 5].str.extract(
            r'(\d+)')  # \d+ matches one or more digits (0-9)
        df['breaking_pos'] = None
        with concurrent.futures.ProcessPoolExecutor() as executor:
            processes.append(executor.submit(cal_breaking, df))
    for process in processes:
        new_df = pd.concat([new_df, process.result()], sort=True)

    new_df['count'] = 1
    new_df = new_df.groupby(['RNAME', 'breaking_pos']).count()['count'].reset_index()
    new_df['end'] = new_df['breaking_pos'] + 1
    new_df = new_df[['RNAME', 'breaking_pos', 'end', 'count']]
    new_df.to_csv(output_file, '\t', index=None, header=None)
    end = time.perf_counter()
    print(f'process finished in {round(end - start, 2)} second(s)')


def main():
    parser = argparse.ArgumentParser(description="tagging HiC-Pro pair's sub-compartment")
    parser.add_argument("-in", help="input pairs file", dest="input", type=str, required=True)
    parser.add_argument("-out", help="output files name", dest="output", type=str, required=True)
    parser.add_argument("-ck", help="read in chunk size", dest="chunk_size", type=int, required=True)
    parser.set_defaults(func=run)
    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()
Without multiprocessing, the following code runs fine in the terminal:

#! /usr/bin/env python

import pandas as pd
import time
import argparse


def run(args):
    start = time.perf_counter()
    input_file = args.input
    output_file = args.output
    chunk = args.chunk_size

    def cal_breaking(data):
        for index, row in data.iterrows():
            if row[1] == 0:  # mapping to the forward strand
                data.at[index, 'breaking_pos'] = int(row[5]) + int(row[3])
            elif row[1] == 16:  # mapping to the reverse strand
                data.at[index, 'breaking_pos'] = int(row[3])
            else:
                pass
        return data

    new_df = pd.DataFrame(
        columns=['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL'])

    for df in pd.read_csv(input_file, delimiter='\t', usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], chunksize=chunk):
        df.columns = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']
        df = df.loc[~df['CIGAR'].str.contains('S') & ~df['CIGAR'].str.contains(
            'H')]  # filter out reads whose CIGAR contains soft clips ('S') or hard clips ('H')
        df['CIGAR'] = df.iloc[:, 5].str.extract(
            r'(\d+)')  # \d+ matches one or more digits (0-9)
        df['breaking_pos'] = None
        new_df = pd.concat([new_df, cal_breaking(df)], sort=True)

    new_df['count'] = 1
    new_df = new_df.groupby(['RNAME', 'breaking_pos']).count()['count'].reset_index()
    new_df['end'] = new_df['breaking_pos'] + 1
    new_df = new_df[['RNAME', 'breaking_pos', 'end', 'count']]
    new_df.to_csv(output_file, '\t', index=None, header=None)
    end = time.perf_counter()
    print(f'process finished in {round(end - start, 2)} second(s)')


def main():
    parser = argparse.ArgumentParser(description="tagging HiC-Pro pair's sub-compartment")
    parser.add_argument("-in", help="input pairs file", dest="input", type=str, required=True)
    parser.add_argument("-out", help="output files name", dest="output", type=str, required=True)
    parser.add_argument("-ck", help="read in chunk size", dest="chunk_size", type=int, required=True)
    parser.set_defaults(func=run)
    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()
The ProcessPoolExecutor class is an Executor subclass that uses a pool of processes to execute calls asynchronously. ProcessPoolExecutor uses the multiprocessing module, which allows it to side-step the Global Interpreter Lock, but it also means that only picklable objects can be executed and returned.
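
The "only picklable objects" part is the most likely cause of the hang here: in the posted script, cal_breaking is defined inside run(), and Python cannot pickle nested (local) functions, so the submitted tasks can never be shipped to the worker processes. A minimal sketch of the difference, assuming that is the problem (top_level, outer and nested are illustrative names, not from the original program):

import pickle

def top_level():
    # Module-scope function: pickle serializes it by its qualified name.
    return 42

def outer():
    def nested():
        # Local function: pickle has no importable name for it.
        return 42

    pickle.dumps(top_level)  # works
    try:
        pickle.dumps(nested)
    except (pickle.PicklingError, AttributeError) as err:
        # e.g. "Can't pickle local object 'outer.<locals>.nested'"
        print(err)

outer()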


According to the docs, max_workers gets a default value if you don't supply one. If you have any doubt about what argparse is doing, print(args) and check what it produces; if you are happy with the Namespace, you can turn your attention to other parts of the program.

Hey, I tried your approach, but the problem persists. The file contains 100,000 rows and I set the chunk size to 50,000, so only two tasks are actually submitted; still, the program never finishes and there is no error message:
with concurrent.futures.ProcessPoolExecutor(max_workers=6) as executor:
    processes.append(executor.submit(cal_breaking, df))
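
Building on that, here is a hedged sketch of how the whole script might be restructured, assuming the nested function is the culprit: cal_breaking moves to module level so it can be pickled, and one pool is created outside the chunk loop instead of a new pool per chunk (max_workers=6 is carried over from the comment above; this is a sketch, not a verified fix for the original data):

#! /usr/bin/env python

import argparse
import concurrent.futures

import pandas as pd

COLS = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR',
        'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']


def cal_breaking(data):
    # Module-level function: picklable, so workers can receive it.
    for index, row in data.iterrows():
        if row['FLAG'] == 0:     # forward strand
            data.at[index, 'breaking_pos'] = int(row['CIGAR']) + int(row['POS'])
        elif row['FLAG'] == 16:  # reverse strand
            data.at[index, 'breaking_pos'] = int(row['POS'])
    return data


def run(args):
    futures = []
    # One pool for the whole run; the with-block waits for all tasks on exit.
    with concurrent.futures.ProcessPoolExecutor(max_workers=6) as executor:
        for df in pd.read_csv(args.input, delimiter='\t',
                              usecols=range(11), chunksize=args.chunk_size):
            df.columns = COLS
            df = df.loc[~df['CIGAR'].str.contains('S')
                        & ~df['CIGAR'].str.contains('H')]
            df['CIGAR'] = df['CIGAR'].str.extract(r'(\d+)', expand=False)
            df['breaking_pos'] = None
            futures.append(executor.submit(cal_breaking, df))
        results = [f.result() for f in futures]

    new_df = pd.concat(results, sort=True)
    new_df['count'] = 1
    new_df = new_df.groupby(['RNAME', 'breaking_pos']).count()['count'].reset_index()
    new_df['end'] = new_df['breaking_pos'] + 1
    new_df[['RNAME', 'breaking_pos', 'end', 'count']].to_csv(
        args.output, sep='\t', index=False, header=False)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-in", dest="input", type=str, required=True)
    parser.add_argument("-out", dest="output", type=str, required=True)
    parser.add_argument("-ck", dest="chunk_size", type=int, required=True)
    run(parser.parse_args())


if __name__ == "__main__":
    # Guarding the entry point matters for multiprocessing on platforms
    # that spawn fresh interpreters (e.g. macOS and Windows).
    main()

With the function at module scope, each worker process can re-import it by name; keeping a single executor also avoids paying the startup cost of a new process pool for every chunk.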