Python: How to solve memory problems during multiprocessing using Pool.map()?


I have written the program (below) to:

  • read a huge text file as a pandas dataframe
  • then groupby using a specific column value to split the data and store it as a list of dataframes
  • then pipe the data to multiprocess Pool.map() to process each dataframe in parallel

Everything is fine, and the program works well on my small test dataset. But when I pipe in my large data (about 14 GB), the memory consumption increases exponentially and then freezes the computer or gets killed (on an HPC cluster).

I have added code to clear the memory as soon as the data/variable is no longer needed, and I close the pool as soon as it is done. Still, with a 14 GB input I was only expecting a 2*14 GB memory burden, but a lot more seems to be going on. I also tried tweaking chunkSize and maxTaskPerChild, etc., but I am not seeing any difference in optimization with either the test or the large file.
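
For reference, a minimal sketch of where those two knobs plug in (these are the standard multiprocessing.Pool parameters; the values of 3 and 1 below are just placeholders, not recommendations):

from multiprocessing import Pool

# maxtasksperchild recycles each worker process after N tasks, returning its memory;
# chunksize controls how many items are shipped to a worker in one batch.
with Pool(processes=3, maxtasksperchild=1) as p:
    result = p.map(matrix_to_vcf, list(gen_matrix_df_list.values()), chunksize=1)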

I think the place in this code that needs improvement is where I start the multiprocessing:

p = Pool(3)  # number of worker processes to run at once
result = p.map(matrix_to_vcf, list(gen_matrix_df_list.values()))

But, I am posting the whole code.

Test example: I created a test file ("genome_matrix_final-chr1234-1mb.txt") of up to 250 MB and ran the program. When I check the system monitor, I can see that the memory consumption increased by about 6 GB. I am not so clear why a 250 MB file plus some outputs takes up so much memory space. I have shared that file via Dropbox in case it helps to see the real problem.

Can someone please suggest how I can get rid of this problem?

My python script:

#!/home/bin/python3

import pandas as pd
import collections
from multiprocessing import Pool
import io
import time
import resource

print()
print('Checking required modules')
print()


''' change this input file name and/or path as need be '''
genome_matrix_file = "genome_matrix_final-chr1n2-2mb.txt"   # test file 01
genome_matrix_file = "genome_matrix_final-chr1234-1mb.txt"  # test file 02
#genome_matrix_file = "genome_matrix_final.txt"    # large file 

def main():
    with open("genome_matrix_header.txt") as header:
        header = header.read().rstrip('\n').split('\t')
        print()

    time01 = time.time()
    print('starting time: ', time01)

    '''load the genome matrix file onto pandas as dataframe.
    This makes it easier for multiprocessing'''
    gen_matrix_df = pd.read_csv(genome_matrix_file, sep='\t', names=header)

    # now, group the dataframe by chromosome/contig - so it can be multiprocessed
    gen_matrix_df = gen_matrix_df.groupby('CHROM')

    # store the splitted dataframes as list of key, values(pandas dataframe) pairs
    # this list of dataframe will be used while multiprocessing
    gen_matrix_df_list = collections.OrderedDict()
    for chr_, data in gen_matrix_df:
        gen_matrix_df_list[chr_] = data

    # clear memory
    del gen_matrix_df

    '''Now, pipe each dataframe from the list using map.Pool() '''
    p = Pool(3)  # number of worker processes to run at once
    result = p.map(matrix_to_vcf, list(gen_matrix_df_list.values()))

    del gen_matrix_df_list  # clear memory

    p.close()
    p.join()


    # concat the results from pool.map() and write it to a file
    result_merged = pd.concat(result)
    del result  # clear memory

    pd.DataFrame.to_csv(result_merged, "matrix_to_haplotype-chr1n2.txt", sep='\t', header=True, index=False)

    print()
    print('completed all process in "%s" sec. ' % (time.time() - time01))
    print('Global maximum memory usage: %.2f (mb)' % current_mem_usage())
    print()


'''function to convert the dataframe from genome matrix to desired output '''
def matrix_to_vcf(matrix_df):

    print()
    time02 = time.time()

    # index position of the samples in genome matrix file
    sample_idx = [{'10a': 33, '10b': 18}, {'13a': 3, '13b': 19},
                    {'14a': 20, '14b': 4}, {'16a': 5, '16b': 21},
                    {'17a': 6, '17b': 22}, {'23a': 7, '23b': 23},
                    {'24a': 8, '24b': 24}, {'25a': 25, '25b': 9},
                    {'26a': 10, '26b': 26}, {'34a': 11, '34b': 27},
                    {'35a': 12, '35b': 28}, {'37a': 13, '37b': 29},
                    {'38a': 14, '38b': 30}, {'3a': 31, '3b': 15},
                    {'8a': 32, '8b': 17}]

    # sample index stored as ordered dictionary
    sample_idx_ord_list = []
    for ids in sample_idx:
        ids = collections.OrderedDict(sorted(ids.items()))
        sample_idx_ord_list.append(ids)


    # for haplotype file
    header = ['contig', 'pos', 'ref', 'alt']

    # adding some suffixes "PI" to available sample names
    for item in sample_idx_ord_list:
        ks_update = ''
        for ks in item.keys():
            ks_update += ks
        header.append(ks_update+'_PI')
        header.append(ks_update+'_PG_al')


    #final variable store the haplotype data
    # write the header lines first
    haplotype_output = '\t'.join(header) + '\n'


    # to store the value of parsed the line and update the "PI", "PG" value for each sample
    updated_line = ''

    # read the piped in data back to text like file
    matrix_df = pd.DataFrame.to_csv(matrix_df, sep='\t', index=False)

    matrix_df = matrix_df.rstrip('\n').split('\n')
    for line in matrix_df:
        if line.startswith('CHROM'):
            continue

        line_split = line.split('\t')
        chr_ = line_split[0]
        ref = line_split[2]
        alt = list(set(line_split[3:]))

        # remove the alleles "N" missing and "ref" from the alt-alleles
        alt_up = list(filter(lambda x: x!='N' and x!=ref, alt))

        # if no alt alleles are found, just continue
        # - i.e : don't write that line in output file
        if len(alt_up) == 0:
            continue

        #print('\nMining data for chromosome/contig "%s" ' %(chr_ ))
        #so, we have data for CHR, POS, REF, ALT so far
        # now, we mine phased genotype for each sample pair (as "PG_al", and also add "PI" tag)
        sample_data_for_vcf = []
        for ids in sample_idx_ord_list:
            sample_data = []
            for key, val in ids.items():
                sample_value = line_split[val]
                sample_data.append(sample_value)

            # now, update the phased state for each sample
            # also replacing the missing allele i.e "N" and "-" with ref-allele
            sample_data = ('|'.join(sample_data)).replace('N', ref).replace('-', ref)
            sample_data_for_vcf.append(str(chr_))
            sample_data_for_vcf.append(sample_data)

        # add data for all the samples in that line, append it with former columns (chrom, pos ..) ..
        # and .. write it to final haplotype file
        sample_data_for_vcf = '\t'.join(sample_data_for_vcf)
        updated_line = '\t'.join(line_split[0:3]) + '\t' + ','.join(alt_up) + \
            '\t' + sample_data_for_vcf + '\n'
        haplotype_output += updated_line

    del matrix_df  # clear memory
    print('completed haplotype preparation for chromosome/contig "%s" '
          'in "%s" sec. ' %(chr_, time.time()-time02))
    print('\tWorker maximum memory usage: %.2f (mb)' %(current_mem_usage()))

    # return the data back to the pool
    return pd.read_csv(io.StringIO(haplotype_output), sep='\t')


''' to monitor memory '''
def current_mem_usage():
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.


if __name__ == '__main__':
    main()
Update for bounty hunters:

I have implemented multiprocessing using Pool.map(), but the code is causing a big memory burden (input test file ~300 MB, but the memory burden is about 6 GB). I was only expecting a memory burden of 3 * 300 MB at most.

  • Can somebody explain what is causing such a huge memory requirement for such a small file and such a short computation?
  • Also, I am trying to take the answer and use it to improve multiprocessing in my large program, so adding any method or module that does not change the structure of the computation part (the CPU-bound process) too much should be fine.
  • I have included two test files for playing with the code.
  • The attached code is the full code, so it should work as intended when copy-pasted. Any changes should only be used to improve the optimization of the multiprocessing steps.

    • I had the same problem. I needed to process a huge text corpus while keeping a knowledge base of a few DataFrames of millions of rows loaded in memory. I think this issue is common, so I will keep my answer oriented for general purposes.

      A combination of settings solved the problem for me (only 1 & 3 & 5 might do it for you); a combined sketch follows this list:

    • Use Pool.imap (or imap_unordered) instead of Pool.map. This will iterate over the data lazily rather than loading all of it into memory before starting the processing.

    • Set a value for the chunksize parameter. This will also make imap faster.

    • Set a value for the maxtasksperchild parameter.

    • Append the output to disk instead of keeping it in memory, either immediately or every once in a while when it reaches a certain size.

    • Run the code in batches. You can use itertools.islice if you have an iterator. The idea is to split your list(gen_matrix_df_list.values()) into three or more lists, pass only the first third to map or imap, then the second third in another run, and so on. Since you have a list, you can simply slice it in the same line of code.
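
      Pulled together, a minimal sketch of suggestions 1-4 applied to the question's pipeline (matrix_to_vcf and gen_matrix_df_list are taken from the question; the pool size, chunksize=1, maxtasksperchild=1 and the output filename are placeholders, not tuned recommendations):

      from multiprocessing import Pool

      # maxtasksperchild=1 recycles each worker after one task so its memory is returned
      with Pool(3, maxtasksperchild=1) as p:
          with open('matrix_to_haplotype-chr1n2.txt', 'w') as out:
              wrote_header = False
              # imap_unordered yields results as workers finish; chunksize batches the inputs
              for df in p.imap_unordered(matrix_to_vcf,
                                         gen_matrix_df_list.values(),
                                         chunksize=1):
                  # append each finished chromosome to disk instead of holding it in RAM
                  df.to_csv(out, sep='\t', header=not wrote_header, index=False)
                  wrote_header = True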


    • When you use multiprocessing.Pool, a number of child processes will be created using the fork() system call. Each of those processes starts with an exact copy of the memory of the parent process at that time. Because you are loading the csv before creating the Pool of size 3, each of those 3 processes in the pool will unnecessarily have a copy of the dataframe. (gen_matrix_df as well as gen_matrix_df_list will exist in the current process as well as in each of the 3 child processes, so 4 copies of each of these structures will be in memory.)

      Try creating the Pool before loading the file (at the very beginning, actually); that should reduce the memory usage (see the sketch at the end of this answer).

      If it is still too high, you can:

    • Dump gen_matrix_df_list to a file, 1 item per line, e.g.:

      import base64
      import pickle

      with open('tempfile.txt', 'w') as f:
          for item in gen_matrix_df_list.items():
              # one base64-encoded pickle per line keeps the file line-oriented
              # (pickle's binary output may itself contain newline bytes)
              f.write(base64.b64encode(pickle.dumps(item)).decode() + '\n')
      
    • Use Pool.imap() with a generator over the lines that you dumped into this file, e.g.:

      with open('tempfile.txt', 'r') as f:
          p.imap(matrix_to_vcf, (pickle.loads(base64.b64decode(line)) for line in f))
      
      (Note that matrix_to_vcf takes a (key, value) tuple in the above example, not just a value.)

    • I hope this helps.


      Note: I haven't tested the code above. It is only meant to demonstrate the idea.
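
      As a concrete illustration of "create the Pool first", here is a minimal sketch of how the question's main() could be reordered (only names from the question are used; nothing else is changed):

      def main():
          # create the worker processes before any large data is loaded, so that
          # fork() duplicates only a small parent process
          p = Pool(3)

          with open("genome_matrix_header.txt") as header:
              header = header.read().rstrip('\n').split('\t')

          gen_matrix_df = pd.read_csv(genome_matrix_file, sep='\t', names=header)
          gen_matrix_df = gen_matrix_df.groupby('CHROM')

          gen_matrix_df_list = collections.OrderedDict()
          for chr_, data in gen_matrix_df:
              gen_matrix_df_list[chr_] = data
          del gen_matrix_df

          result = p.map(matrix_to_vcf, list(gen_matrix_df_list.values()))
          p.close()
          p.join()

      Note that Pool.map() will still serialize each dataframe to the worker that processes it; the gain here is only that the children no longer start with full copies of gen_matrix_df and gen_matrix_df_list.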

      General answer about memory with multiprocessing

      You asked: "What is causing so much memory to be allocated?" The answer relies on two parts.

      First, as you already noticed, each multiprocessing worker gets its own copy of the data (see the quote below), so you should chunk big arguments. Or, for large files, read them in a little bit at a time, if possible.

      "By default the workers of the pool are real Python processes forked using the multiprocessing module of the Python standard library when n_jobs != 1. The arguments passed as input to the Parallel call are serialized and reallocated in the memory of each worker process."

      For large arguments this can be problematic. One way to keep the parent's footprint down is to read the file in chunks and group it as you go, for example:
         '''load the genome matrix file onto pandas as dataframe.
          This makes it easier for multiprocessing'''
      
          # store the splitted dataframes as list of key, values(pandas dataframe) pairs
          # this list of dataframe will be used while multiprocessing
          #not sure why you need the ordered dict here, might add memory overhead
          #gen_matrix_df_list = collections.OrderedDict()  
          #a defaultdict won't throw an exception when we try to append to it the first time. if you don't want a default dict for some reason, you have to initialize each entry you care about.
          gen_matrix_df_list = collections.defaultdict(list)   
          chunksize = 10 ** 6
      
          for chunk in pd.read_csv(genome_matrix_file, sep='\t', names=header, chunksize=chunksize):
              # now, group the dataframe by chromosome/contig - so it can be multiprocessed
              gen_matrix_df = chunk.groupby('CHROM')
              for chr_, data in gen_matrix_df:
                  gen_matrix_df_list[chr_].append(data)
      
          '''Having sorted chunks on read to a list of df, now create single data frames for each chr_'''
          #The dict contains a list of small df objects, so now concatenate them
          #by reassigning to the same dict, the memory footprint is not increasing 
          for chr_ in gen_matrix_df_list.keys():
              gen_matrix_df_list[chr_]=pd.concat(gen_matrix_df_list[chr_])
      
          '''Now, pipe each dataframe from the list using map.Pool() '''
          p = Pool(3)  # number of worker processes to run at once
          result = p.map(matrix_to_vcf, list(gen_matrix_df_list.values()))
          p.close()
          p.join()
      
      >>> import sys
      >>> sys.getsizeof(42)
      28
      >>> sys.getsizeof('T')
      50
      
      #!/usr/bin/env python3
      
      import pandas as pd
      from memory_profiler import profile
      
      @profile
      def main():
          with open('genome_matrix_header.txt') as header:
              header = header.read().rstrip('\n').split('\t')
      
          gen_matrix_df = pd.read_csv(
              'genome_matrix_final-chr1234-1mb.txt', sep='\t', names=header)
      
          gen_matrix_df.info()
          gen_matrix_df.info(memory_usage='deep')
      
      if __name__ == '__main__':
          main()
      
      mprof run justpd.py
      mprof plot
      
      Line #    Mem usage    Increment   Line Contents
      ================================================
           6     54.3 MiB     54.3 MiB   @profile
           7                             def main():
           8     54.3 MiB      0.0 MiB       with open('genome_matrix_header.txt') as header:
           9     54.3 MiB      0.0 MiB           header = header.read().rstrip('\n').split('\t')
          10                             
          11   2072.0 MiB   2017.7 MiB       gen_matrix_df = pd.read_csv('genome_matrix_final-chr1234-1mb.txt', sep='\t', names=header)
          12                                 
          13   2072.0 MiB      0.0 MiB       gen_matrix_df.info()
          14   2072.0 MiB      0.0 MiB       gen_matrix_df.info(memory_usage='deep')
      
      <class 'pandas.core.frame.DataFrame'>
      RangeIndex: 4000000 entries, 0 to 3999999
      Data columns (total 34 columns):
      ...
      dtypes: int64(2), object(32)
      memory usage: 1.0+ GB
      
      memory usage: 7.9 GB
      
      # added after read_csv
      from pympler import tracker
      tr = tracker.SummaryTracker()
      tr.print_diff()   
      
                                                   types |   # objects |   total size
      ================================================== | =========== | ============
                       <class 'pandas.core.series.Series |          34 |      7.93 GB
                                            <class 'list |        7839 |    732.38 KB
                                             <class 'str |        7741 |    550.10 KB
                                             <class 'int |        1810 |     49.66 KB
                                            <class 'dict |          38 |      7.43 KB
        <class 'pandas.core.internals.SingleBlockManager |          34 |      3.98 KB
                                   <class 'numpy.ndarray |          34 |      3.19 KB
      
      import ctypes
      
      class PyASCIIObject(ctypes.Structure):
           _fields_ = [
               ('ob_refcnt', ctypes.c_size_t),
               ('ob_type', ctypes.py_object),
               ('length', ctypes.c_ssize_t),
               ('hash', ctypes.c_int64),
               ('state', ctypes.c_int32),
               ('wstr', ctypes.c_wchar_p)
          ]
      
      >>> a = 'name'
      >>> b = '!@#$'
      >>> a_struct = PyASCIIObject.from_address(id(a))
      >>> a_struct.state & 0b11
      1
      >>> b_struct = PyASCIIObject.from_address(id(b))
      >>> b_struct.state & 0b11
      0
      
      >>> a = 'foo'
      >>> b = 'foo'
      >>> a is b
      True
      >>> gen_matrix_df.REF[0] is gen_matrix_df.REF[6]
      True
      
      >>> rows = 4 * 10 ** 6
      >>> int_cols = 2
      >>> str_cols = 32
      >>> int_size = 8
      >>> str_size = 58  
      >>> ptr_size = 8
      >>> (int_cols * int_size + str_cols * (str_size + ptr_size)) * rows / 2 ** 30
      7.927417755126953
      
      Line #    Mem usage    Increment   Line Contents
      ================================================
           8     53.1 MiB     53.1 MiB   @profile
           9                             def main():
          10     53.1 MiB      0.0 MiB       with open("genome_matrix_header.txt") as header:
          11     53.1 MiB      0.0 MiB           header = header.read().rstrip('\n').split('\t')
          12                             
          13   2070.9 MiB   2017.8 MiB       gen_matrix_df = pd.read_csv('genome_matrix_final-chr1234-1mb.txt', sep='\t', names=header)
          14   2071.2 MiB      0.4 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[gen_matrix_df.keys()[0]])
          15   2071.2 MiB      0.0 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[gen_matrix_df.keys()[0]])
          16   2040.7 MiB    -30.5 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          ...
          23   1827.1 MiB    -30.5 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          24   1094.7 MiB   -732.4 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          25   1765.9 MiB    671.3 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          26   1094.7 MiB   -671.3 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          27   1704.8 MiB    610.2 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          28   1094.7 MiB   -610.2 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          29   1643.9 MiB    549.2 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          30   1094.7 MiB   -549.2 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          31   1582.8 MiB    488.1 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          32   1094.7 MiB   -488.1 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])    
          33   1521.9 MiB    427.2 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])    
          34   1094.7 MiB   -427.2 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          35   1460.8 MiB    366.1 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          36   1094.7 MiB   -366.1 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          37   1094.7 MiB      0.0 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
          ...
          47   1094.7 MiB      0.0 MiB       gen_matrix_df = gen_matrix_df.drop(columns=[random.choice(gen_matrix_df.keys())])
      
      #!/usr/bin/env python3
      
      import csv
      import sys
      import json
      
      def smemstat():
        with open('smemstat.json') as f:
          smem = json.load(f)
      
        rows = []
        fieldnames = set()    
        for s in smem['smemstat']['periodic-samples']:
          row = {}
          for ps in s['smem-per-process']:
            if 'script.py' in ps['command']:
              for k in ('uss', 'pss', 'rss'):
                row['{}-{}'.format(ps['pid'], k)] = ps[k] // 2 ** 20
      
          # smemstat produces empty samples, backfill from previous
          if rows:            
            for k, v in rows[-1].items():
              row.setdefault(k, v)
      
          rows.append(row)
          fieldnames.update(row.keys())
      
        with open('smemstat.csv', 'w') as out:
          dw = csv.DictWriter(out, fieldnames=sorted(fieldnames))
          dw.writeheader()
          list(map(dw.writerow, rows))
      
      def glances():
        rows = []
        fieldnames = ['available', 'used', 'cached', 'mem_careful', 'percent',
          'free', 'mem_critical', 'inactive', 'shared', 'history_size',
          'mem_warning', 'total', 'active', 'buffers']
        with open('glances.csv', 'w') as out:
          dw = csv.DictWriter(out, fieldnames=fieldnames)
          dw.writeheader()
          with open('glances.json') as f:
            for l in f:
              d = json.loads(l)
              dw.writerow(d['mem'])
      
      if __name__ == '__main__':
        globals()[sys.argv[1]]()
      
      ...
      # keep the dataframes in a module-level global and pass only indices through
      # Pool.map(), so the forked workers read the data from memory inherited from
      # the parent instead of receiving pickled copies
      global global_gen_matrix_df_values
      global_gen_matrix_df_values = list(gen_matrix_df_list.values())
      del gen_matrix_df_list
      
      p = Pool(2)
      result = p.map(matrix_to_vcf, range(len(global_gen_matrix_df_values)))
      ...
      
      def matrix_to_vcf(i):
          matrix_df = global_gen_matrix_df_values[i]