Python: simple list merging based on intersection

Tags: python, merge, tree, set-intersection, equivalence-classes

Consider the following lists of integers:

#--------------------------------------
0 [0,1,3]
1 [1,0,3,4,5,10,...]
2 [2,8]
3 [3,1,0,...]
...
n []
#--------------------------------------
The task is to merge all lists having at least one element in common. So the result for just the part shown above would be:

#--------------------------------------
0 [0,1,3,4,5,10,...]
2 [2,8]
#--------------------------------------
What is the most efficient way to do this on big data (where the elements are just numbers)? Is a tree structure something worth thinking about? I currently do the job by converting the lists to sets and iterating over their intersections, but it is slow! Furthermore, I have a feeling this is so elementary! In addition, the implementation lacks something (unknown), because some lists sometimes remain unmerged! Having said that, if you propose rolling your own, please be generous and provide a simple sample code [Python is clearly my favorite :)] or pseudocode.
Update 1: Here is the code I was using:

#--------------------------------------
lsts = [[0,1,3],
        [1,0,3,4,5,10,11],
        [2,8],
        [3,1,0,16]];
#--------------------------------------
The function is (buggy!!):
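The buggy function itself is not included in this copy. Purely as an illustration (not the author's code), a minimal single-pass set/intersection merge of the kind described above shows how such an approach can leave lists unmerged:

def merge_naive(lsts):
    # Illustrative sketch only: one pass, each list merged into the first
    # intersecting group found. A group formed late is never re-checked
    # against groups created earlier, so some mergeable groups survive.
    results = []
    for lst in lsts:
        s = set(lst)
        for res in results:
            if s & res:
                res |= s
                break
        else:
            results.append(s)
    return results

# merge_naive([[1], [2], [1, 2]]) -> [set([1, 2]), set([2])]  (not fully merged)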

Update 2: In my experience, the code given below by Niklas Baumstark turned out to be a bit faster for the simple cases. The method given by "Hooked" remains untested, since it takes a completely different approach (it looks interesting, by the way). The testing procedure for all of these could be really hard, or impossible, to be certain of the results. The real data set I am going to use is so large and complex that tracking down any error just by repetition is impossible. That is, I need to be 100% satisfied with the reliability of the method before I push it into its place as a module within a large code. For now, Niklas's method is faster, and the answer for simple sets is of course correct.
However, how can I be sure that it works well for the really large data sets? Since I will not be able to trace the errors visually.

Update 3: Note that for this problem the reliability of the method is much more important than its speed. I will hopefully be able to translate the Python code to Fortran eventually for maximum performance.

Update 4: There are many interesting points in this post, with generously given answers and constructive comments. I would recommend reading all of them thoroughly. Please accept my appreciation for the development of the question, the amazing answers, and the constructive comments and discussions.

My attempt:

def merge(lsts):
    sets = [set(lst) for lst in lsts if lst]
    merged = True
    while merged:
        merged = False
        results = []
        while sets:
            common, rest = sets[0], sets[1:]
            sets = []
            for x in rest:
                if x.isdisjoint(common):
                    sets.append(x)
                else:
                    merged = True
                    common |= x
            results.append(common)
        sets = results
    return sets

lst = [[65, 17, 5, 30, 79, 56, 48, 62],
       [6, 97, 32, 93, 55, 14, 70, 32],
       [75, 37, 83, 34, 9, 19, 14, 64],
       [43, 71],
       [],
       [89, 49, 1, 30, 28, 3, 63],
       [35, 21, 68, 94, 57, 94, 9, 3],
       [16],
       [29, 9, 97, 43],
       [17, 63, 24]]
print merge(lst)
Benchmark:

import random

# adapt parameters to your own usage scenario
class_count = 50
class_size = 1000
list_count_per_class = 100
large_list_sizes = list(range(100, 1000))
small_list_sizes = list(range(0, 100))
large_list_probability = 0.5

if False:  # change to true to generate the test data file (takes a while)
    with open("/tmp/test.txt", "w") as f:
        lists = []
        classes = [
            range(class_size * i, class_size * (i + 1)) for i in range(class_count)
        ]
        for c in classes:
            # distribute each class across ~list_count_per_class (here 100) lists
            for i in xrange(list_count_per_class):
                lst = []
                if random.random() < large_list_probability:
                    size = random.choice(large_list_sizes)
                else:
                    size = random.choice(small_list_sizes)
                nums = set(c)
                for j in xrange(size):
                    x = random.choice(list(nums))
                    lst.append(x)
                    nums.remove(x)
                random.shuffle(lst)
                lists.append(lst)
        random.shuffle(lists)
        for lst in lists:
            f.write(" ".join(str(x) for x in lst) + "\n")

setup = """
# Niklas'
def merge_niklas(lsts):
    sets = [set(lst) for lst in lsts if lst]
    merged = 1
    while merged:
        merged = 0
        results = []
        while sets:
            common, rest = sets[0], sets[1:]
            sets = []
            for x in rest:
                if x.isdisjoint(common):
                    sets.append(x)
                else:
                    merged = 1
                    common |= x
            results.append(common)
        sets = results
    return sets

# Rik's
def merge_rik(data):
    sets = (set(e) for e in data if e)
    results = [next(sets)]
    for e_set in sets:
        to_update = []
        for i, res in enumerate(results):
            if not e_set.isdisjoint(res):
                to_update.insert(0, i)

        if not to_update:
            results.append(e_set)
        else:
            last = results[to_update.pop(-1)]
            for i in to_update:
                last |= results[i]
                del results[i]
            last |= e_set
    return results

# katrielalex's
def pairs(lst):
    i = iter(lst)
    first = prev = item = i.next()
    for item in i:
        yield prev, item
        prev = item
    yield item, first

import networkx

def merge_katrielalex(lsts):
    g = networkx.Graph()
    for lst in lsts:
        for edge in pairs(lst):
            g.add_edge(*edge)
    return networkx.connected_components(g)

# agf's (optimized)
from collections import deque

def merge_agf_optimized(lists):
    sets = deque(set(lst) for lst in lists if lst)
    results = []
    disjoint = 0
    current = sets.pop()
    while True:
        merged = False
        newsets = deque()
        for _ in xrange(disjoint, len(sets)):
            this = sets.pop()
            if not current.isdisjoint(this):
                current.update(this)
                merged = True
                disjoint = 0
            else:
                newsets.append(this)
                disjoint += 1
        if sets:
            newsets.extendleft(sets)
        if not merged:
            results.append(current)
            try:
                current = newsets.pop()
            except IndexError:
                break
            disjoint = 0
        sets = newsets
    return results

# agf's (simple)
def merge_agf_simple(lists):
    newsets, sets = [set(lst) for lst in lists if lst], []
    while len(sets) != len(newsets):
        sets, newsets = newsets, []
        for aset in sets:
            for eachset in newsets:
                if not aset.isdisjoint(eachset):
                    eachset.update(aset)
                    break
            else:
                newsets.append(aset)
    return newsets

# alexis'
def merge_alexis(data):
    bins = range(len(data))  # Initialize each bin[n] == n
    nums = dict()

    data = [set(m) for m in data]  # Convert to sets
    for r, row in enumerate(data):
        for num in row:
            if num not in nums:
                # New number: tag it with a pointer to this row's bin
                nums[num] = r
                continue
            else:
                dest = locatebin(bins, nums[num])
                if dest == r:
                    continue  # already in the same bin

                if dest > r:
                    dest, r = r, dest  # always merge into the smallest bin

                data[dest].update(data[r])
                data[r] = None
                # Update our indices to reflect the move
                bins[r] = dest
                r = dest

    # Filter out the empty bins
    have = [m for m in data if m]
    return have

def locatebin(bins, n):
    while bins[n] != n:
        n = bins[n]
    return n

lsts = []
size = 0
num = 0
max = 0
for line in open("/tmp/test.txt", "r"):
    lst = [int(x) for x in line.split()]
    size += len(lst)
    if len(lst) > max:
        max = len(lst)
    num += 1
    lsts.append(lst)
"""

setup += """
print "%i lists, {class_count} equally distributed classes, average size %i, max size %i" % (num, size/num, max)
""".format(class_count=class_count)

import timeit
print "niklas"
print timeit.timeit("merge_niklas(lsts)", setup=setup, number=3)
print "rik"
print timeit.timeit("merge_rik(lsts)", setup=setup, number=3)
print "katrielalex"
print timeit.timeit("merge_katrielalex(lsts)", setup=setup, number=3)
print "agf (1)"
print timeit.timeit("merge_agf_optimized(lsts)", setup=setup, number=3)
print "agf (2)"
print timeit.timeit("merge_agf_simple(lsts)", setup=setup, number=3)
print "alexis"
print timeit.timeit("merge_alexis(lsts)", setup=setup, number=3)
This would be my newest approach:

def merge(data):
    sets = (set(e) for e in data if e)
    results = [next(sets)]
    for e_set in sets:
        to_update = []
        for i,res in enumerate(results):
            if not e_set.isdisjoint(res):
                to_update.insert(0,i)

        if not to_update:
            results.append(e_set)
        else:
            last = results[to_update.pop(-1)]
            for i in to_update:
                last |= results[i]
                del results[i]
            last |= e_set

    return results
Note: empty lists are removed during the merging.

Update: Reliability

You need two tests for a 100% reliability of success:

  • Check that all the resulting sets are mutually disjoint:

    merged = [{0, 1, 3, 4, 5, 10, 11, 16}, {8, 2}, {8}]
    
    from itertools import combinations
    for a,b in combinations(merged,2):
        if not a.isdisjoint(b):
            raise Exception(a,b)    # just an example
    
  • Check that the merged sets cover the original data (as suggested by katrielalex).
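    A minimal sketch of that coverage test (assuming lsts holds the original input and merged the result, as above):

    from itertools import chain

    original_flat = set(chain(*lsts))
    merged_flat = set(chain(*merged))
    if original_flat != merged_flat:
        raise Exception(original_flat ^ merged_flat)    # just an example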

I think this will take some time, but maybe it's worth it if you want to be 100% sure.

Using matrix operations

Let me preface this answer with the following comment:

This is the wrong way to do it. It is prone to numerical instability and is much slower than the other methods presented; use at your own risk.

That being said, I couldn't resist solving the problem from a dynamical point of view (and I hope you'll get a fresh perspective on the problem). In theory this should work all the time, but eigenvalue calculations can often fail. The idea is to think of your list as a flow from rows to columns. If two rows share a common value, there is a connecting flow between them. If we were to think of these flows as water, we would see that the flows cluster into little pools when there is a connecting path between them. For simplicity, I'm going to use a smaller set, though it works with your data set as well:

from numpy import where, newaxis, zeros
from scipy import linalg

X = [[0,1,3],[2],[3,1]]
We need to convert the data into a flow graph: if row i flows into value j, we put that in the matrix. Here we have 3 rows and 4 unique values:

A = zeros((4,len(X)), dtype=float)
for i,row in enumerate(X):
    for val in row: A[val,i] = 1
In general you'll need to change the 4 to capture the number of unique values you have. If, as here, the set is a list of integers starting from 0, you can simply make it the largest number plus one. We now perform an eigenvalue decomposition; an SVD to be exact, since our matrix is not square:

S  = linalg.svd(A)
We only want to keep the 3x3 portion of this answer, since it will represent the flow of the pools. In fact, we only need the absolute values of this matrix; we only care whether there is a flow in this cluster space.
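Keeping that block in absolute value gives M; this assignment is implied by the text but absent from the listing (with scipy's svd, S[2] is the 3x3 V^T factor):

M = abs(S[2])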

We can think of this matrix M as a Markov matrix, and make that explicit by row-normalizing. Once we have it, we compute the (left) eigenvalue decomposition of this matrix:

M /=  M.sum(axis=1)[:,newaxis]
U,V = linalg.eig(M,left=True, right=False)
V = abs(V)
Now, a disconnected (non-ergodic) Markov matrix has the nice property that for each disconnected cluster there is an eigenvalue of unity. The eigenvectors associated with these unity values are the ones we want:

idx = where(U > .999)[0]
C = V.T[idx] > 0
I had to use .999 due to the aforementioned numerical instability. At this point, we're done! Each independent cluster can now pull the corresponding rows out:

for cluster in C:
    print where(A[:,cluster].sum(axis=1))[0]
As expected:

[0 1 3]
[2]
Change X to your lst and you'll get: [0 1 3 4 5 10 11 16] and [2 8].


Addendum

Why might this be useful? I don't know where your underlying data comes from, but what happens when the connections are not absolute? Say row 1 has entry 3 80% of the time; how would you generalize the problem? The flow method above would work just fine, and would be completely parametrized by that .999 value: the further it is from unity, the looser the association.
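For example (my illustration, simply mirroring the cutoff used above), loosening the threshold admits weaker connections into the same cluster:

idx = where(U > 0.9)[0]   # looser than .999: near-unity eigenvalues also count
C = V.T[idx] > 0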


Visual representation

Since a picture is worth 1K words, here are the plots of the matrices A and V for my example and for your lst, respectively. Note how V splits into two components (after permutation it is a block-diagonal matrix with two blocks), since each example has only two unique merged lists.

[Figure: plots of A and V, not reproduced here]


Alternatively (and faster), M can be built directly from the adjacency matrix instead of via the SVD (dot comes from numpy):

from numpy import dot

M = dot(A.T, A)
M /= M.sum(axis=1)[:, newaxis]
U, V = linalg.eig(M, left=True, right=False)
def merge(lsts):
    # This is an index list that stores the joined id for each list
    joined = range(len(lsts))
    # Create an ordered list with indices
    indexed_list = sorted((el, index) for index, lst in enumerate(lsts) for el in lst)
    # Loop through the ordered list; if two elements are the same and
    # their lists are not yet joined, relabel with the joined id
    el_0, idx_0 = None, None
    for el, idx in indexed_list:
        if el == el_0 and joined[idx] != joined[idx_0]:
            old = joined[idx]
            rep = joined[idx_0]
            joined = [rep if id == old else id for id in joined]
        el_0, idx_0 = el, idx
    return joined
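A hypothetical usage sketch (mine, not from the thread): the returned index list maps each input list to a group id, which can be expanded into merged groups like this:

from collections import defaultdict

lsts = [[0, 1, 3], [1, 0, 3, 4], [2, 8], [3, 1, 0, 16]]
groups = defaultdict(set)
for idx, gid in enumerate(merge(lsts)):
    groups[gid].update(lsts[idx])
print groups.values()
# e.g. [set([0, 1, 3, 4, 16]), set([8, 2])]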
def pairs(lst):
    i = iter(lst)
    first = prev = item = i.next()
    for item in i:
        yield prev, item
        prev = item
    yield item, first

lists = [[1,2,3],[3,5,6],[8,9,10],[11,12,13]]

import networkx
g = networkx.Graph()
for sub_list in lists:
    for edge in pairs(sub_list):
            g.add_edge(*edge)

print networkx.connected_components(g)
# [[1, 2, 3, 5, 6], [8, 9, 10], [11, 12, 13]]
from itertools import chain

def check(lsts, result):
    lsts = [set(s) for s in lsts]
    all_items = set(chain(*lsts))
    all_result_items = set(chain(*result))
    num_result_items = sum(len(s) for s in result)
    if num_result_items != len(all_result_items):
        print("Error: result sets overlap!")
        print(num_result_items, len(all_result_items))
        print(sorted(map(len, result)), sorted(map(len, lsts)))
    if all_items != all_result_items:
        print("Error: result doesn't match input lists!")
    if not all(any(set(s).issubset(t) for t in result) for s in lsts):
        print("Error: not all input lists are contained in a result set!")

    seen = set()
    todo = list(filter(bool, lsts))
    done = False
    while not done:
        deletes = []
        for i, s in enumerate(todo): # intersection with seen, or with unseen result set, is OK
            if not s.isdisjoint(seen) or any(t.isdisjoint(seen) for t in result if not s.isdisjoint(t)):
                seen.update(s)
                deletes.append(i)
        for i in reversed(deletes):
            del todo[i]
        done = not deletes
    if todo:
        print("Error: A result set should be split into two or more parts!")
        print(todo)
def merge(mylists):
    results, sets = [], [set(lst) for lst in mylists if lst]
    upd, isd, pop = set.update, set.isdisjoint, sets.pop
    while sets:
        # Absorb into sets[0] every later set that intersects it, iterating
        # backwards so pop(i) is safe; when a full pass absorbs nothing,
        # sets[0] is a finished group.
        if not [upd(sets[0], pop(i)) for i in xrange(len(sets) - 1, 0, -1) if not isd(sets[0], sets[i])]:
            results.append(pop(0))
    return results
def merge(lsts):
    sets = [set(lst) for lst in lsts if lst]  # drop empty lists up front
    results = []
    while sets:
        first, rest = sets[0], sets[1:]
        merged = False
        sets = []
        for s in rest:
            if s.isdisjoint(first):
                sets.append(s)
            else:
                first |= s
                merged = True
        if merged:
            sets.append(first)  # keep growing until a clean pass
        else:
            results.append(first)
    return results
lists = [[1,2,3],[3,5,6],[8,9,10],[11,12,13]]

import networkx as nx
g = nx.Graph()

for sub_list in lists:
    for i in range(1,len(sub_list)):
        g.add_edge(sub_list[0],sub_list[i])

print nx.connected_components(g)
#[[1, 2, 3, 5, 6], [8, 9, 10], [11, 12, 13]]
5000 lists, 5 classes, average size 74, max size 1000
15.2264976415

print timeit.timeit("merge1(lsts)", setup=setup, number=10)

5000 lists, 5 classes, average size 74, max size 1000
1.26998780571
from collections import deque

def merge(lists):
    sets = deque(set(lst) for lst in lists if lst)
    results = []
    disjoint = 0
    current = sets.pop()
    while True:
        merged = False
        newsets = deque()
        for _ in xrange(disjoint, len(sets)):
            this = sets.pop()
            if not current.isdisjoint(this):
                current.update(this)
                merged = True
                disjoint = 0
            else:
                newsets.append(this)
                disjoint += 1
        if sets:
            newsets.extendleft(sets)
        if not merged:
            results.append(current)
            try:
                current = newsets.pop()
            except IndexError:
                break
            disjoint = 0
        sets = newsets
    return results
(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)
def merge0(lists):
    newsets, sets = [set(lst) for lst in lists if lst], []
    while len(sets) != len(newsets):
        sets, newsets = newsets, []
        for aset in sets:
            for eachset in newsets:
                if not aset.isdisjoint(eachset):
                    eachset.update(aset)
                    break
            else:
                newsets.append(aset)
    return newsets
import random

tenk = range(10000)
lsts = [random.sample(tenk, random.randint(0, 500)) for _ in range(2000)]

setup = """
def merge0(lists):
  newsets, sets = [set(lst) for lst in lists if lst], []
  while len(sets) != len(newsets):
    sets, newsets = newsets, []
    for aset in sets:
      for eachset in newsets:
        if not aset.isdisjoint(eachset):
          eachset.update(aset)
          break
      else:
        newsets.append(aset)
  return newsets

def merge1(lsts):
  sets = [set(lst) for lst in lsts if lst]
  merged = 1
  while merged:
    merged = 0
    results = []
    while sets:
      common, rest = sets[0], sets[1:]
      sets = []
      for x in rest:
        if x.isdisjoint(common):
          sets.append(x)
        else:
          merged = 1
          common |= x
      results.append(common)
    sets = results
  return sets

lsts = """ + repr(lsts)

import timeit
print timeit.timeit("merge0(lsts)", setup=setup, number=10)
print timeit.timeit("merge1(lsts)", setup=setup, number=10)
from collections import Counter
from itertools import chain

c = Counter(chain(*lists))
print c[1]
# 88
import heapq
from itertools import chain
def merge6(lists):
    for l in lists:
        l.sort()
    one_list = heapq.merge(*[zip(l,[i]*len(l)) for i,l in enumerate(lists)]) #iterating through one_list takes 25 seconds!!
    previous = one_list.next()

    d = {i:i for i in range(len(lists))}
    for current in one_list:
        if current[0]==previous[0]:
            d[current[1]] = d[previous[1]]
        previous=current

    groups=[[] for i in range(len(lists))]
    for k in d:
        groups[d[k]].append(lists[k])  # add each list to its group

    return [set(chain(*g)) for g in groups if g]  # since each subgroup in g is sorted, it would be faster to merge these subgroups, removing duplicates along the way


lists = [[1,2,3],[3,5,6],[8,9,10],[11,12,13]]
print merge6(lists)
"[set([1, 2, 3, 5, 6]), set([8, 9, 10]), set([11, 12, 13])]""



import timeit
print timeit.timeit("merge1(lsts)", setup=setup, number=10)
print timeit.timeit("merge4(lsts)", setup=setup, number=10)
print timeit.timeit("merge6(lsts)", setup=setup, number=10)

# 5000 lists, 5 classes, average size 74, max size 1000
# merge1: 1.26732238315
# merge4: 1.16062907437
# merge6: 30.7257182826
def mergelists5(data):
    """Check each number in our arrays only once, merging when we find
    a number we have seen before.
    """

    bins = range(len(data))  # Initialize each bin[n] == n
    nums = dict()

    data = [set(m) for m in data ]  # Convert to sets    
    for r, row in enumerate(data):
        for num in row:
            if num not in nums:
                # New number: tag it with a pointer to this row's bin
                nums[num] = r
                continue
            else:
                dest = locatebin(bins, nums[num])
                if dest == r:
                    continue # already in the same bin

                if dest > r:
                    dest, r = r, dest   # always merge into the smallest bin

                data[dest].update(data[r]) 
                data[r] = None
                # Update our indices to reflect the move
                bins[r] = dest
                r = dest 

    # Filter out the empty bins
    have = [ m for m in data if m ]
    print len(have), "groups in result"
    return have


def locatebin(bins, n):
    """
    Find the bin where list n has ended up: Follow bin references until
    we find a bin that has not moved.
    """
    while bins[n] != n:
        n = bins[n]
    return n
import json
import unittest
from copy import deepcopy

class MergeTestCase(unittest.TestCase):
    # Subclasses are expected to provide merge_func, the implementation under test.

    def setUp(self):
        with open('./lists/test_list.txt') as f:
            self.lsts = json.loads(f.read())
        self.merged = self.merge_func(deepcopy(self.lsts))

    def test_disjoint(self):
        """Check disjoint-ness of merged results"""
        from itertools import combinations
        for a,b in combinations(self.merged, 2):
            self.assertTrue(a.isdisjoint(b))

    def test_coverage(self):    # Credit to katrielalex
        """Check coverage original data"""
        merged_flat = set()
        for s in self.merged:
            merged_flat |= s

        original_flat = set()
        for lst in self.lsts:
            original_flat |= set(lst)

        self.assertTrue(merged_flat == original_flat)

    def test_subset(self):      # Credit to WolframH
        """Check that every original data is a subset"""
        for lst in self.lsts:
            self.assertTrue(any(set(lst) <= e for e in self.merged))
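One way to wire a concrete implementation into this test case (a hypothetical subclass; it assumes merge_niklas from the benchmark above is in scope, and staticmethod keeps Python from binding it as a method):

class NiklasTestCase(MergeTestCase):
    merge_func = staticmethod(merge_niklas)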
filename = './lists/timing_1.txt'
class_count = 50,
class_size = 1000,
list_count_per_class = 100,
large_list_sizes = (100, 1000),
small_list_sizes = (0, 100),
large_list_probability = 0.5,

filename = './lists/timing_2.txt'
class_count = 15,
class_size = 1000,
list_count_per_class = 300,
large_list_sizes = (100, 1000),
small_list_sizes = (0, 100),
large_list_probability = 0.5,

filename = './lists/timing_3.txt'
class_count = 15,
class_size = 1000,
list_count_per_class = 300,
large_list_sizes = (100, 1000),
small_list_sizes = (0, 100),
large_list_probability = 0.1,
Timing with: >> Niklas << Benchmark
Info: 5000 lists, average size 305, max size 999

Timing Results:
10.434  -- alexis
11.476  -- agf
11.555  -- Niklas B.
13.622  -- Rik. Poggi
14.016  -- agf (optimized)
14.057  -- ChessMaster
20.208  -- katrielalex
21.697  -- steabert
25.101  -- robert king
76.870  -- Sven Marnach
133.399  -- hochl
Timing with: >> Niklas << Benchmark
Info: 4500 lists, average size 305, max size 999

Timing Results:
8.247  -- Niklas B.
8.286  -- agf
8.637  -- Rik. Poggi
8.967  -- alexis
9.090  -- ChessMaster
9.091  -- agf (optimized)
18.186  -- katrielalex
19.543  -- steabert
22.852  -- robert king
70.486  -- Sven Marnach
104.405  -- hochl
Timing with: >> Niklas << Benchmark
Info: 4500 lists, average size 98, max size 999

Timing Results:
2.746  -- agf
2.850  -- Niklas B.
2.887  -- Rik. Poggi
2.972  -- alexis
3.077  -- ChessMaster
3.174  -- agf (optimized)
5.811  -- katrielalex
7.208  -- robert king
9.193  -- steabert
23.536  -- Sven Marnach
37.436  -- hochl
Timing with: >> Sven << Benchmark
Info: 200 lists, average size 10, max size 10

Timing Results:
2.053  -- alexis
2.199  -- ChessMaster
2.410  -- agf (optimized)
3.394  -- agf
3.398  -- Rik. Poggi
3.640  -- robert king
3.719  -- steabert
3.776  -- Niklas B.
3.888  -- hochl
4.610  -- Sven Marnach
5.018  -- katrielalex
Timing with: >> Agf << Benchmark
Info: 2000 lists, average size 246, max size 500

Timing Results:
3.446  -- Rik. Poggi
3.500  -- ChessMaster
3.520  -- agf (optimized)
3.527  -- Niklas B.
3.527  -- agf
3.902  -- hochl
5.080  -- alexis
15.997  -- steabert
16.422  -- katrielalex
18.317  -- robert king
1257.152  -- Sven Marnach
def merge(data):
    # Union-find over the elements themselves: parents maps each element
    # to a representative; find() walks the chain, compressing paths.
    parents = {}
    def find(i):
        j = parents.get(i, i)
        if j == i:
            return i
        k = find(j)
        if k != j:
            parents[i] = k  # path compression
        return k
    for l in filter(None, data):
        # Union all elements of this list with the first element's root.
        parents.update(dict.fromkeys(map(find, l), find(l[0])))
    # Collect elements by their final root to produce the merged groups.
    merged = {}
    for k, v in parents.items():
        merged.setdefault(find(v), []).append(k)
    return merged.values()
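A quick usage sketch (my example, not from the thread):

lsts = [[65, 17, 5], [17, 63, 24], [2, 8], []]
print merge(lsts)
# e.g. [[65, 17, 5, 63, 24], [2, 8]] (group and element order may vary)
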
def merge_list(starting_list):
    # Sweep forward once: push each list's elements into the first later
    # list it overlaps, so connected groups accumulate as the sweep moves
    # on; lists disjoint from everything after them are finished groups.
    final_list = []
    for i, v in enumerate(starting_list[:-1]):
        for nxt in starting_list[i + 1:]:
            if set(v) & set(nxt):
                nxt.extend(set(v) - set(nxt))
                break
        else:
            final_list.append(v)
    final_list.append(starting_list[-1])
    return final_list
def merge(lists):
    # Repeatedly scan all pairs; whenever two lists intersect, replace the
    # first with their union and delete the second. Quadratic, but simple.
    # Stops once a full scan finds nothing left to merge.
    while True:
        flag = 0
        for i in range(0, len(lists)):
            for j in range(i + 1, len(lists)):
                if set(lists[i]) & set(lists[j]):
                    lists[i] = list(set(lists[i]) | set(lists[j]))
                    del lists[j]
                    flag += 1
                    break
        if flag == 0:
            break
    return lists
from itertools import combinations


def merge(elements_list):
    d = {index: set(elements) for index, elements in enumerate(elements_list)}

    while any(not set.isdisjoint(d[i], d[j]) for i, j in combinations(d.keys(), 2)):
        merged = set()
        for i, j in combinations(d.keys(), 2):
            if not set.isdisjoint(d[i], d[j]):
                d[i] = set.union(d[i], d[j])
                merged.add(j)

        for k in merged:
            d.pop(k)    

    return [v for v in d.values() if v]

lst = [[65, 17, 5, 30, 79, 56, 48, 62],
       [6, 97, 32, 93, 55, 14, 70, 32],
       [75, 37, 83, 34, 9, 19, 14, 64],
       [43, 71],
       [],
       [89, 49, 1, 30, 28, 3, 63],
       [35, 21, 68, 94, 57, 94, 9, 3],
       [16],
       [29, 9, 97, 43],
       [17, 63, 24]]

print(merge(lst))