Python BeautifulSoup findall gets stuck without processing


I am trying to learn BeautifulSoup, and I am trying to find all the links inside facebook.com and iterate over each of them.

Here is my code... it works fine, but once it finds Linkedin.com and iterates over it, it gets stuck at some point after this URL -

When I run Linkedin.com on its own, I do not have any problem.

Could this be some limitation in my operating system? I am using Ubuntu Linux.

import urllib2
import BeautifulSoup
import re
def main_process(response):
    print "Main process started"
    soup = BeautifulSoup.BeautifulSoup(response)
    limit = '5'
    count = 0
    main_link = valid_link =  re.search("^(https?://(?:\w+.)+\.com)(?:/.*)?$","http://www.facebook.com")
    if main_link:
        main_link = main_link.group(1)
    print 'main_link = ', main_link
    result = {}
    result[main_link] = {'incoming':[],'outgoing':[]}
    print 'result = ', result
    for link in soup.findAll('a',href=True):
        if count < 10:
            valid_link =  re.search("^(https?://(?:\w+.)+\.com)(?:/.*)?$",link.get('href'))
            if valid_link:
                #print 'Main link = ', link.get('href')
                print 'Links object = ', valid_link.group(1)
                connecting_link = valid_link.group(1)
                connecting_link = connecting_link.encode('ascii')
                if main_link <> connecting_link:
                    print 'outgoing link = ', connecting_link
                    result = add_new_link(connecting_link, result)
                    #Check if the outgoing is already added, if its then don't add it
                    populate_result(result,main_link,connecting_link)
                    print 'result = ', result
                    print 'connecting'
                    request = urllib2.Request(connecting_link)
                    response = urllib2.urlopen(request)
                    soup = BeautifulSoup.BeautifulSoup(response)
                    for sublink in soup.findAll('a',href=True):
                        print 'sublink = ', sublink.get('href')
                        valid_link =  re.search("^(https?://(?:\w+.)+\.com)(?:/.*)?$",sublink.get('href'))
                        if valid_link:
                            print 'valid_link = ', valid_link.group(1)
                            valid_link = valid_link.group(1)
                            if valid_link <> connecting_link:
                                populate_result(result,connecting_link,valid_link)
        count += 1      
    print 'final result = ', result
    #    print 'found a url with national-park in the link'

def add_new_link(connecting_link, result):
    result[connecting_link] = {'incoming':[],'outgoing':[]}
    return result

def populate_result(result, link, dest_link):
    # Only record the outgoing link if it is not already present
    if dest_link not in result[link]['outgoing']:
        result[link]['outgoing'].append(dest_link)

    return result


if __name__ == "__main__":

    request = urllib2.Request("http://facebook.com")
    print 'process start'
    try:
        response = urllib2.urlopen(request)
        main_process(response)
    except urllib2.URLError, e:
        print "URLERROR"

    print "program ended"
The problem is the re.search call hanging for certain URLs, on this line:

valid_link = re.search("^(https?://(?:\w+.)+\.com)(?:/.*)?$", sublink.get('href'))
For example, it hangs for this URL: https://www.facebook.com/campaign/landing.php?placement=pflo&campaign_id=402047449186&extra_1=auto

Apparently the culprit is the unescaped dot in (?:\w+.)+: since . also matches word characters, it overlaps with \w+, so on a long URL like this one the engine has exponentially many ways to split the string between the two - classic catastrophic backtracking.
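
A minimal sketch of the fix, assuming the intent was to match dot-separated host labels ending in .com: escaping the dot makes \w+ and \. disjoint, so the exponential backtracking disappears.

import re

# Escaping the dot removes the overlap between `\w+` and `.`
# that triggers the catastrophic backtracking.
pattern = re.compile(r"^(https?://(?:\w+\.)+com)(?:/.*)?$")

url = "https://www.facebook.com/campaign/landing.php?placement=pflo&campaign_id=402047449186&extra_1=auto"
match = pattern.search(url)
if match:
    print match.group(1)  # prints: https://www.facebook.com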

One solution would be to use a different regex for validating URLs; see here for plenty of options:


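Alternatively, URL validation does not need a regex at all. Here is a sketch using urlparse from the Python 2 standard library; is_valid_com_link is a hypothetical helper mirroring roughly what the original pattern appears to accept:

from urlparse import urlparse

def is_valid_com_link(href):
    # Hypothetical helper: accept only absolute http(s) URLs
    # whose host ends in .com, which is roughly what the
    # original regex was checking for.
    parts = urlparse(href)
    return parts.scheme in ('http', 'https') and parts.netloc.endswith('.com')

print is_valid_com_link("https://www.facebook.com/campaign/landing.php")  # True
print is_valid_com_link("mailto:someone@example.com")                     # False
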
Hope that helps.

Can you narrow it down? FYI, your regex case actually forced me to start a new thread; see
>>> import re
>>> s = "https://www.facebook.com/campaign/landing.php?placement=pflo&campaign_id=402047449186&extra_1=auto"
>>> re.search("^(https?://(?:\w+.)+\.com)(?:/.*)?$", s)

hanging "forever"...
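
For comparison, the same search with the dot escaped returns immediately (a sketch; note the raw string):

>>> import re
>>> s = "https://www.facebook.com/campaign/landing.php?placement=pflo&campaign_id=402047449186&extra_1=auto"
>>> re.search(r"^(https?://(?:\w+\.)+com)(?:/.*)?$", s).group(1)
'https://www.facebook.com'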