Getting a list of all pagination URLs from the links in a txt file with Python requests

Hi guys, I need to define a function in Python that, for each link in a txt file, gets a list of all the pagination URLs at the bottom of the page.

Here is an example of what I need to do.

Input link:

http://www.apartmentguide.com/apartments/Alabama/Hartselle/
Desired output:

www.apartmentguide.com/apartments/Alabama/Hartselle/?page=2
www.apartmentguide.com/apartments/Alabama/Hartselle/?page=3
www.apartmentguide.com/apartments/Alabama/Hartselle/?page=4
www.apartmentguide.com/apartments/Alabama/Hartselle/?page=5
www.apartmentguide.com/apartments/Alabama/Hartselle/?page=6
www.apartmentguide.com/apartments/Alabama/Hartselle/?page=7
www.apartmentguide.com/apartments/Alabama/Hartselle/?page=8
www.apartmentguide.com/apartments/Alabama/Hartselle/?page=9
And so on, up to whatever the limit happens to be for each input URL.
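To make the pattern concrete, here is a minimal sketch of the URL construction I am after, assuming the last page number were already known (finding that number for each link is the part I can't work out):

def build_page_urls(base_url, last_page):
    # e.g. build_page_urls("http://www.apartmentguide.com/apartments/Alabama/Hartselle/", 9)
    # yields the ?page=2 ... ?page=9 URLs shown above
    return ["%s?page=%d" % (base_url, n) for n in range(2, last_page + 1)]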

This is the function I have written so far, but it doesn't work. I'm not very good at Python either.

import requests
#from bs4 import BeautifulSoup
from scrapy import Selector as Se
import urllib2


lists = open("C:\Users\Administrator\Desktop\\3.txt","r")
read_list = lists.read()
line = read_list.split("\n")


def get_links(line):
    for each in line:
        r = requests.get(each)
        sel = Se(text=r.text, type="html")
        next_ = sel.xpath('//a[@class="next sprite"]//@href').extract()
        for next_1 in next_:
            next_2 = "http://www.apartmentguide.com"+next_1
            print next_2
        get_links(next_1)

get_links(line)

Here are two approaches:

import mechanize

import requests
from bs4 import BeautifulSoup, SoupStrainer
import urlparse

import pprint

#-- Mechanize --
br = mechanize.Browser()

def get_links_mechanize(root):
    links = []
    br.open(root)

    for link in br.links():
        try:
            if dict(link.attrs)['class'] == 'page':
                links.append(link.absolute_url)
        except:
            pass
    return links


#-- Requests / BeautifulSoup / urlparse --
def get_links_bs(root):
    links = []
    r = requests.get(root)

    for link in BeautifulSoup(r.text, parse_only=SoupStrainer('a')):
        if link.has_attr('href') and link.has_attr('class') and 'page' in link.get('class'):
            links.append(urlparse.urljoin(root, link.get('href')))

    return links


#with open("C:\Users\Administrator\Desktop\\3.txt","r") as f:
#    for root in f:
#        links = get_links(root) 
#        # <Do something with links>
root = 'http://www.apartmentguide.com/apartments/Alabama/Hartselle/'

print "Mech:"
pprint.pprint( get_links_mechanize(root) )
print "Requests/BS4/urlparse:"
pprint.pprint( get_links_bs(root) )
There are duplicates in there. You can eliminate them by changing

return links

to

return list(set(links))

in whichever method you choose.
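If the order of the links matters, note that set() will scramble it; a minimal order-preserving dedup, independent of which method you pick:

def dedupe_keep_order(links):
    # Keep the first occurrence of each link, preserving the original order.
    seen = set()
    unique = []
    for link in links:
        if link not in seen:
            seen.add(link)
            unique.append(link)
    return unique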

EDIT:

I noticed that the functions above only return the links to pages 2-5; you would have to navigate through those pages to see that there are actually 10 pages of results.
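One way around that is to keep following whatever page links you discover until a full pass turns up nothing new; a sketch built on the requests/BeautifulSoup approach above (Python 2 style to match the rest of this answer, and it still assumes the pagination anchors carry a "page" class):

import requests, urlparse
from bs4 import BeautifulSoup, SoupStrainer

def get_all_pages(root):
    # Fetch every known page, collect any new "page" links it exposes,
    # and stop once nothing new turns up.
    seen = set([root])
    queue = [root]
    while queue:
        url = queue.pop()
        r = requests.get(url)
        soup = BeautifulSoup(r.text, 'html.parser', parse_only=SoupStrainer('a'))
        for a in soup.find_all('a', class_='page'):
            if a.has_attr('href'):
                page_url = urlparse.urljoin(root, a['href'])
                if page_url not in seen:
                    seen.add(page_url)
                    queue.append(page_url)
    return sorted(seen)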

A completely different approach would be to scrape the number of results from the "root" page, work out how many pages of results that implies, and then build the page links from that.

Since there are 20 results per page, figuring out how many pages there are is simple. Consider:

import requests, re, math, pprint

def scrape_results(root):
    links = []
    r = requests.get(root)

    mat = re.search(r'We have (\d+) apartments for rent', r.text)
    num_results = int(mat.group(1))                     # 182 at the moment
    num_pages = int(math.ceil(num_results/20.0))        # ceil(182/20) => 10

    # Construct links for pages 1-10
    for i in range(num_pages):
        links.append("%s?page=%d" % (root, (i+1)))

    return links

pprint.pprint(scrape_results(root))
This will be the fastest of the three approaches, but possibly more error-prone.
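Since the regex is what makes it fragile, a slightly defensive variant could fall back to a single page whenever the "We have N apartments for rent" text is not found; a sketch under the same 20-results-per-page assumption:

import requests, re, math

def count_pages(root, per_page=20):
    # Fall back to one page if the result-count sentence is missing or reworded.
    r = requests.get(root)
    mat = re.search(r'We have (\d+) apartments for rent', r.text)
    if mat is None:
        return 1
    return int(math.ceil(int(mat.group(1)) / float(per_page)))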

EDIT 2:

Maybe something like this:

import re, math, pprint
import requests, urlparse
from bs4 import BeautifulSoup, SoupStrainer

def get_pages(root):
    links = []
    r = requests.get(root)

    mat = re.search(r'We have (\d+) apartments for rent', r.text)
    num_results = int(mat.group(1))                     # 182 at the moment
    num_pages = int(math.ceil(num_results/20.0))        # ceil(182/20) => 10

    # Construct links for pages 1-10
    for i in range(num_pages):
        links.append("%s?page=%d" % (root, (i+1)))

    return links

def get_listings(page):
    links = []
    r = requests.get(page)

    for link in BeautifulSoup(r.text, parse_only=SoupStrainer('a')):
        if link.has_attr('href') and link.has_attr('data-listingid') and 'name' in link.get('class'):
            links.append(urlparse.urljoin(root, link.get('href')))

    return links

root='http://www.apartmentguide.com/apartments/Alabama/Hartselle/'
listings = []
for page in get_pages(root):
    listings += get_listings(page)

pprint.pprint(listings)
print(len(listings))

I wasn't sure about the re part, so I tried xpath instead:

import requests, math, csv
from scrapy import Selector

links = open("C:\Users\ssamant\Desktop\Anida\Phase_II\Apartmentfinder\\2.txt","r")
read_list = links.read()
line = read_list.split("\n")

for each in line:
    lines = []
    r = requests.get(each)
    sel = Selector(text=r.text,type="html")
    mat = sel.xpath('//h1//strong/text()').extract()
    mat = str(mat)
    mat1 = mat.replace(" apartments for rent']","")
    mat2 = mat1.replace("[u'","")
    mat3 = int(mat2)
    num_pages = int(math.ceil(mat3/20.0))
    for i in range(num_pages):
        lines.append("%s/Page%d" % (each, (i+1)))
    with open('C:\Users\ssamant\Desktop\Anida\Phase_II\Apartmentfinder\\test.csv', 'ab') as f:
        writer = csv.writer(f)
        for val in lines:
            writer.writerow([val])
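The string-replace chain above is fragile (it breaks if the wording or the list representation changes), so pulling the digits out with a regex may be a bit sturdier; a sketch that assumes the same //h1//strong text:

import re

def extract_result_count(sel):
    # Take the first //h1//strong text node, e.g. u'182 apartments for rent',
    # and return the leading integer, or None if nothing matches.
    texts = sel.xpath('//h1//strong/text()').extract()
    if not texts:
        return None
    mat = re.search(r'\d+', texts[0])
    return int(mat.group(0)) if mat else None

num_pages would then be int(math.ceil(extract_result_count(sel) / 20.0)) as before.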

The list can be found at this link - appsnic.com/wp-content/uploads/2015/03/3.txt

Thanks Edwards, the third one using math works. Actually, I was looking for each of the 20 listing links found on every page through the pagination. Is it possible with mechanize or bs4 to follow the pagination in each link and extract the 20 listings per page?

I made another edit ("EDIT 2") above; it might help.

Thanks Edward, I don't know why, but for some reason that code doesn't work for me: 'Doctype' object has no attribute 'has_attr'. I do have bs4 installed.
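The 'Doctype' object has no attribute 'has_attr' error comes from iterating the soup object directly, which yields doctype and text nodes as well as tags; asking the soup for the <a> tags explicitly avoids it. A sketch of get_listings() adjusted that way, with the same Python 2 imports and the same markup assumptions as EDIT 2:

import requests, urlparse
from bs4 import BeautifulSoup, SoupStrainer

def get_listings(page):
    # find_all() only returns Tag objects, so has_attr() is always available.
    links = []
    r = requests.get(page)
    soup = BeautifulSoup(r.text, 'html.parser', parse_only=SoupStrainer('a'))
    for link in soup.find_all('a', attrs={'data-listingid': True}):
        if link.has_attr('href') and 'name' in (link.get('class') or []):
            links.append(urlparse.urljoin(page, link.get('href')))
    return links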