Python BeautifulSoup和请求解析问题

Python BeautifulSoup和请求解析问题,python,parsing,python-requests,bs4,Python,Parsing,Python Requests,Bs4,尝试使用BeautifulSoup和请求模块时出错 我的代码如下: import requests from bs4 import BeautifulSoup def get_html(url): url = ('https://m.vk.com/bageto?act=members&offset=0') r = requests.get(url) return r.text def get_total_pages(get_html): soup =

尝试使用BeautifulSoup和请求模块时出错

我的代码如下:

import requests
from bs4 import BeautifulSoup

def get_html(url):
    url = ('https://m.vk.com/bageto?act=members&offset=0')
    r = requests.get(url)
    return r.text

def get_total_pages(get_html):
    soup = BeautifulSoup(get_html, 'lxml')
    pages = soup.find('div', class_='pagination').find_all('a', class_='pg_link')[-1].get('href')
    total_pages = pages.split('=')[2]
    return int(total_pages)

def main():
    base_url = 'https://m.vk.com/bageto?act=members&offset='`enter code here`
    total_pages = get_total_pages(get_html)
    for i in range(50, total_pages, 50):
        print (i)
这将产生以下错误:

C:\Users\PANDEMIC\Desktop\Python-Test>vkp.py Traceback (most recent call last):
File "C:\Users\PANDEMIC\Desktop\Python-Test\vkp.py",
line 23, in <module>
    main()
File "C:\Users\PANDEMIC\Desktop\Python-Test\vkp.py", line 20, in main
    total_pages = get_total_pages(get_html)
File "C:\Users\PANDEMIC\Desktop\Python-Test\vkp.py", line 13, in get_total_pages
    soup = BeautifulSoup(get_html, 'lxml')
File "C:\Users\PANDEMIC\AppData\Local\Programs\Python\Python36-32\lib\site-packages\bs4\__init__.py", line 192, in __init__
    elif len(markup) <= 256 and (
TypeError: object of type 'function' has no len()
C:\Users\popularity\Desktop\Python Test>vkp.py回溯(最后一次调用):
文件“C:\Users\popularity\Desktop\Python Test\vkp.py”,
第23行,在
main()
文件“C:\Users\popularity\Desktop\Python Test\vkp.py”,主目录第20行
总页数=获取总页数(获取html)
文件“C:\Users\popularity\Desktop\Python Test\vkp.py”,第13行,在get\u total\u页面中
soup=BeautifulSoup(获取html“lxml”)
文件“C:\Users\popularity\AppData\Local\Programs\Python\Python36-32\lib\site packages\bs4\\uuuuuu init\uuuuu.py”,第192行,在\uuu init中__
elif len(markup)在中执行
get_html()
时忘记了
()
和参数

total_pages = get_total_pages( get_html(base_url) )

顺便说一句:您不需要
get\u html
中的
url
,因为它将在下次调用中写入您的参数

def get_html(url):
    #url = ('https://m.vk.com/bageto?act=members&offset=0')
    r = requests.get(url)
    return r.text
也可以使用默认值

def get_html(url='https://m.vk.com/bageto?act=members&offset=0')
    r = requests.get(url)
    return r.text
完整版本,以
base\u url+“0”作为
get\u html(base\u url+“0”)中的参数

您应该将html字符串传递给BeautifulSoup,而不是函数。

get_html()
中执行
get_html()
get_total_pages(get_html())
def main():try:base_url=''total_pages=int(get_total_pages(get_html('))时,对于范围内的i(50,total_pages,50):url_gen=base_url+str(i)print)(url_gen)键盘中断除外:print(‘您自己停止编写脚本’)main()
def main():
    try:
        urll = []
        base_url = 'https://m.vk.com/bageto?act=members&offset='
        total_pages = int(get_total_pages(get_html(url)))
        for i in range(0, total_pages, 50):
            url_gen = str(base_url + str(i))
            urll.append(url_gen)
            #get_page_data(url_gen)
        pool = ThreadPool(8)
        results = pool.map(get_page_data, urll)

    except KeyboardInterrupt:
        print('you are stopped script yourself')

if __name__ == '__main__':

    main()
import requests
from bs4 import BeautifulSoup


def get_html(url):
    url = ('https://m.vk.com/bageto?act=members&offset=0')
    r = requests.get(url)
    return r.text

def get_total_pages(html):

    soup            = BeautifulSoup(html, 'lxml')
    pages           = soup.find('div', class_='pagination').find_all('a', class_='pg_link')[-1].get('href')
    total_pages     = pages.split('=')[2]
    return int(total_pages)

def main():
    base_url = 'https://m.vk.com/bageto?act=members&offset=0'
    html = get_html(base_url)
    total_pages = get_total_pages(html)
    print(total_pages)
def main():
    try:
        urll = []
        base_url = 'https://m.vk.com/bageto?act=members&offset='
        total_pages = int(get_total_pages(get_html(url)))
        for i in range(0, total_pages, 50):
            url_gen = str(base_url + str(i))
            urll.append(url_gen)
            #get_page_data(url_gen)
        pool = ThreadPool(8)
        results = pool.map(get_page_data, urll)

    except KeyboardInterrupt:
        print('you are stopped script yourself')

if __name__ == '__main__':

    main()