Pagination links are repetitive in my BeautifulSoup Python code

python, web-scraping, beautifulsoup

I'm trying to scrape data from the TripAdvisor website using BeautifulSoup. Link: Instead of going to the next page, the link just repeats itself. Is there a solution to my problem?
I've picked the correct selectors for the soup and I'm able to scrape the data.

from bs4 import BeautifulSoup
import requests
import csv

class Parse():
    
    def __init__(self):
        self.row_list = []
        self.base_url = 'https://www.tripadvisor.co.uk'

    def parse(self, url):  # correct
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36 Edg/90.0.818.51'}
        # headers must be passed as a keyword argument, otherwise requests treats the dict as query params
        response = requests.get(url, headers=headers).text
        soup = BeautifulSoup(response, 'html.parser')
        # "next page" arrow; the href scraped here is what keeps resolving to the first listing page
        next_link = soup.find('a', class_='_23XJjgWS _1hF7hP_9 _2QvUxWyA')
        next_page = self.base_url + next_link.attrs['href']
        
        cards = soup.find_all('section', class_='_2TabEHya _3YhIe-Un')
        for card in cards:
            name = card.find('div', class_='_1gpq3zsA _1zP41Z7X').text
            rating = str(card.find('svg', class_='zWXXYhVR'))
            rating = self.remove(filter_col=rating)
            review_count = card.find('span', class_='DrjyGw-P _26S7gyB4 _14_buatE _1dimhEoy').text
            status = card.find('div', class_='DrjyGw-P _26S7gyB4 _3SccQt-T').text
            row_list = [name, rating, status, review_count]  # note: overwritten each iteration, only the last card is kept
        return next_page, row_list

    def remove(self, filter_col):
        # take the second whitespace-separated token of the svg markup and keep
        # its last three characters, which is where the numeric rating ends up (e.g. '4.5')
        rating = filter_col.split(' ')[1]
        rating = rating[-3:]
        return rating
    
    def write_csv(self, row_list):
        with open('top_sites.csv', 'w', newline='') as file:  # newline='' avoids blank rows on Windows
            csv_writer = csv.writer(file, delimiter=',')
            csv_writer.writerows(row_list)
    
if __name__=='__main__':
    url = "https://www.tripadvisor.co.uk/Attractions-g294190-Activities-oa30-Myanmar.html"
    parsing = Parse()                  
    next_url,row_list = parsing.parse(url=url)
    print(next_url)
 
PS C:\Users\Caspe\PycharmProjects\Selenium Test> & "c:/Users/Caspe/PycharmProjects/Selenium Test/.venv/Scripts/python.exe" "c:/Users/Caspe/PycharmProjects/Selenium Test/Demo/tripadvisor_topattract.py" 
https://www.tripadvisor.co.uk/Attractions-g294190-Activities-Myanmar.html
PS C:\Users\Caspe\PycharmProjects\Selenium Test>
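For context, a small driver loop (hypothetical, not part of the original post) around the Parse class above makes the symptom visible: following the returned next_page URL just leads back to the first listing page, so the crawl cycles instead of advancing.

# Hypothetical demonstration of the problem: follow the scraped "next" link
# until a URL repeats. The printed URLs bounce back to the first listing page
# instead of advancing to the -oa60- page.
parser = Parse()
url = "https://www.tripadvisor.co.uk/Attractions-g294190-Activities-oa30-Myanmar.html"
seen = set()
while url not in seen:
    seen.add(url)
    url, _rows = parser.parse(url=url)
    print(url)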
To get the pagination working, you have to change the -oa- part of the URL:

import csv
import requests
from bs4 import BeautifulSoup

url = "https://www.tripadvisor.co.uk/Attractions-g294190-Activities-oa{}-Myanmar.html"

data = []
for page in range(0, 4):  # <--- increase page count here
    print("Getting page {}..".format(page))

    # each listing page holds 30 attractions, so the -oa- offset is page * 30
    soup = BeautifulSoup(
        requests.get(url.format(page * 30)).content, "html.parser"
    )
    titles = soup.select('span[name="title"]')
    for title in titles:
        no, t = title.get_text(strip=True, separator="|").split("|")
        rating = title.find_next("svg")
        review_count = rating.find_next("span")
        data.append(
            (
                no,
                t,
                rating["title"],
                review_count.text,
                review_count.find_next(
                    "div", class_="DrjyGw-P _26S7gyB4 _3SccQt-T"
                ).text,
            )
        )

with open("data.csv", "w") as f_out:
    w = csv.writer(f_out)
    w.writerows(data)
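
As a follow-up, here is a variant of the same idea (a sketch, not part of the accepted answer): instead of hard-coding range(0, 4), walk the -oa- offset in steps of 30 and stop as soon as a page yields no attraction titles. The hard cap is there because I have not verified how TripAdvisor handles out-of-range offsets.

# Sketch only: detect the last page by the absence of title spans instead of
# hard-coding the page count. The range upper bound is just a safety cap.
import requests
from bs4 import BeautifulSoup

url = "https://www.tripadvisor.co.uk/Attractions-g294190-Activities-oa{}-Myanmar.html"
pages = []

for offset in range(0, 300, 30):                 # at most 10 listing pages
    soup = BeautifulSoup(requests.get(url.format(offset)).content, "html.parser")
    titles = soup.select('span[name="title"]')
    if not titles:                               # no attractions -> past the last page
        break
    pages.append((offset, len(titles)))

print(pages)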