Python BeautifulSoup IndexError: list index out of range


Error:

File "lightup_scraper.py", line 66, in parse
    'price': prices[item]
IndexError: list index out of range

Code:

import requests
from bs4 import BeautifulSoup
import csv
import time

class LightupScraper:

    results = []

    def fetch(self, url):
        print(f'HTTP GET request to URL: {url}', end='')
        res = requests.get(url)
        print(f' | Status Code: {res.status_code}')

        return res

    def save_response(self, res):
        # Cache the raw HTML locally so a page can be re-parsed without
        # another request.
        with open('res.html', 'w') as html_file:
            html_file.write(res.text)

    def load_response(self):
        html = ''

        with open('res.html', 'r') as html_file:
            for line in html_file:
                html += line

        return html

    def parse(self, html):

        content = BeautifulSoup(html, 'lxml')
        titles = [title.text.strip() for title in content.find_all('h4', {'class': 'card-title ols-card-title'})]
        links = [link.find('a')['href'] for link in content.find_all('h4', {'class': 'card-title ols-card-title'})]
        skus = [sku.text for sku in content.find_all('span', {'class': 'productView-info-value ols-card-text--sku'})]
        mpn = [mpn.text.split(':')[-1].strip() for mpn in content.find_all('span', {'class': 'productView-info-name mpn-label ols-card-text--mpn'})]
        details = [ul.find_all('li') for ul in content.find_all('ul', {'class': 'ols-card-text__list'})]
        brand = [''.join([brand.text for brand in detail if 'Brand:' in brand.text]).split(':')[-1].strip() for detail in details]
        base = [''.join([base.text for base in detail if 'Base Type:' in base.text]).split(':')[-1].strip() for detail in details]
        life_hours = [''.join([life_hour.text for life_hour in detail if 'Life Hours:' in life_hour.text]).split(':')[-1].strip() for detail in details]
        lumens = [''.join([lumen.text for lumen in detail if 'Lumens:' in lumen.text]).split(':')[-1].strip() for detail in details]
        warrantys = [''.join([warranty.text for warranty in detail if 'Warranty:' in warranty.text]).split(':')[-1].strip() for detail in details]
        wattages = [''.join([wattage.text for wattage in detail if 'Wattage:' in wattage.text]).split(':')[-1].strip() for detail in details]
        features = [feature.text.split() for feature in content.find_all('span', {'class': 'ols-card-text__list--features'})]
        prices = [price.text for price in content.find_all('span', {'class': 'price price--withoutTax'})]



        # Note: this loop only keeps the last product's feature list in `feat`.
        for feature in features:
            feat = feature

        for item in range(0, len(titles)):
             self.results.append({
              'titles': titles[item],
              'skus': skus[item],
              'mpn': mpn[item],
              'brand': brand[item],
              'base': base[item],
              'life_hours': life_hours[item],
              'lumens': lumens[item],
              'warrantys': warrantys[item],
              'wattages': wattages[item],
              'feature': feat[item],
              'links': links[item],
              'price': prices[item]  # IndexError here: prices has 14 entries, the other lists 16
          })

    def to_csv(self):
        with open('lightup.csv', 'w', newline='') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=self.results[0].keys())
            writer.writeheader()

            for row in self.results:
                writer.writerow(row)

            print('Exported results to lightup.csv')

    def run(self):
        page_num = 3

        for page in range(1, page_num + 1):
            base_url = 'https://www.lightup.com/standard-household-lighting.html?p='
            base_url += str(page)
            res = self.fetch(base_url)
            self.parse(res.text)

        self.to_csv()



if __name__ == '__main__':
    scraper = LightupScraper()
    scraper.run()

I'm trying to scrape the prices, but I get a list index out of range error because the tag responsible for the price returns 14 elements while the other tags return 16. That's because some of the price tags are different: for example, the per-case price uses the class price price--withoutTax price--case, while a single product's price uses price price--withoutTax. I've tried try/except blocks and a few other things, but with no luck; it just gave me another full list instead of the individual prices. I can't work this out. Could someone give me some advice on how to actually do this?
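
For reference, the mismatch can be confirmed with a small helper that counts the two selections on the same parsed page. This is only a minimal sketch; check_lengths is an illustrative name, and it expects the BeautifulSoup object that parse() already creates.

def check_lengths(content):
    # Count the scraped title and price elements to see where the lists fall out of sync.
    titles = content.find_all('h4', {'class': 'card-title ols-card-title'})
    prices = content.find_all('span', {'class': 'price price--withoutTax'})
    print(f'{len(titles)} titles vs {len(prices)} prices')
    # On the affected pages this reports 16 titles but only 14 prices,
    # which is exactly why prices[item] raises IndexError once item reaches 14.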

You could probably do something like the snippet below. It's not pretty, but it works.

But the problem is: with your approach of putting everything into separate lists, how do you know which product has no price listed?

Maybe a loop that appends to the CSV as you go, or constructing some kind of dict for each product, would be a better approach (a per-card variant of the same idea is sketched after this snippet):

for item in range(0, len(titles)):
        default = {
            'titles': "x",
            'skus': "x",
            'mpn': "x",
            'brand': "x",
            'base': "x",
            'life_hours': "x",
            'lumens': "x",
            'warrantys': "x",
            'wattages': "x",
            'feature': "x",
            'links': "x",
            'price': "x"
        }
        default['titles'] = titles[item]
        default['skus'] = skus[item]
        default['mpn'] = mpn[item]
        default['brand'] = brand[item]
        default['base'] = base[item]
        default['life_hours'] = life_hours[item]
        default['lumens'] = lumens[item]
        default['warrantys'] = warrantys[item]
        default['wattages'] = wattages[item]
        default['feature'] = features[item]  # use the per-product feature list, not the leftover loop variable
        default['links'] = links[item]
        try:
            default['price'] = prices[item]
        except IndexError:
            # Products without a scraped price keep the 'x' placeholder.
            pass
        self.results.append(default)
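
As a follow-up to the per-product idea: instead of building parallel lists at all, each lookup can be scoped to a single product card, so a missing price never shifts the other fields. The sketch below is only an illustration of that approach; li.product is a placeholder selector, and matching the price on the bare price class is an assumption that would need to be checked against the page's actual markup.

from bs4 import BeautifulSoup

def parse_cards(html):
    # Parse one product per card so every field stays aligned with its product.
    soup = BeautifulSoup(html, 'lxml')
    rows = []
    # 'li.product' is a guess -- inspect the page for the real container
    # that wraps each product card.
    for card in soup.select('li.product'):
        title_tag = card.select_one('h4.card-title.ols-card-title')
        # Selecting on 'span.price' alone would catch both
        # 'price price--withoutTax' and 'price price--withoutTax price--case'.
        price_tag = card.select_one('span.price')
        rows.append({
            'title': title_tag.text.strip() if title_tag else 'x',
            'link': title_tag.a['href'] if title_tag and title_tag.a else 'x',
            'price': price_tag.text.strip() if price_tag else 'x',
        })
    return rows

Because every field is looked up inside the same card, a product with no price simply keeps the 'x' placeholder instead of pushing every later row out of alignment.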