如何在使用python抓取wikitable时处理rowspan?

如何在使用 python 抓取 wikitable 时处理 rowspan?(标签:Python, Web, Screen Scraping)我正试图从维基百科页面的表格中提取数据,但由于表格中存在 rowspan,我无法获取完整的数据。我迄今为止所写的代码见下文。

我正试图从这个维基百科页面的表格中提取存储的数据。但是,由于表格中存在 rowspan,我无法获取完整的数据。以下是我迄今为止所写的代码:

# Scrape the first wikitable on the page: minister name, photo URL,
# start date, end date, and prime minister for each data row.
from bs4 import BeautifulSoup
from urllib.request import urlopen

wiki = urlopen("https://en.wikipedia.org/wiki/Minister_of_Agriculture_(India)")

soup = BeautifulSoup(wiki, "html.parser")

table = soup.find("table", { "class" : "wikitable" })
for row in table.findAll("tr"):
    cells = row.findAll("td")

    # Skip header rows (no <td>) and rows left short because a rowspan
    # cell from a previous row covers some of their columns -- indexing
    # cells[6] on such rows raised IndexError in the original.
    if len(cells) >= 7:
        name = cells[0].find(text=True)
        # BUG FIX: find("img") returns a bs4 Tag (or None), not a string;
        # concatenating the Tag below raised TypeError. Use its src URL.
        img = cells[1].find("img")
        pic = img["src"] if img else ""
        strt = cells[2].find(text=True)
        end = cells[3].find(text=True)
        pri = cells[6].find(text=True)

        z = name + '\n' + pic + '\n' + strt + '\n' + end + '\n' + pri
        # BUG FIX: the script imports urllib.request (Python 3), so the
        # Python 2 statement `print z` was a SyntaxError. Use print().
        print(z)

下面是我对这个问题的解决办法:把带有 rowspan、colspan 的复杂表格转换成简单表格。我在这个问题上花了很多天,没有找到简单好用的现成方案。在许多 stackoverflow 的解决方案中,开发人员都只抓取文本;但就我而言,我还需要 URL 链接,所以写了这段代码。这对我有用。

# This code is written for BeautifulSoup on Python 3.5+.
# Fetch one wikitable (keeping its links) from Wikipedia and expand its
# rowspan/colspan cells so the result can be scraped like a simple table.
from bs4 import BeautifulSoup
import requests
import codecs
import os

url = "https://en.wikipedia.org/wiki/Ministry_of_Agriculture_%26_Farmers_Welfare"

fullTable = '<table class="wikitable">'

rPage = requests.get(url)
soup = BeautifulSoup(rPage.content, "lxml")

table = soup.find("table", {"class": "wikitable"})

rows = table.findAll("tr")
row_lengths = [len(r.findAll(['th', 'td'])) for r in rows]
ncols = max(row_lengths)
nrows = len(rows)

# Convert each <tr> Tag into its list of header/data cells.
for i in range(len(rows)):
    rows[i] = rows[i].findAll(['th', 'td'])


# Expand colspan in the header row by duplicating the spanning cell.
# BUG FIX: the original looped `for i in range(len(rows[0]))`, which
# freezes the length before any insertion -- with more than one colspan
# cell, header cells shifted past the original length were never checked.
# Walk with an explicit index and skip over the copies just inserted.
i = 0
while i < len(rows[0]):
    col = rows[0][i]
    if col.get('colspan'):
        cSpanLen = int(col.get('colspan'))
        del col['colspan']
        for k in range(1, cSpanLen):
            rows[0].insert(i, col)
        i += cSpanLen  # jump past all copies of this cell
    else:
        i += 1


# Expand rowspan through the whole table: copy the spanning cell into the
# same column position of each following row it covers.
for i in range(len(rows)):
    row = rows[i]
    for j in range(len(row)):
        col = row[j]
        del col['style']  # bs4 tolerates deleting an absent attribute
        if col.get('rowspan'):
            rSpanLen = int(col.get('rowspan'))
            del col['rowspan']  # prevent re-expansion when the copy is revisited
            for k in range(1, rSpanLen):
                rows[i + k].insert(j, col)


# Rebuild the table HTML from the expanded cell lists.
for i in range(len(rows)):
    row = rows[i]
    fullTable += '<tr>'
    for j in range(len(row)):
        col = row[j]
        rowStr = str(col)
        fullTable += rowStr
    fullTable += '</tr>'

fullTable += '</table>'

# Make wiki links absolute and strip layout noise.
fullTable = fullTable.replace('/wiki/', 'https://en.wikipedia.org/wiki/')
fullTable = fullTable.replace('\n', '')
fullTable = fullTable.replace('<br/>', '')

# Save the flattened table, named after the last path segment of the URL.
# BUG FIX: the file handle was never closed (resource leak); use a context
# manager. Also fixed the "outuput" typo in the output file name.
page = os.path.split(url)[1]
fname = 'output_{}.html'.format(page)
with codecs.open(fname, 'w', 'utf-8') as singleTable:
    singleTable.write(fullTable)



# Now the rowspan/colspan table has been flattened to a simple table and
# can be scraped normally -- here we just collect every link in it.
soupTable = BeautifulSoup(fullTable, "lxml")
urlLinks = soupTable.findAll('a')
print(urlLinks)

# and so on .............
# 这段代码是用 beautifulsoup python3.5 编写的
# 从 wikipedia 获取一个 html 格式的 wikitable,其中包含链接
from bs4 import BeautifulSoup
import requests
import codecs
import os

url = "https://en.wikipedia.org/wiki/Ministry_of_Agriculture_%26_Farmers_Welfare"

fullTable = '<table class="wikitable">'

rPage = requests.get(url)
soup = BeautifulSoup(rPage.content, "lxml")

table = soup.find("table", {"class": "wikitable"})

rows = table.findAll("tr")
row_lengths = [len(r.findAll(['th', 'td'])) for r in rows]
ncols = max(row_lengths)
nrows = len(rows)

# 行和列转换为列表的列表
for i in range(len(rows)):
    rows[i] = rows[i].findAll(['th', 'td'])

# 表头 - 在表头中检查 colspan
for i in range(len(rows[0])):
    col = rows[0][i]
    if (col.get('colspan')):
        cSpanLen = int(col.get('colspan'))
        del col['colspan']
        for k in range(1, cSpanLen):
            rows[0].insert(i, col)

# 在整个表中检查 rowspan
for i in range(len(rows)):
    row = rows[i]
    for j in range(len(row)):
        col = row[j]
        del col['style']
        if (col.get('rowspan')):
            rSpanLen = int(col.get('rowspan'))
            del col['rowspan']
            for k in range(1, rSpanLen):
                rows[i + k].insert(j, col)

# 再次创建表
for i in range(len(rows)):
    row = rows[i]
    fullTable += '<tr>'
    for j in range(len(row)):
        col = row[j]
        rowStr = str(col)
        fullTable += rowStr
    fullTable += '</tr>'

fullTable += '</table>'

# 表链接已更改
fullTable = fullTable.replace('/wiki/', 'https://en.wikipedia.org/wiki/')
fullTable = fullTable.replace('\n', '')
fullTable = fullTable.replace('<br/>', '')

# 将文件另存为 url 的名称
page = os.path.split(url)[1]
fname = 'outuput_{}.html'.format(page)
singleTable = codecs.open(fname, 'w', 'utf-8')
singleTable.write(fullTable)

# 在这里,我们可以开始抓取这个表,其中 rowspan 和 colspan 表已更改为简单表
soupTable = BeautifulSoup(fullTable, "lxml")
urlLinks = soupTable.findAll('a')
print(urlLinks)

# 等等.............