Extract table contents from HTML with Python


I am new to Python. I want to scrape the ISO codes for the list of US states from the Wikipedia page.

Desired output:

mapState = {'Alabama': 'US-AL', 'Alaska': 'US-AK', ....., 'Wyoming': 'US-WY'}
Here is the code I tried:

import requests
from bs4 import BeautifulSoup
def crawl_wiki():
    url = 'https://en.wikipedia.org/wiki/ISO_3166-2:US'
    source_code = requests.get(url)
    plain_text = source_code.text
    print(plain_text)

crawl_wiki()
I can get the page text, but I don't know how to extract the states and their codes into a dict. Please help me find a solution.

Try the following:

import bs4
import requests

response = requests.get('https://en.wikipedia.org/wiki/ISO_3166-2:US')
html = response.content.decode('utf-8')

soup = bs4.BeautifulSoup(html, "lxml")

# note: these positional (nth-child) selectors are tied to the current page
# layout and will break if Wikipedia changes its markup
code_list = soup.select("#mw-content-text > div > table:nth-child(11) > tbody > tr > td:nth-child(1) > span")
name_list = soup.select("#mw-content-text > div > table:nth-child(11) > tbody > tr > td:nth-child(2) > a")


mapState = {}
## desired: mapState = {'Alabama': 'US-AL', 'Alaska': 'US-AK', ....., 'Wyoming': 'US-WY'}

# the desired output maps state name -> ISO code, so the name is the key
for code, name in zip(code_list, name_list):
    mapState[name.string] = code.string

print(mapState)
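The nth-child selectors above depend on the exact page structure. As a more robust sketch, you can locate the table by class and walk its rows; the assumption that the first "wikitable" on the page is the subdivision codes table is mine, not something verified above:

import requests
from bs4 import BeautifulSoup

response = requests.get('https://en.wikipedia.org/wiki/ISO_3166-2:US')
soup = BeautifulSoup(response.text, 'lxml')

# assumption: the first table with class "wikitable" is the subdivision codes table
table = soup.find('table', class_='wikitable')

mapState = {}
for row in table.find_all('tr')[1:]:  # skip the header row
    cells = row.find_all('td')
    if len(cells) >= 2:
        code = cells[0].get_text(strip=True)  # e.g. 'US-AL'
        name = cells[1].get_text(strip=True)  # e.g. 'Alabama'
        mapState[name] = code

print(mapState)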
Try pandas' read_html -

then convert the table data to a dict.

Example -

import pandas as pd

# read_html parses every <table> on the page into a list of DataFrames;
# the first one is the subdivision codes table, with the ISO code in the
# first column and the state name in the second
df = pd.read_html("https://en.wikipedia.org/wiki/ISO_3166-2:US")[0]
mapState = dict(zip(df.iloc[:, 1], df.iloc[:, 0]))
print(mapState)
Output:

{'Alabama': 'US-AL', 'Alaska': 'US-AK', 'Arizona': 'US-AZ', 'Arkansas': 'US-AR', 'California': 'US-CA', 'Colorado': 'US-CO', 'Connecticut': 'US-CT', 'Delaware': 'US-DE', 'Florida': 'US-FL', 'Georgia': 'US-GA', 'Hawaii': 'US-HI', 'Idaho': 'US-ID', 'Illinois': 'US-IL', 'Indiana': 'US-IN', 'Iowa': 'US-IA', 'Kansas': 'US-KS', 'Kentucky': 'US-KY', 'Louisiana': 'US-LA', 'Maine': 'US-ME', 'Maryland': 'US-MD', 'Massachusetts': 'US-MA', 'Michigan': 'US-MI', 'Minnesota': 'US-MN', 'Mississippi': 'US-MS', 'Missouri': 'US-MO', 'Montana': 'US-MT', 'Nebraska': 'US-NE', 'Nevada': 'US-NV', 'New Hampshire': 'US-NH', 'New Jersey': 'US-NJ', 'New Mexico': 'US-NM', 'New York': 'US-NY', 'North Carolina': 'US-NC', 'North Dakota': 'US-ND', 'Ohio': 'US-OH', 'Oklahoma': 'US-OK', 'Oregon': 'US-OR', 'Pennsylvania': 'US-PA', 'Rhode Island': 'US-RI', 'South Carolina': 'US-SC', 'South Dakota': 'US-SD', 'Tennessee': 'US-TN', 'Texas': 'US-TX', 'Utah': 'US-UT', 'Vermont': 'US-VT', 'Virginia': 'US-VA', 'Washington': 'US-WA', 'West Virginia': 'US-WV', 'Wisconsin': 'US-WI', 'Wyoming': 'US-WY', 'District of Columbia': 'US-DC', 'American Samoa': 'US-AS', 'Guam': 'US-GU', 'Northern Mariana Islands': 'US-MP', 'Puerto Rico': 'US-PR', 'United States Minor Outlying Islands': 'US-UM', 'Virgin Islands, U.S.': 'US-VI'}
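If you prefer named columns to positional indexing, a variant along these lines should work; the column labels 'Code' and 'Subdivision name (en)' are my assumption about what read_html produces here, so inspect df.columns first:

# assumption: the parsed table carries columns labelled 'Code' and
# 'Subdivision name (en)'; adjust to whatever df.columns actually shows
mapState = df.set_index('Subdivision name (en)')['Code'].to_dict()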

Here is a solution using SimplifiedDoc, which works similarly to BeautifulSoup:

import requests
from simplified_scrapy.simplified_doc import SimplifiedDoc

url = 'https://en.wikipedia.org/wiki/ISO_3166-2:US'
response = requests.get(url)

# restrict parsing to the subdivision table: from the 'Subdivision category'
# header up to the closing </table> tag
doc = SimplifiedDoc(response.text, start='Subdivision category', end='</table>')
datas = [tr.tds for tr in doc.trs]

mapState = {}
for tds in datas:
    mapState[tds[1].a.text] = tds[0].text

print(mapState)
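SimplifiedDoc ships with the simplified_scrapy package; if it is missing, it should be installable from PyPI with pip install simplified-scrapy.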