I am learning Python requests and BeautifulSoup. For practice, I've chosen to write a quick NYC parking ticket parser. I can get an HTML response, which is quite ugly. I need to grab the lineItemsTable and parse all the tickets.

You can reproduce the page by going here: https://paydirect.link2gov.com/NYCParking-Plate/ItemSearch and entering a NY plate, T630134C.
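(For context, plateRequest in the code below is just the requests response for that page; a rough sketch under that assumption, keeping in mind that the actual plate search is a form submission whose field names would need to be inspected first:)

import requests

# Rough sketch only: fetch the search page. The plate/state lookup itself is a
# form POST whose field names are not shown here and would need to be checked.
url = "https://paydirect.link2gov.com/NYCParking-Plate/ItemSearch"
plateRequest = requests.get(url)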
soup = BeautifulSoup(plateRequest.text, "html.parser")
#print(soup.prettify())
#print(soup.find_all('tr'))

table = soup.find("table", {"class": "lineItemsTable"})
for row in table.findAll("tr"):
    cells = row.findAll("td")
    print(cells)
Could someone please help me out? Simply searching for all the tr elements gets me nowhere.
python
beautifulsoup
Cmag
Answers:
Here you go:
data = []
table = soup.find('table', attrs={'class':'lineItemsTable'})
table_body = table.find('tbody')

rows = table_body.find_all('tr')
for row in rows:
    cols = row.find_all('td')
    cols = [ele.text.strip() for ele in cols]
    data.append([ele for ele in cols if ele])  # Get rid of empty values
This gives you:
[ [u'1359711259', u'SRF', u'08/05/2013', u'5310 4 AVE', u'K', u'19', u'125.00', u'$'],
  [u'7086775850', u'PAS', u'12/14/2013', u'3908 6th Ave', u'K', u'40', u'125.00', u'$'],
  [u'7355010165', u'OMT', u'12/14/2013', u'3908 6th Ave', u'K', u'40', u'145.00', u'$'],
  [u'4002488755', u'OMT', u'02/12/2014', u'NB 1ST AVE @ E 23RD ST', u'5', u'115.00', u'$'],
  [u'7913806837', u'OMT', u'03/03/2014', u'5015 4th Ave', u'K', u'46', u'115.00', u'$'],
  [u'5080015366', u'OMT', u'03/10/2014', u'EB 65TH ST @ 16TH AV E', u'7', u'50.00', u'$'],
  [u'7208770670', u'OMT', u'04/08/2014', u'333 15th St', u'K', u'70', u'65.00', u'$'],
  [u'$0.00\n\n\nPayment Amount:'] ]
A couple of things to note: the last entry in the output above (the Payment Amount) is not one of the ticket rows; it comes from how the table's summary row is laid out.
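If it helps, here is a hedged sketch of turning those parsed rows into dicts; the field names are my own labels inferred from the output above, not names used by the site:

tickets = []
for row in data:
    if len(row) < 7:              # skip the trailing "Payment Amount" summary row
        continue
    tickets.append({
        "summons": row[0],
        "plate_type": row[1],
        "date": row[2],
        "location": row[3],
        "amount": row[-2],        # the trailing '$' sits in its own cell
    })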
I was getting

    rows = table_body.find_all('tr')
    AttributeError: 'NoneType' object has no attribute 'find_all'

I tried find_all as well as findAll with

    table = soup.find('table', attrs={'class':'analysis'})

but the page's HTML did not show a tbody there, so simply finding the td and tr elements did the job. So in my opinion, the cause of the error AttributeError: 'NoneType' object has no attribute 'find_all' is passing a tag or attribute that does not exist in that page's HTML.

Solved, this is how you parse their HTML results:
table = soup.find("table", {"class": "lineItemsTable"})
for row in table.findAll("tr"):
    cells = row.findAll("td")
    if len(cells) == 9:
        summons = cells[1].find(text=True)
        plateType = cells[2].find(text=True)
        vDate = cells[3].find(text=True)
        location = cells[4].find(text=True)
        borough = cells[5].find(text=True)
        vCode = cells[6].find(text=True)
        amount = cells[7].find(text=True)
        print(amount)
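Tying this back to the AttributeError note above: a minimal guard, so the failure is explicit when the table (or its class name) is not actually present in the HTML, rather than a cryptic 'NoneType' error further down:

table = soup.find("table", {"class": "lineItemsTable"})
if table is None:
    # soup.find() returns None when nothing matches, so fail with a clear message
    raise ValueError("lineItemsTable not found in the response HTML")
rows = table.findAll("tr")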
Update 2020:

If a programmer is only interested in parsing a table from a web page, they can use the pandas method pandas.read_html.

Say we want to extract the GDP data table from this website: https://worldpopulationreview.com/countries/countries-by-gdp/#worldCountries

Then the following code does the job perfectly (no need for BeautifulSoup or fancy HTML):
import pandas as pd
import requests

url = "https://worldpopulationreview.com/countries/countries-by-gdp/#worldCountries"
r = requests.get(url)
df_list = pd.read_html(r.text)  # this parses all the tables in webpages to a list
df = df_list[0]
df.head()
Output: (a preview of the resulting GDP DataFrame)
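The same approach could be pointed at the original ticket table. A hedged sketch, assuming plateRequest is the requests response from the question (not verified against that page):

import pandas as pd

# attrs restricts read_html to tables carrying class="lineItemsTable";
# read_html raises ValueError if no matching table is found.
df_list = pd.read_html(plateRequest.text, attrs={"class": "lineItemsTable"})
tickets_df = df_list[0]
print(tickets_df.head())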
You may prefer pd.read_html instead of the whole requests-and-BeautifulSoup dance, but here is a working example for a generic <table>. (The question's link is broken.) Extracting the table from here: countries by GDP (Gross Domestic Product).
htmltable = soup.find('table', {'class': 'table table-striped'})
# where the dictionary specifies unique attributes for the 'table' tag
The tableDataText function parses an HTML segment that starts with a <table> tag followed by multiple <tr> (table row) and inner <td> (table data) tags. It returns a list of rows with their inner columns. It accepts only one <th> (table header) row, as the first row.

def tableDataText(table):
    rows = []
    trs = table.find_all('tr')
    headerow = [td.get_text(strip=True) for td in trs[0].find_all('th')]  # header row
    if headerow:  # if there is a header row, include it first
        rows.append(headerow)
        trs = trs[1:]
    for tr in trs:  # for every table row
        rows.append([td.get_text(strip=True) for td in tr.find_all('td')])  # data row
    return rows
Using it we get (the first two rows):

list_table = tableDataText(htmltable)
list_table[:2]

[['Rank', 'Name', "GDP (IMF '19)", "GDP (UN '16)", 'GDP Per Capita', '2019 Population'],
 ['1', 'United States', '21.41 trillion', '18.62 trillion', '$65,064', '329,064,917']]
That can easily be transformed into a pandas.DataFrame for more advanced tooling.

import pandas as pd

dftable = pd.DataFrame(list_table[1:], columns=list_table[0])
dftable.head(4)
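As a rough sketch, the same helper should also work on the ticket table from the question, assuming plateRequest is the response object from up-thread:

from bs4 import BeautifulSoup

# Hedged: build a soup from the question's response and reuse tableDataText.
tickets_soup = BeautifulSoup(plateRequest.text, "html.parser")
tickets_table = tickets_soup.find("table", {"class": "lineItemsTable"})
if tickets_table is not None:
    print(tableDataText(tickets_table)[:2])  # first couple of parsed rows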
# Selenium-based helpers for looking up a cell in a rendered HTML table by
# matching a column header and a first-column key via XPath.
from behave import *
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tabulate import tabulate


class readTableDataFromDB:
    def LookupValueFromColumnSingleKey(context, tablexpath, rowName, columnName):
        print("element present readData From Table")
        element = context.driver.find_elements_by_xpath(tablexpath + "/descendant::th")
        indexrow = 1
        indexcolumn = 1
        # find the index of the header cell that matches columnName
        for values in element:
            valuepresent = values.text
            print("text present here::" + valuepresent + "rowName::" + rowName)
            if valuepresent.find(columnName) != -1:
                print("current row" + str(indexrow) + "value" + valuepresent)
                break
            else:
                indexrow = indexrow + 1

        # find the index of the row whose first cell matches rowName
        indexvalue = context.driver.find_elements_by_xpath(
            tablexpath + "/descendant::tr/td[1]")
        for valuescolumn in indexvalue:
            valuepresentcolumn = valuescolumn.text
            print("Team text present here::" + valuepresentcolumn + "columnName::" + rowName)
            print(indexcolumn)
            if valuepresentcolumn.find(rowName) != -1:
                print("current column" + str(indexcolumn) + "value" + valuepresentcolumn)
                break
            else:
                indexcolumn = indexcolumn + 1

        print("index column" + str(indexcolumn))
        print(tablexpath + "//descendant::tr[" + str(indexcolumn) + "]/td[" + str(indexrow) + "]")
        #lookupelement = context.driver.find_element_by_xpath(tablexpath + "//descendant::tr[" + str(indexcolumn) + "]/td[" + str(indexrow) + "]")
        #print(lookupelement.text)
        return context.driver.find_elements_by_xpath(
            tablexpath + "//descendant::tr[" + str(indexcolumn) + "]/td[" + str(indexrow) + "]")

    def LookupValueFromColumnTwoKeyssss(context, tablexpath, rowName, columnName, columnName1):
        print("element present readData From Table")
        element = context.driver.find_elements_by_xpath(
            tablexpath + "/descendant::th")
        indexrow = 1
        indexcolumn = 1
        indexcolumn1 = 1
        # first header match: columnName
        for values in element:
            valuepresent = values.text
            print("text present here::" + valuepresent)
            indexrow = indexrow + 1
            if valuepresent == columnName:
                print("current row value" + str(indexrow) + "value" + valuepresent)
                break

        # second header match: columnName1
        for values in element:
            valuepresent = values.text
            print("text present here::" + valuepresent)
            indexrow = indexrow + 1
            if valuepresent.find(columnName1) != -1:
                print("current row value" + str(indexrow) + "value" + valuepresent)
                break

        # walk the first-column cells to locate the row index
        indexvalue = context.driver.find_elements_by_xpath(
            tablexpath + "/descendant::tr/td[1]")
        for valuescolumn in indexvalue:
            valuepresentcolumn = valuescolumn.text
            print("Team text present here::" + valuepresentcolumn)
            print(indexcolumn)
            indexcolumn = indexcolumn + 1
            if valuepresent.find(rowName) != -1:
                print("current column" + str(indexcolumn) + "value" + valuepresentcolumn)
                break

        print("indexrow" + str(indexrow))
        print("index column" + str(indexcolumn))
        lookupelement = context.driver.find_element_by_xpath(
            tablexpath + "//descendant::tr[" + str(indexcolumn) + "]/td[" + str(indexrow) + "]")
        print(tablexpath + "//descendant::tr[" + str(indexcolumn) + "]/td[" + str(indexrow) + "]")
        print(lookupelement.text)
        return context.driver.find_element_by_xpath(
            tablexpath + "//descendant::tr[" + str(indexrow) + "]/td[" + str(indexcolumn) + "]")
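A hypothetical call site, assuming context is a behave context whose driver attribute is a Selenium WebDriver (3.x-style API); the XPath, row key and column header below are placeholders, not values taken from the original page:

# Hypothetical usage from a behave step.
cells = readTableDataFromDB.LookupValueFromColumnSingleKey(
    context,
    "//table[@class='lineItemsTable']",
    "7086775850",   # value expected in the row's first cell
    "Amount",       # column header to match
)
for cell in cells:
    print(cell.text)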