A web-scraping journey from zero (day 1/2/3)
import requests
import bs4


def getHTMLText(url):
    """Fetch a page and return its text, or '' on any request failure."""
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding  # use the detected encoding to avoid mojibake
        return r.text
    except requests.RequestException as e:
        print('request failed:', e)
        return ''


def fillUnivList(ulist, html):
    """Parse the ranking table and append [rank, name, score] records to ulist."""
    soup = bs4.BeautifulSoup(html, 'html.parser')
    for tr in soup.find('tbody').children:
        if isinstance(tr, bs4.element.Tag):  # skip NavigableString children such as '\n'
            tds = tr('td')
            univ_name = tds[1]('span')[0].string  # the name sits in the first <span> of the second cell
            ulist.append([tds[0].string.strip(), univ_name.strip(), tds[4].string.strip()])


def printUnivList(ulist, num):
    """Print the first num records in aligned columns."""
    # chr(12288) is the full-width space, used as the fill character so that
    # the Chinese school names line up in fixed-width columns
    tplt = "{0:^10}\t{1:{3}^10}\t{2:^10}"
    print(tplt.format('排名', '学校名称', '总分', chr(12288)))
    for i in range(num):
        u = ulist[i]
        print(tplt.format(u[0], u[1], u[2], chr(12288)))


def main():
    uinfo = []
    url = 'https://www.shanghairanking.cn/rankings/bcur/2024'
    html = getHTMLText(url)
    fillUnivList(uinfo, html)
    # print(uinfo)  # debug: inspect the parsed rows
    printUnivList(uinfo, 20)


if __name__ == '__main__':
    main()
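One hedged caveat: if the page ever comes back without a <tbody> (some ranking sites fill the table in with JavaScript, and some respond differently to the default python-requests User-Agent), soup.find('tbody') returns None and fillUnivList crashes. A minimal defensive sketch, where the User-Agent string and the function names are illustrative assumptions rather than code from this post:

import requests
import bs4

# Illustrative browser-like header; the exact string is an assumption.
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}

def getHTMLTextWithHeaders(url):
    try:
        r = requests.get(url, headers=HEADERS, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        print('request failed:', e)
        return ''

def fillUnivListSafe(ulist, html):
    soup = bs4.BeautifulSoup(html, 'html.parser')
    tbody = soup.find('tbody')
    if tbody is None:
        # Nothing to parse: empty download, or the table is rendered client-side.
        print('no <tbody> found in the downloaded HTML')
        return
    for tr in tbody.children:
        if isinstance(tr, bs4.element.Tag):
            tds = tr('td')
            ulist.append([tds[0].string.strip(),
                          tds[1]('span')[0].string.strip(),
                          tds[4].string.strip()])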
A focused crawler for a single site; I'll come back and polish the output later (if I still remember).
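One small upgrade in that direction (a suggestion, not part of the original script): write the rows to a CSV file with the standard csv module, so the results outlive the terminal session. The file name here is an arbitrary example:

import csv

def saveUnivList(ulist, path='bcur_2024.csv'):
    # Dump the scraped [rank, name, score] rows to CSV.
    # 'utf-8-sig' adds a BOM so Excel opens the Chinese text correctly.
    with open(path, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(['排名', '学校名称', '总分'])
        writer.writerows(ulist)

Calling saveUnivList(uinfo) at the end of main() would be enough.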
