爬虫

(2)请用requests库的get()函数访问如下一个网站20次,打印返回状态,text()内容,计算text()属性和content属性所返回网页内容的长度。(不同学号选做如下网页,必做及格)

b : 搜狗主页(尾号3,4学号做)

# Access the Sogou homepage 20 times with requests.get(); for each access
# print the status code, the page text, and the lengths of the r.text and
# r.content attributes, as the assignment requires.
import requests  # use the real package, not pip's vendored internal copy

print('访问搜狗网站 获取Response对象')
for attempt in range(1, 21):
    # The assignment asks for 20 separate accesses, so get() is called
    # inside the loop (the original fetched once and reprinted it 20 times).
    r = requests.get("http://www.sogou.com")
    print('' + str(attempt) + '次的返回状态打印:' + str(r.status_code))
    print('' + str(attempt) + '次的text()打印:' + str(r.text))
    print('' + str(attempt) + '次的text()属性长度打印:' + str(len(r.text)))
    print('' + str(attempt) + '次的content属性长度打印:' + str(len(r.content)))

 

 

3)这是一个简单的html页面,请保存为字符串,完成后面的计算要求。(良好)

# Parse a small HTML page held as a string and print its <head> section.
from bs4 import BeautifulSoup

# Bug fix: the original HTML string used curly quotes (‘utf-8‘), a broken
# "</p >" closing tag and an unmatched "</table>"; corrected so the parser
# receives well-formed markup.
html = BeautifulSoup(
    "<!DOCTYPE html>\n<html>\n<head>\n<meta charset='utf-8'>\n"
    "<title>17信计</title>\n</head>\n<body>\n<h1>我的第一标题</h1>\n"
    "<p id='frist'>我的第一段落。</p>\n</body>\n</html>",
    "html.parser")
print(html.head, "学号后两位:04")

 

 

 

 

 

 

(4) 爬中国大学排名网站内容

from bs4 import BeautifulSoup
import requests
# Module-level accumulator: one entry per university, each entry being the
# list of <td> strings for that table row. Filled by fillUnivList() and
# read by printUnivList().
allUniv=[]
def getHTMLText(url):
    """Fetch *url* and return its decoded text, or "" on request failure.

    A 30-second timeout is applied, and non-2xx responses are turned into
    exceptions via raise_for_status() so they also yield "".
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # The ranking page is UTF-8; set it explicitly since requests'
        # charset guess from headers can be wrong for this site.
        r.encoding = 'utf-8'
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit): only network/HTTP errors mean
        # "page unavailable".
        return ""
def fillUnivList(soup):
    """Append one row (list of <td> strings) to allUniv per table row in *soup*."""
    for row in soup.find_all('tr'):
        cells = row.find_all('td')
        # Header rows contain <th> only (no <td>), so skip them.
        if not cells:
            continue
        allUniv.append([cell.string for cell in cells])
def printUnivList(num):
    """Print the first *num* rows of allUniv as an aligned ranking table.

    Columns: rank, school name, province, total score, enrollment scale.
    chr(12288) is the fullwidth (CJK) space used as the fill character so
    the Chinese-text columns line up.
    """
    fill = chr(12288)
    print("{1:^2}{2:{0}^10}{3:{0}^6}{4:{0}^4}{5:{0}^10}".format(
        fill, "排名", "学校名称", "省市", "总分", "培训规模"))
    # Slice instead of range(num): never index past the scraped rows
    # (the original raised IndexError when num exceeded len(allUniv)).
    for u in allUniv[:num]:
        if len(u) < 7:
            continue  # malformed/short row — skip rather than crash on u[6]
        # float() instead of eval(): the score string comes from an
        # untrusted web page, and eval would execute arbitrary code.
        print("{1:^4}{2:{0}^10}{3:{0}^5}{4:{0}^8.1f}{5:{0}^10}".format(
            fill, u[0], u[1], u[2], float(u[3]), u[6]))
def main(num):
    """Download the 2016 best-universities ranking page and print the top *num* rows."""
    url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2016.html'
    page_text = getHTMLText(url)
    soup = BeautifulSoup(page_text, "html.parser")
    fillUnivList(soup)
    printUnivList(num)


main(10)

(显示已超出范围,不知如何是好)

 

posted @ 2020-12-13 22:49  假粉  阅读(81)  评论(0)    收藏  举报