Web Scraping Homework
(2) Use the requests library's get() function to access the website below 20 times. Print the return status and the text content, and compute the lengths of the page content returned by the text attribute and the content attribute. (Different student IDs do different pages; completing the required one earns a Pass.) d: 360 search homepage (student IDs ending in 7 or 8)
Method 1 code:
import requests

# Fetch the page once, then print it 20 times.
# (Note: the task asks for 20 accesses; Method 2 below issues 20 separate requests.)
r = requests.get("https://hao.360.com/")
r.encoding = 'utf-8'
r.raise_for_status()                 # raise an exception on a bad status code
for i in range(20):
    print(i)
    print("Test output:\n", r.text)
n = len(r.text)                      # length of the decoded str
m = len(r.content)                   # length of the raw bytes
print("Page length via the text attribute: {}, via the content attribute: {}".format(n, m))

Method 2 code:
import requests

# Issue 20 separate requests, printing the status and page text each time.
for i in range(20):
    r = requests.get("https://hao.360.com/")
    print("Return status: {}".format(r.status_code))
    print("text content: {}".format(r.text))
    print("\n")
# lengths computed from the last response
print("Length of text: {}".format(len(r.text)))
print("Length of content: {}".format(len(r.content)))
Result:


This is a simple HTML page. Keep it as a string and complete the tasks below. (Good grade tier)
a. Print the content of the head tag and the last two digits of your student ID

b. Get the content of the body tag

c. Get the tag object whose id is first

d. Extract and print the Chinese characters in the HTML page
Code:
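A minimal sketch of one way to complete tasks a-d with BeautifulSoup and a regular expression; the html string below, the id="first" paragraph, and the "38" student-ID suffix are placeholder assumptions standing in for the page given in the assignment:

import re
from bs4 import BeautifulSoup

# placeholder HTML standing in for the page string given in the assignment
html = '''<html>
<head><title>Demo page</title></head>
<body>
<p id="first">这是第一段中文内容。</p>
<p id="second">这是第二段。</p>
</body>
</html>'''

soup = BeautifulSoup(html, "html.parser")

# a. print the head tag content and the last two digits of the student ID
print(soup.head, "38")                 # "38" is a placeholder ID suffix

# b. get the content of the body tag
print(soup.body)

# c. get the tag object whose id is first
print(soup.find(id="first"))

# d. extract and print the Chinese characters via a CJK Unicode-range regex
print(re.findall(r'[\u4e00-\u9fff]+', soup.get_text()))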

Result:

Crawl the China university rankings site: http://www.zuihaodaxue.com/zuihaodaxuepaiming2018.html
a. Crawl the university rankings (student IDs ending in 7 or 8: crawl the year 2018 ranking)
import csv
import os
import requests
from bs4 import BeautifulSoup

allUniv = []                          # one list of cell strings per university

def getHTMLText(url):
    # fetch the page, returning '' on any request error
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = 'utf-8'
        return r.text
    except requests.RequestException:
        return ""

def fillUnivList(soup):
    # each ranked university sits in a <tr> whose cells are <td> tags
    data = soup.find_all('tr')
    for tr in data:
        ltd = tr.find_all('td')
        if len(ltd) == 0:             # header rows hold <th> cells; skip them
            continue
        singleUniv = []
        for td in ltd:
            singleUniv.append(td.string)
        allUniv.append(singleUniv)

def writercsv(save_road, num, title):
    # append to an existing file, otherwise create it and write the header row
    if os.path.isfile(save_road):
        with open(save_road, 'a', newline='') as f:
            csv_write = csv.writer(f, dialect='excel')
            for i in range(num):
                csv_write.writerow(allUniv[i])
    else:
        with open(save_road, 'w', newline='') as f:
            csv_write = csv.writer(f, dialect='excel')
            csv_write.writerow(title)
            for i in range(num):
                csv_write.writerow(allUniv[i])

# header row: the column names used by the ranking site
title = ["排名", "学校名称", "省市", "总分", "生源质量", "培养结果", "科研规模",
         "科研质量", "顶尖成果", "顶尖人才", "科技服务", "产学研究合作", "成果转化", "学生国际化"]
save_road = "E:\\排名.csv"

def main():
    url = 'http://www.zuihaodaxue.com/zuihaodaxuepaiming2018.html'
    html = getHTMLText(url)
    soup = BeautifulSoup(html, "html.parser")
    fillUnivList(soup)
    writercsv(save_road, 30, title)   # save the top 30 universities

main()
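As a quick sanity check (a sketch assuming the script above has run and E:\排名.csv exists), the first few saved rows can be read back:

import csv

# read back the first five rows of the CSV written above
with open("E:\\排名.csv", newline='') as f:
    for row in list(csv.reader(f))[:5]:
        print(row)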


