Web Scraping

A while back I wrote a web-scraping program for a class.

Here is the code for the scraping stage:

# -*- coding: utf-8 -*-
import pymysql
import requests
from bs4 import BeautifulSoup

# Connect to the local MySQL database that will store the papers
try:
    db = pymysql.connect(host="localhost", user="root", password="libin1214",
                         database="book", charset="utf8")
    print("Database connection succeeded")
except pymysql.Error as e:
    print("Database connection failed: " + str(e))
    raise

cursor = db.cursor()

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36 Edg/101.0.1210.32'
}
url = "https://openaccess.thecvf.com/CVPR2021?day=all"

# Fetch the CVPR 2021 listing page
html = requests.get(url, headers=headers)
soup = BeautifulSoup(html.content, 'html.parser')

# Every paper row contains an <a> tag whose text is "pdf"; collect them all
pdfs = soup.find_all("a", string="pdf")
lis = []
jianjie = ""  # holds the abstract of the current paper
for i, pdf in enumerate(pdfs):
    pdf_name = pdf["href"].split('/')[-1]
    print(pdf_name)
    name = pdf_name.split('.')[0].replace("_CVPR_2021_paper", "")
    link = "http://openaccess.thecvf.com/content/CVPR_2021/html/" + name + "_CVPR_2021_paper.html"

    # Fetch the paper's detail page and pull the abstract out of <div id="abstract">
    html1 = requests.get(link, headers=headers)
    soup1 = BeautifulSoup(html1.content, 'html.parser')
    weizhi = soup1.find('div', attrs={'id': 'abstract'})
    if weizhi:
        jianjie = weizhi.get_text()
    print("This is record " + str(i))

    # The file name consists of underscore-separated words, so those words
    # double as crude keywords
    keywords = ','.join(str(name).split('_'))

    info = {'title': name, 'link': link, 'abstract': jianjie, 'keywords': keywords}
    lis.append(info)

    # Build the INSERT statement from the dict keys and write this record
    cols = ",".join('`{}`'.format(k) for k in info)        # '`title`,`link`,`abstract`,`keywords`'
    val_cols = ','.join('%({})s'.format(k) for k in info)  # '%(title)s,%(link)s,...'
    sql = "insert into lunwen(%s) values(%s)" % (cols, val_cols)
    cursor.execute(sql, info)  # pass the dict as named parameters
    db.commit()

print("Success")
