June 6

Today I started on the "top-conference hot words" feature. Facing this task I was at a loss; I had no idea how to crawl the data. I found an older classmate's code online and brought it over to try out.

import requests
import pymysql
from bs4 import BeautifulSoup

# Connect to the local MySQL database
db = pymysql.connect(host='127.0.0.1',
                     user='root',
                     password='root',
                     db='ding',
                     charset='utf8')
cursor = db.cursor()

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
}
url = "http://openaccess.thecvf.com/CVPR2019.py?day=2019-06-20"
html = requests.get(url, headers=headers)
soup = BeautifulSoup(html.content, 'html.parser')

# Each paper row on the listing page has an <a> link whose text is exactly "pdf"
pdfs = soup.find_all(name="a", string="pdf")
print(pdfs)

lis = []
jianjie = ""
for i, pdf in enumerate(pdfs):
    # Recover the paper name from the PDF file name
    pdf_name = pdf["href"].split('/')[-1]
    name = pdf_name.split('.')[0].replace("_CVPR_2019_paper", "")

    # Fetch the paper's detail page and extract the abstract
    link = "http://openaccess.thecvf.com/content_CVPR_2019/html/" + name + "_CVPR_2019_paper.html"
    html1 = requests.get(link, headers=headers)
    soup1 = BeautifulSoup(html1.content, 'html.parser')
    weizhi = soup1.find('div', attrs={'id': 'abstract'})
    if weizhi:
        jianjie = weizhi.get_text()
    print("Record " + str(i))

    # The title words are underscore-separated in the file name;
    # join them with commas to use as keywords
    keywords = ','.join(str(name).split('_'))

    info = {}
    info['title'] = name
    info['abstract'] = jianjie
    info['link'] = link
    info['keywords'] = keywords
    lis.append(info)

for item in lis:
    # Build a parameterized INSERT from the dict keys, e.g.
    # insert into lunwen(`title`, ...) values(%(title)s, ...)
    cols = ", ".join('`{}`'.format(k) for k in item.keys())
    val_cols = ', '.join('%({})s'.format(k) for k in item.keys())
    sql = "insert into lunwen(%s) values(%s)" % (cols, val_cols)
    cursor.execute(sql, item)  # the dict supplies the %(key)s values
    db.commit()
print("Done")


But after running it several times, nothing was written to the database; the script finished with no visible effect.
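A few quick checks would narrow down where it stops. This is a rough diagnostic sketch, reusing the URL and MySQL credentials from above, not part of the original code:

import requests
import pymysql
from bs4 import BeautifulSoup

headers = {"User-Agent": "Mozilla/5.0"}
url = "http://openaccess.thecvf.com/CVPR2019.py?day=2019-06-20"

# 1. Did the listing page come back at all?
resp = requests.get(url, headers=headers, timeout=10)
print(resp.status_code)  # anything other than 200 means the fetch itself failed

# 2. Did the parser find any "pdf" links?
soup = BeautifulSoup(resp.content, 'html.parser')
print(len(soup.find_all('a', string='pdf')))  # 0 means the page layout differs

# 3. Does a bare insert raise an error the script is hiding?
db = pymysql.connect(host='127.0.0.1', user='root', password='root',
                     db='ding', charset='utf8')
try:
    with db.cursor() as cur:
        cur.execute("insert into lunwen(`title`) values(%s)", ("test",))
    db.commit()
except pymysql.MySQLError as e:
    print(e)  # e.g. "Table 'ding.lunwen' doesn't exist"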
posted @ 2021-06-06 21:21  不咬牙