Structuring and Saving the Data

2018-04-12 20:56  Molemole

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas

# Get the click count of an article from the hit-counter API
def getClickCount(newsUrl):
    # Extract the numeric news id from the detail-page URL
    newId = re.search(r'_(.*).html', newsUrl).group(1).split('/')[1]
    clickUrl = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(newId)
    clickStr = requests.get(clickUrl).text
    # The API returns a jQuery snippet; pull the number out of ".html('N')"
    count = re.search(r"hits'\).html\('(.*)'\);", clickStr).group(1)
    return count

# Get the details of one news article
def getNewsDetail(newsurl):
    resd = requests.get(newsurl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')

    news = {}
    news['title'] = soupd.select('.show-title')[0].text
    # news['newsurl'] = newsurl
    info = soupd.select('.show-info')[0].text
    # The info line starts with "发布时间:YYYY-mm-dd HH:MM:SS ..."
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['click'] = int(getClickCount(newsurl))
    if info.find('来源:') > 0:
        news['source'] = info[info.find('来源:'):].split()[0].lstrip('来源:')
    else:
        news['source'] = 'none'
    if info.find('作者:') > 0:
        news['author'] = info[info.find('作者:'):].split()[0].lstrip('作者:')
    else:
        news['author'] = 'none'
    # news['content'] = soupd.select('.show-content')[0].text.strip()

    # Get the article body and append it to a file
    content = soupd.select('.show-content')[0].text.strip()
    writeNewsContent(content)

    return news
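
# A sketch (not real output) of the dict that getNewsDetail returns,
# with illustrative placeholder values:
#   {'title': '...', 'dt': datetime(2018, 4, 4, 10, 0), 'click': 123,
#    'source': '学生工作处', 'author': '...'}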

# Parse one listing page and collect the detail dict of every news item on it
def getListPage(listPageUrl):
    res = requests.get(listPageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            a = news.select('a')[0].attrs['href']
            newsList.append(getNewsDetail(a))
    return newsList

# Append the article text to a file
def writeNewsContent(content):
    with open('gzccNews.txt', 'a', encoding='utf-8') as f:
        f.write(content)

# Total number of listing pages: the '.a1' text holds the item count (ending in '条'),
# with 10 items per page
def getPageNumber():
    ListPageUrl = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
    res = requests.get(ListPageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    n = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1
    return n


newsTotal = []
firstPage = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newsTotal.extend(getListPage(firstPage))

# Note: range(n, n+1) crawls only the last listing page;
# use range(2, n+1) to crawl every page after the first.
n = getPageNumber()
for i in range(n, n + 1):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newsTotal.extend(getListPage(listUrl))

# Structure the collected dicts into a DataFrame
df = pandas.DataFrame(newsTotal)
# df.to_excel("news.xlsx")

# print(df.head(6))
# print(df[['author','click','source']])
# print(df[df['click']>3000])

# Show only the rows whose source is one of the listed departments
sou = ['国际学院', '学生工作处']
print(df[df['source'].isin(sou)])
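
To actually persist the structured table (the df.to_excel call above is commented out), a minimal sketch along the following lines should work. It assumes the openpyxl package is installed for the Excel export; the CSV filename, the SQLite file gzccnewsdb.sqlite, and the table name gzccnews are only illustrative choices, not part of the original script.

import sqlite3

# Excel / CSV export (to_excel needs openpyxl for .xlsx files)
df.to_excel('news.xlsx')
df.to_csv('news.csv', encoding='utf-8-sig')

# SQLite export: one table holding the structured news records
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    df.to_sql('gzccnews', con=db, if_exists='replace')

The saved table can then be reloaded with pandas.read_sql_query('select * from gzccnews', con=db) for further filtering, the same way the df['source'].isin(sou) query is used above.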