# Fetch all campus news articles (title, date, source, click count) from news.gzcc.cn.

import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime

def writeNewsDetail(content):
    """Append one article's body text to gzccNews.txt.

    :param content: plain-text article body (str).
    """
    # FIX: context manager guarantees the handle is closed even if write() raises;
    # the original open()/close() pair leaked the handle on failure.
    with open('gzccNews.txt', 'a', encoding='utf-8') as f:
        f.write(content)

def getClickCount(newsUrl):
    """Return the click count of one article via the site's count API.

    :param newsUrl: article URL like http://news.gzcc.cn/.../0404_9183.html
    :return: click count as int.
    """
    # The news id is the digits between the last '/' and '.html',
    # e.g. '..._0404/9183.html' -> '9183'.  Raw string for the regex.
    newsId = re.findall(r'\_(.*).html', newsUrl)[0].split('/')[1]
    # BUG FIX: the original URL hard-coded id=9024 and called .format(newsId)
    # on a string with no '{}' placeholder, so every article returned the
    # click count of article 9024.  Insert the real id.
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    # The API responds with a JS snippet like  ...'.html('9183')');
    # — peel off the wrapper and keep the integer.
    return int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))

def getNewsDetail(newsUrl):
    """Fetch one article page and return its metadata as a dict.

    :param newsUrl: article URL.
    :return: dict with keys title, dt (datetime), source, content, click, newsUrl.
    Side effects: appends the article body to gzccNews.txt and prints the dict.
    """
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')

    news = {}
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    # BUG FIX: the original called datetime.strftime(<str>, fmt), which raises
    # TypeError — strftime formats a datetime, strptime PARSES a string.
    # Store dt as a real datetime object.
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[:19], '%Y-%m-%d %H:%M:%S')
    # Source field is optional on some pages.
    if info.find('来源:') > 0:
        news['source'] = info[info.find('来源:'):].split()[0].lstrip('来源:')
    else:
        news['source'] = 'none'
    news['content'] = soupd.select('.show-content')[0].text.strip()
    writeNewsDetail(news['content'])
    news['click'] = getClickCount(newsUrl)
    news['newsUrl'] = newsUrl
    print(news)
    return news



def getListPage(pageUrl):
    """Scrape one news-list page and return the detail dict of every article on it.

    :param pageUrl: URL of a list page.
    :return: list of dicts produced by getNewsDetail.
    """
    response = requests.get(pageUrl)
    response.encoding = 'utf-8'
    page = BeautifulSoup(response.text, 'html.parser')

    # Only <li> elements that contain a title element are article entries;
    # their first <a> holds the article link.
    # NOTE(review): the selector '.new-list-title' looks like it may be a typo
    # for '.news-list-title' — confirm against the live page markup.
    return [
        getNewsDetail(item.select('a')[0].attrs['href'])
        for item in page.select('li')
        if len(item.select('.new-list-title')) > 0
    ]


def getPageN():
    """Return the total number of news-list pages (10 articles per page).

    :return: page count as int.
    """
    res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    # The '.a1' element's text ends with the counter word '条'
    # (e.g. '9183条'); strip it to get the article total.
    total = int(soup.select('.a1')[0].text.rstrip('条'))
    # BUG FIX: ceiling division.  The original total // 10 + 1 overcounts by
    # one page whenever the article total is an exact multiple of 10.
    return (total + 9) // 10

# Crawl the first (unnumbered) list page, then every numbered page.
newsTotal = []
firstPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newsTotal.extend(getListPage(firstPageUrl))

n = getPageN()
# BUG FIX: the original loop was range(n, n+1), which fetched ONLY the last
# page — contradicting the goal of collecting ALL campus news.  The numbered
# list pages run from 2 to n inclusive (page 1 is the unnumbered index above).
for i in range(2, n + 1):
    listPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newsTotal.extend(getListPage(listPageUrl))

print(newsTotal)
# (blog footer, not part of the script) posted @ 2018-04-12 11:27  126刘畅  阅读(101)  评论(0) 编辑 收藏 举报