Fetching All Campus News

1. Extract all news items from a single news-list page, wrapped in a function.

2. Get the total number of news articles and compute the total number of list pages.

3. Fetch the details of every news item on every list page.

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re


# Get the click count for a news article
def getNewsId(url):
    # The news ID is the last four digits of the "_xxxx.html" part of the URL
    newsId = re.findall(r'_(.*)\.html', url)[0][-4:]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    clickRes = requests.get(clickUrl)
    # Extract the click count from the response with a regular expression
    clickCount = int(re.search(r"hits'\).html\('(.*)'\);", clickRes.text).group(1))
    return clickCount
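
# Note: judging from the regex above, the counter API appears to return a
# small jQuery snippet such as $('#hits').html('123'); rather than JSON,
# which is why the count has to be scraped out of the JavaScript text.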


# Get the details of one news article
def getNewsDetail(newsUrl):
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')

    content = soupd.select('#content')[0].text
    info = soupd.select('.show-info')[0].text
    # Call getNewsId() to get the click count
    count = getNewsId(newsUrl)
    # Match the publication timestamp
    date = re.search(r'(\d{4}.\d{2}.\d{2}\s\d{2}.\d{2}.\d{2})', info).group(1)
    # Author, reviewer and source may each be absent, so default them first;
    # the Chinese literals below must stay as written because they match the
    # text of the scraped page
    author = check = sources = ''
    if info.find('作者:') > 0:
        author = re.search(r'作者:((.{2,4}\s|.{2,4}、){1,3})', info).group(1)
    if info.find('审核:') > 0:
        check = re.search(r'审核:((.{2,4}\s){1,3})', info).group(1)
    if info.find('来源:') > 0:
        # Non-greedy so the source stops at the following 摄(影) or 点(击) field
        sources = re.search(r'来源:(.*?)\s*(?:摄|点)', info).group(1)
    # Convert the timestamp string into a datetime object
    dateTime = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
    print('Published: {0}\nAuthor: {1}\nReviewer: {2}\nSource: {3}\nClicks: {4}'.format(dateTime, author, check, sources, count))
    print(content)


def getListPage(listUrl):
    res = requests.get(listUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    # Walk every <li> that is a news entry on this list page
    for new in soup.select('li'):
        if len(new.select('.news-list-title')) > 0:
            title = new.select('.news-list-title')[0].text
            description = new.select('.news-list-description')[0].text
            newsUrl = new.select('a')[0]['href']

            print('Title: {0}\nSummary: {1}\nLink: {2}'.format(title, description, newsUrl))
            # Call getNewsDetail() to fetch the article details
            getNewsDetail(newsUrl)


listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
getListPage(listUrl)
res = requests.get(listUrl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
# The .a1 element holds the total article count (e.g. "123条"); ten news
# items fit on one list page, so round up to get the number of list pages
total = int(soup.select('.a1')[0].text.rstrip('条'))
listCount = (total + 9) // 10

for i in range(2, listCount + 1):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(listUrl)
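
As a side note, the requests.get calls above use no timeout or error handling, so one slow or broken article page stalls or kills the whole crawl. A minimal fetch helper along these lines could wrap them, reusing the requests import at the top of the script (fetch is my own name, not part of the original post):

def fetch(url, encoding='utf-8', timeout=10):
    # Return the decoded HTML of url, or None if the request fails
    try:
        res = requests.get(url, timeout=timeout)
        res.raise_for_status()
        res.encoding = encoding
        return res.text
    except requests.RequestException as e:
        print('request failed:', url, e)
        return None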

4. Pick a topic you are personally interested in, crawl its data, and run word-segmentation analysis on it. Topics must not duplicate another student's.

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import jieba
 
newsurl = 'http://tv.cctv.com/cctv5/'
 
 
# Tokenise the text with jieba and print the five most frequent words
def sort(text):
    # Replace punctuation with spaces before segmentation (some characters
    # in the original set were lost to mis-encoding and have been deduplicated)
    punctuation = '''一!“”,。?;’"',.、:\n'''
    for s in punctuation:
        text = text.replace(s, ' ')
    wordlist = list(jieba.cut(text))
    exclude = {'', '\u3000', '\r', '\xa0', '_', ' ', '(', ')'}
    set2 = set(wordlist) - exclude
    counts = {}
    for key in set2:
        counts[key] = wordlist.count(key)
    dictlist = list(counts.items())
    dictlist.sort(key=lambda x: x[1], reverse=True)
    print("Top 5 keywords:")
    for i in range(5):
        print(dictlist[i])
 
 
def getContent(url):
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup2 = BeautifulSoup(res.text, 'html.parser')
    for news in soup2.select('.l_a'):
        if len(news.select('.author')) > 0:
            author = news.select('.author')[0].text
            print("Author:", author)
    # rstrip() strips a set of characters, not a suffix string, so use
    # replace() to drop the trailing ad-loader JavaScript snippet
    content = soup2.select('.la_con')[0].text.replace('AD_SURVEY_Add_AdPos("7000531");', '')
    print("Body:", content)
    sort(content)
 
 
def getNewDetails(newsurl):
    res = requests.get(newsurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('.item'):
        title = news.select('a')[0].attrs['title']
        a = news.select('a')[0].attrs['href']
        # Drop the trailing "[详细]" ("details") marker from the blurb
        brief = news.select('h5')[0].text.replace('[详细]', '')
        time = news.select('h6')[0].text
        dt = datetime.strptime(time, '%Y-%m-%d %H:%M')
        print("Title:", title)
        print("Link:", a)
        print("Summary:", brief)
        print("Time:", dt)
        getContent(a)
        print('\n')
 
 
# getNewDetails() does its own fetch, so no top-level request is needed
getNewDetails(newsurl)
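
As a hedged refinement (not from the original post), the counting loop in sort() can be replaced by collections.Counter, which handles the exclusion and top-N selection in a couple of lines:

import jieba
from collections import Counter

def top_keywords(text, n=5):
    # Tokens to ignore when counting; mirrors the exclude set in sort()
    exclude = {'', ' ', '\u3000', '\r', '\xa0', '_', '(', ')'}
    words = (w for w in jieba.cut(text) if w not in exclude)
    return Counter(words).most_common(n)

# e.g. top_keywords(content) returns the five most frequent tokens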

 
