Fetching All Campus News

1. Extract all the news items on one news list page and wrap the logic in a function.

2. Get the total number of news articles and work out the total number of list pages (see the short sketch after this list).

3. Fetch the full details of every news item on every list page.
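
A minimal sketch of the page-count calculation in step 2, assuming each list page on news.gzcc.cn shows 10 news items (the per-page count is an assumption, not stated above):

n = 1025             # hypothetical total article count scraped from the '.a1' element
pages = n // 10 + 1  # assuming 10 news items per list page
print(pages)         # -> 103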

 

import requests
import re
from datetime import datetime
from bs4 import BeautifulSoup

newsurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'   # campus news index page
res = requests.get(newsurl)   # returns a Response object
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

# Fetch the click count of a news item from the counter API.
def getClickCount(newsUrl):
    newId = re.search('_(.*).html', newsUrl).group(1).split('/')[1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
    # The API returns a small jQuery snippet; strip it down to the bare number.
    return int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))
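
# A hypothetical sanity check of the parsing above: the counter API is assumed
# to return a small jQuery snippet like the sample string below.
sample = "$('#hits').html('322');"
assert int(sample.split('.html')[-1].lstrip("('").rstrip("');")) == 322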

# Append a news item's text to a local file.
def writeContent(content):
    f = open('news.txt', 'a', encoding='utf-8')
    f.write(content)
    f.close()

# Fetch the full details of a single news item.
def getNewsDetail(newsurl):
    resd=requests.get(newsurl)
    resd.encoding='utf-8'
    soupd=BeautifulSoup(resd.text,'html.parser')


    title = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    dt = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    if info.find('来源')>0:
        source=info[info.find('来源:'):].split()[0].lstrip('来源:')
    else:
        source='none'

    content=soupd.select('.show-content')[0].text.strip()
    click=getClickCount(newsurl)
    print(click,title,newsurl,source,dt)

# Walk one news list page and fetch the detail of every item on it.
def getListPage(listPageUrl):
    res=requests.get(listPageUrl)
    res.encoding='utf-8'
    soup=BeautifulSoup(res.text,'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title'))>0:
            a=news.select('a')[0].attrs['href']
            getNewsDetail(a)




resn = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
resn.encoding = 'utf-8'
soupn = BeautifulSoup(resn.text, 'html.parser')
n = int(soupn.select('.a1')[0].text.rstrip('条'))   # total number of news articles

# Compute the total number of list pages (assuming 10 news items per page)
# and crawl them all; the index page serves as the first page.
pages = n // 10 + 1
getListPage('http://news.gzcc.cn/html/xiaoyuanxinwen/')
for i in range(2, pages + 1):
    pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(pageUrl)

  

 

4. Pick a topic you are personally interested in, crawl data for it, and run a word-segmentation analysis on the text (see the jieba sketch after the code below). The topic must not duplicate another student's.

 

import requests
from bs4 import BeautifulSoup

newsurl = 'https://voice.hupu.com/nba'   # Hupu NBA news channel
res = requests.get(newsurl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

title = soup.select('a')[0]                      # first link on the page
info = soup.select('.comeFrom')[0].a             # article source link
# content = soup.select('.artical-main-content')[0].text.strip()
content = soup.select('.artical-main-content')   # article body elements
print(title, info, content)
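
The word-segmentation analysis required in step 4 is not shown above. A minimal sketch, assuming the jieba library is available and the crawled article text has already been collected into a single string (the text variable below is a hypothetical placeholder):

import jieba

# Hypothetical input: the concatenated text of the crawled articles.
text = '火箭队在主场以112比102战胜了爵士队,哈登得到全场最高的35分。'

# Segment the text and count word frequencies, ignoring single characters.
words = [w for w in jieba.lcut(text) if len(w) > 1]
freq = {}
for w in words:
    freq[w] = freq.get(w, 0) + 1

# Print the most frequent words.
for w, c in sorted(freq.items(), key=lambda x: x[1], reverse=True)[:10]:
    print(w, c)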

  

posted on 2018-04-11 17:09  140-吴华锐