用requests库和BeautifulSoup4库爬取新闻列表9-28

  • 用requests库和BeautifulSoup4库,爬取校园新闻列表的时间、标题、链接、来源、详细内容。
    # Crawl the campus news list page and, for the first news item found,
    # print its publish time, headline, link, source, and full article text.
    import requests
    from bs4 import BeautifulSoup

    gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    for news in soup.select('li'):
        heads = news.select('.news-list-title')
        if not heads:
            continue  # skip <li> elements that are not news entries
        title = heads[0].text                      # headline
        url = news.select('a')[0]['href']          # article link
        info = news.select('.news-list-info')[0]
        time = info.contents[0].text               # publish time string
        source = info.contents[1].text             # news source
        # fetch the article page itself for the body text
        resd = requests.get(url)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')
        detail = soupd.select('.show-content')[0].text
        print(time, title, url, source)
        print(detail)
        break  # only the first matching item, as in the assignment

     

  • 将其中的时间str转换成datetime类型。
    
    

    # Crawl the campus news list and convert the publish-time string into a
    # datetime object with datetime.strptime.
    # FIX: the posted version had lost all indentation inside the for/if
    # bodies (everything was flush with the `for` line), which is an
    # IndentationError in Python — the block structure is restored here.
    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text  # headline
            url = news.select('a')[0]['href']  # article link
            time = news.select('.news-list-info')[0].contents[0].text  # time string
            # parse the 'YYYY-MM-DD' string into a datetime object
            dt = datetime.strptime(time, '%Y-%m-%d')
            source = news.select('.news-list-info')[0].contents[1].text  # news source
            # fetch the article body
            resd = requests.get(url)
            resd.encoding = 'utf-8'
            soupd = BeautifulSoup(resd.text, 'html.parser')
            detail = soupd.select('.show-content')[0].text
            print(dt, title, url, source)
            print(detail)
            break  # only the first matching item

    
    
    
    

     

  • 将取得详细内容的代码包装成函数。
    # Same crawl as above, but fetching the article body is factored out
    # into the getdetail() helper.
    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    def getdetail(url):
        """Download one article page and return its body text."""
        page = requests.get(url)
        page.encoding = 'utf-8'
        body = BeautifulSoup(page.text, 'html.parser')
        return body.select('.show-content')[0].text

    gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    for news in soup.select('li'):
        heads = news.select('.news-list-title')
        if not heads:
            continue  # not a news entry
        title = heads[0].text                      # headline
        url = news.select('a')[0]['href']          # article link
        info = news.select('.news-list-info')[0]
        time = info.contents[0].text               # publish time string
        dt = datetime.strptime(time, '%Y-%m-%d')   # parsed datetime
        source = info.contents[1].text             # news source
        detail = getdetail(url)                    # article body
        print(dt, title, url, source)
        print(detail)
        break  # only the first matching item

     

  • 选一个自己感兴趣的主题,做类似的操作,为后面“爬取网络数据并进行文本分析”做准备。
    # Preparation for the later "crawl and analyse text" assignment:
    # crawl a personally chosen topic page and print each <h4> entry.
    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    gzccurl = 'http://www.lbldy.com/tag/gqdy/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    def getdetail(url):
        """Fetch a detail page and return its .show-content text
        (defined here in advance; not yet called below)."""
        resd = requests.get(url)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')
        return soupd.select('.show-content')[0].text

    for news in soup.select('h4'):
        print(news)
    

     

posted @ 2017-09-28 20:55  洪英杰  阅读(166)  评论(0编辑  收藏  举报