Crawling the details of the news items on the campus news homepage, using regular expressions, with the logic split out into functions.

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re


# Fetch a page with the right encoding and return a parsed BeautifulSoup object
def get_soup(url):
    req = requests.get(url)
    req.encoding = 'utf-8'
    soup = BeautifulSoup(req.text, 'html.parser')
    return soup

def getDownNum(urls):
    # Look up the click count of each news page through the hits API
    down_num = list()
    for i in urls:
        html_id = re.search(r'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_(.*).html', i).group(1).split('/')[-1]
        down_url = 'http://oa.gzcc.cn/api.php?op=count&id=' + html_id + '&modelid=80'
        reqd = requests.get(down_url)
        down_num.append(re.search(r"\('#hits'\)\.html\('(.*)'\);", reqd.text).group(1))
    return down_num
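
For reference, the second regular expression above assumes the hits API answers with a small jQuery snippet of the form $('#hits').html('...'); a minimal sketch against a made-up response string:

# A minimal sketch, assuming the API returns a jQuery call like the sample below
# (the response string here is made up for illustration only)
sample_resp = "$('#hits').html('123');"
print(re.search(r"\('#hits'\)\.html\('(.*)'\);", sample_resp).group(1))  # prints 123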




# Fetch the news list page and collect the title and link of each news item
soup = get_soup('http://news.gzcc.cn/html/xiaoyuanxinwen/')
li_list = soup.select('li')
title = list()
a = list()
for new in li_list:
    if(len(new.select('.news-list-text'))>0):
        title.append(new.select('.news-list-text')[0].select('.news-list-title')[0].text)
        a.append(new.a.attrs['href'])

# Fetch each detail page and pull out the article body and the show-info line
info_list = list()
con_list = list()
for curl in a:
    con_soup = get_soup(curl)
    con_list.append(con_soup.select('#content')[0].text)
    info_list.append(con_soup.select('.show-info')[0].text.split("\xa0\xa0"))
# Collect the body text of every article
cs = list()
for i in range(len(con_list)):
    cs.append(con_list[i])

down_num = getDownNum(a)
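
The datetime import at the top is not used yet; one natural use is parsing the publish time out of the show-info fields. A minimal sketch, assuming a field of the form '发布时间:2018-04-04 11:24:07' (both the sample string and the format are assumptions, to be checked against the real page):

# A minimal sketch, assuming the publish-time field looks like the sample below;
# the sample string and the '%Y-%m-%d %H:%M:%S' format are assumptions, not taken from the page
sample_info = '发布时间:2018-04-04 11:24:07'
dt = datetime.strptime(sample_info.lstrip('发布时间:'), '%Y-%m-%d %H:%M:%S')
print(dt)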

# Print the title, link, info fields, and body of each news item
for i in range(len(info_list)):
    print('标题:'+title[i])
    print('链接:'+a[i])
    for j in range(len(info_list[i])):
        if(len(info_list[i][j])>0 and info_list[i][j]!=' '):
            if(j!=len(info_list[i])-1):
                print(info_list[i][j])
            else:
                # The last field is the click count; swap in the real value from the hits API
                print(info_list[i][j].rstrip('次'), down_num[i],'次')

    print(cs[i])
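
In the same spirit of splitting the crawl into functions, the detail-page parsing could also be pulled out into a helper of its own; a rough sketch, where the name get_news_detail is hypothetical and not part of the script above:

# A rough sketch of extracting the detail-page parsing into a function;
# get_news_detail is a hypothetical name, not from the script above
def get_news_detail(curl):
    con_soup = get_soup(curl)
    content = con_soup.select('#content')[0].text
    info = con_soup.select('.show-info')[0].text.split('\xa0\xa0')
    return content, info

# usage: content, info = get_news_detail(a[0])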

  

 
