Use regular expressions to get the click count, and factor the code into functions

1. Use a regular expression to check whether an email address is correctly formed.

import re

e = '454181644@qq.com'
r = r'^\w+([._-]\w+)*@\w+(\.\w{2,3}){1,3}$'
print(re.match(r, e))
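
For a quick sanity check, the same pattern can be wrapped in a small helper; is_valid_email and the second sample address below are my own illustration, not part of the original exercise.

import re

def is_valid_email(addr):
    # fullmatch anchors the whole string, so ^ and $ are not needed in the pattern
    return re.fullmatch(r'\w+([._-]\w+)*@\w+(\.\w{2,3}){1,3}', addr) is not None

print(is_valid_email('454181644@qq.com'))   # True
print(is_valid_email('not-an-email@@qq'))   # False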

2. Use a regular expression to find all the phone numbers.

import re

tel = '版权所有:广州商学院   地址:广州市黄埔区九龙大道206号  学校办公室:020-82876130  招生电话:020-82872773'
# findall returns every match; search would only return the first one
numbers = re.findall(r'\d{3,4}-\d{6,8}', tel)
print(numbers)
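
If the area code and local number are needed separately, capture groups make findall return tuples; this variant is only an illustration of that behaviour.

import re

tel = '学校办公室:020-82876130  招生电话:020-82872773'
# with capture groups, findall yields (area code, local number) tuples
for area, number in re.findall(r'(\d{3,4})-(\d{6,8})', tel):
    print(area, number)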

3. Use a regular expression to tokenize English text: re.split(pattern, news)
import re

news = '''
An empty street,An empty house,A hole inside my heart,I'm all alone,The rooms are getting smaller,I wonder how,I wonder why,I wonder where they are,The days we had,The songs we sang together,Oh yeah,And oh my love,I'm holding on forever,Reaching for a love that seems so far,So I say a little prayer,And hope my dreams will take me there,Where the skies are blue to see you once again, my love
'''
# split on runs of whitespace and punctuation; the trailing + collapses
# consecutive delimiters so the result has no empty strings between words
words = re.split(r"[\s,.']+", news)
print(words)
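
A common follow-up is counting word frequencies over the tokens; the Counter sketch below is my own addition, run on a short sample for brevity.

import re
from collections import Counter

sample = "An empty street,An empty house,A hole inside my heart"
words = re.split(r"[\s,.']+", sample)
# skip empty tokens and ignore case when counting
freq = Counter(w.lower() for w in words if w)
print(freq.most_common(3))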

4. Use a regular expression to extract the news ID.

import re

url = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
a = re.match(r'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/(.*)\.html', url).group(1)
print(a)
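
The hard-coded date folder only works for this one article; a more general variant (my own, not from the original) keys on the digits right before ".html":

import re

url = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
# grab the trailing digits, whatever the date folder is
news_id = re.search(r'/(\d+)\.html$', url).group(1)
print(news_id)  # 9183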
 

5. Build the Request URL for the click count.

import re

url = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
a = re.match(r'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/(.*)\.html', url).group(1)
# substitute the news ID into the click-count API URL
srac = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(a)
print(srac)

6. Get the click count.

import requests

url = 'http://oa.gzcc.cn/api.php?op=count&id=9183&modelid=80'
srac = requests.get(url)
# the API returns a JavaScript snippet; strip everything around the number
print(srac.text.split('.html')[-1].lstrip("('").rstrip("');"))
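
The string surgery above implies the response body contains something like .html('1234');. Assuming that format holds, a regex extraction (my variant) is a little less fragile:

import re
import requests

url = 'http://oa.gzcc.cn/api.php?op=count&id=9183&modelid=80'
resp = requests.get(url)
# pull the digits out of the ".html('…')" call in the response body
m = re.search(r"\.html\('(\d+)'\)", resp.text)
print(int(m.group(1)))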

7. Combine steps 4, 5 and 6 into a single function: def getClickCount(newsUrl) (implemented in the script under step 8).

8. Wrap the code that fetches the news details in a function: def getNewDetail(newsUrl):

import requests
import re
from bs4 import BeautifulSoup
from datetime import datetime

url='http://news.gzcc.cn/html/xiaoyuanxinwen/'
res=requests.get(url)
res.encoding="utf-8"
soup=BeautifulSoup(res.text,"html.parser")

# Step 7: steps 4-6 combined into one function
def getClickCount(newsUrl):
    # the news ID sits between the date folder's slash and ".html"
    newsId = re.search(r'_(.*)\.html', newsUrl).group(1).split('/')[1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    # the API returns a JS snippet; strip the wrapper around the number
    return int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))

# Step 8: fetch and print the details of one news article
def getNewDetail(newsUrl):
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    print(t)  # t is the news title, set by the loop below
    print(newsUrl)
    info = soupd.select('.show-info')[0].text
    # pick each field out of the info line with a regex
    d = re.search('发布时间:(.*) \xa0\xa0 \xa0\xa0作者:', info).group(1)
    dt = datetime.strptime(d, '%Y-%m-%d %H:%M:%S')
    print('发布时间:{}'.format(dt))
    print('作者:' + re.search('作者:(.*)审核:', info).group(1))
    print('审核:' + re.search('审核:(.*)来源:', info).group(1))
    print('来源:' + re.search('来源:(.*)摄影:', info).group(1))
    print('摄影:' + re.search('摄影:(.*)点击', info).group(1))
    print(getClickCount(newsUrl))  # was getClickCount(a): a is a global, pass the parameter instead
    print('正文:' + soupd.select('.show-content')[0].text)

for news in soup.select("li"):
    if len(news.select(".news-list-title")) > 0:
        t=news.select('.news-list-title')[0].text
        a = news.select('a')[0].attrs['href']  # 新闻链接
        getNewDetail(a)
        break
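
As a quick standalone check, getClickCount can also be called directly on a single article URL; the printed value is simply whatever the counter currently reads.

print(getClickCount('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'))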

 
