Python Training for Test Development: Scraping Sina Weibo Comments and Extracting Target Data (Technical)

In earlier posts I shared several Sina Weibo automation scripts. Here we move on to a new requirement with the following functionality:

1. Log in to Weibo
2. Fetch the pages of the comment inbox
3. Use regular expressions to extract the username, comment time, and comment content
4. Store the extracted content in a database (the outbox table sketched just below)
5. Count the number of comments per user
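The scripts below write to a MySQL database named weiboanalysis with a table called outbox. The original post never shows the table definition, so the following one-off setup script is only a sketch: the column types and the auto-increment uid are assumptions inferred from how the table is read later on.

# -*- coding: utf-8 -*-
# One-off setup sketch for the assumed outbox table (column types are guesses).
import MySQLdb

conn = MySQLdb.connect(host='localhost', user='root', passwd='root',
                       db='weiboanalysis', charset='utf8')
curs = conn.cursor()
curs.execute("""
    CREATE TABLE IF NOT EXISTS outbox (
        uid   INT AUTO_INCREMENT PRIMARY KEY,  -- row id, assumed auto-increment
        uname VARCHAR(100),                    -- commenter screen name
        time  VARCHAR(50),                     -- comment time as scraped text
        text  VARCHAR(1000)                    -- comment body
    ) DEFAULT CHARSET=utf8
""")
conn.commit()
curs.close()
conn.close()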

# -*- coding: utf-8 -*-
import requests
import base64
import re
import urllib
import rsa
import json
import binascii
import MySQLdb

class Userlogin:
    def userlogin(self,username,password,pagecount):
        session = requests.Session()
        url_prelogin = 'http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=&rsakt=mod&client=ssologin.js(v1.4.5)&_=1364875106625'
        url_login = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.5)'

        #get servertime,nonce, pubkey,rsakv
        resp = session.get(url_prelogin)
        json_data  = re.search('\((.*)\)', resp.content).group(1)
        data       = json.loads(json_data)
        servertime = data['servertime']
        nonce      = data['nonce']
        pubkey     = data['pubkey']
        rsakv      = data['rsakv']

        # calculate su: base64-encode the URL-quoted username
        su  = base64.b64encode(urllib.quote(username))

        # calculate sp: RSA-encrypt "servertime\tnonce\npassword" with the server's public key, hex-encoded
        rsaPublickey= int(pubkey,16)
        key = rsa.PublicKey(rsaPublickey,65537)
        message = str(servertime) +'\t' + str(nonce) + '\n' + str(password)
        sp = binascii.b2a_hex(rsa.encrypt(message,key))
        postdata = {
                            'entry': 'weibo',
                            'gateway': '1',
                            'from': '',
                            'savestate': '7',
                            'userticket': '1',
                            'ssosimplelogin': '1',
                            'vsnf': '1',
                            'vsnval': '',
                            'su': su,
                            'service': 'miniblog',
                            'servertime': servertime,
                            'nonce': nonce,
                            'pwencode': 'rsa2',
                            'sp': sp,
                            'encoding': 'UTF-8',
                           'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
                            'returntype': 'META',
                            'rsakv' : rsakv,
                            }
        resp = session.post(url_login,data=postdata)
        # print resp.headers
        login_url = re.findall('replace\(\'(.*)\'\)',resp.content)
        #
        respo = session.get(login_url[0])
        uid = re.findall('"uniqueid":"(\d+)",',respo.content)[0]
        url = "http://weibo.com/u/"+uid
        respo = session.get(url)
        # print respo.content  # HTML of the logged-in home page
        # Login finished; everything below scrapes the comment inbox.
        ########################################################################
        # Open the database connection (the outbox table must already exist)
        conn = MySQLdb.connect(host='localhost', user='root', passwd='root',
                               db='weiboanalysis', charset='utf8')
        curs = conn.cursor()
        curs.execute('delete from outbox')

        myheaders = {}
        myheaders['set-cookie'] = resp.headers['set-cookie']
        myheaders['Referer'] = 'http://weibo.com/comment/inbox?leftnav=1&wvr=5'
        # print myheaders

        # Scrape the comment inbox page by page
        for i in range(1, int(pagecount) + 1):
            forwardUrl = """http://weibo.com/comment/inbox?topnav=1&wvr=5&f=1&page=%d""" % i
            r = session.post(forwardUrl, headers=myheaders)
            page = r.content
            # print page

            # Extract the usernames into the pagename list
            pagename = re.findall('<a\s*title=[^>]*usercard[^>]*>', page)
            for n in range(0, len(pagename)):
                pagename[n] = pagename[n].split('\\"')[1]

            # Extract the comment times into the pagetime list
            pagetime = re.findall('WB_time S_func2[^>]*>[^>]*>', page)
            for t in range(0, len(pagetime)):
                pagetime[t] = pagetime[t].split('>')[1].split('<')[0]

            # Extract the comment bodies into pagecont
            pagecont = {}
            pagecontent = re.findall(r'<p class=\\\"detail\\(.*?)<\\\/p>', page)
            for t in range(0, len(pagecontent)):
                a = pagecontent[t].split("<\/a>")
                b = a[len(a) - 1]
                c = re.sub(r"<img(.*?)>", '[表情]', b)   # replace emoticon images with a placeholder
                d = re.sub(r"<span(.*?)span>", '', c)    # drop span wrappers
                pagecont[t] = re.sub(r"\\t|:|:", '', d)  # strip the trailing \t and the leading colon

            # One row per comment
            for index in range(0, len(pagetime)):
                sql = """insert into outbox(uname,time,text)
                         values('%s','%s','%s')""" % (pagename[index], pagetime[index], pagecont[index])
                curs.execute(sql)

        conn.commit()
        curs.close()
        conn.close()
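With the class in place, a small driver script can run the whole scrape. This is only a hypothetical invocation: the module name, account, password, and page count below are placeholders, not values from the original post.

# -*- coding: utf-8 -*-
# Hypothetical driver: log in and scrape the first 5 pages of the comment inbox.
from weibo_login import Userlogin   # assumed module/file name for the class above

if __name__ == '__main__':
    Userlogin().userlogin('your_account@example.com', 'your_password', 5)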

Reading the data back out of the database is mainly to verify that the scraping worked.

# -*- encoding:utf-8 -*-
__author__ = 'lanzao'
import MySQLdb

class OutboxAnalysis:

    def getMost(self,num):  # list the top num commenters by comment count
        conn =  MySQLdb.connect(host='localhost',user='root',passwd='root',db='weiboanalysis',charset='utf8')
        curs = conn.cursor()
        sql="""
        select uid,uname,count(uname) as count
        from outbox
        group by uname
        order by count(uname) desc
        limit %d;
        """% int(num)
        curs.execute(sql)
        conn.commit()
        print "****************** Comment count ranking ************************"
        for item in curs.fetchall():
            print item[1]+" ",str(item[2])+" time(s)"
        print "*******************************************************"
        curs.close()
        conn.close()

    def getUser(self,user):  # list every comment from a given user
        conn =  MySQLdb.connect(host='localhost',user='root',passwd='root',db='weiboanalysis',charset='utf8')
        curs = conn.cursor()
        curs.execute("""select * from outbox where uname='%s'"""%user)
        print "*****************************************"
        for item in curs.fetchall():
            print item[1]+"   ",item[2]+"   ",item[3]
        print "*****************************************"
        curs.close()
        conn.close()
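A quick verification run against the class above might look like the snippet below; 'some_user' is just a placeholder screen name, not one from the original post.

# -*- encoding:utf-8 -*-
# Hypothetical verification calls (OutboxAnalysis is the class defined above).
analysis = OutboxAnalysis()
analysis.getMost(10)            # top 10 commenters by number of comments
analysis.getUser('some_user')   # every stored comment from one user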

 

On top of these classes, a data-driven module can read test cases from an Excel sheet and drive the code above to run the whole flow as an automated test; a sketch of that idea follows.
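As a rough sketch, assume a workbook cases.xls where each row holds an account, a password, and a page count, and that the two classes above are importable. The file name, column layout, module names, and the xlrd dependency are all assumptions rather than part of the original code.

# -*- coding: utf-8 -*-
# Hypothetical data-driven runner: each spreadsheet row is one test case.
# Assumed columns: 0 = account, 1 = password, 2 = number of pages to scrape.
import xlrd
from weibo_login import Userlogin            # assumed module names for the
from outbox_analysis import OutboxAnalysis   # two classes shown above

def run_cases(xls_path='cases.xls'):
    book = xlrd.open_workbook(xls_path)
    sheet = book.sheet_by_index(0)
    for row in range(1, sheet.nrows):                  # row 0 assumed to be a header
        account = sheet.cell_value(row, 0)
        password = sheet.cell_value(row, 1)
        pages = int(sheet.cell_value(row, 2))
        Userlogin().userlogin(account, password, pages)
        OutboxAnalysis().getMost(10)                   # print the ranking after each case

if __name__ == '__main__':
    run_cases()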

posted @ 2015-11-16 15:40  北京茑萝信息