协程-爬虫示例

from gevent import monkey;monkey.patch_all()#打补丁,使gevent识别I/O阻塞进而实现协程
import requests,re,gevent,time


def get_info(url):
    """Fetch the page at *url* and return its HTML text.

    Prints the length of the downloaded body as a crude progress marker.
    """
    response = requests.get(url)
    page = response.text
    print(len(page))
    return page


def prase(res):
    """Parse movie titles and lead actors out of a Maoyan board page and
    append one line per movie to movie_info.txt.

    res: HTML text of a Maoyan board page (as returned by get_info).
    """
    # Each movie's title attribute appears twice in the page markup,
    # so keep only every other match.
    res_name = re.findall(r'title="(?P<name>\S+\s?\S*?)"', res)
    move_name = [m.split('"')[0] for m in res_name[::2]]
    # "主演:" = "lead actor:"; \S+ grabs the name plus any trailing
    # markup, which is stripped at the '<' below.
    move_actor = re.findall(r'主演:\S+', res)
    # Fixes vs. the original: open the file ONCE instead of re-opening it
    # on every loop iteration; use an explicit utf-8 encoding so writing
    # Chinese text does not depend on the platform's default codec; and
    # pair names with actors via zip() so a length mismatch between the
    # two lists no longer raises IndexError.
    with open('movie_info.txt', 'a', encoding='utf-8') as f:
        for name, actor in zip(move_name, move_actor):
            f.write('电影名:%s , %s' % (name, actor.split('<')[0]))
            f.write('\n')

# Maoyan movie-board pages to crawl; each one is fetched and then parsed
# into movie_info.txt.
urls = [
    'http://maoyan.com/board/7',
    'http://maoyan.com/board/6',
    'http://maoyan.com/board/1',
    'http://maoyan.com/board/2',
    'http://maoyan.com/board/4',
]

if __name__ == '__main__':
    start = time.time()
    # BUG FIX: the original did gevent.spawn(prase, get_info(url)),
    # which evaluates get_info(url) eagerly in the main greenlet — every
    # HTTP request ran one after another and only the parsing step was
    # actually spawned (and only the last greenlet was joined).  Spawn a
    # worker that does BOTH the fetch and the parse, so the downloads
    # genuinely overlap, and join all of them before timing.
    tasks = [gevent.spawn(lambda u: prase(get_info(u)), url) for url in urls]
    gevent.joinall(tasks)
    print('解析结束', time.time() - start)
脚本逻辑比较直观,直接阅读代码即可;关键点在于把阻塞的网络请求交给协程并发执行。

协程确实运行很快、轻量化,节约 CPU 与内存;它通过在 I/O 等待时切换任务,在单线程内实现高并发(是并发而非真正的并行)。

posted @ 2017-08-31 15:55  风火林山  阅读(97)  评论(0编辑  收藏  举报