# High-performance async crawler — multi-task async coroutine template

import asyncio
import aiohttp

# Request headers (fill in and pass to the request if needed)
# headers = {}

async def get_request(url):
    """Fetch *url* asynchronously and return the response body as text.

    Args:
        url: The URL to request.

    Returns:
        str: The decoded response body.
    """
    # One session per call keeps this function self-contained; for many URLs
    # a single shared ClientSession would be more efficient.
    async with aiohttp.ClientSession() as sess:
        # sess.get() already returns an async context manager — awaiting it
        # before `async with` (as the original did) is redundant.
        async with sess.get(url=url) as response:
            # Use `await response.read()` instead if raw bytes are needed.
            page_text = await response.text()
            print(page_text)
            return page_text


#回调函数
def parse(task):
    """Done-callback: extract the page text from a finished task and log it."""
    fetched_text = task.result()
    print('返回值', fetched_text)

async def _crawl(urls):
    """Schedule one fetch task per URL and wait for all of them to finish.

    Args:
        urls: Iterable of URL strings to fetch concurrently.
    """
    tasks = []
    for url in urls:
        # create_task requires a running event loop, which asyncio.run provides.
        task = asyncio.create_task(get_request(url))
        task.add_done_callback(parse)  # fires when this fetch completes
        tasks.append(task)
    await asyncio.wait(tasks)


if __name__ == '__main__':
    urls = [
        'https://www.cnblogs.com/bobo-zhang/p/10735140.html',
        'https://www.cnblogs.com/bobo-zhang/p/9686978.html',
    ]
    # asyncio.run() replaces the deprecated get_event_loop() /
    # run_until_complete() pattern (DeprecationWarning since Python 3.10).
    asyncio.run(_crawl(urls))
# Originally posted 2020-06-07 13:05 (blog footer)