Web Scraping with Coroutines

This uses asyncio, the coroutine library that ships with Python itself.

The idea of coroutines is to get the effect of several programs running at the same time inside a single thread.

When a program performs many I/O operations, waiting on them drags its running efficiency down badly.

To improve efficiency when scraping with Python's coroutine library, whenever an I/O operation inside one function would otherwise stall us,

a coroutine lets the CPU keep working instead of stopping: it goes off to run other code, and switches again when that code also hits a block.
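To make the switching concrete, here is a minimal standalone sketch (separate from the scraper below): two coroutines take turns whenever one of them hits a simulated I/O wait, and their output interleaves even though everything runs in a single thread.

```

import asyncio

async def worker(name, delay):
    # Every await is a point where this coroutine hands control back
    # to the event loop, which then runs whichever task is ready.
    for step in range(3):
        print(f'{name}: step {step}')
        await asyncio.sleep(delay)  # stands in for a real I/O wait

async def demo():
    # Run both workers concurrently; their prints interleave A, B, A, B, ...
    await asyncio.gather(worker('A', 0.1), worker('B', 0.1))

asyncio.run(demo())

```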

The scraper code is as follows:

```

import requests
from lxml import etree
import asyncio

def base_url(i):
    # Page 1 of the listing has no numeric suffix; later pages do.
    if i == 1:
        url = 'https://www.qjnu.edu.cn/channels/9260.html'
    else:
        url = 'https://www.qjnu.edu.cn/channels/9260_' + str(i) + '.html'
    return url

def trs(url):
    # Fetch a page and parse it into an lxml element tree.
    request = requests.get(url)
    request.encoding = 'utf-8'
    html = etree.HTML(request.text)
    return html

async def down_1(html):
    # Append every link whose title contains the keyword to one file.
    name_list = html.xpath('//div[@class="media"]/h4/a/text()')
    url_list = html.xpath('//div[@class="media"]/h4/a/@href')
    for i in range(len(name_list)):
        if key in name_list[i]:
            with open('学校党员主题网址.txt', 'a', encoding='UTF-8') as fp:
                fp.write(url_list[i] + '\n')

async def down_2(html):
    # For every matching title, fetch the article page and save its text.
    name_list = html.xpath('//div[@class="media"]/h4/a/text()')
    url_list = html.xpath('//div[@class="media"]/h4/a/@href')
    for i in range(len(name_list)):
        if key in name_list[i]:
            html = trs(url_list[i])
            tex_list = html.xpath('//div[@class="field-item even"]//p/span/text()')
            name = name_list[i]
            with open(name + '.txt', 'w', encoding='UTF-8') as fp:
                fp.write(str(tex_list))

async def main(html):
    # Schedule both downloads as tasks and wait for them all to finish.
    tasks = [
        asyncio.create_task(down_1(html)),
        asyncio.create_task(down_2(html))
    ]
    await asyncio.wait(tasks)

if __name__ == '__main__':
    page = int(input('请输入需要爬取的页数:'))
    key = input('请输入要查找的关键词:')
    for i in range(1, page + 1):
        url = base_url(i)
        html = trs(url)
        asyncio.run(main(html))

```
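One caveat: requests.get is a blocking call, and neither down_1 nor down_2 ever awaits anything, so the two tasks above actually run one after the other rather than overlapping. Here is a minimal sketch of one way to let the fetch yield to the event loop, using asyncio.to_thread (available since Python 3.9); fetch_html is a hypothetical helper name, not part of the code above:

```

import asyncio
import requests
from lxml import etree

async def fetch_html(url):
    # Run the blocking requests call in a worker thread, so the event
    # loop can run other coroutines while this one waits on the network.
    response = await asyncio.to_thread(requests.get, url)
    response.encoding = 'utf-8'
    return etree.HTML(response.text)

```

down_2 could then use `html = await fetch_html(url_list[i])` in place of the blocking call.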

--------------------------------------------------------------------------------------------------------

After revising it, the logic is now sound,

and the code runs end to end.

Starting from the code above, I made one small improvement: it saves one function call (the old trs is folded into base_url).

I have not figured out how to simplify the other functions any further.

The revised code is as follows:

```

import requests
from lxml import etree
import asyncio

def base_url(i, url):
    # When url is the empty list, build the listing URL for page i;
    # otherwise fetch the article URL that was passed in.
    if url == []:
        if i == 1:
            url = 'https://www.qjnu.edu.cn/channels/9260.html'
        else:
            url = 'https://www.qjnu.edu.cn/channels/9260_' + str(i) + '.html'
    request = requests.get(url)
    request.encoding = 'utf-8'
    html = etree.HTML(request.text)
    return html

async def down_1(html):
    # Append every link whose title contains the keyword to one file.
    name_list = html.xpath('//div[@class="media"]/h4/a/text()')
    url_list = html.xpath('//div[@class="media"]/h4/a/@href')
    for i in range(len(name_list)):
        if key in name_list[i]:
            with open('学校党员主题网址.txt', 'a', encoding='UTF-8') as fp:
                fp.write(url_list[i] + '\n')

async def down_2(html):
    # For every matching title, fetch the article page and save its text.
    name_list = html.xpath('//div[@class="media"]/h4/a/text()')
    url_list = html.xpath('//div[@class="media"]/h4/a/@href')
    for i in range(len(name_list)):
        if key in name_list[i]:
            url = url_list[i]
            html = base_url(i, url)
            tex_list = html.xpath('//div[@class="field-item even"]//p/span/text()')
            with open(name_list[i] + '.txt', 'w', encoding='UTF-8') as fp:
                fp.write(str(tex_list))

async def main(html):
    # Schedule both downloads as tasks and wait for them all to finish.
    tasks = [
        asyncio.create_task(down_1(html)),
        asyncio.create_task(down_2(html))
    ]
    await asyncio.wait(tasks)

if __name__ == '__main__':
    page = int(input('请输入需要爬取的页数:'))
    key = input('请输入要查找的关键词:')
    for i in range(1, page + 1):
        url = []  # empty list means: build the page URL from i
        html = base_url(i, url)
        asyncio.run(main(html))

```
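One more simplification worth sketching (untested against the site, built on the functions above): instead of creating a fresh event loop for every page with asyncio.run inside the for loop, run the loop once and schedule one pair of tasks per page:

```

async def main(pages):
    # Collect one down_1/down_2 pair per listing page, then wait on all.
    tasks = []
    for i in range(1, pages + 1):
        html = base_url(i, [])  # still a blocking fetch, as above
        tasks.append(asyncio.create_task(down_1(html)))
        tasks.append(asyncio.create_task(down_2(html)))
    await asyncio.wait(tasks)

if __name__ == '__main__':
    page = int(input('请输入需要爬取的页数:'))
    key = input('请输入要查找的关键词:')
    asyncio.run(main(page))

```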