# Crawl web pages with a process pool, hand the results to a callback for processing,
# and extract the data with regular expressions (the re module).
import re
from urllib.request import urlopen
from multiprocessing import Pool

def get_page(url, pattern):
    # Runs in a worker process: fetch the page and return the compiled pattern
    # together with the page content, so the callback knows how to parse it.
    response = urlopen(url).read().decode('utf-8')
    return pattern, response

def parse_page(info):
    # Callback: runs in the main process with whatever get_page returned.
    pattern, page_content = info  # unpack the compiled pattern and the page content
    res = re.findall(pattern, page_content)  # each match is a tuple of the four captured groups
    for item in res:
        dic = {
            'index': item[0].strip(),
            'title': item[1].strip(),
            'actor': item[2].strip(),
            'time': item[3].strip(),
        }
        print(dic)
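
# Sketch only, not called by the script below: the same crawl driven without a callback.
# Here the main process collects each AsyncResult itself and calls parse_page on the
# value, which makes the callback mechanism above easier to compare against. The name
# crawl_without_callback is illustrative and not part of the original script.
def crawl_without_callback(url_to_pattern):
    with Pool() as pool:
        tasks = [pool.apply_async(get_page, args=(url, pattern))
                 for url, pattern in url_to_pattern.items()]
        for task in tasks:
            parse_page(task.get())  # parse in the main process instead of via a callback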

if __name__ == '__main__':
    regex = r'<dd>.*?<.*?class="board-index.*?>(\d+)</i>.*?title="(.*?)".*?class="movie-item-info".*?<p class="star">(.*?)</p>.*?<p class="releasetime">(.*?)</p>'
    pattern1 = re.compile(regex, re.S)  # compile once; re.S lets '.' match newlines, since each entry spans several lines of HTML
    url_dic = {'http://maoyan.com/board/7': pattern1}  # each url maps to the pattern that parses it
    p = Pool()
    res_l = []
    for url, pattern in url_dic.items():
        # get_page runs in a pool worker; its return value is handed to parse_page in the main process.
        res = p.apply_async(get_page, args=(url, pattern), callback=parse_page)
        res_l.append(res)
    for i in res_l:
        i.get()  # block until every task (and its callback) has finished
    p.close()
    p.join()
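# For reference, each tuple that re.findall yields in parse_page holds the four groups
# in order (index, title, star line, release line); parse_page strips each value and
# prints a dict. The values below are hypothetical placeholders, not real Maoyan data:
#   ('1', 'Example Title', ' Actor A, Actor B ', 'Release: 2018-01-01')
#   -> {'index': '1', 'title': 'Example Title', 'actor': 'Actor A, Actor B', 'time': 'Release: 2018-01-01'}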