爬取豆瓣电影
.: 匹配任意一个字符(从当前位置开始)
*: 匹配前面的内容任意多次(查找所有)
?: 非贪婪标志,找到第一个匹配后就不再继续往下找

.*?: 非贪婪匹配
.*: 贪婪匹配

(.*?): 提取括号内的数据

电影排名、电影url、电影名称、导演-主演-类型、电影评分、评价人数、电影简介
<div class="item">.*?<em class="">(.*?)</em>
.*?<a href="(.*?)">.*?<span class="title">(.*?)</span>
.*?导演:(.*?)</p>.*?<span class="rating_num".*?>(.*?)</span>
.*?<span>(.*?)人评价</span>.*?<span class="inq">(.*?)</span>


 1 ''''''
 2 '''
 3 https://movie.douban.com/top250?start=0&filter=
 4 https://movie.douban.com/top250?start=25&filter=
 5 https://movie.douban.com/top250?start=50&filter=
 6 
 7 1.发送请求
 8 2.解析数据
 9 3.保存数据
10 '''
11 import requests
12 import re
13 
# Crawler step 1: send the request
def get_page(base_url):
    """Fetch *base_url* with a plain GET and return the requests Response."""
    return requests.get(base_url)
19 
# Crawler step 2: parse the list-page HTML
def parse_index(text):
    """Extract every movie entry from a Top250 list page.

    Returns a list of 7-tuples:
    (rank, url, title, director/cast, score, vote count, one-line quote).
    """
    # re.S lets '.' cross newlines, since each entry spans several lines.
    item_pattern = re.compile(
        '<div class="item">.*?<em class="">(.*?)</em>'
        '.*?<a href="(.*?)">.*?<span class="title">(.*?)</span>'
        '.*?导演:(.*?)</p>.*?<span class="rating_num".*?>(.*?)</span>'
        '.*?<span>(.*?)人评价</span>.*?<span class="inq">(.*?)</span>',
        re.S,
    )
    return item_pattern.findall(text)
26 
# Crawler step 3: persist one record
def save_data(data):
    """Append *data* to douban.txt (UTF-8), creating the file if needed."""
    with open('douban.txt', 'a', encoding='utf-8') as out:
        out.write(data)
31 
# Entry point (type "main" + Enter in the IDE to generate this guard)
if __name__ == '__main__':
    # Top250 is paginated 25 movies per page: start offsets 0, 25, ..., 225.
    for page in range(10):
        base_url = f'https://movie.douban.com/top250?start={page * 25}&filter='
        print(base_url)

        # 1. send the request
        response = get_page(base_url)

        # 2. parse the page text into movie tuples
        movie_list = parse_index(response.text)

        # 3. format and save every movie record
        for movie in movie_list:
            # Unpack: rank, url, title, director/cast, score, votes, quote.
            v_top, v_url, v_name, v_daoyan, v_point, v_num, v_desc = movie
            movie_content = f'''
            电影排名: {v_top}
            电影url: {v_url}
            电影名称: {v_name}
            导演主演: {v_daoyan}
            电影评分: {v_point}
            评价人数: {v_num}
            电影简介: {v_desc}
            \n
            '''

            print(movie_content)

            # Append the formatted record to douban.txt.
            save_data(movie_content)

      爬取视频:

 1 ''''''
 2 '''
 3 视频选项:
 4     1.梨视频
 5 '''
 6 import requests
 7 
 8 # 往视频源地址发送请求
 9 response = requests.get(
10     'https://video.pearvideo.com/mp4/adshort/20190625/cont-1570302-14057031_adpkg-ad_hd.mp4')
11 
12 # 打印二进制流,比如图片、视频等数据
13 print(response.content)
14 
15 # 保存视频到本地
16 with open('视频.mp4', 'wb') as f:
17     f.write(response.content)
18 
19 '''
20 1、先往梨视频主页发送请求
21     https://www.pearvideo.com/
22     
23     解析获取所有视频的id:
24         video_1570302
25         
26         re.findall()
27         
28 
29 2、获取视频详情页url:
    惊险!男子抢上地铁滑倒,脚被夹进去了
31     https://www.pearvideo.com/video_1570302
32     揭秘坎儿井
33     https://www.pearvideo.com/video_1570107
34 '''
import requests
import re  # regex, used to parse the HTML text

# 1. Request the pearvideo home page.
response = requests.get('https://www.pearvideo.com/')

# Extract every video id from the home page, e.g. "1570302".
# findall args: pattern, text to search, flags (re.S lets '.' match newlines).
res_list = re.findall('<a href="video_(.*?)"', response.text, re.S)

# 2. Build each detail-page url and download its video.
for v_id in res_list:
    detail_url = 'https://www.pearvideo.com/video_' + v_id

    # Fetch the detail page; it embeds the real mp4 source url in its JS.
    response = requests.get(url=detail_url)

    # Guard: [0] on an empty findall result would raise IndexError when the
    # page layout differs (ad slots, removed videos) — skip instead of crash.
    src_matches = re.findall('srcUrl="(.*?)"', response.text, re.S)
    if not src_matches:
        print('no video source found, skipping:', detail_url)
        continue
    video_url = src_matches[0]
    print(video_url)

    # Video title, used as the local file name; fall back to the id.
    name_matches = re.findall('<h1 class="video-tt">(.*?)</h1>', response.text, re.S)
    video_name = name_matches[0] if name_matches else v_id
    # Strip characters that are illegal in file names (notably on Windows).
    video_name = re.sub(r'[\\/:*?"<>|]', '', video_name)
    print(video_name)

    # 3. Download the binary stream and write it to disk.
    v_response = requests.get(video_url)

    with open('%s.mp4' % video_name, 'wb') as f:
        f.write(v_response.content)
        print(video_name, '视频爬取完成')