Web Scraping Notes, Day 02

Advanced requests usage

SSL certificate verification

'''What's the difference between HTTPS and HTTP?'''
    HTTPS = HTTP + SSL/TLS
    Explanation: traffic that used to travel as plaintext is encrypted in transit. The server holds the private key, and its certificate is signed by a certificate authority (CA) whose root certificates are built into the browser; a site whose certificate is not signed by a browser-trusted CA triggers an "insecure" warning.
# 1.1 Skip certificate verification
import requests
response = requests.get('https://www.12306.cn', verify=False)  # skip verification: prints a warning but returns 200
print(response.status_code)
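If you don't want the warning that verify=False triggers, urllib3 (the library requests is built on) can silence it:

import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)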

# 1.2 Carry a certificate manually
import requests
response = requests.get('https://www.12306.cn', cert=('/path/server.crt', '/path/key'))
print(response.status_code)

Using proxies (important)

Send HTTP requests through a proxy so that, from the target site's point of view, the request comes from the proxy's IP rather than your own; a sketch follows.
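A minimal sketch of routing a request through a proxy; the proxy address below is a placeholder, substitute a real one:

import requests

# proxies maps a URL scheme to a proxy address (placeholder address below)
proxies = {
    'http': 'http://127.0.0.1:8888',
    'https': 'http://127.0.0.1:8888',
}
res = requests.get('http://httpbin.org/ip', proxies=proxies)
print(res.text)  # httpbin echoes back the IP it saw, i.e. the proxy's IP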

Timeout setting (timeout)

response = requests.get('https://www.baidu23.com', timeout=3)  # give up after 3 seconds
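timeout also accepts a (connect, read) tuple when the two limits should differ:

import requests
# allow 3 seconds to establish the connection and 10 seconds to read the response
response = requests.get('https://www.baidu.com', timeout=(3, 10))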

Exception handling

import requests
from requests.exceptions import *  # browse requests.exceptions for the full list of exception types
try:
    r = requests.get('http://www.baidu.com', timeout=0.00001)
except ReadTimeout:
    print('read timed out')
except ConnectionError:  # network unreachable
    print('connection error')
except Timeout:  # parent of ReadTimeout/ConnectTimeout, so it comes after the subclasses
    print('timed out')
except RequestException:  # base class of all requests exceptions; the catch-all goes last
    print('request failed')
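Transport errors aside, 4xx/5xx status codes do not raise by themselves; response.raise_for_status() converts them into an HTTPError (also defined in requests.exceptions):

import requests
from requests.exceptions import HTTPError

try:
    r = requests.get('http://httpbin.org/status/404')
    r.raise_for_status()  # raises HTTPError for 4xx/5xx responses
except HTTPError as e:
    print('HTTP error:', e)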

Uploading files

import requests

files = {'file': open('mn.jpg', 'rb')}  # form field name -> file object
response = requests.post('http://httpbin.org/post', files=files)
print(response.status_code)
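To control the uploaded filename and MIME type, requests also accepts a (filename, fileobj, content_type) tuple:

import requests

files = {'file': ('mn.jpg', open('mn.jpg', 'rb'), 'image/jpeg')}
response = requests.post('http://httpbin.org/post', files=files)
print(response.status_code)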

Building a proxy pool

'''An open-source proxy pool on GitHub; clone the code and run it locally'''
    How it works: a crawler scrapes free-proxy sites, validates the proxies it collects, and stores the working ones locally
    A Flask web backend exposes an endpoint that returns a random usable proxy
    https://github.com/jhao104/proxy_pool
1 git clone https://github.com/jhao104/proxy_pool.git
2 Create a virtual environment and install the dependencies: pip install -r requirements.txt
3 Edit the config file settings.py (the Redis server must be running)
        # API service settings
        HOST = "0.0.0.0"               # bind address
        PORT = 5000                    # listening port
        # database settings
        DB_CONN = 'redis://127.0.0.1:8888/0'
        # configure the ProxyFetcher
        PROXY_FETCHER = [
            "freeProxy01",
            "freeProxy02",
        ]
4 Start the crawler and the web service
        # start the scheduler
        python proxyPool.py schedule
        # start the web API service
        python proxyPool.py server

5 Fetch a random proxy
        127.0.0.1:5000/get
import requests

res = requests.get('http://127.0.0.1:5000/get/').json()
if res['https']:
    http = 'https'
else:
    http = 'http'

# the proxies value needs the scheme prefix, e.g. 'http://ip:port'
proxie = {
    http: http + '://' + res['proxy']
}
res = requests.get('https://www.cnblogs.com/konghuanxi/p/16921862.html', proxies=proxie)
print(res.status_code)

Getting the client's IP in a Django backend

# a Django view that returns the caller's IP address
from django.http import HttpResponse

def ip_test(request):
    # read the client IP from the request metadata
    ip = request.META.get('REMOTE_ADDR')
    return HttpResponse('Your IP is: %s' % ip)
# deploy this on a cloud server
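Note: behind a reverse proxy such as nginx, REMOTE_ADDR becomes the proxy's address; the original client IP (if the proxy forwards it) lives in the X-Forwarded-For header. A sketch assuming the proxy sets that header:

from django.http import HttpResponse

def ip_test(request):
    # X-Forwarded-For can be a comma-separated chain: client, proxy1, proxy2, ...
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    ip = forwarded.split(',')[0].strip() if forwarded else request.META.get('REMOTE_ADDR')
    return HttpResponse('Your IP is: %s' % ip)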

# locally, request it through a proxy and check whether the proxy's IP comes back
import requests

res = requests.get('http://127.0.0.1:5010/get/').json()
# same scheme check as before, written as a conditional expression
http = ('https' if res['https'] else 'http')
proxie = {
    http: http + '://' + res['proxy']
}
print(proxie)
# a server running only on the local machine is unreachable from outside; use an intranet tunnel or deploy to a real server
# res = requests.get('http://192.168.1.143:8000/ip/', proxies=proxie)
# res = requests.get('https://46b3k95600.zicp.fun/ip/', proxies=proxie)  # did not work
res = requests.get('http://101.133.225.166/ip/', proxies=proxie)
print(res.text)
# if the proxy is unusable, fall back to requesting without it, as sketched below
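One way to implement that fallback: catch the failure and retry without the proxy (a sketch reusing the proxie dict from the code above):

from requests.exceptions import RequestException

url = 'http://101.133.225.166/ip/'
try:
    res = requests.get(url, proxies=proxie, timeout=5)
except RequestException:
    res = requests.get(url, timeout=5)  # proxy failed; request directly instead
print(res.text)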

Scraping videos from a site

import requests
import re

res = requests.get('https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=1&start=1')
# print(res.text)

video_list = re.findall('<a href="(.*?)" class="vervideo-lilink actplay">', res.text)

# print(video_list)   # ['video_1665251', 'video_1665120', 'video_1664540', ]
for video in video_list:
    # video_url = 'https://www.pearvideo.com/' + video
    # print(video_url)
    # res = requests.get(video_url)
    # print(res.text)  # the mp4 address is not in the page html; it is loaded via ajax
    # break
    # request https://www.pearvideo.com/videoStatus.jsp?contId=<id>&mrd=<random> to get the video address
    video_id = video.split('_')[-1]  # video_1665251  ---> 1665251
    header = {
        'Referer': 'https://www.pearvideo.com/%s' % video  # the current page; the api rejects requests without it
    }
    # res = requests.get('https://www.pearvideo.com/videoStatus.jsp?contId=%s&mrd=0.6761335369801458' % video_id,
    #                    headers=header)
    # print(res.text)

    '''
    Sample response:
    {
        "resultCode": "1",
        "resultMsg": "success", "reqId": "747490b6-9483-48df-a1ca-d5b06e4e69f8",
        "systemTime": "1669288149056",
        "videoInfo": {"playSta": "1",
                      "video_image": "https://image2.pearvideo.com/cont/20200415/cont-1669179-12360179.jpeg",
                      "videos": {"hdUrl": "", "hdflvUrl": "", "sdUrl": "", "sdflvUrl": "",
                                 "srcUrl": "https://video.pearvideo.com/mp4/adshort/20200415/1669288149056-15086283_adpkg-ad_hd.mp4"}}
    }
    '''
    res = requests.get('https://www.pearvideo.com/videoStatus.jsp?contId=%s&mrd=0.6761335369801458' % video_id,
                       headers=header).json()
    real_mp4_url = res['videoInfo']['videos']['srcUrl']
    # print(real_mp4_url)
    # srcUrl as returned does not play, e.g. https://video.pearvideo.com/mp4/adshort/20200320/1669288292507-15029369_adpkg-ad_hd.mp4
    # replacing the systemTime prefix of the filename with 'cont-<video_id>' yields the playable address,
    # e.g. https://video.pearvideo.com/mp4/adshort/20200326/cont-1664540-15042623_adpkg-ad_hd.mp4
    real_mp4_url = real_mp4_url.replace(real_mp4_url.rsplit('/', 1)[-1].split('-')[0], 'cont-%s' % video_id)
    print(real_mp4_url)
    # download to disk
    res = requests.get(real_mp4_url)
    with open('./video/%s.mp4' % video_id, 'wb') as f:
        for line in res.iter_content():
            f.write(line)
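For larger videos, the download step can stream to disk instead of buffering the whole file in memory; a sketch of that last step using requests' stream mode:

res = requests.get(real_mp4_url, stream=True)
with open('./video/%s.mp4' % video_id, 'wb') as f:
    for chunk in res.iter_content(chunk_size=1024 * 64):  # write 64 KB at a time
        f.write(chunk)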

Multithreaded video scraping

import requests
from threading import Thread
import re

res = requests.get('https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=1&start=1')
# print(res.text)
video_list = re.findall('<a href="(.*?)" class="vervideo-lilink actplay">', res.text)


def task(video_list):
    for video in video_list:
        video_id = video.split('_')[-1]
        header = {
            'Referer': 'https://www.pearvideo.com/%s' % video  # the current page
        }
        res = requests.get('https://www.pearvideo.com/videoStatus.jsp?contId=%s&mrd=0.6761335369801458' % video_id,
                           headers=header).json()
        real_mp4_url = res['videoInfo']['videos']['srcUrl']
        real_mp4_url = real_mp4_url.replace(real_mp4_url.rsplit('/', 1)[-1].split('-')[0], 'cont-%s' % video_id)
        print(real_mp4_url)
        res = requests.get(real_mp4_url)
        with open('./video/%s.mp4' % video_id, 'wb') as f:
            for line in res.iter_content():
                f.write(line)


# give each thread its own slice of the list; passing the full list to every
# thread would download each video 10 times over
chunk_size = len(video_list) // 10 + 1
for i in range(10):
    t = Thread(target=task, args=(video_list[i * chunk_size:(i + 1) * chunk_size],))
    t.start()
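An alternative to managing Thread objects by hand is a thread pool from the standard library; a sketch that submits one download per video:

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=10) as pool:
    for video in video_list:
        pool.submit(task, [video])  # task expects a list of videos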

Scraping news into a database

import pymysql
import requests
# parsing library: bs4   pip3 install beautifulsoup4
from bs4 import BeautifulSoup

res = requests.get('https://www.autohome.com.cn/news/1/#liststart')
# print(res.text)  # search the returned html; bs4 parses html and xml documents
soup = BeautifulSoup(res.text, 'html.parser')
# find all ul tags whose class is 'article'
ul_list = soup.find_all(name='ul', class_='article')
# print(len(ul_list))
for ul in ul_list:
    # find all li tags under the ul
    li_list = ul.find_all(name='li')
    for li in li_list:
        h3 = li.find(name='h3')
        if h3:  # only li tags containing an h3 hold an article
            title = h3.text
            desc = li.find(name='p').text
            url = 'https:' + li.find(name='a').attrs.get('href')
            img = li.find(name='img').attrs.get('src')
            if not img.startswith('http'):
                img = 'https:' + img
            # print('''
            # title:   %s
            # summary: %s
            # url:     %s
            # image:   %s
            # ''' % (title, desc, url, img))
            # the insert must stay inside the if block, otherwise li tags
            # without an h3 would re-insert stale (or undefined) values
            db = pymysql.connect(host='localhost', port=3306, user='root', password='123', database='news',
                                 charset='utf8')
            cur = db.cursor()
            sql = 'INSERT INTO test(title, `desc`, url, img) VALUES (%s,%s,%s,%s)'
            cur.execute(sql, (title, desc, url, img))
            db.commit()
            cur.close()
            db.close()
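The insert assumes a test table already exists in the news database. A sketch of a matching table definition (column sizes are guesses; adjust to your data):

import pymysql

db = pymysql.connect(host='localhost', port=3306, user='root', password='123', database='news', charset='utf8')
cur = db.cursor()
cur.execute('''
    CREATE TABLE IF NOT EXISTS test (
        id INT PRIMARY KEY AUTO_INCREMENT,
        title VARCHAR(255),
        `desc` VARCHAR(512),
        url VARCHAR(512),
        img VARCHAR(512)
    )
''')
db.commit()
cur.close()
db.close()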

BeautifulSoup4 introduction

# Beautiful Soup is a Python library for extracting data from HTML and XML files

# pip3 install beautifulsoup4
# choosing a parser
    BeautifulSoup('content to parse: an html/xml string', "html.parser")  # built-in parser html.parser
    BeautifulSoup('content to parse: an html/xml string', "lxml")  # faster; requires lxml: pip3 install lxml

bs4: traversing the document tree

from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title" id='id_p' name='lqz' xx='yy'>lqz is handsome <b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'lxml')
# 1 prettify the html (just good to know)
# print(soup.prettify())

# 2 traversing the document tree
'''
# Traversing the tree means selecting directly by tag name. It is fast, but if
# several tags share the same name, only the first one is returned.
# 1. basic usage
# 2. getting a tag's name
# 3. getting a tag's attributes
# 4. getting a tag's content
# 5. nested selection
# 6. children and descendants
# 7. parent and ancestors
# 8. siblings
'''
# 1 basic usage: just  .tagname
# res=soup.title
# print(res)
# res=soup.a
# print(res)
# selections can be chained
# res=soup.head.title
# print(res)

# 2 getting a tag's name
# everything you select is a Tag object: bs4.element.Tag
# res=soup.head.title
# res=soup.body
# print(res.name)

# 3 getting a tag's attributes
# res=soup.p
# print(res.attrs)  # dict of the tag's attributes


# 4 getting a tag's content
# res = soup.p
# print(res.text)  # concatenates the text of the tag and all its descendants into one string
# print(res.string)  # None here; returns text only when the tag has no child tags
# print(list(res.strings))  # generator yielding the text of every descendant

# 5 nested selection

# res=soup.html.body.a
# print(res.text)


# 6 children and descendants
# print(soup.p.contents)  # all direct children of the p tag
# print(soup.p.children)  # an iterator over all direct children of the p tag

# 7 parent and ancestors
# print(soup.a.parent)  # the a tag's direct parent
# print(list(soup.a.parents))  # all ancestors of the a tag: parent, grandparent, ...


# 8 siblings
# print(soup.a.next_sibling)  # the next sibling
# print(soup.a.previous_sibling)  # the previous sibling

print(list(soup.a.next_siblings))  # all following siblings => generator object
print('-----')
print(list(soup.a.previous_siblings))  # all preceding siblings => generator object