Python Learning: Day 8

1. Scrape the remaining Wandoujia data: the intro screenshot image URLs and the user comments.
2. Insert the scraped Wandoujia data into MongoDB:
  - Create a database named wandoujia
  - Store the home-page data in a collection named index
  - Store the detail-page data in a collection named detail

The code is as follows:
import re

import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient

# Connect to the local MongoDB server
client = MongoClient('localhost', 27017)

# Collection for home-page data
index_col = client['wandoujia']['index']

# Collection for detail-page data
detail_col = client['wandoujia']['detail']

# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response

# 2. Start parsing
# Parse the detail page
def parse_detail(text):
    soup = BeautifulSoup(text, 'lxml')

    # App name
    try:
        name = soup.find(name="span", attrs={"class": "title"}).text
    except Exception:
        name = None

    # Positive-rating rate
    try:
        love = soup.find(name='span', attrs={"class": "love"}).text
    except Exception:
        love = None

    # Comment count
    try:
        commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text
    except Exception:
        commit_num = None

    # Editor's review
    try:
        commit_content = soup.find(name='div', attrs={"class": "con"}).text
    except Exception:
        commit_content = None

    # App download link
    try:
        download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']
    except Exception:
        download_url = None


    # Build the document once, with placeholder text for fields that
    # may be missing on some detail pages
    detail_data = {
        'name': name,
        'love': love if love else 'no likes yet',
        'commit_num': commit_num,
        'commit_content': commit_content,
        'download_url': download_url if download_url else 'no installer available'
    }

    detail_col.insert_one(detail_data)
    print(f'{name} app data inserted successfully!')



# Parse the home page
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')

    # Get the li tags for all the apps
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # Icon URL
        # Get the data-original attribute of the first img tag
        img = app.find(name='img').attrs['data-original']
        # print(img)

        # Download count
        # Get the text of the span tag with class install-count
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        # print(down_num)

        # App size
        # Regex-match the span whose text contains digits + "MB"
        # (use app.find, not soup.find, so the match stays within this card)
        size = app.find(name='span', text=re.compile(r"\d+MB")).text
        # print(size)

        # Detail-page URL
        # (alternative: the href of the a tag with class "name")
        # detail_url = soup.find(name='a', attrs={"class": "name"}).attrs['href']

        # Get the href attribute of the first a tag
        detail_url = app.find(name='a').attrs['href']
        # print(detail_url)

        # Assemble the data
        index_data = {
            'img': img,
            'down_num': down_num,
            'size': size,
            'detail_url': detail_url
        }

        # Insert the data
        index_col.insert_one(index_data)
        print('Home-page data inserted successfully!')

        # 3. Request the app's detail page
        response = get_page(detail_url)

        # 4. Parse the app detail page
        parse_detail(response.text)

def main():
    for line in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={line}&ctoken=FRsWKgWBqMBZLdxLaK4iem9B"

        # 1. Send a request to the app list API
        response = get_page(url)
        # print(response.text)
        print('*' * 1000)
        # Deserialize the JSON response into a dict
        data = response.json()

        # Pull the HTML fragment with the app cards out of the API payload
        app_li = data['data']['content']
        # print(app_li)

        # 2. Parse the app card data
        parse_index(app_li)

    # Close the MongoDB client once all pages have been processed
    client.close()

if __name__ == '__main__':
    main()
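The first assignment item, the intro screenshot image URLs and the user comments, is not yet covered by the code above. Below is a minimal sketch of how it could be added; the class names "screenshot-img" and "cmt-content" are assumptions, not verified against the live Wandoujia markup:

def parse_extra(text):
    soup = BeautifulSoup(text, 'lxml')

    # Screenshot image URLs ("screenshot-img" is a hypothetical class name)
    screenshots = [
        img.attrs.get('data-original') or img.attrs.get('src')
        for img in soup.find_all(name='img', attrs={"class": "screenshot-img"})
    ]

    # User comments ("cmt-content" is likewise a hypothetical class name)
    comments = [
        div.text.strip()
        for div in soup.find_all(name='div', attrs={"class": "cmt-content"})
    ]

    return {'screenshots': screenshots, 'comments': comments}

The returned dict can then be merged into detail_data inside parse_detail before the insert.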

Class notes

1. The bs4 parsing library

'''
pip3 install beautifulsoup4  # install bs4
pip3 install lxml  # install the lxml parser
'''
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="sister"><b>$37</b></p>

<p class="story" id="p">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" >Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
from bs4 import BeautifulSoup
# Import the BeautifulSoup object from bs4
# Argument 1: the text to parse
# Argument 2: the parser (html.parser, lxml, ...)
soup = BeautifulSoup(html_doc, 'lxml')

print(soup)
print('*' * 100)
print(type(soup))
print('*' * 100)
# Pretty-print the document
html = soup.prettify()
print(html)

2. bs4: traversing the document tree

html_doc = """<html><head><title>The Dormouse's story</title></head><body><p class="sister"><b>$37</b></p><p class="story" id="p">Once upon a time there were three little sisters; and their names were<a href="http://example.com/elsie" class="sister" >Elsie</a>,<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;and they lived at the bottom of a well.</p><p class="story">...</p>"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc,'lxml')

'''
1. Basic usage
2. Get a tag's name
3. Get a tag's attributes
4. Get a tag's text content
5. Nested selection
6. Children and descendants
7. Parent and ancestor nodes
8. Sibling nodes
'''

# 1. Basic usage
print(soup.p)  # find the first p tag
print(soup.a)  # find the first a tag

# 2. Get a tag's name
print(soup.head.name)  # name of the head tag

# 3. Get a tag's attributes
print(soup.a.attrs)  # all attributes of the first a tag
print(soup.a.attrs['href'])  # the href attribute of the first a tag

# 4. Get a tag's text content
print(soup.p.text)  # $37

# 5. Nested selection
print(soup.html.head)

# 6. Children and descendants
print(soup.body.children)  # all direct children of body, returned as an iterator
print(list(soup.body.children))  # convert to a list

print(soup.body.descendants)  # all descendants, returned as a generator
print(list(soup.body.descendants))

# 7. Parent and ancestor nodes
print(soup.p.parent)  # the parent node of the first p tag
print(soup.p.parents)  # all ancestors of the p tag, returned as a generator
print(list(soup.p.parents))

# 8. Sibling nodes
# All following siblings (a generator)
print(soup.p.next_siblings)
print(list(soup.p.next_siblings))
# The immediately preceding sibling of the first a tag
print(soup.a.previous_sibling)
# All preceding siblings of the a tag (a generator)
print(soup.a.previous_siblings)
print(list(soup.a.previous_siblings))

3. bs4: searching the document tree

html_doc = """<html><head><title>The Dormouse's story</title></head><body><p class="sister"><b>$37</b></p><p class="story" id="p">Once upon a time there were three little sisters; and their names were<b>tank</b><a href="http://example.com/elsie" class="sister" >Elsie</a>,<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;and they lived at the bottom of a well.<hr></hr></p><p class="story">...</p>"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc,'lxml')
# String filters
# name
p_tag = soup.find(name='p')
print(p_tag)  # find a tag by the name p
# Find all tags named p
tag_s1 = soup.find_all(name='p')
print(tag_s1)
# attrs
# Find the first node with class sister
p = soup.find(attrs={"class": "sister"})
print(p)
# Find all nodes with class sister
tag_s2 = soup.find_all(attrs={"class": "sister"})
print(tag_s2)
# text
text = soup.find(text="$37")
print(text)
# Combined filters:
# Find the a tag with id link2 and text Lacie
a_tag = soup.find(name="a", attrs={"id": "link2"}, text="Lacie")
print(a_tag)


# Regex filters
import re
# name
p_tag = soup.find(name=re.compile('p'))
print(p_tag)

# List filters
# name
tags = soup.find_all(name=['p', 'a', re.compile('html')])
print(tags)


# Bool filters
# True matches any tag that has the attribute at all
# Find a p tag that has an id attribute
p = soup.find(name='p', attrs={"id": True})
print(p)

# Method filters
# Match tags named a that have an id attribute but no class attribute
def have_id_no_class(tag):
    if tag.name == 'a' and tag.has_attr('id') and not tag.has_attr('class'):
        return tag

tag = soup.find(name=have_id_no_class)
print(tag)
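find() and find_all() also accept a couple of extra keyword arguments beyond the filters above. A short sketch against the same soup (these are standard bs4 parameters):

# limit: stop after the first N matches
first_two_a = soup.find_all(name='a', limit=2)
print(first_two_a)

# recursive=False: search only direct children instead of all descendants
print(soup.html.find_all(name='body', recursive=False))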

4. Scraping Wandoujia app data

import re

import requests
from bs4 import BeautifulSoup

# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response
# 2. Start parsing
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')

    # Get the li tags for all the apps
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # Icon URL
        img = app.find(name='img').attrs['data-original']
        print(img)

        # Download count
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        print(down_num)

        # App size (regex-match digits + "MB"; app.find keeps the match within this card)
        size = app.find(name='span', text=re.compile(r"\d+MB")).text
        print(size)

        # Detail-page URL: the href of the first a tag
        detail_url = app.find(name='a').attrs['href']
        print(detail_url)

        # 3. Request the detail page
        response = get_page(detail_url)

        # 4. Parse the app detail page
        parse_detail(response.text)



def parse_detail(text):
    soup = BeautifulSoup(text, 'lxml')

    # App name
    name = soup.find(name="span", attrs={"class": "title"}).text
    print(name)

    # Positive-rating rate
    love = soup.find(name='span', attrs={"class": "love"}).text
    print(love)

    # Comment count
    commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text
    print(commit_num)

    # Editor's review
    commit_content = soup.find(name='div', attrs={"class": "con"}).text
    print(commit_content)

    # App download link
    download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']
    print(
        f'''
        =========begin============
        App name: {name}
        Positive-rating rate: {love}
        Comment count: {commit_num}
        Editor's review: {commit_content}
        Download link: {download_url}
        ==========end===============
        '''
    )

def main():
    for line in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={line}&ctoken=1XgmoJKndXkl17m9HGiCMmJx"
        # 1. Send a request to the app list API
        response = get_page(url)
        # print(response.text)
        print('*' * 1000)
        # Deserialize the JSON response into a dict
        data = response.json()
        # Pull the HTML fragment with the app cards out of the API payload
        app_li = data['data']['content']
        # print(app_li)
        # 2. Parse the app card data
        parse_index(app_li)


if __name__ == '__main__':
    main()

5. Basic usage of pymongo

from pymongo import MongoClient

# 1. Connect to the MongoDB server
# Argument 1: MongoDB host address
# Argument 2: MongoDB port (default: 27017)
client = MongoClient('localhost', 27017)
print(client)

# 2. Access the tank_db database (created if it doesn't exist)
print(client['tank_db'])

# 3. Access the people collection
print(client['tank_db']['people'])


# 4. Insert data into tank_db
# Insert one document
data1 = {
    'name': 'tank',
    'age': 18,
    'sex': 'male'
}
# insert() is deprecated; see insert_one()/insert_many() below
client['tank_db']['people'].insert(data1)


# Insert multiple documents
data1 = {
    'name': '*',
    'age': 18,
    'sex': 'male'
}
data2 = {
    'name': '**',
    'age': 21,
    'sex': 'female'
}
data3 = {
    'name': '***',
    'age': 73,
    'sex': 'female'
}
client['tank_db']['people'].insert([data1, data2, data3])

# 5. Query data
# Fetch all documents
data_s = client['tank_db']['people'].find()
print(data_s)  # <pymongo.cursor.Cursor object at 0x000002EEA6720128>
# Iterate the cursor to print every document
for data in data_s:
    print(data)

# Fetch a single document
data = client['tank_db']['people'].find_one()
print(data)

# Officially recommended APIs
# Insert one document with insert_one
client['tank_db']['people'].insert_one({'name': 'tank2', 'age': 19, 'sex': 'male'})
# Insert multiple documents with insert_many
client['tank_db']['people'].insert_many([
    {'name': 'tank3', 'age': 20, 'sex': 'female'},
    {'name': 'tank4', 'age': 21, 'sex': 'male'},
])
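find() and find_one() also accept a filter document as their first argument. A few examples using standard MongoDB query operators:

# Equality filter
for doc in client['tank_db']['people'].find({'sex': 'female'}):
    print(doc)

# Comparison operator ($gt means "greater than")
for doc in client['tank_db']['people'].find({'age': {'$gt': 20}}):
    print(doc)

# find_one returns the first match, or None if nothing matches
print(client['tank_db']['people'].find_one({'name': 'tank'}))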