Python Web Scraping: Day 03

I. Selenium (remaining topics)

1. Element interaction:
- Click and clear
click
clear

- ActionChains
An action-chain object; you pass the driver instance to it.
It can perform a series of predefined actions in order.

- Switching into an iframe
driver.switch_to.frame('iframeResult')

- Executing JavaScript
execute_script()

Element interaction

'''
Click and clear
'''
# from selenium import webdriver
# from selenium.webdriver.common.keys import Keys  # keyboard key constants
# import time
#
# driver = webdriver.Chrome()
#
# try:
#     driver.implicitly_wait(10)
#     driver.get('https://www.jd.com/')
#     time.sleep(5)
#
#     # click and clear
#     input_tag = driver.find_element_by_id('key')
#     input_tag.send_keys('围城')
#
#     # find the search button by class name
#     search = driver.find_element_by_class_name('button')
#     search.click()  # click the search button
#
#     time.sleep(3)
#
#     input_tag2 = driver.find_element_by_id('key')
#     input_tag2.clear()  # clear the input box
#
#     time.sleep(1)
#
#     input_tag2.send_keys('墨菲定律')
#     input_tag2.send_keys(Keys.ENTER)
#
#     time.sleep(10)
#
# finally:
#     driver.close()



'''
ActionChains: action chains
'''
# from selenium import webdriver
# from selenium.webdriver import ActionChains
# import time
#
# driver = webdriver.Chrome()
# try:
#     driver.implicitly_wait(10)
#     driver.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
#     time.sleep(5)
#
#     # deprecated method: driver.switch_to_frame()
#     # current method:
#     driver.switch_to.frame('iframeResult')
#     time.sleep(1)
#
#     # create the action-chain object
#     action = ActionChains(driver)
#
#     # source block, id: draggable
#     source = driver.find_element_by_id('draggable')
#
#     # target block, id: droppable
#     target = driver.find_element_by_id('droppable')
#
#     # Method 1: instant move
#     # drags the source block onto the target block in one step;
#     # a chain only runs once you call perform()
#     # action.drag_and_drop(source, target).perform()
#
#     time.sleep(10)
#
# finally:
#     driver.close()


# from selenium import webdriver
# from selenium.webdriver import ActionChains
# import time
#
# driver = webdriver.Chrome()
# try:
#     driver.implicitly_wait(10)
#     driver.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
#     time.sleep(5)
#
#     # deprecated method: driver.switch_to_frame()
#     # current method:
#     driver.switch_to.frame('iframeResult')
#     time.sleep(1)
#
#     # source block, id: draggable
#     source = driver.find_element_by_id('draggable')
#
#     # target block, id: droppable
#     target = driver.find_element_by_id('droppable')
#
#     print(source.size)      # size
#     print(source.tag_name)  # tag name
#     print(source.text)      # text
#     print(source.location)  # coordinates: x and y
#
#     # horizontal distance to slide
#     distance = target.location['x'] - source.location['x']
#
#     # click and hold the source block
#     ActionChains(driver).click_and_hold(source).perform()
#
#     # Method 2: move little by little
#     s = 0
#     while s < distance:
#         # each iteration moves the block 2px to the right
#         ActionChains(driver).move_by_offset(xoffset=2, yoffset=0).perform()
#         s += 2
#
#         time.sleep(0.1)
#
#     # release the source block
#     ActionChains(driver).release().perform()
#
#     time.sleep(10)
#
# finally:
#     driver.close()



'''
Executing JavaScript
'''
from selenium import webdriver
import time

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)

    driver.get('https://www.baidu.com/')

    driver.execute_script(
        '''
alert("Zhejiang University is the best university in Zhejiang!")
        '''
    )

    time.sleep(10)

finally:
    driver.close()
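
execute_script can also hand a value back to Python when the JavaScript uses return. A minimal sketch of that (my own addition, not part of the original notes):

from selenium import webdriver

driver = webdriver.Chrome()
try:
    driver.get('https://www.baidu.com/')

    # a `return` in the script becomes the Python return value
    print(driver.execute_script('return document.title'))

    # extra arguments are exposed to the script as arguments[0], arguments[1], ...
    print(driver.execute_script('return arguments[0] + arguments[1]', 1, 2))  # 3

finally:
    driver.close()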

Other operations

'''
Simulating browser back and forward
'''
import time
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.baidu.com')

browser.get('https://www.taobao.com')

browser.get('http://www.sina.com.cn/')

# go back
browser.back()

time.sleep(5)

# go forward
browser.forward()

time.sleep(3)

browser.close()


Scraping JD product listings

# '''
# Basic version:
#     from tank!
# '''
# import time
# from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
#
# driver = webdriver.Chrome()
#
# num = 1
#
# try:
#     driver.implicitly_wait(10)
#     # request the JD homepage
#     driver.get('https://www.jd.com/')
#
#     # type '墨菲定律' into the JD search box, then press Enter
#     input_tag = driver.find_element_by_id('key')
#     input_tag.send_keys('墨菲定律')
#     input_tag.send_keys(Keys.ENTER)
#
#     time.sleep(5)
#
#     good_list = driver.find_elements_by_class_name('gl-item')
#     for good in good_list:
#         # product name
#         good_name = good.find_element_by_css_selector('.p-name em').text
#
#         # product URL
#         good_url = good.find_element_by_css_selector('.p-name a').get_attribute('href')
#
#         # product price
#         good_price = good.find_element_by_class_name('p-price').text
#
#         # product review count
#         good_commit = good.find_element_by_class_name('p-commit').text
#
#         good_content = f'''
#         num: {num}
#         name: {good_name}
#         url: {good_url}
#         price: {good_price}
#         reviews: {good_commit}
#         \n
#         '''
#
#         print(good_content)
#
#         with open('jd.txt', 'a', encoding='utf-8') as f:
#             f.write(good_content)
#         num += 1
#
#     print('Product info written successfully!')
#
# finally:
#     driver.close()



'''
Intermediate version
'''
# import time
# from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
#
# driver = webdriver.Chrome()
#
# num = 1
#
# try:
#     driver.implicitly_wait(10)
#     # request the JD homepage
#     driver.get('https://www.jd.com/')
#
#     # type '墨菲定律' into the JD search box, then press Enter
#     input_tag = driver.find_element_by_id('key')
#     input_tag.send_keys('墨菲定律')
#     input_tag.send_keys(Keys.ENTER)
#
#     time.sleep(5)
#
#     # scroll down 5000px so lazy-loaded items render
#     js_code = '''
#         window.scrollTo(0, 5000)
#     '''
#
#     driver.execute_script(js_code)
#
#     # wait 5 seconds for the product data to load
#     time.sleep(5)
#
#     good_list = driver.find_elements_by_class_name('gl-item')
#     for good in good_list:
#         # product name
#         good_name = good.find_element_by_css_selector('.p-name em').text
#
#         # product URL
#         good_url = good.find_element_by_css_selector('.p-name a').get_attribute('href')
#
#         # product price
#         good_price = good.find_element_by_class_name('p-price').text
#
#         # product review count
#         good_commit = good.find_element_by_class_name('p-commit').text
#
#         good_content = f'''
#         num: {num}
#         name: {good_name}
#         url: {good_url}
#         price: {good_price}
#         reviews: {good_commit}
#         \n
#         '''
#
#         print(good_content)
#
#         with open('jd.txt', 'a', encoding='utf-8') as f:
#             f.write(good_content)
#         num += 1
#
#     print('Product info written successfully!')
#
#     # find and click the next-page button
#     next_tag = driver.find_element_by_class_name('pn-next')
#     next_tag.click()
#
#     time.sleep(10)
#
# finally:
#     driver.close()


'''
Berserk version (scrapes page after page)
'''
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException


def get_good(driver):
    num = 1
    time.sleep(5)

    # scroll down 5000px so lazy-loaded items render
    js_code = '''
        window.scrollTo(0, 5000)
    '''
    driver.execute_script(js_code)

    # wait 5 seconds for the product data to load
    time.sleep(5)
    good_list = driver.find_elements_by_class_name('gl-item')
    for good in good_list:
        # product name
        good_name = good.find_element_by_css_selector('.p-name em').text

        # product URL
        good_url = good.find_element_by_css_selector('.p-name a').get_attribute('href')

        # product price
        good_price = good.find_element_by_class_name('p-price').text

        # product review count
        good_commit = good.find_element_by_class_name('p-commit').text

        good_content = f'''
        num: {num}
        name: {good_name}
        url: {good_url}
        price: {good_price}
        reviews: {good_commit}
        \n
        '''
        print(good_content)
        with open('jd.txt', 'a', encoding='utf-8') as f:
            f.write(good_content)
        num += 1

    print('Product info written successfully!')

    # find and click the next-page button; the last page has no
    # 'pn-next' element, which is what ends the recursion
    # (the caller's finally block closes the browser exactly once)
    try:
        next_tag = driver.find_element_by_class_name('pn-next')
    except NoSuchElementException:
        return
    next_tag.click()

    time.sleep(5)
    # recurse to scrape the next page
    get_good(driver)


if __name__ == '__main__':
    driver = webdriver.Chrome()
    try:
        driver.implicitly_wait(10)
        # request the JD homepage
        driver.get('https://www.jd.com/')
        # type '墨菲定律' into the JD search box, then press Enter
        input_tag = driver.find_element_by_id('key')
        input_tag.send_keys('墨菲定律')
        input_tag.send_keys(Keys.ENTER)

        # scrape product info, page after page
        get_good(driver)

    finally:
        driver.close()


II. BeautifulSoup4 (BS4)

1. What is BeautifulSoup?
bs4 is a parsing library: through a parser it extracts the data we want from a document.

2. Why use bs4?
Because its concise syntax lets us quickly pull out exactly the data we want.

3. Parsers (a short comparison sketch follows this list)
- lxml
- html.parser

4. Installation and usage
- Traversing the document tree
- Searching the document tree
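
The two parsers behave differently on malformed HTML. A minimal sketch (my own illustration, not from the original notes) that feeds the same broken fragment to both:

from bs4 import BeautifulSoup

broken = "<a></p>"

# lxml repairs the fragment and wraps it in a full document
print(BeautifulSoup(broken, 'lxml'))         # <html><body><a></a></body></html>

# html.parser keeps only what it can parse, adding no wrapper
print(BeautifulSoup(broken, 'html.parser'))  # <a></a>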

Supplementary notes:

Data formats:

JSON data:
{
"name": "tank"
}

XML data:
<name>tank</name>

HTML:
<html></html>
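
To make these formats concrete, a small sketch (my own addition, standard library only) that parses the JSON and XML snippets above:

import json
import xml.etree.ElementTree as ET

# JSON text becomes Python dicts/lists
data = json.loads('{"name": "tank"}')
print(data['name'])  # tank

# XML text becomes an element tree
root = ET.fromstring('<name>tank</name>')
print(root.tag, root.text)  # name tank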

Generators: yield value (puts the value into the generator)

def f():
    # return 1
    yield 1
    yield 2
    yield 3

g = f()
print(g)  # <generator object f at 0x...>

for line in g:
    print(line)

bs4 installation and usage

'''
Install a parser:
pip3 install lxml

Install the parsing library:
pip3 install bs4
'''
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="sister"><b>$37</b></p>
<p class="story" id="p">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" >Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
from bs4 import BeautifulSoup

# Python's built-in parser
# soup = BeautifulSoup(html_doc, 'html.parser')

# build a soup object with the lxml parser
soup = BeautifulSoup(html_doc, 'lxml')

# the soup object
print(soup)

# its type
print(type(soup))

# pretty-print the document
html = soup.prettify()
print(html)

bs4: traversing the document tree

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="sister"><b>$37</b></p>
<p class="story" id="p">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" >Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
from bs4 import BeautifulSoup

soup = BeautifulSoup(html_doc, 'lxml')
# print(soup)
# print(type(soup))
# Traversing the document tree
# 1. Direct dotted access  *****
print(soup.html)
print(type(soup.html))
print(soup.a)
print(soup.p)

# 2. Get a tag's name
print(soup.a.name)

# 3. Get a tag's attributes   *****
print(soup.a.attrs)  # all attributes of the a tag
print(soup.a.attrs['href'])

# 4. Get a tag's text content  *****
print(soup.p.text)  # $37

# 5. Nested selection
print(soup.html.body.p)

# 6. Children and descendants
print(soup.p.children)  # returns an iterator
print(list(soup.p.children))  # [<b>$37</b>]

# 7. Parent and ancestors
print(soup.b.parent)
print(soup.b.parents)
print(list(soup.b.parents))

# 8. Sibling nodes
print(soup.a)
# next sibling node
print(soup.a.next_sibling)

# all following siblings; returns a generator
print(soup.a.next_siblings)
print(list(soup.a.next_siblings))

# previous sibling node
print(soup.a.previous_sibling)
# all preceding siblings; returns a generator
print(list(soup.a.previous_siblings))


bs4: searching the document tree

'''
find: return the first match
find_all: return all matches

Tag and attribute lookup:

    name: tag-name matching
    attrs: attribute matching
    text: text matching

    Tag filters:
        - string filter
            exact string match

        - regex filter
            match via the re module

        - list filter
            match anything in the list

        - bool filter
            True matches anything that exists

        - method filter
            for lookups that require certain attributes and exclude others

    Attribute shortcuts:
        - class_
        - id
'''
html_doc = """
<html><head><title>The Dormouse's story</title></head><body><p class="sister"><b>$37</b></p><p class="story" id="p">Once upon a time there were three little sisters; and their names were<a href="http://example.com/elsie" class="sister" >Elsie</a><a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>and they lived at the bottom of a well.</p><p class="story">...</p>
"""
from bs4 import BeautifulSoup

soup = BeautifulSoup(html_doc, 'lxml')

# name: tag-name matching
# attrs: attribute matching
# text: text matching
# find and find_all search the document tree

'''
String filter
'''
p = soup.find(name='p')
p_s = soup.find_all(name='p')

print(p)
print(p_s)

# name + attrs
p = soup.find(name='p', attrs={"id": "p"})
print(p)

# name + text
tag = soup.find(name='title', text="The Dormouse's story")
print(tag)

# name + attrs + text
tag = soup.find(name='a', attrs={"class": "sister"}, text="Elsie")
print(tag)

'''
- Regex filter
match via the re module
'''
import re
# name
# match nodes whose tag name contains 'a', via the re module
a = soup.find(name=re.compile('a'))
print(a)

a_s = soup.find_all(name=re.compile('a'))
print(a_s)


# attrs
a = soup.find(attrs={"id": re.compile('link')})
print(a)


# - List filter
# matches anything in the list
print(soup.find(name=['a', 'p', 'html', re.compile('a')]))
print(soup.find_all(name=['a', 'p', 'html', re.compile('a')]))


# - Bool filter
# True matches anything that exists (any tag name, any present attribute)
print(soup.find(name=True, attrs={"id": True}))

# - Method filter
# for lookups that require certain attributes and exclude others

def have_id_not_class(tag):
    # print(tag.name)
    if tag.name == 'p' and tag.has_attr("id") and not tag.has_attr("class"):
        return tag

# pass the function object itself as name:
print(soup.find_all(name=have_id_not_class))


# Extra shortcuts:
# id
a = soup.find(id='link2')
print(a)

# class
p = soup.find(class_='sister')
print(p)