CrawlSpider site-wide crawling: LinkExtractor and Rule

Create a CrawlSpider:
scrapy genspider -t crawl crawlall www.xxx.com

Open crawlall.py:

import scrapy
from scrapy.linkextractors import LinkExtractor  # LinkExtractor is the link extractor
from scrapy.spiders import CrawlSpider, Rule  # Rule is the rule parser

class CrawlallSpider(CrawlSpider):
    name = 'crawlall'
    # allowed_domains = ['www.xxxx.com']
    start_urls = ['http://wz.sun0769.com/political/index/politicsNewest?id=1&page=1']
    
    """
      LinkExtractor作为参数作用于rule,根据start_urls里的链接进行页码拼接,根据规则(allow='正则')进行指定链接的提取
      怎么进行指定链接的提取?
            allow表示指定好的规则,allow的值是正则表达式
    """
    # rules is the set of rule parsers: each link the extractor pulls out is
    # requested and handled by the rule's callback.
    # follow=True keeps applying the link extractor to the pages that the
    # extracted links lead to.
    rules = (
        Rule(LinkExtractor(allow=r'id=1&page=\d+'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        print(response)
        # item = {}
        # return item

Output:

<200 http://wz.sun0769.com/political/index/politicsNewest?id=1&page=2>
<200 http://wz.sun0769.com/political/index/politicsNewest?id=1&page=0>
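
With follow=False, the extractor only runs over the responses of start_urls, so only the pager links present on the first page (here page=0 and page=2) are fetched. Setting follow=True re-applies the extractor to every page it reaches, which is what makes whole-site pagination work. Below is a hedged sketch of that, adding a second Rule for detail pages on top of the post's spider; the detail-page regex and the parse_detail callback are assumptions for illustration, not taken from the real site:

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class FollowSpider(CrawlSpider):
    name = 'followspider'
    start_urls = ['http://wz.sun0769.com/political/index/politicsNewest?id=1&page=1']

    rules = (
        # follow=True: pager links found on page 2 (then 3, 4, ...) are
        # extracted too, so the spider walks through every listing page.
        Rule(LinkExtractor(allow=r'id=1&page=\d+'), callback='parse_item', follow=True),
        # Hypothetical second rule: extract the detail pages linked from each
        # listing page (the pattern below is assumed; check the page source).
        Rule(LinkExtractor(allow=r'/political/politics/index\?id=\d+'),
             callback='parse_detail', follow=False),
    )

    def parse_item(self, response):
        # Called once per listing page.
        print('listing:', response.url)

    def parse_detail(self, response):
        # Called once per detail page matched by the second rule.
        print('detail:', response.url)

Because Scrapy deduplicates requests by default, follow=True does not loop even though every listing page links back to the earlier pages.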

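If you want to preview which links an allow pattern would match before wiring it into a Rule, you can run the extractor by hand. A minimal sketch using the same start URL:

import scrapy
from scrapy.linkextractors import LinkExtractor

class PreviewSpider(scrapy.Spider):
    name = 'preview'
    start_urls = ['http://wz.sun0769.com/political/index/politicsNewest?id=1&page=1']

    def parse(self, response):
        le = LinkExtractor(allow=r'id=1&page=\d+')
        # extract_links returns scrapy.link.Link objects with .url and .text
        for link in le.extract_links(response):
            print(link.url, link.text)

The same two lines also work inside scrapy shell <url>, where response is already defined.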