scrapy
Scrapy framework
win: the Twisted dependency fails to build with pip; install it from a .whl file instead
- pip3 install wheel
- pip3 install ****.whl
- pip3 install pywin32
Django (for comparison):
    django-admin startproject mysite
    cd mysite
    python manage.py startapp app01
Scrapy:
# Create a project
scrapy startproject sp1
sp1/
- sp1/
    - spiders/         directory for the spider modules
    - middlewares.py   middlewares
    - items.py         data templates used to format scraped data (like Django models)
    - pipelines.py     persistence
    - settings.py      configuration
- scrapy.cfg           main project configuration
# Create a spider
cd sp1
scrapy genspider example example.com
# Run a spider (from inside the project)
scrapy crawl baidu
scrapy crawl baidu --nolog   # suppress log output
# List all spiders in the project
scrapy list
Scrapy uses the Twisted asynchronous networking library to handle network communication. The overall architecture is roughly as follows.
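In brief (summarizing the Scrapy documentation):
- the Engine takes the Spider's start requests and feeds them to the Scheduler
- the Engine pulls the next Request from the Scheduler and sends it to the Downloader through the downloader middlewares
- the Downloader returns a Response, which the Engine passes to the Spider through the spider middlewares
- the Spider's callback yields Items (routed to the item pipelines) and/or new Requests (routed back to the Scheduler)
- the cycle repeats until the Scheduler runs out of Requests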
# Added at the top of a spider file to avoid console encoding errors on Windows
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="gb18030")
# A first example
# Side note - Selenium integration (spider side):
#     def __init__(self):
#         create the browser object here, e.g. self.bro = webdriver.Chrome(...)
#     def closed(self, spider):
#         close the browser object here, e.g. self.bro.quit()
# In the downloader middleware's process_response:
#     get the browser object via spider.bro
#     grab the rendered page source via bro.page_source
#     pass that source as the body argument of HtmlResponse(url, body, request, encoding)

import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request


class DigSpider(scrapy.Spider):
    name = "dig"                            # the spider is started by this name
    allowed_domains = ["chouti.com"]
    start_urls = ['http://dig.chouti.com/']
    has_request_set = {}

    def parse(self, response):
        # HtmlXPathSelector structures the HTML and provides selector functionality
        hxs = HtmlXPathSelector(response)
        page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
        for page in page_list:
            page_url = 'http://dig.chouti.com%s' % page
            key = self.md5(page_url)
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = page_url
                obj = Request(url=page_url, method='GET', callback=self.parse)
                yield obj

    @staticmethod
    def md5(val):
        import hashlib
        ha = hashlib.md5()
        ha.update(bytes(val, encoding='utf-8'))
        key = ha.hexdigest()
        return key
---------> HtmlXPathSelector provides HTML parsing similar to BeautifulSoup
Usage examples:
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy.http import HtmlResponse

html = """<!DOCTYPE html>
<html>
    <head lang="en">
        <meta charset="UTF-8">
        <title></title>
    </head>
    <body>
        <ul>
            <li class="item-"><a id='i1' href="link.html">first item</a></li>
            <li class="item-0"><a id='i2' href="link.html">first item</a></li>
            <li class="item-1"><a href="link2.html">second item<span>vv</span></a></li>
        </ul>
        <div><a href="link1.html">second item</a></div>
    </body>
</html>
"""
# //  all descendants, /  direct children
# First wrap the string so it becomes a response object
response = HtmlResponse(url='http://example.com', body=html, encoding='utf-8')
# Selector objects
# hxs = HtmlXPathSelector(response)                                           # deprecated
# hxs = Selector(response).xpath("//a")                                       # all <a> tags
# hxs = Selector(response).xpath("//a[@id]")                                  # all <a> tags with an id attribute
# hxs = Selector(response).xpath("//a[@id='i1']")                             # all <a> tags whose id is i1
# hxs = Selector(response).xpath('//a[@href="link.html"][@id="i1"]')          # several attributes combined
# hxs = Selector(response=response).xpath('//a[contains(@href, "link")]')     # href contains "link"
# hxs = Selector(response=response).xpath('//a[starts-with(@href, "link")]')  # href starts with "link"
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]')        # regex match via re:test
# Use extract() / extract_first() to turn selectors into strings
# hxs = Selector(response).xpath("//a/text()")[0].extract()                   # text of the first a tag
# hxs = Selector(response).xpath('//a/@href').extract_first()                 # href of the first a tag
# print(hxs)
item_list = response.xpath("//div[@id='content-list']/div[@class='item']")
for item in item_list:
    text = item.xpath('.//a/text()').extract_first()
    href = item.xpath('.//a/@href').extract_first()
    print(href, text.strip())
Scrapy spiders ---> spiders/***.py
scrapy.FormRequest(url=..., formdata=..., callback=...)   # for example:
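A minimal login sketch using FormRequest (the URL and form field names below are made up for illustration):

import scrapy

class LoginSpider(scrapy.Spider):
    name = 'login_demo'
    start_urls = ['http://example.com/login']

    def parse(self, response):
        # FormRequest encodes formdata as an application/x-www-form-urlencoded POST body
        yield scrapy.FormRequest(
            url='http://example.com/login',
            formdata={'phone': 'xxx', 'password': 'xxx'},
            callback=self.after_login,
        )

    def after_login(self, response):
        self.logger.info('login response status: %s', response.status)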
Each concrete spider lives in its own .py file under the spiders folder.
When a page finishes downloading, its parse callback runs automatically. Inside parse:
- yielding an Item object hands it to the engine, which routes it to the pipelines for persistence
- yielding a Request object hands it to the engine, which routes it to the scheduler to be crawled next (see the fuller sketch after the snippet below)
from ..items import MyItem

obj = MyItem(name=name, school=school, url=url)
yield obj
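A fuller sketch combining the two cases (the URLs, selectors and next-page xpath are illustrative; MyItem is the item defined in items.py below):

import scrapy
from ..items import MyItem

class SchoolSpider(scrapy.Spider):
    name = 'school'
    start_urls = ['http://example.com/list']

    def parse(self, response):
        for row in response.xpath("//div[@class='item']"):
            # an Item object: routed to the pipelines
            yield MyItem(
                name=row.xpath('.//a/text()').extract_first(),
                school=row.xpath('.//span/text()').extract_first(),
                url=row.xpath('.//a/@href').extract_first(),
            )
        next_page = response.xpath("//a[@class='next']/@href").extract_first()
        if next_page:
            # a Request object: routed back to the scheduler
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)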
class Baidu(scrapy.Spider): the Spider base class defines a start_requests method that builds the initial Requests from start_urls; their callback defaults to parse. It can be overridden, as sketched below.
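A small sketch of overriding start_requests (the spider name and callback name are illustrative):

import scrapy
from scrapy.http import Request

class BaiduSpider(scrapy.Spider):
    name = 'baidu'
    start_urls = ['http://www.baidu.com']

    def start_requests(self):
        for url in self.start_urls:
            # override the default callback (parse)
            yield Request(url, callback=self.parse_index)

    def parse_index(self, response):
        print(response.url)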
from scrapy.http import Request

post_dict = {
    'phone': '8615131255089',
    'password': 'woshiniba',
    'oneMonth': 1,
}
# Extract cookies from a response
from scrapy.http.cookies import CookieJar

cookie_jar = CookieJar()                                 # object that wraps the cookies
cookie_jar.extract_cookies(response, response.request)   # pull the cookies out of the response
for k, v in cookie_jar._cookies.items():
    for i, j in v.items():
        for m, n in j.items():
            self.cookie_dict[m] = n.value
Request(
    url="http://dig.chouti.com/login",
    method='POST',
    cookies=self.cookie_dict,
    body=urllib.parse.urlencode(post_dict),
    headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
    callback=self.parse2,
    dont_filter=False,   # False = obey the dedup filter
)
import urllib.parse
urllib.parse.urlencode(post_dict)
# -> 'phone=8615131255089&password=woshiniba&oneMonth=1'
Passing data between requests: when the data you need is spread across more than one page, pass the partially filled item along via scrapy.Request(url, callback, meta={'item': item}); in the next callback, read it back with item = response.meta['item']. A sketch follows.
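A minimal sketch of meta passing (URLs, xpaths and the detail callback are illustrative; MyItem is defined in items.py below):

import scrapy
from ..items import MyItem

class DetailSpider(scrapy.Spider):
    name = 'detail'
    start_urls = ['http://example.com/list']

    def parse(self, response):
        for row in response.xpath("//div[@class='item']"):
            item = MyItem(name=row.xpath('.//a/text()').extract_first())
            detail_url = response.urljoin(row.xpath('.//a/@href').extract_first())
            # carry the half-filled item over to the detail page
            yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'item': item})

    def parse_detail(self, response):
        item = response.meta['item']   # take the item back out of meta
        item['url'] = response.url
        yield item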
Scrapy data formatting ---> items.py
# items.py: defines the templates used to structure scraped data
class MyItem(scrapy.Item):
    name = scrapy.Field()
    school = scrapy.Field()
    url = scrapy.Field()
Scrapy persistence ---> pipelines.py
# First register the paths of the pipeline classes that will handle the items
custom_settings = {
    'ITEM_PIPELINES': {
        'spider1.pipelines.FilePipeline': 100,
        'spider1.pipelines.DbPipeline': 101,
    }
}
# The integer after each entry determines the order: items pass through the pipelines from the lowest number to the highest. By convention these numbers are kept in the 0-1000 range.
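A minimal sketch of one of those pipeline classes (the class name and output file are illustrative):

class FilePipeline(object):
    def open_spider(self, spider):
        # called once when the spider starts
        self.f = open('items.txt', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        # called for every yielded item; return the item so lower-priority pipelines also see it
        self.f.write(str(dict(item)) + '\n')
        return item

    def close_spider(self, spider):
        # called once when the spider closes
        self.f.close()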
Scrapy middlewares ---> middlewares.py
Spider middleware
# SPIDER_MIDDLEWARES = {
#     'sp1.middlewares.Sp1SpiderMiddleware': 543,
# }
class SpiderMiddleware(object):
    def process_spider_input(self, response, spider):
        """
        Called for each response after download, before it is handed to parse.
        """
        pass

    def process_spider_output(self, response, result, spider):
        """
        Called with the results the spider returns.
        :return: must return an iterable of Request or Item objects
        """
        return result

    def process_spider_exception(self, response, exception, spider):
        """
        Called on exception.
        :return: None to let later middlewares handle it, or an iterable of
                 Response/Item objects handed to the scheduler or pipelines
        """
        return None

    def process_start_requests(self, start_requests, spider):
        """
        Called with the spider's start requests when the spider is opened.
        :return: an iterable of Request objects
        """
        return start_requests
Downloader middleware

# Pipeline that downloads large files asynchronously with Twisted (Agent + Deferred)
from twisted.internet import defer, reactor
from twisted.web.client import Agent


class BigfilePipeline(object):
    def process_item(self, item, spider):
        print(item, spider, "------------------")
        # create a download task for file-type items
        if item['type'] == 'file':
            agent = Agent(reactor)
            d = agent.request(
                method=b'GET',
                uri=bytes(item['url'], encoding='ascii')
            )
            # once the download starts, self._cb_bodyready is called automatically
            d.addCallback(self._cb_bodyready, file_name=item['file_name'])
            return d
        else:
            return item

    def _cb_bodyready(self, txresponse, file_name):
        # create a Deferred so the connection is only closed after the body is fully downloaded
        d = defer.Deferred()
        d.addBoth(self.download_result)  # callback run on completion / error
        # _ResponseReader (not shown here) is the protocol that writes the body to file_name
        txresponse.deliverBody(_ResponseReader(d, txresponse, file_name))
        return d

    def download_result(self, response):
        pass
# DOWNLOADER_MIDDLEWARES = {
#     'sp1.middlewares.Sp1DownloaderMiddleware': 543,
# }
class DownMiddleware1(object):
    def process_request(self, request, spider):
        """
        Called for every request that passes through the downloader middlewares.
        :return: None - continue to the next middleware / downloader
                 Response object - stop the process_request chain, start process_response
                 Request object - stop the middleware chain, reschedule the Request
                 raise IgnoreRequest - stop process_request, start process_exception
        """
        request.headers['User-Agent'] = 'xxx'
        request.meta['proxy'] = 'http://ip:port'
        pass

    def process_response(self, request, response, spider):
        """
        Called with the response returned from the downloader.
        :return: Response object - passed on to the next middleware's process_response
                 Request object - stop the middleware chain, reschedule the request
                 raise IgnoreRequest - call Request.errback
        """
        print('response1')
        # Handling lazy-loaded pages: replace the body with browser-rendered source
        # from scrapy.http import HtmlResponse
        # return HtmlResponse(url, body=..., request=request, encoding='utf8')
        return response

    def process_exception(self, request, exception, spider):
        """
        Called when the download handler or process_request() raises an exception.
        :return: None - continue to the next middleware's process_exception
                 Response object - stop the process_exception chain
                 Request object - stop the middleware chain, reschedule the request
        """
        return None

# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from time import sleep
from scrapy.http import HtmlResponse


class WangyinewsproDownloaderMiddleware(object):
    def process_request(self, request, spider):
        return None

    def process_response(self, request, response, spider):
        # only intercept the responses we care about
        if request.url in spider.news_urls:
            # handle the response with the browser
            url = request.url
            bro = spider.bro   # the browser object created in the spider
            bro.get(url=url)
            sleep(2)
            js = 'window.scrollTo(0,document.body.scrollHeight)'
            bro.execute_script(js)
            sleep(1)
            bro.execute_script(js)
            sleep(1)
            bro.execute_script(js)
            sleep(1)
            # the page source we need (now contains the dynamically loaded news data)
            page_text = bro.page_source
            # build a new response object carrying that source and return it instead
            return HtmlResponse(url=bro.current_url, body=page_text, encoding='utf-8', request=request)
        return response
Scrapy custom commands
- note: ROBOTSTXT_OBEY = False
- create a directory (any name, e.g. commands) at the same level as spiders
- create crawlall.py inside it (the file name becomes the command name)
from scrapy.commands import ScrapyCommand
from scrapy.utils.project import get_project_settings


class Command(ScrapyCommand):
    requires_project = True

    def syntax(self):
        return '[options]'

    def short_desc(self):
        return 'Runs all of the spiders'

    def run(self, args, opts):
        spider_list = self.crawler_process.spiders.list()
        for name in spider_list:
            self.crawler_process.crawl(name, **opts.__dict__)
        self.crawler_process.start()
- configure COMMANDS_MODULE = "project_name.directory_name" in settings.py
- run scrapy crawlall from the project directory
Scrapy custom extensions ---> built-in signals
Use signals to hook custom behavior into the crawl
from scrapy import signals


class MyExtension(object):
    def __init__(self, value):
        self.value = value

    @classmethod
    def from_crawler(cls, crawler):
        val = crawler.settings.getint('MMMM')
        ext = cls(val)
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        return ext

    def spider_opened(self, spider):
        print('open')

    def spider_closed(self, spider):
        print('close')
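The extension only runs if it is registered in settings.py; a sketch, assuming the class lives in sp1/extensions.py:

EXTENSIONS = {
    'sp1.extensions.MyExtension': 500,
}
MMMM = 10   # the custom setting read by from_crawler above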
Scrapy scheduler ---> request deduplication
By default Scrapy deduplicates requests with scrapy.dupefilter.RFPDupeFilter; the related settings are:
DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
DUPEFILTER_DEBUG = False
JOBDIR = "path where the visited-request log is kept, e.g. /root/"   # the final path becomes /root/requests.seen
# Custom URL deduplication
class RepeatUrl:
    def __init__(self):
        self.visited_url = set()

    @classmethod
    def from_settings(cls, settings):
        # called when the filter is initialized
        return cls()

    def request_seen(self, request):
        # check whether the request has been seen before; True means it was already visited
        if request.url in self.visited_url:
            return True
        self.visited_url.add(request.url)
        return False

    def open(self):
        # called when crawling starts
        print('open replication')

    def close(self, reason):
        # called when crawling ends
        print('close replication')

    def log(self, request, spider):
        # log a duplicate request
        print('repeat', request.url)
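To switch to the custom filter, point DUPEFILTER_CLASS at it in settings.py (the module path below is an assumption):

DUPEFILTER_CLASS = 'sp1.duplication.RepeatUrl'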
Scrapy settings ---> overview
Per-spider custom configuration goes in the spider's custom_settings
# 1. Bot name
BOT_NAME = 'step8_king'

# 2. Spider module paths
SPIDER_MODULES = ['step8_king.spiders']
NEWSPIDER_MODULE = 'step8_king.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# 3. Client User-Agent header
# USER_AGENT = 'step8_king (+http://www.yourdomain.com)'

# Obey robots.txt rules
# 4. Whether to obey robots.txt
# ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# 5. Concurrent requests
# CONCURRENT_REQUESTS = 4

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 6. Download delay in seconds
# DOWNLOAD_DELAY = 2

# The download delay setting will honor only one of:
# 7. Per-domain concurrency; the download delay is also applied per domain
# CONCURRENT_REQUESTS_PER_DOMAIN = 2
# Per-IP concurrency; if set, CONCURRENT_REQUESTS_PER_DOMAIN is ignored and the delay is applied per IP
# CONCURRENT_REQUESTS_PER_IP = 3

# Disable cookies (enabled by default)
# 8. Whether cookies are enabled (handled internally via cookiejar)
# COOKIES_ENABLED = True
# COOKIES_DEBUG = True

# Disable Telnet Console (enabled by default)
# 9. The telnet console lets you inspect and control the running crawler
#    connect with: telnet ip port, then issue commands
# TELNETCONSOLE_ENABLED = True
# TELNETCONSOLE_HOST = '127.0.0.1'
# TELNETCONSOLE_PORT = [6023,]

# 10. Default request headers
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# 11. Item pipelines that process the yielded items
# ITEM_PIPELINES = {
#     'step8_king.pipelines.JsonPipeline': 700,
#     'step8_king.pipelines.FilePipeline': 500,
# }

# 12. Custom extensions, invoked via signals
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     # 'step8_king.extensions.MyExtension': 500,
# }

# 13. Maximum crawl depth; the current depth can be read from meta; 0 means unlimited
# DEPTH_LIMIT = 3
# response.meta.get("depth", 0)

# 14. Crawl order: 0 = LIFO, depth-first (default); 1 = FIFO, breadth-first
# depth-first (LIFO)
# DEPTH_PRIORITY = 0
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'
# breadth-first (FIFO)
# DEPTH_PRIORITY = 1
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'

# 15. Scheduler queue
# SCHEDULER = 'scrapy.core.scheduler.Scheduler'
# from scrapy.core.scheduler import Scheduler

# 16. URL deduplication
# DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl'

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
"""
17. AutoThrottle algorithm
    from scrapy.contrib.throttle import AutoThrottle
    1. get the minimum delay: DOWNLOAD_DELAY
    2. get the maximum delay: AUTOTHROTTLE_MAX_DELAY
    3. set the initial download delay: AUTOTHROTTLE_START_DELAY
    4. when a request completes, record its latency, i.e. the time between connecting and receiving the response headers
    5. the new delay is then computed using AUTOTHROTTLE_TARGET_CONCURRENCY:
        target_delay = latency / self.target_concurrency
        new_delay = (slot.delay + target_delay) / 2.0   # slot.delay is the previous delay
        new_delay = max(target_delay, new_delay)
        new_delay = min(max(self.mindelay, new_delay), self.maxdelay)
        slot.delay = new_delay
"""
# Enable AutoThrottle
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = True

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# Log level
LOG_LEVEL = 'ERROR'
LOG_FILE = './log.txt'
"""
18. HTTP cache
    Caches requests/responses that have already been sent so they can be reused later.
    from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
    from scrapy.extensions.httpcache import DummyPolicy
    from scrapy.extensions.httpcache import FilesystemCacheStorage
"""
# Enable the cache
# HTTPCACHE_ENABLED = True
# Cache policy: cache every request; later requests are served straight from the cache
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"
# Cache policy: cache according to HTTP response headers such as Cache-Control and Last-Modified
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
# Cache expiration time
# HTTPCACHE_EXPIRATION_SECS = 0
# Cache directory
# HTTPCACHE_DIR = 'httpcache'
# HTTP status codes never cached
# HTTPCACHE_IGNORE_HTTP_CODES = []
# Cache storage backend
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

"""
19. Proxies, configured via environment variables or a custom middleware
    from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware

    Option 1: use the default HttpProxyMiddleware, which reads os.environ
        {
            http_proxy: http://root:woshiniba@192.168.11.11:9999/
            https_proxy: http://192.168.11.11:9999/
        }

    Option 2: use a custom downloader middleware

        def to_bytes(text, encoding=None, errors='strict'):
            if isinstance(text, bytes):
                return text
            if not isinstance(text, six.string_types):
                raise TypeError('to_bytes must receive a unicode, str or bytes '
                                'object, got %s' % type(text).__name__)
            if encoding is None:
                encoding = 'utf-8'
            return text.encode(encoding, errors)

        class ProxyMiddleware(object):
            def process_request(self, request, spider):
                PROXIES = [
                    {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                    {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                    {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                    {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                    {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                    {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
                ]
                proxy = random.choice(PROXIES)
                if proxy['user_pass'] is not None:
                    request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
                    encoded_user_pass = base64.encodestring(to_bytes(proxy['user_pass']))
                    request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
                    print("**************ProxyMiddleware have pass************" + proxy['ip_port'])
                else:
                    print("**************ProxyMiddleware no pass************" + proxy['ip_port'])
                    request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])

        DOWNLOADER_MIDDLEWARES = {
            'step8_king.middlewares.ProxyMiddleware': 500,
        }
"""

"""
20. HTTPS
    There are two cases when crawling HTTPS sites:
    1. The site uses a trusted certificate (supported by default)
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"
    2. The site uses a custom certificate
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"

        # https.py
        from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
        from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)

        class MySSLFactory(ScrapyClientContextFactory):
            def getCertificateOptions(self):
                from OpenSSL import crypto
                v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
                v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
                return CertificateOptions(
                    privateKey=v1,   # pKey object
                    certificate=v2,  # X509 object
                    verify=False,
                    method=getattr(self, 'method', getattr(self, '_ssl_method', None))
                )

    Related classes:
        scrapy.core.downloader.handlers.http.HttpDownloadHandler
        scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
        scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
    Related settings:
        DOWNLOADER_HTTPCLIENTFACTORY
        DOWNLOADER_CLIENTCONTEXTFACTORY
"""

"""
21. Spider middleware

    class SpiderMiddleware(object):
        def process_spider_input(self, response, spider):
            '''
            Called for each response after download, before it is handed to parse.
            '''
            pass

        def process_spider_output(self, response, result, spider):
            '''
            Called with the results the spider returns.
            :return: must return an iterable of Request or Item objects
            '''
            return result

        def process_spider_exception(self, response, exception, spider):
            '''
            Called on exception.
            :return: None to let later middlewares handle it, or an iterable of
                     Response/Item objects handed to the scheduler or pipelines
            '''
            return None

        def process_start_requests(self, start_requests, spider):
            '''
            Called with the spider's start requests when the spider is opened.
            :return: an iterable of Request objects
            '''
            return start_requests

    Built-in spider middlewares:
        'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
        'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
        'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
        'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
        'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
"""
# from scrapy.contrib.spidermiddleware.referer import RefererMiddleware
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    # 'step8_king.middlewares.SpiderMiddleware': 543,
}

"""
22. Downloader middleware

    class DownMiddleware1(object):
        def process_request(self, request, spider):
            '''
            Called for every request that passes through the downloader middlewares.
            :return: None - continue to the next middleware / downloader
                     Response object - stop the process_request chain, start process_response
                     Request object - stop the middleware chain, reschedule the Request
                     raise IgnoreRequest - stop process_request, start process_exception
            '''
            pass

        def process_response(self, request, response, spider):
            '''
            Called with the response returned from the downloader.
            :return: Response object - passed on to the next middleware's process_response
                     Request object - stop the middleware chain, reschedule the request
                     raise IgnoreRequest - call Request.errback
            '''
            print('response1')
            return response

        def process_exception(self, request, exception, spider):
            '''
            Called when the download handler or process_request() raises an exception.
            :return: None - continue to the next middleware's process_exception
                     Response object - stop the process_exception chain
                     Request object - stop the middleware chain, reschedule the request
            '''
            return None

    Default downloader middlewares:
        {
            'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
            'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
            'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
            'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
            'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
            'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
            'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
            'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
            'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
            'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
            'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
            'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
            'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
            'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
        }
"""
# from scrapy.contrib.downloadermiddleware.httpauth import HttpAuthMiddleware
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#     'step8_king.middlewares.DownMiddleware1': 100,
#     'step8_king.middlewares.DownMiddleware2': 500,
# }
Pausing and resuming a crawl
# Stopping from PyCharm is equivalent to kill -9: the process never receives the CTRL+C signal
# End the crawl with CTRL+C instead
scrapy crawl lagou -s JOBDIR=job_info/001   # JOBDIR can also be set in settings.py
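With JOBDIR set, Scrapy also persists the spider's state dict across pause/resume; a small sketch (the counter key is illustrative):

def parse(self, response):
    # self.state survives a pause/resume cycle when running with -s JOBDIR=...
    self.state['pages_seen'] = self.state.get('pages_seen', 0) + 1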
TinyScrapy

from twisted.web.client import getPage
from twisted.internet import reactor
from twisted.internet import defer

url_list = ['http://www.bing.com', 'http://www.baidu.com', ]


def callback(arg):
    print('got one response', arg)


defer_list = []
for url in url_list:
    ret = getPage(bytes(url, encoding='utf8'))
    ret.addCallback(callback)
    defer_list.append(ret)


def stop(arg):
    print('all downloads finished', arg)
    reactor.stop()


d = defer.DeferredList(defer_list)
d.addBoth(stop)
reactor.run()

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from twisted.web.client import getPage
from twisted.internet import reactor
from twisted.internet import defer


@defer.inlineCallbacks
def task(url):
    ret = getPage(bytes(url, encoding='utf8'))
    ret.addCallback(callback)
    yield ret


def callback(arg):
    print('got one response', arg)


url_list = ['http://www.bing.com', 'http://www.baidu.com', ]
defer_list = []
for url in url_list:
    ret = task(url)
    defer_list.append(ret)


def stop(arg):
    print('all downloads finished', arg)
    reactor.stop()


d = defer.DeferredList(defer_list)
d.addBoth(stop)
reactor.run()

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from twisted.internet import defer
from twisted.web.client import getPage
from twisted.internet import reactor
import threading


def _next_request():
    _next_request_from_scheduler()


def _next_request_from_scheduler():
    ret = getPage(bytes('http://www.chouti.com', encoding='utf8'))
    ret.addCallback(callback)
    ret.addCallback(lambda _: reactor.callLater(0, _next_request))


_closewait = None


@defer.inlineCallbacks
def engine_start():
    global _closewait
    _closewait = defer.Deferred()
    yield _closewait


@defer.inlineCallbacks
def task(url):
    reactor.callLater(0, _next_request)
    yield engine_start()


counter = 0


def callback(arg):
    global counter
    counter += 1
    if counter == 10:
        _closewait.callback(None)
    print('one', len(arg))


def stop(arg):
    print('all done', arg)
    reactor.stop()


if __name__ == '__main__':
    url = 'http://www.cnblogs.com'
    defer_list = []
    deferObj = task(url)
    defer_list.append(deferObj)
    v = defer.DeferredList(defer_list)
    v.addBoth(stop)
    reactor.run()

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from twisted.web.client import getPage, defer
from twisted.internet import reactor
import queue


class Response(object):
    def __init__(self, body, request):
        self.body = body
        self.request = request
        self.url = request.url

    @property
    def text(self):
        return self.body.decode('utf-8')


class Request(object):
    def __init__(self, url, callback=None):
        self.url = url
        self.callback = callback


class Scheduler(object):
    def __init__(self, engine):
        self.q = queue.Queue()
        self.engine = engine

    def enqueue_request(self, request):
        self.q.put(request)

    def next_request(self):
        try:
            req = self.q.get(block=False)
        except Exception as e:
            req = None
        return req

    def size(self):
        return self.q.qsize()


class ExecutionEngine(object):
    def __init__(self):
        self._closewait = None
        self.running = True
        self.start_requests = None
        self.scheduler = Scheduler(self)
        self.inprogress = set()

    def check_empty(self, response):
        if not self.running:
            self._closewait.callback('......')

    def _next_request(self):
        while self.start_requests:
            try:
                request = next(self.start_requests)
            except StopIteration:
                self.start_requests = None
            else:
                self.scheduler.enqueue_request(request)

        while len(self.inprogress) < 5 and self.scheduler.size() > 0:  # max concurrency is 5
            request = self.scheduler.next_request()
            if not request:
                break
            self.inprogress.add(request)
            d = getPage(bytes(request.url, encoding='utf-8'))
            d.addBoth(self._handle_downloader_output, request)
            d.addBoth(lambda x, req: self.inprogress.remove(req), request)
            d.addBoth(lambda x: self._next_request())

        if len(self.inprogress) == 0 and self.scheduler.size() == 0:
            self._closewait.callback(None)

    def _handle_downloader_output(self, body, request):
        """
        Build the response, run the request's callback, and put any requests it yields back onto the queue.
        """
        import types
        response = Response(body, request)
        func = request.callback or self.spider.parse
        gen = func(response)
        if isinstance(gen, types.GeneratorType):
            for req in gen:
                self.scheduler.enqueue_request(req)

    @defer.inlineCallbacks
    def start(self):
        self._closewait = defer.Deferred()
        yield self._closewait

    def open_spider(self, spider, start_requests):
        self.start_requests = start_requests
        self.spider = spider
        reactor.callLater(0, self._next_request)


class Crawler(object):
    def __init__(self, spidercls):
        self.spidercls = spidercls
        self.spider = None
        self.engine = None

    @defer.inlineCallbacks
    def crawl(self):
        self.engine = ExecutionEngine()
        self.spider = self.spidercls()
        start_requests = iter(self.spider.start_requests())
        self.engine.open_spider(self.spider, start_requests)
        yield self.engine.start()


class CrawlerProcess(object):
    def __init__(self):
        self._active = set()
        self.crawlers = set()

    def crawl(self, spidercls, *args, **kwargs):
        crawler = Crawler(spidercls)
        self.crawlers.add(crawler)
        d = crawler.crawl(*args, **kwargs)
        self._active.add(d)
        return d

    def start(self):
        dl = defer.DeferredList(self._active)
        dl.addBoth(self._stop_reactor)
        reactor.run()

    def _stop_reactor(self, _=None):
        reactor.stop()


class Spider(object):
    def start_requests(self):
        for url in self.start_urls:
            yield Request(url)


class ChoutiSpider(Spider):
    name = "chouti"
    start_urls = ['http://dig.chouti.com/', ]

    def parse(self, response):
        print(response.text)


class CnblogsSpider(Spider):
    name = "cnblogs"
    start_urls = ['http://www.cnblogs.com/', ]

    def parse(self, response):
        print(response.text)


if __name__ == '__main__':
    spider_cls_list = [ChoutiSpider, CnblogsSpider]
    crawler_process = CrawlerProcess()
    for spider_cls in spider_cls_list:
        crawler_process.crawl(spider_cls)
    crawler_process.start()

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import types
from twisted.internet import defer
from twisted.web.client import getPage
from twisted.internet import reactor


class Request(object):
    def __init__(self, url, callback):
        self.url = url
        self.callback = callback
        self.priority = 0


class HttpResponse(object):
    def __init__(self, content, request):
        self.content = content
        self.request = request


class ChouTiSpider(object):
    def start_requests(self):
        url_list = ['http://www.cnblogs.com/', 'http://www.bing.com']
        for url in url_list:
            yield Request(url=url, callback=self.parse)

    def parse(self, response):
        print(response.request.url)
        # yield Request(url="http://www.baidu.com", callback=self.parse)


from queue import Queue
Q = Queue()


class CallLaterOnce(object):
    def __init__(self, func, *a, **kw):
        self._func = func
        self._a = a
        self._kw = kw
        self._call = None

    def schedule(self, delay=0):
        if self._call is None:
            self._call = reactor.callLater(delay, self)

    def cancel(self):
        if self._call:
            self._call.cancel()

    def __call__(self):
        self._call = None
        return self._func(*self._a, **self._kw)


class Engine(object):
    def __init__(self):
        self.nextcall = None
        self.crawlling = []
        self.max = 5
        self._closewait = None

    def get_response(self, content, request):
        response = HttpResponse(content, request)
        gen = request.callback(response)
        if isinstance(gen, types.GeneratorType):
            for req in gen:
                req.priority = request.priority + 1
                Q.put(req)

    def rm_crawlling(self, response, d):
        self.crawlling.remove(d)

    def _next_request(self, spider):
        if Q.qsize() == 0 and len(self.crawlling) == 0:
            self._closewait.callback(None)

        if len(self.crawlling) >= 5:
            return
        while len(self.crawlling) < 5:
            try:
                req = Q.get(block=False)
            except Exception as e:
                req = None
            if not req:
                return
            d = getPage(req.url.encode('utf-8'))
            self.crawlling.append(d)
            d.addCallback(self.get_response, req)
            d.addCallback(self.rm_crawlling, d)
            d.addCallback(lambda _: self.nextcall.schedule())

    @defer.inlineCallbacks
    def crawl(self):
        spider = ChouTiSpider()
        start_requests = iter(spider.start_requests())
        flag = True
        while flag:
            try:
                req = next(start_requests)
                Q.put(req)
            except StopIteration as e:
                flag = False

        self.nextcall = CallLaterOnce(self._next_request, spider)
        self.nextcall.schedule()

        self._closewait = defer.Deferred()
        yield self._closewait

    @defer.inlineCallbacks
    def pp(self):
        yield self.crawl()


_active = set()
obj = Engine()
d = obj.crawl()
_active.add(d)

li = defer.DeferredList(_active)
li.addBoth(lambda _, *a, **kw: reactor.stop())
reactor.run()
More documentation: http://scrapy-chs.readthedocs.io/zh_CN/latest/index.html