Crawling 全书网 novels with Scrapy and saving them to MySQL

1. Connect PyCharm to the remote server: configure SSH, Deployment, and the Remote Host.

2. On the server, in the project directory, create the Scrapy project and the spider:

scrapy startproject qsw
cd qsw
scrapy genspider xs quanshu.92kaifa.com
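
For orientation, the layout these commands generate looks roughly like this (names can vary slightly between Scrapy versions):

qsw/
    scrapy.cfg
    qsw/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            xs.py    # created by genspider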

3. Pull the generated project files into PyCharm over the deployment connection.

4. Write the spider (spiders/xs.py)

# -*- coding: utf-8 -*-
import scrapy
from ..items import QswItem, Contentitem
from datetime import datetime


class XsSpider(scrapy.Spider):
    name = 'xs'
    allowed_domains = ['quanshu.92kaifa.com']
    # start URL: a category listing page
    start_urls = ['http://quanshu.92kaifa.com/list/2_0.html']

    # extract the URL of every book on the listing page
    def parse(self, response):
        url = response.xpath('//ul[@class="seeWell cf"]/li/a/@href').extract()
        d_url = 'http://quanshu.92kaifa.com'
        for u in url[:1]:  # only the first book is crawled here
            d_u = d_url + u
            # request the book's detail page
            yield scrapy.Request(url=d_u, callback=self.parse_connet)
    # parse the book detail page and extract its metadata
    def parse_connet(self, response):
        item = QswItem()
        item['title'] = response.xpath('//div[@class="detail"]/div/h1/text()').extract_first()
        item['author'] = response.xpath('//div[@class="bookDetail"]/dl[3]/dd/a/text()').extract_first()
        item['hits'] = response.xpath('//div[@class="bookDetail"]/dl[2]/dd/text()').extract_first()
        item['state'] = response.xpath('//div[@class="bookDetail"]/dl[1]/dd/text()').extract_first()
        item['introd'] = response.xpath('//div[@id="waa"]/text()').extract_first()
        item['url'] = response.url
        item['c_time'] = datetime.now()
        # follow the link to the chapter list page, passing the item along in meta
        e_url = 'http://quanshu.92kaifa.com'
        zj_url = e_url + response.xpath('//div[@class="b-oper"]/a[1]/@href').extract_first()
        return scrapy.Request(url=zj_url, callback=self.parse_info, meta={'info': item})
    # extract the chapter list, then queue requests for chapters whose content is still empty
    def parse_info(self, response):
        r_url = 'http://quanshu.92kaifa.com'
        info = response.meta.get('info')
        zjinfo = response.xpath('//div[@class="clearfix dirconone"]/li/a')
        infos = [(a.xpath('./text()').extract_first(), r_url + a.xpath('./@href').extract_first()) for a in zjinfo]
        info['res_info'] = infos
        yield info
        # chapter content: self.cursor is attached to the spider by the pipeline's open_spider
        sql = 'select id, url from contents where content is NULL'  # fetchall returns a tuple of rows
        self.cursor.execute(sql)
        for i in self.cursor.fetchall():
            # request each chapter URL, carrying the row id so the pipeline can update it later
            yield scrapy.Request(i[1], callback=self.parse_content_info, meta={'id': i[0]})
    # extract the chapter text; the pipeline will update the matching row
    def parse_content_info(self, response):
        item = Contentitem()
        item['content'] = ''.join(response.xpath('//div[@id="content"]/text()').extract()).strip()
        item['c_id'] = response.meta['id']
        item['c_url'] = response.url
        yield item
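
Note that self.cursor is never created in the spider itself; the pipeline attaches it in open_spider (step 6), so the pipeline must be enabled in settings for this spider to work. Before a full crawl, the XPath selectors can be sanity-checked interactively in the Scrapy shell, for example:

scrapy shell 'http://quanshu.92kaifa.com/list/2_0.html'
>>> response.xpath('//ul[@class="seeWell cf"]/li/a/@href').extract()[:3]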

 


5. The items file (items.py)

import scrapy


class QswItem(scrapy.Item):
    # book metadata
    title = scrapy.Field()
    author = scrapy.Field()
    hits = scrapy.Field()
    state = scrapy.Field()
    introd = scrapy.Field()
    url = scrapy.Field()
    res_info = scrapy.Field()  # list of (chapter title, chapter url) tuples
    c_time = scrapy.Field()


# item for a single chapter's content
class Contentitem(scrapy.Item):
    content = scrapy.Field()
    c_id = scrapy.Field()
    c_url = scrapy.Field()

 

6. Pipelines (pipelines.py)

import logging

import pymysql
from scrapy.exceptions import DropItem
from .items import QswItem, Contentitem

logger = logging.getLogger(__name__)


class QswPipeline:
    def open_spider(self, spider):
        # read the MySQL config from settings.py
        data_config = spider.settings['DATA_CONFIG']
        # only the mysql type is handled
        if data_config['type'] == 'mysql':
            self.conn = pymysql.connect(**data_config['config'])  # open the connection
            self.cursor = self.conn.cursor()
            # attach the connection and cursor to the spider so parse_info can query chapter rows
            spider.conn = self.conn
            spider.cursor = self.cursor
    # store the items
    def process_item(self, item, spider):
        print('*' * 50)
        # dispatch on the item type
        if isinstance(item, QswItem):
            # is this book already in the indo table?
            sql = 'select id from indo where title=%s and author=%s'
            self.cursor.execute(sql, (item['title'], item['author']))
            if self.cursor.fetchone():
                print('book already stored, skipping insert')
            else:
                try:
                    # insert the book metadata
                    sql = 'insert into indo(title,author,hits,state,introd,url,c_time) VALUES (%s,%s,%s,%s,%s,%s,%s)'
                    self.cursor.execute(sql, (
                        item['title'],
                        item['author'],
                        item['hits'],
                        item['state'],
                        item['introd'],
                        item['url'],
                        item['c_time'],
                    ))
                    self.conn.commit()
                    # write the chapter rows, linked to the book via the foreign key indo_id
                    indo_id = self.cursor.lastrowid
                    sql = 'insert into contents (indo_id,title,order1,c_time,url) VALUES '
                    for index, infos in enumerate(item['res_info']):
                        title, url = infos
                        # build one multi-row insert; double quotes are stripped so the literal stays valid
                        temp = '(%s,"%s",%s,"%s","%s"),' % (indo_id, title.replace('"', ''), index, item['c_time'], url)
                        sql += temp
                    sql = sql[:-1]  # drop the trailing comma
                    try:
                        self.cursor.execute(sql)
                        self.conn.commit()
                    except Exception as e:
                        self.conn.rollback()
                        logger.warning('chapter insert failed %s-%s' % (item['url'], e))
                except Exception as e:
                    self.conn.rollback()
                    logger.warning('book insert failed %s-%s' % (item['url'], e))
        elif isinstance(item, Contentitem):
            print('-' * 50)
            # fill in the chapter text for the row that was queued earlier
            sql = 'update contents set content=%s where id = %s'
            try:
                self.cursor.execute(sql, (item['content'], item['c_id']))
                self.conn.commit()
            except Exception as e:
                self.conn.rollback()
                logger.warning('content update failed %s-%s' % (item['c_url'], e))
        else:
            raise DropItem('unexpected item type: %s' % type(item))
        return item

    def close_spider(self, spider):
        data_config = spider.settings['DATA_CONFIG']
        if data_config['type'] == 'mysql':
            self.cursor.close()
            self.conn.close()
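
The chapter insert above splices values into one long SQL string by hand, which is why double quotes have to be stripped from the titles. A minimal alternative sketch for that same block, assuming the same contents table and reusing the surrounding variables (indo_id, item, self.cursor, self.conn, logger), lets pymysql escape each value via executemany:

sql = 'insert into contents (indo_id, title, order1, c_time, url) values (%s, %s, %s, %s, %s)'
rows = [(indo_id, title, index, item['c_time'], url)
        for index, (title, url) in enumerate(item['res_info'])]
try:
    self.cursor.executemany(sql, rows)  # the driver escapes every value
    self.conn.commit()
except Exception as e:
    self.conn.rollback()
    logger.warning('chapter insert failed %s-%s' % (item['url'], e))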

 

7. Settings (settings.py)

ROBOTSTXT_OBEY = False

DOWNLOADER_MIDDLEWARES = {
   'qsw.middlewares.QswDownloaderMiddleware': 543,
}

ITEM_PIPELINES = {
   'qsw.pipelines.QswPipeline': 300,
}

DOWNLOAD_DELAY = 1.5

# MySQL connection settings read by the pipeline
DATA_CONFIG = {
    'type': 'mysql',
    'config': {
        'host': '192.168.153.128',
        'port': 3306,
        'user': 'root',
        'password': '123456',
        'db': 'ssss',
        'charset': 'utf8'
    }
}
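
Once the tables from the next section exist, the crawl is started from the project root with the standard Scrapy command:

scrapy crawl xs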
​

Create the database tables

create database ssss charset=utf8;

CREATE TABLE indo(
    id int unsigned NOT NULL AUTO_INCREMENT,
    title varchar(64),
    author varchar(64),
    hits varchar(20),
    state varchar(20),
    introd TEXT,
    url varchar(128),
    c_time DATETIME,
    PRIMARY KEY (id)
);

 

 

CREATE TABLE contents(
    id int unsigned NOT NULL AUTO_INCREMENT,
    indo_id int unsigned NOT NULL,
    title varchar(64) NULL,
    content mediumtext NULL,
    order1 varchar(20) NULL,
    c_time datetime NOT NULL,
    url varchar(128) NULL,
    PRIMARY KEY (id),
    FOREIGN KEY (indo_id) REFERENCES indo(id)
);
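
After a run, a few quick checks against the ssss database (plain SQL against the tables above) show whether the book row, the chapter rows, and the chapter text were all written:

select title, author from indo;
select count(*) from contents;                        -- chapter rows created
select count(*) from contents where content is null;  -- chapters still missing their text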

 

posted @ 2021-02-25 13:50  瑾年ぺ