Crawling a blog with Scrapy

items.py

import scrapy


class FulongpjtItem(scrapy.Item):
    # Fields collected for each blog post
    name = scrapy.Field()     # post title
    url = scrapy.Field()      # post URL
    hits = scrapy.Field()     # view count
    comment = scrapy.Field()  # comment count
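
FulongpjtItem works like a dict. In this project each field ends up holding a list with one entry per post on a list page; a quick illustration with made-up placeholder values (not real data):

from Fulongpjt.items import FulongpjtItem

item = FulongpjtItem()
item['name'] = ['post A', 'post B']  # placeholder titles
item['hits'] = ['120', '35']         # counts arrive as strings from the regex
print(item['name'][0])               # -> post A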


pipelines.py

import pymysql


class FulongpjtPipeline(object):
    def __init__(self):
        # Connect to the local MySQL database that stores the scraped posts
        self.conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456',
                                    db='mydb', charset='utf8')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # Each field holds a list with one entry per post on the page
        for j in range(len(item['name'])):
            name = item['name'][j]
            url = item['url'][j]
            hits = item['hits'][j]
            comment = item['comment'][j]
            sql = "insert into boke(name,url,hits,comment) values(%s,%s,%s,%s)"
            self.cursor.execute(sql, (name, url, hits, comment))
        self.conn.commit()  # commit once per page of posts
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()
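
Before the pipeline receives any items, it has to be enabled in the project's settings.py, and the boke table must exist in mydb. A minimal sketch of both follows; the priority value 300 and the column types are assumptions, adjust them to your own schema:

# settings.py -- enable the pipeline (300 is an assumed priority value)
ITEM_PIPELINES = {
    'Fulongpjt.pipelines.FulongpjtPipeline': 300,
}

# one-off setup script: create the boke table (column types are assumed)
import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456',
                       db='mydb', charset='utf8')
with conn.cursor() as cursor:
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS boke (
            id INT AUTO_INCREMENT PRIMARY KEY,
            name VARCHAR(255),
            url VARCHAR(255),
            hits INT,
            `comment` INT
        ) DEFAULT CHARSET=utf8
    """)
conn.commit()
conn.close()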

spd.py

import scrapy
from Fulongpjt.items import FulongpjtItem
from scrapy.http import Request
import re
import urllib.request
class MyspdSpider(scrapy.Spider):
    name = "myspd"
    allowed_domains = ["hexun.com"]
    start_urls = ['http://hexun.com/']
    uid = '19940007'  # id of the hexun.com blog to crawl
    # Browser User-Agent, reused for every request so the site serves normal pages
    ua = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36 QIHU 360EE'

    def start_requests(self):
        # Begin with page 1 of the blog's article list
        yield Request('http://' + self.uid + '.blog.hexun.com/p1/default.html',
                      headers={'User-Agent': self.ua})

    def parse(self, response):
        item = FulongpjtItem()
        # Titles and links of every post on this list page
        item['name'] = response.xpath('//span[@class="ArticleTitleText"]/a/text()').extract()
        item['url'] = response.xpath('//span[@class="ArticleTitleText"]/a/@href').extract()
        # Hit and comment counts are injected by a separate counter script;
        # locate its URL in the page source and fetch it directly
        part1 = r'<script type="text/javascript" src="(http://click.tool.hexun.com/.*?)">'
        hcurl = re.compile(part1).findall(response.text)[0]
        req = urllib.request.Request(hcurl, headers={'User-Agent': self.ua})
        data = urllib.request.urlopen(req).read().decode('utf-8', 'ignore')
        part2 = r"click\d*?','(\d*?)'"
        part3 = r"comment\d*?','(\d*?)'"
        item['hits'] = re.compile(part2).findall(data)
        item['comment'] = re.compile(part3).findall(data)
        yield item
        # Extract the total number of list pages from the pager links
        part4 = r'blog.hexun.com/p(.*?)/'
        data2 = re.compile(part4).findall(response.text)
        if len(data2) > 2:
            totalurl = data2[-2]  # the next-to-last match is the last page number
        else:
            totalurl = 1
        # Queue the remaining list pages through the same callback
        for i in range(2, int(totalurl) + 1):
            next_url = 'http://' + self.uid + '.blog.hexun.com/p' + str(i) + '/default.html'
            yield Request(next_url, callback=self.parse, headers={'User-Agent': self.ua})
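
With the three files in place, the spider can be started from the project root with scrapy crawl myspd. It can also be driven from a plain script; a minimal sketch, assuming the spider file lives at Fulongpjt/spiders/spd.py:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from Fulongpjt.spiders.spd import MyspdSpider

# Load the project settings (so ITEM_PIPELINES takes effect) and run the crawl
process = CrawlerProcess(get_project_settings())
process.crawl(MyspdSpider)
process.start()  # blocks until the crawl is finished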

