
Crawling Douban Book Data with Scrapy and Writing It to MySQL

November 11, 2019


Project repository: bookspider

Introduction

This post covers how to scrape all of the books under a Douban category (tag) and write the data to MySQL.

Prerequisites

Python 3.6, Scrapy, Twisted, MySQLdb (the MySQLdb module is provided by the mysqlclient package on Python 3), and so on.
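These can be installed with pip; a minimal sketch, assuming a standard pip setup:

pip install scrapy mysqlclient

Scrapy pulls in Twisted as a dependency, so it does not need to be installed separately.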


Code


1. Create the project
scrapy startproject bookspider              # create the project
scrapy genspider douban book.douban.com     # generate the Douban spider
2. Create a launch script (main.py)
from scrapy.cmdline import execute
execute(['scrapy', 'crawl', 'douban'])
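Running main.py from the project root is equivalent to typing scrapy crawl douban on the command line; wrapping it in a script simply makes it easy to launch and debug the spider from an IDE.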
3. Update the settings (bookspider/settings.py)
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'  # masquerade as a browser
ROBOTSTXT_OBEY = False  # do not honor Douban's robots.txt
4. Set the category to crawl (spiders/douban.py)
start_urls = ['https://book.douban.com/tag/神经网络']  # test with the "neural networks" tag only
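For reference, the spider module that scrapy genspider produces looks roughly like the skeleton below (the exact stub varies slightly by Scrapy version); the parse and parse_book methods from the following steps live inside this class:

import scrapy

class DoubanSpider(scrapy.Spider):
    name = 'douban'
    allowed_domains = ['book.douban.com']
    start_urls = ['https://book.douban.com/tag/神经网络']

    def parse(self, response):
        pass  # replaced by the implementation in step 5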
五、获取分类列表页图书数据


from scrapy.http import Request
from urllib.parse import urljoin

def parse(self, response):
    # each book entry on the tag listing page
    get_nodes = response.xpath('//div[@id="subject_list"]/ul/li/div[@class="pic"]/a')
    for node in get_nodes:
        url = node.xpath("@href").get()
        img_url = node.xpath('img/@src').get()
        # pass img_url along via meta; the parse_book callback handles the detail page
        yield Request(url=url, meta={"img_url": img_url}, callback=self.parse_book)

    # follow the "next page" link, if any
    next_url = response.xpath('//div[@class="paginator"]/span[@class="next"]/a/@href').get()
    if next_url:
        yield Request(url=urljoin(response.url, next_url), callback=self.parse)
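If an XPath expression comes back empty, it can be tried out interactively in Scrapy's shell before being committed to the spider, for example:

scrapy shell "https://book.douban.com/tag/神经网络"
>>> response.xpath('//div[@id="subject_list"]/ul/li/div[@class="pic"]/a/@href').getall()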
6. Define the item model (bookspider/items.py)
class BookspiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()
    author = scrapy.Field()
    publish = scrapy.Field()
    page_num = scrapy.Field()
    isbm = scrapy.Field()
    binding = scrapy.Field()
    publish_date = scrapy.Field()
    price = scrapy.Field()
    rate = scrapy.Field()
    img_url = scrapy.Field()
    image_path = scrapy.Field()
7. Scrape the book detail pages


import re
from bookspider.items import BookspiderItem

def parse_book(self, response):
    bookitem = BookspiderItem()
    bookitem['name'] = response.xpath('//span[@property="v:itemreviewed"]/text()').get("").strip()

    # the author may be prefixed with a country tag such as "[美]", so keep only the last token
    author = response.xpath('//span[contains(text(), "作者")]/following-sibling::a[1]/text()').get("")
    bookitem['author'] = author.split()[-1] if author.split() else ''

    bookitem['publish'] = response.xpath('//span[contains(text(), "出版社")]/following-sibling::text()').get("").strip()

    page_num = response.xpath('//span[contains(text(), "页数")]/following-sibling::text()').get("").strip()
    bookitem['page_num'] = 0 if page_num == '' else page_num

    bookitem['isbm'] = response.xpath('//span[contains(text(), "ISBN")]/following-sibling::text()').get("").strip()
    bookitem['binding'] = response.xpath('//span[contains(text(), "装帧")]/following-sibling::text()').get("").strip()
    bookitem['publish_date'] = response.xpath('//span[contains(text(), "出版年")]/following-sibling::text()').get("").strip()

    # keep only the numeric part of the price, e.g. "45.00元" -> "45.00"
    price = response.xpath('//span[contains(text(), "定价")]/following-sibling::text()').get("").strip()
    price_match = re.findall(r'\d+\.?\d*', price)
    bookitem['price'] = price_match[0] if price_match else ''

    bookitem['rate'] = response.xpath('//div[contains(@class, "rating_self ")]/strong/text()').get("").strip()

    bookitem['img_url'] = [response.meta.get('img_url')]  # the ImagesPipeline expects a list of URLs

    yield bookitem
8. Download the cover images

1. Create an images directory under the project
2. Configure bookspider/settings.py

import os

ITEM_PIPELINES = {
    'bookspider.pipelines.ImageStorePipeline': 1,  # the number is the pipeline priority (lower runs first)
}
IMAGES_URLS_FIELD = "img_url"  # must match the item field that holds the image URLs
IMAGES_STORE = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'images')

3. Create the ImageStorePipeline class (bookspider/pipelines.py)

from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy.http import Request

class ImageStorePipeline(ImagesPipeline):

    default_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',  # required, or the image host refuses the request
    }

    def get_media_requests(self, item, info):
        for image_url in item['img_url']:
            self.default_headers['Referer'] = image_url
            yield Request(image_url, headers=self.default_headers)

    def item_completed(self, results, item, info):
        # results is a list of (success, info) tuples; keep the paths of successful downloads
        image_path = [x['path'] for ok, x in results if ok]
        if not image_path:
            raise DropItem("Item contains no images")
        item['image_path'] = image_path
        return item
9. Write to the database

1. Configure bookspider/settings.py

# database settings
MYSQL_HOST = ""
MYSQL_DBNAME = ""
MYSQL_USER = ""
MYSQL_PASSWORD = ""
ITEM_PIPELINES = {
    'bookspider.pipelines.ImageStorePipeline': 1,
    'bookspider.pipelines.MysqlTwistedPipeline': 30,
}

2. Create the MysqlTwistedPipeline class (bookspider/pipelines.py)

import MySQLdb.cursors
from twisted.enterprise import adbapi

class MysqlTwistedPipeline(object):
    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        # Scrapy calls from_settings first, so the connection pool exists before process_item runs
        dbpool = adbapi.ConnectionPool(
            "MySQLdb",
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWORD'],
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        return cls(dbpool)

    def process_item(self, item, spider):
        # run the insert on Twisted's database thread pool so it does not block the crawl
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)
        return item

    def do_insert(self, cursor, item):
        insert_sql = """
            insert into douban(name, author, publish, page_num, isbm, binding, publish_date, price, rate, img_url, image_path)
            values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        # img_url and image_path are lists, so store their first entry
        cursor.execute(insert_sql, (item['name'], item['author'], item['publish'], item['page_num'],
                                    item['isbm'], item['binding'], item['publish_date'], item['price'],
                                    item['rate'], item['img_url'][0], item['image_path'][0]))

    def handle_error(self, failure, item, spider):
        print(failure)
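The insert statement assumes a douban table already exists. A minimal sketch of a matching table is below; the column types and lengths are assumptions, so adjust them to your data:

CREATE TABLE douban (
    id INT AUTO_INCREMENT PRIMARY KEY,
    name VARCHAR(255),
    author VARCHAR(255),
    publish VARCHAR(255),
    page_num INT,
    isbm VARCHAR(32),
    binding VARCHAR(64),
    publish_date VARCHAR(64),
    price VARCHAR(32),
    rate VARCHAR(16),
    img_url VARCHAR(512),
    image_path VARCHAR(512)
) DEFAULT CHARSET=utf8;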
10. Test

1. Run main.py
