
Scrapy crawler: scraping and downloading images from Tuchong

August 27, 2018


items.py: decide what data you need and define the item fields accordingly.

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class TodayScrapyItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass


class TuchongItem(scrapy.Item):
    title = scrapy.Field()      # gallery title
    views = scrapy.Field()      # number of views
    favorites = scrapy.Field()  # number of likes
    img_url = scrapy.Field()    # image URL

    # def get_insert_sql(self):
    #     # SQL statement used when storing the item
    #     sql = 'insert into tuchong(title,views,favorites,img_url)' \
    #           ' values (%s, %s, %s, %s)'
    #     # the data to store
    #     data = (self['title'], self['views'], self['favorites'], self['img_url'])
    #     return (sql, data)
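
A quick note for readers new to Scrapy: Item instances behave like dicts, which is exactly how the pipeline below and the commented-out get_insert_sql() read their fields. A short illustration (the values are made up):

from today_scrapy.items import TuchongItem

item = TuchongItem()
item['title'] = 'demo'   # only fields declared on the class can be assigned
item['views'] = 100
print(item['title'], item.get('favorites', 0))  # dict-style access, with defaults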

settings.py: set the default request headers and enable the item pipeline.

# -*- coding: utf-8 -*-

# Scrapy settings for today_scrapy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'today_scrapy'

SPIDER_MODULES = ['today_scrapy.spiders']
NEWSPIDER_MODULE = 'today_scrapy.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'today_scrapy (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'today_scrapy.middlewares.TodayScrapySpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'today_scrapy.middlewares.TodayScrapyDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   # 'today_scrapy.pipelines.TodayScrapyPipeline': 300,
    'today_scrapy.pipelines.TuchongPipeline': 200,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

pipelines.py: download each image into a folder named after its gallery.

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import requests

class TodayScrapyPipeline(object):
    def process_item(self, item, spider):
        return item

class TuchongPipeline(object):
    def process_item(self, item, spider):
        img_url = item['img_url']  # image URL taken from the item
        img_title = item['title']  # gallery title, used as the folder name
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
            'Cookie': 'webp_enabled=1; bad_ide7dfc0b0-b3b6-11e7-b58e-df773034efe4=78baed41-a870-11e8-b7fd-370d61367b46; _ga=GA1.2.1188216139.1535263387; _gid=GA1.2.1476686092.1535263387; PHPSESSID=4k7pb6hmkml8tjsbg0knii25n6'
        }
        if not os.path.exists(img_title):
            os.mkdir(img_title)
        filename = img_url.split('/')[-1]
        # the with block closes the file automatically
        with open(img_title + '/' + filename, 'wb') as f:
            f.write(requests.get(img_url, headers=headers).content)
        return item
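
The commented-out get_insert_sql() method in items.py suggests MySQL storage was also planned. Purely as a minimal sketch (not part of the original project), here is a pipeline that could consume that method, assuming it is uncommented, pymysql is installed, and a local MySQL database already contains a matching tuchong table; all connection parameters are placeholders:

import pymysql

class MysqlPipeline(object):
    # hypothetical pipeline, added for illustration only
    def open_spider(self, spider):
        # placeholder credentials -- adjust to your own MySQL setup
        self.conn = pymysql.connect(host='localhost', user='root',
                                    password='', db='spider', charset='utf8mb4')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # get_insert_sql() returns the parameterized SQL plus the value tuple
        sql, data = item.get_insert_sql()
        self.cursor.execute(sql, data)
        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()

A side note on TuchongPipeline itself: requests.get() is a blocking call inside process_item, so every download stalls Scrapy's otherwise asynchronous engine; for larger crawls, Scrapy's built-in ImagesPipeline is the usual alternative.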

The spider file

tuchong.py

The image URL can be built by straightforward string concatenation, from the user_id and img_id returned by the API.

# -*- coding: utf-8 -*-
import scrapy
import json
from today_scrapy.items import TuchongItem


class TuchongSpider(scrapy.Spider):
    name = 'tuchong'
    allowed_domains = ['tuchong.com']
    start_urls = ['http://tuchong.com/']

    def start_requests(self):
        for pag in range(1, 20):
            # the tag in the URL ("自然", i.e. "nature") can be swapped for any other tag
            referer_url = 'https://tuchong.com/rest/tags/自然/posts?page={}&count=20'.format(pag)
            form_req = scrapy.Request(url=referer_url, callback=self.parse)
            form_req.headers['Referer'] = referer_url
            yield form_req

    def parse(self, response):
        tuchong_info_html = json.loads(response.text)
        # print(tuchong_info_html)
        postlist_c = len(tuchong_info_html['postList'])
        # print(postlist_c)
        for c in range(postlist_c):
            print(c)
            # print(tuchong_info_html['postList'][c])
            title = tuchong_info_html['postList'][c]['title']
            print('Gallery title: ' + title)
            views = tuchong_info_html['postList'][c]['views']
            print(str(views) + ' views')
            favorites = tuchong_info_html['postList'][c]['favorites']
            print('Number of likes: ' + str(favorites))
            images_c = len(tuchong_info_html['postList'][c]['images'])
            for img_c in range(images_c):
                user_id = tuchong_info_html['postList'][c]['images'][img_c]['user_id']
                img_id = tuchong_info_html['postList'][c]['images'][img_c]['img_id']
                img_url = 'https://photo.tuchong.com/{}/f/{}.jpg'.format(user_id, img_id)
                item = TuchongItem()
                item['title'] = title
                item['views'] = views          # fill the remaining declared fields
                item['favorites'] = favorites
                item['img_url'] = img_url
                # hand the item to the pipeline
                yield item
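
With the three files in place, the spider is launched from the project root with the standard command scrapy crawl tuchong. Equivalently, a small runner script (a convenience added here, not something from the original post) makes it easy to start from an IDE:

# run.py -- assumed to sit in the project root, next to scrapy.cfg
from scrapy.cmdline import execute

# equivalent to running "scrapy crawl tuchong" on the command line
execute(['scrapy', 'crawl', 'tuchong'])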

 
