We take a breadth-first approach and split the crawl into two parts: categories first, then product details.
1. Create the spider project (see the sketch after this list)
2. Define the data models based on the requirements
3. Implement the category spider
4. Save the category data
5. Implement the product spider
6. Save the product data
7. Implement downloader middlewares for random User-Agent headers and proxy IPs, to deal with IP-based anti-crawling
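For step 1, the project skeleton comes from Scrapy's scaffolding. This is normally typed in a shell as `scrapy startproject mall_spider` (followed by `scrapy genspider jd_category dc.3.cn` inside the project directory); as a sketch, the same entry point can also be driven from Python:

from scrapy.cmdline import execute

# Equivalent to running `scrapy startproject mall_spider` in a shell.
execute(['scrapy', 'startproject', 'mall_spider'])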
We can only rough out the data models from the requirements up front; they may need adjusting as the implementation progresses.
class Category(scrapy.Item):
    """Product category"""
    # First-level category name
    b_category_name = scrapy.Field()
    # First-level category URL
    b_category_url = scrapy.Field()
    # Second-level category name
    m_category_name = scrapy.Field()
    # Second-level category URL
    m_category_url = scrapy.Field()
    # Third-level category name
    s_category_name = scrapy.Field()
    # Third-level category URL
    s_category_url = scrapy.Field()
class Product(scrapy.Item):
    """Product"""
    # Product category
    product_category = scrapy.Field()
    # Product sku id
    product_sku_id = scrapy.Field()
    # Product name
    product_name = scrapy.Field()
    # Product image URL
    product_img_url = scrapy.Field()
    # Product shop
    product_shop = scrapy.Field()
    # Book info (author, publisher) for book products
    product_book_info = scrapy.Field()
    # Product options
    product_option = scrapy.Field()
    # Number of product comments
    product_comments = scrapy.Field()
    # Product promotions
    product_ad = scrapy.Field()
    # Product price
    product_price = scrapy.Field()
5.2.1 Create the spider
5.2.2 Specify the start URL
5.2.3 Parse the data and hand it to the engine
# -*- coding: utf-8 -*-
import scrapy
import json

from mall_spider.items import Category


class JdCategorySpider(scrapy.Spider):
    name = 'jd_category'
    allowed_domains = ['dc.3.cn']
    start_urls = ['https://dc.3.cn/category/get']

    # URL template for second-level channel pages
    channel_url_pattern = 'https://channel.jd.com/{}.html'
    # URL template for third-level list pages
    list_url_pattern = 'https://list.jd.com/list.html?cat={}'
    def parse(self, response):
        # Decode the body as GBK: JD's category interface is GBK-encoded
        categorys = json.loads(response.body.decode('GBK'))
        # The category list sits under the "data" key
        categorys = categorys['data']

        # Iterate over the category list
        for category in categorys:
            item = Category()
            # The first-level category (with its subcategories) is always at index 0
            b_category = category['s'][0]
            # First-level category info (URL and name)
            b_category_info = b_category['n']
            # Parse out the first-level name and URL
            item['b_category_name'], item['b_category_url'] = self.get_category_item(b_category_info)

            # Second-level category list
            m_category_s = b_category['s']
            # Iterate over the second-level categories
            for m_category in m_category_s:
                # Second-level category info
                m_category_info = m_category['n']
                item['m_category_name'], item['m_category_url'] = self.get_category_item(m_category_info)

                # Third-level category list
                s_category_s = m_category['s']
                # Iterate over the third-level categories
                for s_category in s_category_s:
                    # Third-level category info
                    s_category_info = s_category['n']
                    item['s_category_name'], item['s_category_url'] = self.get_category_item(s_category_info)
                    # Hand the item to the engine; the same instance is reused,
                    # which is fine because the pipeline copies it to a dict right away
                    yield item
    def get_category_item(self, category_info):
        # The info string is `|`-separated
        categorys = category_info.split('|')
        # Category name
        category_name = categorys[1]
        # Category URL fragment
        category_url = categorys[0]
        # Count the `-` characters in category_url
        count = category_url.count('-')
        if category_url.count('jd.com') != 0:
            # Already a host of its own: just prepend the scheme
            category_url = 'https://' + category_url
        elif count == 1:
            # One '-' means a second-level channel page
            category_url = self.channel_url_pattern.format(category_url)
        else:
            # Two '-' means a third-level list page:
            # 1. replace `-` with `,`
            category_url = category_url.replace('-', ',')
            # 2. build the concrete list URL
            category_url = self.list_url_pattern.format(category_url)
        return category_name, category_url
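As a quick sanity check of the three URL branches, here is what get_category_item produces for some sample info strings (the strings are hypothetical, modeled on the `url|name` format the interface returns):

spider = JdCategorySpider()

# Contains 'jd.com': the scheme is simply prepended.
print(spider.get_category_item('jiadian.jd.com|家用电器'))
# ('家用电器', 'https://jiadian.jd.com')

# One '-': a second-level channel page.
print(spider.get_category_item('737-794|洗衣机'))
# ('洗衣机', 'https://channel.jd.com/737-794.html')

# Two '-': a third-level list page.
print(spider.get_category_item('737-794-880|波轮洗衣机'))
# ('波轮洗衣机', 'https://list.jd.com/list.html?cat=737,794,880')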
from mall_spider.spiders.jd_category import JdCategorySpider
from mall_spider.settings import MONGO_URL
from pymongo import MongoClient


class CategoryPipeline(object):

    def open_spider(self, spider):
        if isinstance(spider, JdCategorySpider):
            # Open the MongoDB connection
            self.client = MongoClient(MONGO_URL)
            # Grab the collection to operate on
            self.category = self.client['jd']['category']

    def process_item(self, item, spider):
        if isinstance(spider, JdCategorySpider):
            # Insert the data into MongoDB
            self.category.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        """Clean up"""
        if isinstance(spider, JdCategorySpider):
            self.client.close()
# Enable the category pipeline in settings.py
ITEM_PIPELINES = {
    'mall_spider.pipelines.CategoryPipeline': 300,
}
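With the pipeline enabled, the category spider can be run with `scrapy crawl jd_category` from the project root, or through the same entry point from Python:

from scrapy.cmdline import execute

# Equivalent to `scrapy crawl jd_category` run from the project root.
execute(['scrapy', 'crawl', 'jd_category'])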
Sample product detail data, as returned by the cdnware.m.jd.com sku-detail interface requested by the spider below:
{
"code": "0",
"wareInfo": {
"recommendInfo": {
"recommendList": null
},
// Shop info for the product
"shopInfo": {
"shop": {
"shopId": 1000000127,
"name": "京东Apple产品专营店",
...
},
"basicInfo": {
"gift": false,
"bookInfo": {
// If the product is a book, its info lives here
"display": false
},
"colorSizeInfo": {
// Product option list (not every product has one)
"colorSize": [{
"buttons": [{
"no": "1",
"skuList": ["100000177738", "100000287117", "100000287145", "100000309448", "100000309450", "100000375233", "100000435832", "100000458753", "100000458755", "100001860767", "100001860773"],
"text": "金色"
}, {
"no": "2",
"skuList": ["100000177764", "100000287113", "100000287135", "100000435780", "100000435816", "100000435818", "100000569049", "100000602206", "100000602208", "100001860765", "100002539302"],
"text": "深空灰色"
}, {
"no": "3",
"skuList": ["100000177740", "100000177784", "100000287147", "100000435834", "100000458737", "100000458739", "100000602174", "100000602176", "100000602204", "100001860789", "100002539304"],
"text": "银色"
}],
"title": "颜色"
}, {
"buttons": [{
"no": "1",
"skuList": ["100000177738", "100000177740", "100000177764", "100000177784", "100000287113", "100000287117", "100000287135", "100000287145", "100000287147"],
"text": "公开版"
},
...
],
"title": "版本"
}, {
"buttons": [{
"no": "1",
"skuList": ["100000177764", "100000287145", "100000287147", "100000375233", "100000435818", "100000458739", "100000458755", "100000602204", "100000602208", "100001860765", "100001860773", "100001860789"],
"text": "64GB"
},
...
],
"title": "内存"
}],
"colorSizeTips": "#与其他已选项无法组成可售商品,请重选"
},
...
// Brand ID
"brandID": "14026",
...
// Product images
"wareImage": [{
"small": "https://m.360buyimg.com/mobilecms/s720x720_jfs/t1/3/15/4536/138660/5b997bf8Ed72ebce7/819dcf182d743897.jpg!q70.jpg.webp",
...
}
...
],
...
// Product name
"name": "Apple iPhone XS Max (A2104) 256GB 深空灰色 移动联通电信4G手机 双卡双待",
// Product category ids
"category": "9987;653;655"
}
}
}
Sample promotion data, as returned by the cd.jd.com/promotion interface:
{
    ...
    // Promotion info for the product
    "ads": [{
        "id": "AD_4749506",
        "ad": "【即刻预约,21号秒杀到手价2999】
1、前100名晒单送腾讯企鹅影院季卡,联系客服领取!!
2、曲面爆款,5.5万好评推荐!升级55Q1D超清全面屏电视"
    }],
    ...
}
Sample comment data (PC site), as returned by club.jd.com's productCommentSummaries interface:
{"CommentsCount": [
    {
        "CommentCountStr": "10万+",
        "CommentCount": 100000,   // number of comments
        "AverageScore": 5,
        "GoodRate": 0.98,         // positive-review rate
        "PoorCountStr": "600+",
        "PoorCount": 600,         // number of poor reviews
        ...
    }
]}
Sample price data, as returned by the p.3.cn/prices/mgets interface:
[
    {
        "op": "5499.00",
        "m": "5999.00",
        "id": "J_4749506",   // product sku id
        "p": "3299.00"       // product price
    }
]
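Each of these interfaces can be probed outside of Scrapy first. For example, a quick check of the price interface with requests (the sku id is taken from the sample above; JD may throttle or change these endpoints at any time):

import json
import requests

resp = requests.get('https://p.3.cn/prices/mgets?skuIds=J_4749506')
print(json.loads(resp.text)[0]['p'])   # '3299.00' in the sample above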
# -*- coding: utf-8 -*-
import scrapy
import json

from jsonpath import jsonpath


class JdProductSpider(scrapy.Spider):
    name = 'jd_product'
    allowed_domains = ['jd.com', 'p.3.cn']

    def start_requests(self):
        # One hard-coded category for development; it will come from Redis later
        category = {
            "b_category_name": "家用电器",
            "b_category_url": "https://jiadian.jd.com",
            "m_category_name": "洗衣机",
            "m_category_url": "https://list.jd.com/list.html?cat=737,794,880",
            "s_category_name": "洗衣机配件",
            "s_category_url": "https://list.jd.com/list.html?cat=737,794,877",
        }
        yield scrapy.Request(category['s_category_url'], self.parse, meta={'category': category})

    def parse(self, response):
        # Category info passed along in meta
        category = response.meta['category']
        # Collect the sku ids of every product on the list page
        sku_ids = response.xpath('//p[contains(@class, "j-sku-item")]/@data-sku').extract()
        # For each sku id, build a request for the basic detail info
        for sku_id in sku_ids:
            item = {
                'product_category': category,
                'product_sku_id': sku_id,
            }
            product_url = 'https://cdnware.m.jd.com/c1/skuDetail/apple/7.3.0/{}.json'.format(sku_id)
            yield scrapy.Request(product_url, callback=self.parse_product, meta={'item': item})

        # URL of the next list page, if there is one
        next_url = response.xpath('//a[@class="pn-next"]/@href').extract_first()
        if next_url:
            # Make the URL absolute
            next_url = response.urljoin(next_url)
            # Request the next page
            yield scrapy.Request(next_url, callback=self.parse, meta={'category': category})
    def parse_product(self, response):
        # Item carried over in meta
        item = response.meta['item']
        # Parse the response body into a dict
        product_dic = json.loads(response.text)
        # Product name; empty for invalid or delisted skus, which are skipped
        item['product_name'] = product_dic['wareInfo']['basicInfo']['name']
        if item['product_name']:
            # Category ids, with `;` replaced by `,`
            item['product_category_id'] = product_dic['wareInfo']['basicInfo']['category'].replace(';', ',')

            # Shop info; jsonpath returns False when nothing matches
            product_shop = jsonpath(product_dic, '$..shop')
            if product_shop:
                product_shop = product_shop[0]
            if not product_shop:
                # A missing or null shop entry means a JD self-operated product
                item['product_shop'] = {'name': '京东自营'}
            else:
                item['product_shop'] = {
                    "shopId": product_shop['shopId'],
                    "name": product_shop['name'],
                    "score": product_shop['score'],
                    "url": product_shop['url'],
                }

            # If the product is a book, record the book info
            if product_dic['wareInfo']['basicInfo']['bookInfo']['display']:
                item['product_book_info'] = product_dic['wareInfo']['basicInfo']['bookInfo']
                # The display flag carries no data, drop it
                del item['product_book_info']['display']

            # Product option info (e.g. color, version, memory)
            color_sizes = jsonpath(product_dic, '$..colorSize')
            product_option = {}
            if color_sizes:
                for color_size in color_sizes[0]:
                    title = color_size['title']
                    texts = jsonpath(color_size, '$..text')
                    product_option.update({title: texts})
            item['product_option'] = product_option

            # First product image
            item['product_img_url'] = jsonpath(product_dic, '$..wareImage[0].small')[0]

            # Build the request for promotion info
            ad_url = 'https://cd.jd.com/promotion/v2?skuId={}&area=1_72_4137_0&cat={}'.format(
                item['product_sku_id'], item['product_category_id'])
            yield scrapy.Request(ad_url, callback=self.parse_ad, meta={'item': item})
    def parse_ad(self, response):
        """Parse the product promotion info"""
        item = response.meta['item']
        # The promotion interface responds in GB18030
        ad_dic = json.loads(response.body.decode('GB18030'))
        item['product_ad'] = ad_dic['ads'][0]['ad']

        # Build the request for comment info
        comments_url = 'https://club.jd.com/comment/productCommentSummaries.action?referenceIds={}'.format(
            item['product_sku_id'])
        yield scrapy.Request(comments_url, callback=self.parse_comments, meta={'item': item})

    def parse_comments(self, response):
        """Parse the product comment info"""
        item = response.meta['item']
        comments_dic = json.loads(response.text)
        comments = {
            'comment_count': jsonpath(comments_dic, '$..CommentCount')[0],
            'good_rate': jsonpath(comments_dic, '$..GoodRate')[0],
            'poor_count': jsonpath(comments_dic, '$..PoorCount')[0],
        }
        item['product_comments'] = comments

        # Build the price request
        price_url = 'https://p.3.cn/prices/mgets?skuIds=J_{}'.format(item['product_sku_id'])
        yield scrapy.Request(price_url, callback=self.parse_price, meta={'item': item})

    def parse_price(self, response):
        """Parse the product price"""
        item = response.meta['item']
        item['product_price'] = json.loads(response.text)[0]['p']
        # The item is complete; hand it to the engine
        yield item
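One detail worth knowing about the jsonpath package used above: when an expression matches nothing it returns False rather than an empty list, which is what makes the plain truthiness checks in parse_product work. A tiny illustration:

from jsonpath import jsonpath

doc = {'wareInfo': {'shopInfo': {'shop': {'name': '京东Apple产品专营店'}}}}
print(jsonpath(doc, '$..shop'))   # [{'name': '京东Apple产品专营店'}]
print(jsonpath(doc, '$..nope'))   # False -- no match returns False, not []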
To make the product spider distributed with scrapy_redis:
1. Modify the spider class
from scrapy_redis.spiders import RedisSpider
import pickle


# 1. Change the base class: inherit from RedisSpider
class JdProductSpider(RedisSpider):
    name = 'jd_product'
    allowed_domains = ['jd.com', 'p.3.cn']
    # 2. Specify the redis_key
    redis_key = 'jd_product:start_category'

    # 3. Override make_request_from_data instead of start_requests
    def make_request_from_data(self, data):
        # Deserialize the category info read from Redis back into a dict
        category = pickle.loads(data)
        return scrapy.Request(category['s_category_url'], self.parse, meta={'category': category})
2. Configure scrapy_redis in the settings file
# MongoDB connection URL
MONGO_URL = 'mongodb://127.0.0.1:27017'
# Redis connection URL
REDIS_URL = 'redis://127.0.0.1:6379/0'

# Dupefilter class: stores the fingerprints of crawled requests in a Redis set
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Scheduler: keeps the pending requests in a Redis-backed queue
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Whether the scheduler state persists:
#   True: the crawled fingerprints and pending requests are kept in Redis when the program exits
#   False: they are cleared from Redis when the program exits
SCHEDULER_PERSIST = True
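With these settings the scheduler's state lives in Redis under the spider's name (by scrapy_redis convention, jd_product:dupefilter for the fingerprint set and jd_product:requests for the pending queue). A quick way to inspect it:

from redis import StrictRedis

redis = StrictRedis.from_url('redis://127.0.0.1:6379/0')
# With SCHEDULER_PERSIST = True these keys survive after the crawl stops.
print(redis.keys('jd_product:*'))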
3. Write a program that reads the category info out of MongoDB and pushes it onto the list named by the spider's redis_key.
The implementation, add_category_to_redis:
from redis import StrictRedis
from pymongo import MongoClient
import pickle

from mall_spider.settings import MONGO_URL, REDIS_URL
from mall_spider.spiders.jd_product import JdProductSpider


def add_category_to_redis():
    """Copy the category info from MongoDB into Redis."""
    # Connect to MongoDB
    client = MongoClient(MONGO_URL)
    # Connect to Redis
    redis = StrictRedis.from_url(REDIS_URL)

    # Read the categories from MongoDB, serialize each one, and push it
    # onto the list named by the product spider's redis_key
    cursor = client['jd']['category'].find()
    for category in cursor:
        redis.rpush(JdProductSpider.redis_key, pickle.dumps(category))

    # Close the MongoDB connection
    client.close()


if __name__ == '__main__':
    add_category_to_redis()
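As a smoke test, a single hand-written category can be pushed instead of the whole MongoDB collection; only s_category_url is strictly required by make_request_from_data above, the other fields just travel along in meta:

import pickle
from redis import StrictRedis

redis = StrictRedis.from_url('redis://127.0.0.1:6379/0')
category = {'s_category_url': 'https://list.jd.com/list.html?cat=737,794,877'}
redis.rpush('jd_product:start_category', pickle.dumps(category))

Then start one or more `scrapy crawl jd_product` processes; a RedisSpider blocks until its redis_key list has data.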
from mall_spider.spiders.jd_product import JdProductSpider


class ProductPipeline(object):

    def open_spider(self, spider):
        if isinstance(spider, JdProductSpider):
            # Open the MongoDB connection
            self.client = MongoClient(MONGO_URL)
            # Grab the collection to operate on
            self.product = self.client['jd']['product']

    def process_item(self, item, spider):
        if isinstance(spider, JdProductSpider):
            # Insert the data into MongoDB
            self.product.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        """Clean up"""
        if isinstance(spider, JdProductSpider):
            self.client.close()
ITEM_PIPELINES = {
    'mall_spider.pipelines.CategoryPipeline': 300,
    # Enable the product pipeline
    'mall_spider.pipelines.ProductPipeline': 301,
}
To get around IP-based anti-crawling, we implement random User-Agent and proxy IP middlewares.
import requests
import random
import re  # used by ProxyMiddleware below

# Pool of User-Agent strings for the random-header middleware
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
class RandomUserAgent(object):

    def process_request(self, request, spider):
        if request.url.startswith('https://cdnware.m.jd.com'):
            # The sku-detail interface was captured from the phone app,
            # so send the JD iPhone client's User-Agent for it
            request.headers['user-agent'] = 'JD4iPhone/164880 (iPhone; iOS 12.1.2; Scale/2.00)'
        else:
            # Otherwise set a random User-Agent from the pool
            request.headers['user-agent'] = random.choice(USER_AGENTS)
"""
9.2. 实现代理IP中间件
步骤:
在middlewares.py中, 实现ProxyMiddleware类
实现process_request方法
从代理池中获取一个随机的代理IP
设置给request.meta['proxy']
"""
from twisted.internet import defer
from twisted.internet.error import (TimeoutError, DNSLookupError,
                                    ConnectionRefusedError, ConnectionDone, ConnectError,
                                    ConnectionLost, TCPTimedOutError)
from twisted.web.client import ResponseFailed
from scrapy.core.downloader.handlers.http11 import TunnelError


class ProxyMiddleware(object):
    # Exceptions after which the current proxy is disabled and the request retried
    EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
                           ConnectionRefusedError, ConnectionDone, ConnectError,
                           ConnectionLost, TCPTimedOutError, ResponseFailed,
                           IOError, TunnelError)
    def process_request(self, request, spider):
        """Fetch a random proxy IP from the pool and set it on request.meta['proxy']"""
        response = requests.get('http://localhost:6868/random?protocol=https&domain=jd.com')
        request.meta['proxy'] = response.content.decode()
        # Do not follow redirects
        request.meta['dont_redirect'] = True
        return None

    def process_exception(self, request, exception, spider):
        if isinstance(exception, self.EXCEPTIONS_TO_RETRY):
            # The proxy that just failed
            proxy = request.meta['proxy']
            # Extract its IP address
            ip = re.findall(r'https://(.+):\d+', proxy)[0]
            # Ask the proxy pool to disable this IP for the jd.com domain
            params = {
                'ip': ip,
                'domain': 'jd.com'
            }
            requests.get('http://localhost:6868/disable_domain', params=params)
            # Return a copy of the request so it gets rescheduled
            req = request.copy()
            req.dont_filter = True
            return req
# Register the downloader middlewares in settings.py
DOWNLOADER_MIDDLEWARES = {
    'mall_spider.middlewares.RandomUserAgent': 500,
    'mall_spider.middlewares.ProxyMiddleware': 543,
}
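The middleware assumes a separate proxy-pool service listening on localhost:6868, as in the code above; it can be sanity-checked on its own before the middleware is enabled:

import requests

# /random and /disable_domain are the endpoints assumed by ProxyMiddleware above.
resp = requests.get('http://localhost:6868/random?protocol=https&domain=jd.com')
print(resp.content.decode())   # expected form: https://<ip>:<port>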