
Crawling JD.com product data with Scrapy

The project below uses Scrapy to crawl JD.com search results for a keyword, follow each product link to collect its detailed specifications, and store the resulting items in MongoDB. The four files of the project are listed in turn: the item definition, the spider, the item pipeline, and the settings.

The item definition, JD/items.py:

import scrapy


class JdItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()        # product title
    price = scrapy.Field()        # price
    comment_num = scrapy.Field()  # number of reviews
    url = scrapy.Field()          # product link
    info = scrapy.Field()         # detailed specifications
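For orientation, a fully populated item ends up shaped roughly like the dictionary below. The values are placeholders, not real scraped data; the nested info field is only filled in by the spider's info_parse callback shown next.

# Shape of a fully populated JdItem (placeholder values, not real scraped data):
{
    'title': '<product title>',
    'price': '<price string>',
    'comment_num': '<review count string>',
    'url': 'https://item.jd.com/<sku>.html',
    'info': {
        'type': '<category>',
        'name': '<product name>',
        '<spec group heading>': {'<spec name>': '<spec value>'},
    },
}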
The spider (registered under the name 'jd'):

# -*- coding: utf-8 -*-
import scrapy
from JD.items import JdItem


class JingdongSpider(scrapy.Spider):
    name = 'jd'
    # Using 'www.jd.com' here can prevent search.jd.com from being crawled, so keep the bare domain.
    allowed_domains = ['jd.com']
    keyword = "手机"  # search keyword ("mobile phone")
    page = 1
    url = 'https://search.jd.com/Search?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%s&cid2=653&cid3=655&page=%d&click=0'
    next_url = 'https://search.jd.com/s_new.php?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%s&cid2=653&cid3=655&page=%d&scrolling=y&show_items=%s'

    def start_requests(self):
        yield scrapy.Request(self.url % (self.keyword, self.keyword, self.page), callback=self.parse)

    def parse(self, response):
        """
        Scrape the first thirty products of each page; their data is rendered
        directly in the page HTML.
        """
        ids = []
        for li in response.xpath('//*[@id="J_goodsList"]/ul/li'):
            item = JdItem()
            title = li.xpath('div/div/a/em/text()').extract()            # title
            price = li.xpath('div/div/strong/i/text()').extract()        # price
            comment_num = li.xpath('div/div/strong/a/text()').extract()  # number of reviews
            id = li.xpath('@data-pid').extract()                         # product id
            ids.append(''.join(id))
            url = li.xpath('div/div[@class="p-name p-name-type-2"]/a/@href').extract()  # link to follow
            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            item['comment_num'] = ''.join(comment_num)
            item['url'] = ''.join(url)
            if item['url'].startswith('//'):
                item['url'] = 'https:' + item['url']
            elif not item['url'].startswith('https:'):
                # Non-product entries: yield what we have and skip the follow-up request.
                item['info'] = None
                yield item
                continue
            yield scrapy.Request(item['url'], callback=self.info_parse, meta={"item": item})

        # The request for the last thirty products of the page is checked against the
        # Referer header, which must be the URL of the current page; a wrong Referer
        # redirects to: https://www.jd.com/?se=deny
        headers = {'referer': response.url}
        self.page += 1
        yield scrapy.Request(self.next_url % (self.keyword, self.keyword, self.page, ','.join(ids)),
                             callback=self.next_parse, headers=headers)

    def next_parse(self, response):
        """
        Scrape the last thirty products of each page; they are served from a separate
        URL built from the ids of the first thirty products (show_items).
        """
        for li in response.xpath('//li[@class="gl-item"]'):
            item = JdItem()
            title = li.xpath('div/div/a/em/text()').extract()            # title
            price = li.xpath('div/div/strong/i/text()').extract()        # price
            comment_num = li.xpath('div/div/strong/a/text()').extract()  # number of reviews
            url = li.xpath('div/div[@class="p-name p-name-type-2"]/a/@href').extract()  # link to follow
            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            item['comment_num'] = ''.join(comment_num)
            item['url'] = ''.join(url)
            if item['url'].startswith('//'):
                item['url'] = 'https:' + item['url']
            elif not item['url'].startswith('https:'):
                item['info'] = None
                yield item
                continue
            yield scrapy.Request(item['url'], callback=self.info_parse, meta={"item": item})

        if self.page < 200:
            self.page += 1
            yield scrapy.Request(self.url % (self.keyword, self.keyword, self.page), callback=self.parse)

    def info_parse(self, response):
        """
        Follow each product link and scrape its detail page; all of the detailed
        information is stored in the item's nested 'info' field.
        """
        item = response.meta['item']
        item['info'] = {}
        type = response.xpath('//div[@class="inner border"]/div[@class="head"]/a/text()').extract()
        name = response.xpath('//div[@class="item ellipsis"]/text()').extract()
        item['info']['type'] = ''.join(type)
        item['info']['name'] = ''.join(name)

        # Spec tables: each Ptable-item is a group of <dt>/<dd> pairs under an <h3> heading.
        for div in response.xpath('//div[@class="Ptable"]/div[@class="Ptable-item"]'):
            h3 = ''.join(div.xpath('h3/text()').extract())
            if h3 == '':
                h3 = "未知"  # "unknown" group
            dt = div.xpath('dl/dt/text()').extract()
            dd = div.xpath('dl/dd[not(@class)]/text()').extract()
            item['info'][h3] = {}
            for t, d in zip(dt, dd):
                item['info'][h3][t] = d
        yield item
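With the project in place, the spider can be started from the project root with scrapy crawl jd (optionally with -o items.json to also dump the items to a file). It can also be launched programmatically; the runner below is a minimal sketch that assumes it sits next to scrapy.cfg so that Scrapy can locate JD/settings.py.

# run.py -- minimal sketch for launching the spider programmatically.
# Assumes it lives in the Scrapy project root, next to scrapy.cfg.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == '__main__':
    process = CrawlerProcess(get_project_settings())  # loads JD/settings.py
    process.crawl('jd')                               # spider name defined above
    process.start()                                   # blocks until the crawl finishes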
The item pipeline, JD/pipelines.py, which writes every item into a local MongoDB collection:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
from JD import settings


class JdPipeline(object):
    def __init__(self):
        # The host, port, database and collection names could also be read from settings.py:
        # host = settings.MONGODB_HOST
        # port = settings.MONGODB_PORT
        # dbname = settings.MONGODB_DBNAME
        # col = settings.MONGODB_COL
        # Create a MongoDB client
        client = MongoClient(host="127.0.0.1", port=27017)
        # Select the database
        db = client["JingDong"]
        # Select the collection
        self.col = db["JingDongPhone"]

    def process_item(self, item, spider):
        data = dict(item)
        self.col.insert_one(data)  # insert() is deprecated in recent pymongo versions
        return item
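After a crawl, it is easy to sanity-check what the pipeline wrote by querying MongoDB directly with pymongo. The snippet below is a small sketch that assumes the same local MongoDB instance, database, and collection names used above.

# check_db.py -- quick sanity check of the stored items (assumes a local MongoDB).
from pymongo import MongoClient

client = MongoClient(host='127.0.0.1', port=27017)
col = client['JingDong']['JingDongPhone']

print(col.count_documents({}))        # total number of stored products
print(col.find_one({}, {'_id': 0}))   # one sample document, without the ObjectId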
Finally, the project settings, JD/settings.py (note that the MongoDB port setting is spelled MONGODB_PORT here; the original had a MONGODB_POST typo):

# -*- coding: utf-8 -*-

# Scrapy settings for JD project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'JD'

SPIDER_MODULES = ['JD.spiders']
NEWSPIDER_MODULE = 'JD.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# MongoDB host (loopback address)
MONGODB_HOST = '127.0.0.1'
# MongoDB port, 27017 by default
MONGODB_PORT = 27017
# Database name
MONGODB_DBNAME = 'JingDong'
# Collection name
MONGODB_COL = 'JingDongPhone'

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     "authority": "search.jd.com",
#     "method": "GET",
#     "path": "/s_new.php?keyword=iphone&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&suggest=1.his.0.0",
#     "scheme": "https",
#     "cookie": "shshshfpa=817b0820-cebe-1aa5-5eaa-8f8d3b5945b3-1564123066; shshshfpb=nyq8Po%20NsVQIn1ih2zomJlw%3D%3D; TrackID=167xINX9YcKrGWU-JQ0EQWhwYstFi3gNOxFq6Em4_l6J6OECY5-pwyzHxRFr6TTZkLHI1m_3orstgEzPhWGk1pkbfG_ASOMSSscDY_oEz4XQ; pinId=DRR64H7p6D2CxuR9knABB7V9-x-f3wj7; qrsc=3; __jdu=502413746; areaId=2; PCSYCityID=CN_310000_310100_310112; xtest=3925.cf6b6759; ipLoc-djd=2-2825-51931-0; rkv=V0600; user-key=88fa50f5-ec38-48ff-9efe-beecaa5ffc96; cn=0; unpl=V2_ZzNtbRdfQEF8DRMDeR9ZDGIHFAhKUhcRd1tBVnNLXAcwB0FbclRCFX0UR1xnGlgUZwMZWEpcRxVFCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZH8YXwBnARRYRWdzEkU4dlB8G1oEVwIiXHIVF0l1CkJRfxkRAWYAF11AUUYSRQl2Vw%3d%3d; __jdv=76161171|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_d92e84df3748457d94e53b639c13f5b7|1569383391707; __jda=122270672.502413746.1564123065.1569380256.1569383392.14; __jdc=122270672; shshshfp=f5c7274df7eb1773b86a72faa494fed4; 3AB9D23F7A4B3C9B=SHZYIPK2KZYMKRXEKXFSLWSYQOVLA745EZ4NXQTNMFLQMVFRJKZT7VYDEIY6L2USE2KDEOHO2IPNACME4W7GJ2LKTM",
#     "referer": "https://search.jd.com/Search?keyword=iphone&enc=utf-8&suggest=1.his.0.0&wq=&pvid=ec4030074500424391b371d06a8a62fd",
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'JD.middlewares.JdSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'JD.middlewares.JdDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'JD.pipelines.JdPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
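With ROBOTSTXT_OBEY = False and no download delay, this configuration crawls quite aggressively. If JD starts denying requests, enabling a small delay or AutoThrottle in settings.py is a reasonable first adjustment; the values below are only suggestions and are not part of the original configuration.

# Optional politeness settings (suggested values, not part of the original project):
DOWNLOAD_DELAY = 0.5
CONCURRENT_REQUESTS_PER_DOMAIN = 8
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 1
AUTOTHROTTLE_MAX_DELAY = 10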

 
