From 7c30c82bae5bb1034045b2b8c29d0ebc7c186ba7 Mon Sep 17 00:00:00 2001 From: doepy <8428487+doepy@user.noreply.gitee.com> Date: Sun, 14 Mar 2021 22:10:16 +0800 Subject: [PATCH 1/4] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20=E7=AC=AC=E5=8D=81?= =?UTF-8?q?=E4=B8=80=E5=91=A8=5F=E7=AC=AC=E4=BA=8C=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/.keep" diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/.keep" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/.keep" new file mode 100644 index 00000000..e69de29b -- Gitee From 4adf3098955a2e408990fa6ef95c512b7ddf0720 Mon Sep 17 00:00:00 2001 From: doepy <8428487+doepy@user.noreply.gitee.com> Date: Sun, 14 Mar 2021 22:10:52 +0800 Subject: [PATCH 2/4] =?UTF-8?q?=E7=AC=AC=E5=8D=81=E4=B8=80=E5=91=A8=5F?= =?UTF-8?q?=E7=AC=AC=E4=BA=8C=E8=8A=82=20=E7=AC=AC=E5=8D=81=E4=B8=80?= =?UTF-8?q?=E5=91=A8=5F=E7=AC=AC=E4=BA=8C=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../items.py" | 19 ++ .../jd_search.py" | 66 +++++++ .../middlewares.py" | 184 ++++++++++++++++++ .../pipelines.py" | 34 ++++ .../settings.py" | 115 +++++++++++ 5 files changed, 418 insertions(+) create mode 100644 "\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/items.py" create mode 100644 "\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/jd_search.py" create mode 100644 "\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/middlewares.py" create mode 100644 "\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/pipelines.py" create mode 100644 
"\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/settings.py" diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/items.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/items.py" new file mode 100644 index 00000000..ab9fe506 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/items.py" @@ -0,0 +1,19 @@ +# Define here the models for your scraped items +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/items.html + +import scrapy + + +class JdCrawlerScrapyItem(scrapy.Item): + # define the fields for your item here like: + # name = scrapy.Field() + sku_id = scrapy.Field() + img = scrapy.Field() + price = scrapy.Field() + title = scrapy.Field() + shop = scrapy.Field() + icons = scrapy.Field() + sta_date = scrapy.Field() + keyword = scrapy.Field() \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/jd_search.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/jd_search.py" new file mode 100644 index 00000000..f4adb6f2 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/jd_search.py" @@ -0,0 +1,66 @@ +import scrapy +import json +import time +from bs4 import BeautifulSoup +from jd_crawler_scrapy.jd_crawler_scrapy.items import JdCrawlerScrapyItem +from scrapy.exceptions import CloseSpider + +class JdSearch(scrapy.Spider): + name = "jd_search" + + def start_resquest(self): + for keyword in ["鼠标","键盘","显卡","耳机"]: + for page_num in range(1,11): + url = f"https://search.jd.com/Search?keyword={keyword}&page={page_num}" + + # 选用FormRequest是因为它即可以发送GET请求,又可以发送POST请求 + yield scrapy.FormRequest( + meta={"keyword": keyword, "sta_date": time.strftime("%Y-%m-%d")}, + dont_filter= False, + url = url, + method = 'GET', + #formdata = date, 如果是post请求携带数据使用formdata + callback=self.parse_search, + errback=self.process_error + ) + break + + def parse_search(self,response): + 
print(response) + soup = BeautifulSoup(response.text, "lxml") + item_array = soup.select("ul[class='gl-warp clearfix'] li[class='gl-item']") + for item in item_array: + try: + sku_id = item.attrs["data-sku"] + img = item.select("img[data-img='1']") + price = item.select("div[class='p-price']") + title = item.select("div[class='p-name p-name-type-2']") + shop = item.select("div[class='p-shop']") + icons = item.select("div[class='p-icons']") + + img = img[0].attrs['data-lazy-img'] if img else "" + price = price[0].strong.i.text if price else "" + title = title[0].text.strip() if title else "" + shop = shop[0].span.a.attrs['title'] if shop and shop[0].text.strip() else "" + icons = json.dumps([tag_ele.text for tag_ele in icons[0].select("i")]) if icons else '[]' + + item = JdCrawlerScrapyItem() + + item["sku_id"] = sku_id + item["img"] = img + item["price"] = price + item["title"] = title + item["shop"] = shop + item["icons"] = icons + item["sta_date"] = response.meta["sta_date"] + item["keyword"] = response.meta["keyword"] + yield item + except Exception as e: + print(e.args) + def process_error(self, failure): + print(failure) + if "身份已过期" in str(failure.value): + raise CloseSpider + # log the exception + # send an alert + # reschedule the task \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/middlewares.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/middlewares.py" new file mode 100644 index 00000000..bc6df464 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/middlewares.py" @@ -0,0 +1,184 @@ +# Define here the models for your spider middleware +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/spider-middleware.html + +from scrapy.utils.response import response_status_message +from scrapy.dupefilters import RFPDupeFilter +from scrapy import signals +from scrapy.downloadermiddlewares.retry import RetryMiddleware +from itemadapter import is_item, ItemAdapter +from w3lib.url import canonicalize_url +from scrapy.utils.python import to_bytes +import random + +import hashlib +import weakref + +_fingerprint_cache = weakref.WeakKeyDictionary() + +class JdCrawlerScrapySpiderMiddleware: + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the spider middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_spider_input(self, response, spider): + # Called for each response that goes through the spider + # middleware and into the spider. + + # Should return None or raise an exception.
+ return None + + def process_spider_output(self, response, result, spider): + # Called with the results returned from the Spider, after + # it has processed the response. + + # Must return an iterable of Request, or item objects. + for i in result: + yield i + + def process_spider_exception(self, response, exception, spider): + # Called when a spider or process_spider_input() method + # (from other spider middleware) raises an exception. + + # Should return either None or an iterable of Request or item objects. + pass + + def process_start_requests(self, start_requests, spider): + # Called with the start requests of the spider, and works + # similarly to the process_spider_output() method, except + # that it doesn’t have a response associated. + + # Must return only requests (not items). + for r in start_requests: + yield r + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) + + +class JdCrawlerScrapyDownloaderMiddleware: + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the downloader middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_request(self, request, spider): + # Called for each request that goes through the downloader + # middleware. + + # Must either: + # - return None: continue processing this request + # - or return a Response object + # - or return a Request object + # - or raise IgnoreRequest: process_exception() methods of + # installed downloader middleware will be called + return None + + def process_response(self, request, response, spider): + # Called with the response returned from the downloader. + + # Must either; + # - return a Response object + # - return a Request object + # - or raise IgnoreRequest + return response + + def process_exception(self, request, exception, spider): + # Called when a download handler or a process_request() + # (from other downloader middleware) raises an exception. 
+ + # Must either: + # - return None: continue processing this exception + # - return a Response object: stops process_exception() chain + # - return a Request object: stops process_exception() chain + pass + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) + +class CookieMiddleware: + def process_request(self, request, spider): + request.headers['cookie'] = "__jdu=1614325433253970893691; shshshfpa=625e93d5-fd42-706f-f2d3-ab101d15ab0f-1614325435; shshshfpb=gaY1DAmAE0ZGoq6pKTzwp1g%3D%3D; qrsc=3; unpl=V2_ZzNtbRcDRkF2D0YHch1dBGIARVkSUkURIFtEAHgbC1VnBEcOclRCFnUUR1ZnGV8UZAMZX0NcQBBFCEdkexhdBGYCEFpBU3MXcgkaH0spXwVXAxNtQlBDEHcLQlF5G18BZQsUXEtfQRJ9CHZkfClsNWYzE21DZwh7dEVGU3scXgZjBhBfQVNBHXMJT1x5HlQFVwIiXg%3d%3d; __jdv=76161171|hao.360.com|t_1000003625_360mz|tuiguang|de4e261b850042f5a475db3e23fa16db|1615555825008; areaId=17; ipLoc-djd=17-1381-50713-0; PCSYCityID=CN_420000_420100_420115; shshshfp=2ef78080f2acedbd438b03639d0c23f9; shshshsID=c25819d14bbc4c305d9dde6736271c1d_2_1615555832536; __jda=122270672.1614325433253970893691.1614325433.1614491521.1615555825.3; __jdb=122270672.2.1614325433253970893691|3.1615555825; __jdc=122270672; rkv=1.0; 3AB9D23F7A4B3C9B=NG6VDTXK6GA3XU67UGNLBN42IXRZPG6VW5H3OO7EGHU7HFONTMVI6QYEAK75CUFUTHZPEZ2JQFZE3W2YCJHHLF5BN4" + + def process_response(self, request, response, spider): + """ + Use the response content to decide whether the cookie identity has expired. + :param request: + :param response: + :param spider: + :return: + """ + if "身份过期" in response.text: + raise Exception("当前cookie身份已过期") + return response + + def process_exception(self, request, exception, spider): + # If the exception means the cookie pool has run dry, replenish the cookie pool here + # + if isinstance(exception, ImportError): + retry_times = request.meta.get('retry_times', 1) + request.meta['retry_times'] = retry_times + 1 + return request + +class UAMiddleware: + def process_request(self, request, spider): + """ + Add headers to the request before it is actually sent. + :param request: + :param spider: + :return: + """ + request.headers["user-agent"] = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Mobile Safari/537.36" + + +class MyRetryMiddleware(RetryMiddleware): + """ + Handle the case where the server returns a normal 200 status code but, based on the IP, demands captcha verification. + Switching IP gets past the captcha, so the request should be retryable. + + """ + def process_response(self, request, response, spider): + if request.meta.get('dont_retry', False): + return response + if "验证码" in response.text: + reason = response_status_message(response.status) + return self._retry(request, reason, spider) or response + return response + +class MyRFPDupeFilter(RFPDupeFilter): + """ + The dupe filter generates the fingerprint before the request reaches the downloader, so if a downloader middleware raises an error the fingerprint still takes effect even though no real request was made, + which is why we use an extra parameter to customize the de-duplication rule + + + """ + def request_fingerprint(self, request, include_headers=None, keep_fragments=False): + cache = _fingerprint_cache.setdefault(request, {}) + cache_key = (include_headers, keep_fragments) + if cache_key not in cache: + fp = hashlib.sha1() + fp.update(to_bytes(request.method)) + fp.update(to_bytes(canonicalize_url(request.url, keep_fragments=keep_fragments))) + fp.update(request.body or b'') + fp.update(request.meta.get("batch_no", "").encode("utf-8")) + cache[cache_key] = fp.hexdigest() + return cache[cache_key] + + + + + diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/pipelines.py"
"b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/pipelines.py" new file mode 100644 index 00000000..40a27eba --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/pipelines.py" @@ -0,0 +1,34 @@ +# Define your item pipelines here +# +# Don't forget to add your pipeline to the ITEM_PIPELINES setting +# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html + + +# useful for handling different item types with a single interface +from itemadapter import ItemAdapter +import pymysql +from jd_crawler_scrapy.jd_crawler_scrapy.items import JdCrawlerScrapyItem + + +class JdCrawlerScrapyPipeline: + def __init__(self): + self.mysql_con = None + + + + + def process_item(self, item, spider): + if not self.mysql_con: + self.mysql_con = pymysql.connect(**spider.settings["MYSQL_CONF"]) + + if isinstance(item, JdCrawlerScrapyItem): + cursor = self.mysql_con.cursor() + SQL = """"INSERT INTO jd_search(sku_id,img,price,title,shop,icons,sta_date,keyword) + VALUES ('{}', '{}','{}','{}','{}','{}','{}','{}')""".format( + item['sku_id'], item['img'], item['price'], item['title'], item['shop'], item['icons'], item['sta_date'], item['keyword'] + ) + cursor.execute(SQL) + self.mysql_con.commit() + cursor.close() + + return item diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/settings.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/settings.py" new file mode 100644 index 00000000..f52f24bc --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\272\214\350\212\202/settings.py" @@ -0,0 +1,115 @@ +# Scrapy settings for jd_crawler_scrapy project +# +# For simplicity, this file contains only settings considered important or +# commonly used. 
You can find more settings consulting the documentation: +# +# https://docs.scrapy.org/en/latest/topics/settings.html +# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html +# https://docs.scrapy.org/en/latest/topics/spider-middleware.html + +BOT_NAME = 'jd_crawler_scrapy' + +SPIDER_MODULES = ['jd_crawler_scrapy.spiders'] +NEWSPIDER_MODULE = 'jd_crawler_scrapy.spiders' + + +# Crawl responsibly by identifying yourself (and your website) on the user-agent +#USER_AGENT = 'jd_crawler_scrapy (+http://www.yourdomain.com)' + +# Obey robots.txt rules +ROBOTSTXT_OBEY = False + +#redirect +REDIRECT_ENABLED = False + +#retry +RETRY_ENABLED = False +RETRY_HTTP_CODES = [500, 502, 503, 408, 429] + +# Configure maximum concurrent requests performed by Scrapy (default: 16) (request limit) +CONCURRENT_REQUESTS = 1 + +# Configure a delay for requests for the same website (default: 0) +# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay +# See also autothrottle settings and docs +DOWNLOAD_DELAY = 3 +# The download delay setting will honor only one of: +#CONCURRENT_REQUESTS_PER_DOMAIN = 16 +#CONCURRENT_REQUESTS_PER_IP = 16 + +# Disable cookies (enabled by default) +#COOKIES_ENABLED = False + +# Disable Telnet Console (enabled by default) +#TELNETCONSOLE_ENABLED = False + +# Override the default request headers: +#DEFAULT_REQUEST_HEADERS = { +# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', +# 'Accept-Language': 'en', +#} + +# Enable or disable spider middlewares +# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html +#SPIDER_MIDDLEWARES = { +# 'jd_crawler_scrapy.middlewares.JdCrawlerScrapySpiderMiddleware': 543, +#} + +# Enable or disable downloader middlewares +# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html +DOWNLOADER_MIDDLEWARES = { +# 'jd_crawler_scrapy.middlewares.JdCrawlerScrapyDownloaderMiddleware': 543, + 'jd_crawler_scrapy.middlewares.UAMiddleware': 100, + 'jd_crawler_scrapy.middlewares.MyRetryMiddleware': 200, + 'jd_crawler_scrapy.middlewares.CookieMiddleware': 150, +} + +# Enable or disable extensions + # See https://docs.scrapy.org/en/latest/topics/extensions.html +#EXTENSIONS = { +# 'scrapy.extensions.telnet.TelnetConsole': None, +#} + +# Configure item pipelines +# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html +ITEM_PIPELINES = { + 'jd_crawler_scrapy.pipelines.JdCrawlerScrapyPipeline': 300, +} + +# Enable and configure the AutoThrottle extension (disabled by default) +# See https://docs.scrapy.org/en/latest/topics/autothrottle.html +#AUTOTHROTTLE_ENABLED = True +# The initial download delay +#AUTOTHROTTLE_START_DELAY = 5 +# The maximum download delay to be set in case of high latencies +#AUTOTHROTTLE_MAX_DELAY = 60 +# The average number of requests Scrapy should be sending in parallel to +# each remote server +#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 +# Enable showing throttling stats for every response received: +#AUTOTHROTTLE_DEBUG = False + +# Enable and configure HTTP caching (disabled by default) +# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings +#HTTPCACHE_ENABLED = True +#HTTPCACHE_EXPIRATION_SECS = 0 +#HTTPCACHE_DIR = 'httpcache' +#HTTPCACHE_IGNORE_HTTP_CODES = [] +#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' + + +#MYSQL_CONF +MYSQL_CONF = { + "host": "127.0.0.1", + "user": "root", + "password": "lmc.123", + "db": "tunan_class3" +} + + +#log +#LOG_FILE = "F:\log\jd_search.log"
+LOG_LEVEL = "DEBUG" + +#dup +DUPEFILTER_CLASS = "jd_crawler_scrapy.middlewares.MyRFPDupeFilter" -- Gitee From 6a2ab2c5029383386145262e1be579af1ce05519 Mon Sep 17 00:00:00 2001 From: doepy <8428487+doepy@user.noreply.gitee.com> Date: Sun, 14 Mar 2021 22:11:05 +0800 Subject: [PATCH 3/4] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20=E7=AC=AC=E5=8D=81?= =?UTF-8?q?=E4=B8=80=E5=91=A8=5F=E7=AC=AC=E4=B8=89=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\270\211\350\212\202/.keep" diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\270\211\350\212\202/.keep" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\270\211\350\212\202/.keep" new file mode 100644 index 00000000..e69de29b -- Gitee From 02e1b7cd63d6ef7fbc9ab7083978f51d58045c57 Mon Sep 17 00:00:00 2001 From: doepy <8428487+doepy@user.noreply.gitee.com> Date: Sun, 14 Mar 2021 22:11:33 +0800 Subject: [PATCH 4/4] =?UTF-8?q?=E7=AC=AC=E5=8D=81=E4=B8=80=E5=91=A8=5F?= =?UTF-8?q?=E7=AC=AC=E4=B8=89=E8=8A=82=20=E7=AC=AC=E5=8D=81=E4=B8=80?= =?UTF-8?q?=E5=91=A8=5F=E7=AC=AC=E4=B8=89=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../redisCURD\347\273\203\344\271\240.md" | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 "\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\270\211\350\212\202/redisCURD\347\273\203\344\271\240.md" diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\270\211\350\212\202/redisCURD\347\273\203\344\271\240.md" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\270\211\350\212\202/redisCURD\347\273\203\344\271\240.md" new file mode 100644 index 00000000..0e2f56ee --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/3\347\217\255/3\347\217\255_\351\231\266\345\206\266/\347\254\254\345\215\201\344\270\200\345\221\250\344\275\234\344\270\232/\347\254\254\345\215\201\344\270\200\345\221\250_\347\254\254\344\270\211\350\212\202/redisCURD\347\273\203\344\271\240.md" @@ -0,0 +1,95 @@ + + +``` +C:\Users\tao>redis-cli +127.0.0.1:6379> select 2 +OK
+127.0.0.1:6379[2]> keys * +(empty list or set) +127.0.0.1:6379[2]> set 'a' 1 +OK +127.0.0.1:6379[2]> get 'a' +"1" +127.0.0.1:6379[2]> set 'a' 2 +OK +127.0.0.1:6379[2]> del 'a' +(integer) 1 +127.0.0.1:6379[2]> keys * +(empty list or set) +127.0.0.1:6379[2]> set 'a ' 1 +OK +127.0.0.1:6379[2]> hset 'hash_test' 'a ' 1 +(integer) 1 +127.0.0.1:6379[2]> hset 'hash_test' 'b' 1 +(integer) 1 +127.0.0.1:6379[2]> hget 'hash_test' 'a' +(nil) + +127.0.0.1:6379[2]> hget 'hash_test' 'b ' +(nil) +127.0.0.1:6379[2]> hdel 'hash_test' 'b' +(integer) 1 +127.0.0.1:6379[2]> lpush 'list_test' 1 +(integer) 1 +127.0.0.1:6379[2]> lpush 'list_test' 2 +(integer) 2 +127.0.0.1:6379[2]> lpush 'list_test' 3 +(integer) 3 +127.0.0.1:6379[2]> lindex 'list_test' 1 +"2" +127.0.0.1:6379[2]> lrange 'list_test' 0 2 +1) "3" +2) "2" +3) "1" +127.0.0.1:6379[2]> linsert 'list_test' before 1 'a' +(integer) 4 +127.0.0.1:6379[2]> linsert 'list_test' before 'a' 'b' +(integer) 5 +127.0.0.1:6379[2]> rpop 'list_test' +"1" +127.0.0.1:6379[2]> lpop 'list_test' +"3" +127.0.0.1:6379[2]> lrem 'list_test' 1 1 +(integer) 0 +127.0.0.1:6379[2]> lrem 'list_test' 2 'a' +(integer) 1 +127.0.0.1:6379[2]> sadd 'set_test' 1 +(integer) 1 +127.0.0.1:6379[2]> sadd 'set_test' 2 +(integer) 1 +127.0.0.1:6379[2]> smembers 'set_test' +1) "1" +2) "2" +127.0.0.1:6379[2]> sismember 'set_test' 2 +(integer) 1 +127.0.0.1:6379[2]> srandmenber 'set_test'2 +Invalid argument(s) +127.0.0.1:6379[2]> srandmember 'set_test'2 +Invalid argument(s) +127.0.0.1:6379[2]> srandmember 'set_test' 2 +1) "1" +2) "2" +127.0.0.1:6379[2]> spop 'set_test' +"2" +127.0.0.1:6379[2]> zadd 'zset_test' 10 'a' +(integer) 1 +127.0.0.1:6379[2]> zadd 'zset_test' 1 'b' +(integer) 1 +127.0.0.1:6379[2]> zadd 'zset_test' 22 'c' +(integer) 1 +127.0.0.1:6379[2]> zcard 'zset_test' +(integer) 3 +127.0.0.1:6379[2]> zcount 'zset_test' 2 33 +(integer) 2 +127.0.0.1:6379[2]> zrangebyscore 'zset_test' 2 11 +1) "a" +127.0.0.1:6379[2]> zincrby 'zset_test' 1 'b' +"2" +127.0.0.1:6379[2]> zremrangebyrank 'zset_test' 1 2 +(integer) 2 +127.0.0.1:6379[2]> zremrangebyscore 'zset_test' 1 4 +(integer) 1 +127.0.0.1:6379[2]> + +``` + -- Gitee
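
For reference, the CLI drills in the transcript above map fairly directly onto the redis-py client. The sketch below is not part of the submitted patches; it assumes `pip install redis`, a local Redis on 127.0.0.1:6379, and logical db 2 to match `select 2` in the session, and it reuses the same key names (`hash_test`, `list_test`, `set_test`, `zset_test`).

```python
# Minimal redis-py sketch mirroring the CRUD practice above (assumed setup: pip install redis).
import redis

# decode_responses=True returns str instead of bytes, closer to what redis-cli prints
r = redis.Redis(host="127.0.0.1", port=6379, db=2, decode_responses=True)

# string
r.set("a", 1)
print(r.get("a"))                           # "1"
r.delete("a")

# hash
r.hset("hash_test", "a", 1)
print(r.hget("hash_test", "a"))             # "1"
r.hdel("hash_test", "a")

# list
r.lpush("list_test", 1, 2, 3)
print(r.lrange("list_test", 0, 2))          # ['3', '2', '1']
print(r.rpop("list_test"))                  # "1"

# set
r.sadd("set_test", 1, 2)
print(r.smembers("set_test"))               # {'1', '2'}
print(r.sismember("set_test", 2))           # True

# sorted set
r.zadd("zset_test", {"a": 10, "b": 1, "c": 22})
print(r.zcard("zset_test"))                 # 3
print(r.zcount("zset_test", 2, 33))         # 2
print(r.zrangebyscore("zset_test", 2, 11))  # ['a']
print(r.zincrby("zset_test", 1, "b"))       # 2.0
```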