# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

import logging
import random
from pathlib import Path

from scrapy import signals

from Comics.settings import PROXY_LIST

# File cache settings begin
import pickle
import time

from scrapy.http.request import Request
from scrapy.spiders import Spider
from scrapy.utils.python import to_bytes, to_unicode
from w3lib.http import headers_dict_to_raw, headers_raw_to_dict
from scrapy.extensions.httpcache import FilesystemCacheStorage

from Comics.settings import HTTPCACHE_ALLOW_PREFIXS, HTTPCACHE_PROXY_DOMAINS
# File cache settings end
# useful for handling different item types with a single interface

logger = logging.getLogger(__name__)


class ProxyMiddleware:
    """Attach a random proxy from PROXY_LIST to requests that need one."""

    def process_request(self, request, spider):
        url = request.url
        logger.debug(f"proxy url=== {url} {str(url).split('.')[-1]}")
        # Bare domain: strip the scheme, keep everything before the first "/".
        domain = str(url).replace("https://", "").replace("http://", "").split("/")[0]
        # Proxy the request unless its URL suffix is whitelisted for direct
        # fetching; always proxy the domains in HTTPCACHE_PROXY_DOMAINS.
        if str(url).split('.')[-1] not in HTTPCACHE_ALLOW_PREFIXS or domain in HTTPCACHE_PROXY_DOMAINS:
            if len(PROXY_LIST) != 0:
                request.meta["proxy"] = random.choice(PROXY_LIST)
        else:
            logger.debug(f"skip proxy... {url}")

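# A minimal sketch of the settings this middleware reads from Comics/settings.py
# (the setting names come from the imports above; the values are illustrative
# assumptions, not taken from the original project):
#
#     PROXY_LIST = [
#         "http://127.0.0.1:8080",
#         "http://127.0.0.1:8081",
#     ]
#     # URL suffixes that may be fetched directly, without a proxy
#     HTTPCACHE_ALLOW_PREFIXS = ["jpg", "png", "webp"]
#     # Domains that are always routed through a proxy
#     HTTPCACHE_PROXY_DOMAINS = ["img.example.com"]
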
################################################################
# HTTP/HTTPS filesystem cache middleware
################################################################
class MyFilesystemCacheStorage(FilesystemCacheStorage):
    """Filesystem cache storage that only caches whitelisted URL suffixes."""

    def verify_next(self, url):
        # Cache only URLs whose suffix (the text after the last ".")
        # appears in HTTPCACHE_ALLOW_PREFIXS.
        logger.debug(f"cache url=== {url} {str(url).split('.')[-1]}")
        if str(url).split('.')[-1] not in HTTPCACHE_ALLOW_PREFIXS:
            logger.debug(f"skip cache === {url}")
            return False
        else:
            logger.debug(f"start caching === {url}")
            return True

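# For example, with HTTPCACHE_ALLOW_PREFIXS = ["jpg", "png"] (an assumed value),
# verify_next("https://example.com/a/1.jpg") returns True, while
# verify_next("https://example.com/chapter/1") returns False: the computed
# suffix is whatever follows the last "." anywhere in the URL, which for the
# second URL is "com/chapter/1".
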
    def retrieve_response(self, spider: Spider, request: Request):
        # Only consult the cache for cacheable URLs; returning None makes
        # Scrapy treat the request as a cache miss.
        if self.verify_next(request.url):
            return super().retrieve_response(spider, request)

    def store_response(self, spider: Spider, request: Request, response):
        # Only persist responses for cacheable URLs.
        if self.verify_next(request.url):
            super().store_response(spider, request, response)

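# Enabling this storage class uses Scrapy's standard HTTP-cache settings in
# settings.py; a sketch (the expiration value is an illustrative assumption):
#
#     HTTPCACHE_ENABLED = True
#     HTTPCACHE_EXPIRATION_SECS = 0  # 0 means cached responses never expire
#     HTTPCACHE_STORAGE = "Comics.middlewares.MyFilesystemCacheStorage"
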
class ComicsSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

class ComicsDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
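
# Registering these classes in settings.py would look roughly like this
# (a sketch; 543 is the Scrapy template default, and the ProxyMiddleware
# priority of 350 is an illustrative assumption):
#
#     SPIDER_MIDDLEWARES = {
#         "Comics.middlewares.ComicsSpiderMiddleware": 543,
#     }
#     DOWNLOADER_MIDDLEWARES = {
#         "Comics.middlewares.ProxyMiddleware": 350,
#         "Comics.middlewares.ComicsDownloaderMiddleware": 543,
#     }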