ComicScrapy/Comics/pipelines.py
2024-10-28 00:03:20 +08:00

122 lines
5.6 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
import os,scrapy,logging,shutil
from Comics import settings
from Comics.items import ComicItem,ImageItem
from Comics.loader import ComicLoader
from Comics.utils import CBZUtils,fileUtils as fu
from Comics.utils import ComicPath
from Comics.utils import oldUtils
from Comics.exporters import JsonExport,ItemExporter
from scrapy.pipelines.images import ImagesPipeline
from Comics._utils.ComicInfo import ComicInfoXml
class ComicsPipeline():
    """Item pipeline that exports comic metadata.

    If the comic's CBZ archive already exists, the item is converted via
    ItemExporter; otherwise the fully loaded item is serialized to a JSON
    file next to the chapter directory.
    """

    def process_item(self, item: ComicItem, spider):
        """Export *item* and return the exported result.

        BUG FIX: the original returned None for non-ComicItem items,
        silently dropping them for any later pipeline; they are now
        passed through unchanged (standard Scrapy convention).
        """
        if not isinstance(item, ComicItem):
            return item
        path = ComicPath(item)
        if fu.exists(path.PATH_CBZ()):
            # CBZ archive already on disk -> convert the existing data.
            return ItemExporter().export_obj(item)
        # No CBZ yet -> persist the loaded item as JSON.
        return JsonExport(file=path.getDirJosnComicChapter()).export_json(
            ComicLoader(item).load_item(), if_return=True)
class BaseImagesPipeline(ImagesPipeline):
    """Shared path/cover helpers for the comic image download pipelines."""

    def image_scramble_exits(self, item, image_path):
        """Return True if the (possibly scrambled) image is already stored under IMAGES_STORE."""
        scrambled_path = ComicPath(item).getFileScrambleImageSave(image_path, relative="fullpath")
        stored_path = fu.join(settings.IMAGES_STORE, self.get_file_path(item, scrambled_path))
        return fu.exists(stored_path)

    def get_file_path(self, item, file=None, result_type="image"):
        """Resolve a pipeline path for *item* through ComicPath."""
        return ComicPath(item).file_path(file=file, result_type=result_type)

    def file_path(self, request, response=None, info=None, *, item=None):
        """Storage path for a download: the request carries its target path in meta."""
        return request.meta['path']

    def update_icon(self, item):
        """Promote the downloaded cache cover to its final save location."""
        # Cover image cached by the download step.
        cached_icon = self.get_file_path(item, result_type="down_cache_icon")
        # Final destination for the cover.
        final_icon = self.get_file_path(item=item, result_type="down_icon")
        fu.update_icon(cached_icon, final_icon)

    def success_completed(self, item, results):
        """Return True when *results* is non-empty and every download succeeded.

        Each entry of *results* is a (success, info) pair as produced by
        the Scrapy media pipeline.
        """
        return bool(results) and all(result[0] for result in results)
class ImgDownloadPipeline(BaseImagesPipeline):
    """Download comic page/cover images, then pack finished chapters into CBZ."""

    def get_media_requests(self, item, info):
        """Yield a download request for every image not already on disk.

        BUG FIX: the original executed ``return False`` when the cached
        icon already existed; inside a generator that terminates iteration
        and skipped ALL remaining images of the chapter. Now only that
        single image is skipped with ``continue``.
        """
        comic = ComicLoader(item=item)
        for image_item in comic.parse_images():
            image_url = image_item["image_url"]
            image_path = image_item["image_path"]
            if image_item["image_type"] == "Icon":
                image_path = super().get_file_path(item, result_type="icon_cache")
                if fu.exists(image_path):
                    # Icon already cached: skip this image only.
                    continue
            # Image (including its scrambled variant) already stored.
            if super().image_scramble_exits(item, image_path):
                logging.info(f"file exists: IMAGE_STORE {image_path}")
                continue
            logging.info(f"downloading {image_url} --> IMAGE_STORE {image_path}")
            yield scrapy.Request(url=image_url, meta={'path': image_path})

    def pack_icon(self, item):
        """Copy every downloaded cover variant into the CBZ cover slot."""
        cbz_icon = super().get_file_path(item=item, result_type="cbz_icon")
        down_icon = super().get_file_path(item=item, result_type="down_icon")
        base_dir = fu.dirname(down_icon)
        stem = fu.basename(down_icon).split(".")[0]
        for entry in os.listdir(base_dir):
            path = fu.join(base_dir, entry)
            # Match any file sharing the cover's base name (any extension).
            if os.path.isfile(path) and entry.startswith(stem):
                fu.update_icon(path, cbz_icon)

    def item_completed(self, results, item, info):
        """Pack the chapter into a CBZ once all its images are on disk.

        Data completeness is not re-verified here; CBZUtils validates the
        archive during packing.

        Args:
            results: (success, info) pairs for the images just downloaded;
                already-present images were skipped in get_media_requests.
            item: the comic item being processed.
            info: media pipeline info.

        Returns:
            The item, so downstream pipelines keep receiving it
            (the original implicitly returned None).
        """
        if super().success_completed(item, results):
            super().update_icon(item)
        cbz_path = super().get_file_path(item, result_type="cbz")
        chapter_dir = ComicPath(item=item).file_path(result_type=ComicPath().MAPPING_IMAGES_DIR)
        images_file = oldUtils().old_images(folder=chapter_dir)
        # Wait until every page image of the chapter exists on disk.
        if images_file is None or len(images_file) != len(ComicLoader(item=item).get_image_urls()):
            return item
        if fu.exists(cbz_path):
            chapter = os.path.basename(cbz_path).split(".")[0]
            if os.path.exists(chapter_dir) and chapter in chapter_dir:
                print(f"正在删除多余章节文件夹 {chapter_dir}")
                shutil.rmtree(chapter_dir)
            self.pack_icon(item)
        else:
            # Generate ComicInfo.xml metadata, then pack the chapter.
            images_dir = super().get_file_path(item=item, result_type="images_dir")
            comic_pages = ComicInfoXml().scrapy_xml_by_json(item, save_dir=images_dir)
            if CBZUtils.packComicChapterCBZ(src_dir=images_dir,
                                            dts_path=cbz_path,
                                            comic_info_images=comic_pages,
                                            remove=True):
                super().update_icon(item)
                self.pack_icon(item)
        return item