diff --git a/Comics/pipelines.py b/Comics/pipelines.py
index a999c68..4d57c2b 100644
--- a/Comics/pipelines.py
+++ b/Comics/pipelines.py
@@ -72,11 +72,10 @@ class ImgDownloadPipeline(BaseImagesPipeline):
         if image_item["image_type"] == "Icon":
             image_path = super().get_file_path(item, result_type="icon_cache")
             if fu.exists(image_path): return False
-        # Image (including scrambled image) does not exist
-        if not super().image_scramble_exits(item, image_path):
-            # if_down = False
-            # logging.info(f"file exists: IMAGE_STORE {image_path}")
-            # if if_down:
+        # Image (including scrambled image) already exists
+        if super().image_scramble_exits(item, image_path):
+            logging.info(f"file exists: IMAGE_STORE {image_path}")
+        else:
             logging.info(f"downloading {image_url} --> IMAGE_STORE {image_path}")
             yield scrapy.Request(url=image_url, meta={'path': image_path})

@@ -102,13 +101,14 @@ class ImgDownloadPipeline(BaseImagesPipeline):
             info (_type_): Info
         """
         # Retry if there is undownloaded image data
-        if not super().success_completed(item, results): return
-        super().update_icon(item)
+        # if not super().success_completed(item, results): return
+        # super().update_icon(item)
         cbz_path = super().get_file_path(item, result_type="cbz")
         chapter_dir = ComicPath(item=item).file_path(result_type=ComicPath().MAPPING_IMAGES_DIR)
         images_file = oldUtils().old_images(folder=chapter_dir)
         # Verify the data is correct
         if len(images_file) != len(ComicLoader(item=item).get_image_urls()): return
+        super().update_icon(item)
         # Whether the CBZ file already exists
         if fu.exists(cbz_path):
             #self.update_icon(item)
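For readers skimming the diff, the sketch below illustrates the two patterns it applies on a plain Scrapy `ImagesPipeline`: skip the download and only log when the target file already exists (first hunk), and defer the icon update until the on-disk image count matches the expected list (second hunk). The project's own helpers (`image_scramble_exits`, `update_icon`, `old_images`, `ComicLoader`, `fu`, `ComicPath`) are replaced here by hypothetical stand-ins such as `IMAGE_STORE`, `os.path.exists`, and the item fields `image_urls`, `chapter`, and `title`; this is a generic illustration, not the repository's code.

```python
import os
import logging

import scrapy
from scrapy.pipelines.images import ImagesPipeline


class SketchImgDownloadPipeline(ImagesPipeline):
    IMAGE_STORE = "/tmp/images"  # hypothetical store root

    def get_media_requests(self, item, info):
        for image_url in item.get("image_urls", []):
            # Hypothetical target path derived from the URL basename.
            image_path = os.path.join(self.IMAGE_STORE, os.path.basename(image_url))
            if os.path.exists(image_path):
                # First hunk: the image is already on disk, so only log it.
                logging.info(f"file exists: IMAGE_STORE {image_path}")
            else:
                # Otherwise download it, carrying the target path in the request meta.
                logging.info(f"downloading {image_url} --> IMAGE_STORE {image_path}")
                yield scrapy.Request(url=image_url, meta={"path": image_path})

    def item_completed(self, results, item, info):
        # Second hunk: validate that every expected image is on disk *before*
        # touching the icon, instead of updating it unconditionally up front.
        chapter_dir = os.path.join(self.IMAGE_STORE, item.get("chapter", ""))
        images_on_disk = os.listdir(chapter_dir) if os.path.isdir(chapter_dir) else []
        if len(images_on_disk) != len(item.get("image_urls", [])):
            return item  # data incomplete; skip icon update and CBZ packing
        self.update_icon(item)
        # ... CBZ packing would follow here ...
        return item

    def update_icon(self, item):
        # Placeholder for the project's icon-update step.
        logging.info(f"updating icon for {item.get('title', 'unknown')}")
```

Moving `update_icon` after the count check presumably keeps a chapter's icon from being marked as complete when some of its images failed to download.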