caiwx86 2025-01-11 06:03:01 +08:00
parent c04e9a45c2
commit 23b1f10e6c
2 changed files with 7 additions and 6 deletions

@@ -999,7 +999,7 @@ class oldUtils:
if result_type == "new": result = only_in_new_item
return result
def clean_old_files(self, files, folder, move_folder, suffix="CBZ"):
def clean_old_files(self, files, folder, move_folder, suffix="CBZ", remove=True):
# Method 3: use the iterdir method from the pathlib module to get all files and folders under the folder
# If only the file name is needed rather than the file's absolute path, use the name attribute to get it
@@ -1021,4 +1021,4 @@ class oldUtils:
except:
print(f"Error: move old_file={new_move_file} --> {old_move_file}")
-if only_in_old_item != None: move_file()
+if only_in_old_item != None and remove : move_file()
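The hunks above add a remove switch to clean_old_files so the move of stale files can be turned off while the folder comparison still runs; the in-code comments describe listing the folder with pathlib's iterdir() and the name attribute. Below is a minimal sketch of how those pieces might fit together, assuming a pathlib/shutil implementation; the only_in_old_item set construction and the suffix filter are illustrative, not code from this repository.

    from pathlib import Path
    import shutil

    def clean_old_files(files, folder, move_folder, suffix="CBZ", remove=True):
        # iterdir() yields every entry in the folder; .name gives just the file name
        # instead of the absolute path, so it can be compared against `files` directly.
        existing = {p.name for p in Path(folder).iterdir()
                    if p.is_file() and p.suffix.lstrip(".").upper() == suffix.upper()}
        only_in_old_item = existing - set(files)
        # The new flag lets callers inspect the difference without moving anything
        # by passing remove=False.
        if only_in_old_item and remove:
            for name in only_in_old_item:
                shutil.move(str(Path(folder) / name), str(Path(move_folder) / name))
        return only_in_old_item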

@@ -46,10 +46,11 @@ class RmComicSpider(scrapy.Spider):
# Get the final path where the CBZ is stored
cbz_path = ComicPath(item=item).PATH_CBZ()
# Check whether the Traditional/Simplified Chinese CBZ path already exists
-if cbz_path !=None and os.path.exists(cbz_path):
-logging.info(f"Comic {cbz_path} already exists, skipping...")
-yield item
-else:
+#if cbz_path !=None and os.path.exists(cbz_path):
+# logging.info(f"Comic {cbz_path} already exists, skipping...")
+# yield item
+# else:
+if cbz_path != None and not os.path.exists(cbz_path):
# Start requesting the chapter link and hand the response to self.parse_chapter
yield scrapy.Request(self.main_url+link, meta={'item': item}, callback=self.parse_chapter)
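
This hunk inverts the duplicate check: instead of yielding the finished item and handling downloads in an else branch, a chapter Request is now issued only when the CBZ file is missing. A minimal sketch of that guard using Scrapy's Request API; the helper name, spider.main_url, spider.parse_chapter, and the item dict are hypothetical stand-ins for RmComicSpider's real attributes.

    import logging
    import os

    import scrapy

    def maybe_request_chapter(spider, link, item, cbz_path):
        # Only follow the chapter link when the target CBZ is not already on disk;
        # otherwise log and skip, mirroring the branch that is now commented out.
        if cbz_path is not None and not os.path.exists(cbz_path):
            yield scrapy.Request(spider.main_url + link, meta={"item": item},
                                 callback=spider.parse_chapter)
        else:
            logging.info(f"Comic {cbz_path} already exists, skipping...")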