diff --git a/.gitignore b/.gitignore
index 41ca054..55bb838 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 COMICOUT/
 .conf/
 **/__pycache__/**
+.vscode/
 logconf.log
\ No newline at end of file
diff --git a/common/BaseComicEntity.py b/common/BaseComicEntity.py
index 61ca77a..19b793d 100644
--- a/common/BaseComicEntity.py
+++ b/common/BaseComicEntity.py
@@ -71,7 +71,7 @@ class baseComic:
             #If no completed-progress marker exists, downloading is allowed
             if not ciUtils.isProgressDone():
                 #if fu.exists(ComicPath.getNewCBZComicChapter("file")): ciUtils.setProgressDone()
-                cls.comicChapters(href,scramble=True)
+                cls.comicChapters(href)
                 cls.count_chapter += 1
                 #Wait after one comic has been downloaded
             #Clear the folder
@@ -85,10 +85,10 @@ class baseComic:
     Read all images under a chapter
     '''
     @classmethod
-    def comicChapters(cls,chapter_url,scramble=None):
+    def comicChapters(cls,chapter_url):
         is_next = False
         #try:
-        cls.Onechapter(chapter_url,scramble)
+        cls.Onechapter(chapter_url)
         #Move to the next stage
         #After all chapter images are downloaded, download the cover
         comic_icon_path = ComicPath.getPathConfComicIcon()
@@ -107,14 +107,14 @@ class baseComic:

     #Download all images for a chapter URL and descramble images whose file names start with "scramble"
     @classmethod
-    def Onechapter(cls,chapter_url,scramble=None):
+    def Onechapter(cls,chapter_url):
         is_next = True
         if not str(chapter_url).startswith("http"): chapter_url = ci.getBaseUrl() + chapter_url
         #Download the images
         is_next = cls.comicChapterDownload(chapter_url)
         #After downloading, start descrambling the images
         chapter_dir = ComicPath.getDirComicChapter()
-        if scramble and os.path.exists(chapter_dir): #get the chapter image paths
+        if os.path.exists(chapter_dir): #get the chapter image paths
             dirs = os.listdir(chapter_dir)
             for img in dirs:
                 if img.startswith("scramble="):
@@ -135,16 +135,13 @@ class baseComic:
         #Save the info
         ci.writeJson()
         #Verify whether the data already exists and is complete
-        is_next = CBZUtils.nextCBZ()
-        is_update_old=CBZUtils.isUpdateOldCBZ()
+        is_next = CBZUtils.isVerCBZComic()
         #Generate ComicInfo.xml if it does not exist
         if is_next and fu.notExists(ComicPath.getPathComicInfoXML()): ci.writeComicInfoXML()
-        if not is_next and not is_update_old:
-            ComicPath.TIME_SLEEP = 0.5
-            downloadUtils.queueDownClear()
-        else:
-            ComicPath.TIME_SLEEP = random.randint(8,15)
         #Icon
         downloadUtils.putDownImageUrlDirFile(Comic.getIcon(),ComicPath.getDirConfComic(),ComicPath.COMIC_ICON_FILE_NAME)
-        downloadUtils.start_downloads(timeout=8)
+        if not is_next:
+            CBZUtils.isUpdateOldCBZ()
+            downloadUtils.start_downloads(timeout=8)
+        if is_next and fu.notExists(ComicPath.getPathComicInfoXML()): ci.writeComicInfoXML()
         return is_next
\ No newline at end of file
diff --git a/common/Constant.py b/common/Constant.py
index 40cb549..d095202 100644
--- a/common/Constant.py
+++ b/common/Constant.py
@@ -171,4 +171,19 @@ class ComicPath:
     @classmethod
     def getPathComicInfoXML(cls,mkdir=True): return cls.setDirImg([Comic.getComicName(),Comic.getChapterName()
-                        ,cls.COMIC_INFO_XML],mkdir=mkdir)
\ No newline at end of file
+                        ,cls.COMIC_INFO_XML],mkdir=mkdir)
+    #Scrambled image path
+    @classmethod
+    def getPathImageScrambleComicChapter(cls,count,block,mkdir=True,suffix="jpg"): return cls.setDirImg([Comic.getComicName(),Comic.getChapterName()
+                        ,"scramble="+block+"_"+count],mkdir=mkdir,suffix=suffix)
+    #
+    @classmethod
+    def getPathImageSaveScrambleComicChapter(cls,path,mkdir=True):
+        count = str(path).split("_")[-1]
+        return cls.setDirImg([Comic.getComicName(),Comic.getChapterName()
+                        ,count],mkdir=mkdir)
+
+    @classmethod
+    def getFileScrambleImageName(cls,count,block,suffix=".jpg"): return "scramble="+str(block)+"_"+str(count)+suffix
+    @classmethod
+    def getFileScrambleImageSave(cls,file): return str(file).split("_")[-1]
\ No newline at end of file
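A minimal standalone sketch of the file-name convention the new ComicPath helpers encode; the function and variable names below are illustrative only, not the project's API:

# Sketch of the scramble file-name scheme behind getFileScrambleImageName /
# getFileScrambleImageSave: block is the scramble block count, count the page index.
def scramble_file_name(count, block, suffix=".jpg"):
    return "scramble=" + str(block) + "_" + str(count) + suffix   # e.g. "scramble=10_003.jpg"

def descrambled_file_name(file):
    # keeps only the last "_"-separated part; plain names pass through unchanged
    return str(file).split("_")[-1]

assert scramble_file_name("003", 10) == "scramble=10_003.jpg"
assert descrambled_file_name("scramble=10_003.jpg") == "003.jpg"
assert descrambled_file_name("003.jpg") == "003.jpg"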
diff --git a/domain/down/RouMan.py b/domain/down/RouMan.py
index 1275a0f..e5d61c5 100644
--- a/domain/down/RouMan.py
+++ b/domain/down/RouMan.py
@@ -7,6 +7,7 @@ from utils.FileUtils import imageUtils
 from utils.NetUtils import htmlUtils
 from utils.ComicUtils import ntfy
 from utils.NetUtils import downloadUtils as downUtils
+from utils.Logger import logger

 class DomainDown:
     @classmethod
@@ -36,11 +37,11 @@ class DomainDown:
             (image_src,scramble) = [image.get("src"),image.get("scramble")]
             count_image = "{:0>3d}".format(count)
             image_src_suffix = "."+str(image_src).split(".")[-1]
+            image_file_name = count_image+image_src_suffix
             if scramble:
                 de_str = str(image_src).split("/")[-1].replace(image_src_suffix,"==")
                 blocks_num = imageUtils.encodeImage(de_str)
-                count_image = "scramble="+str(blocks_num)+"_"+count_image
-                image_file_name = count_image+image_src_suffix
+                image_file_name = ComicPath.getFileScrambleImageName(count=count_image,block=blocks_num,suffix=image_src_suffix)
             files_name.append(image_file_name)
             images_url.append(image_src)
             downUtils.putDownImageUrlDirFile(image_src,ComicPath.getDirComicChapter(),image_file_name)
diff --git a/logconf.yml b/logconf.yml
index 75543d9..ea4eb17 100644
--- a/logconf.yml
+++ b/logconf.yml
@@ -14,7 +14,7 @@ formatters:

 handlers:
   sh:
     class: logging.StreamHandler
-    level: INFO
+    level: DEBUG
     formatter: tostrout
     stream: ext://sys.stdout
diff --git a/utils/ComicUtils.py b/utils/ComicUtils.py
index 4c9d5d3..11b8c1f 100644
--- a/utils/ComicUtils.py
+++ b/utils/ComicUtils.py
@@ -63,7 +63,7 @@ class CBZUtils:
             zf.write(path.joinpath(filename), arc_dir.joinpath(filename))
         zf.close()
         ntfy.sendMsg(f"打包完成:{target_file}")
-        cls.verCBZComic(target_file)
+        cls.isVerCBZComic(target_file)

     @classmethod
     def packAutoComicChapterCBZ(cls):
@@ -129,8 +129,10 @@ class CBZUtils:
         return result

     #Check whether the CBZ archive is complete
+    #Returns True if valid, False otherwise
     @classmethod
-    def verCBZComic(cls,path=None,list_img=None,min_size=300000):
+    def isVerCBZComic(cls,path=None,list_img=None,min_size=300000):
+        is_ver = False
         #Validate the inputs
         if path == None: path = cls.getCBZ_Path()
         #Return if the file does not exist
@@ -150,7 +152,7 @@ class CBZUtils:
                 ciUtils.setProgressFail()
         except Exception as e:
             print(e)
-        return False
+        return is_ver

     @classmethod
     def isUpdateOldCBZ(cls,filesname=None,result=False):
@@ -176,10 +178,5 @@ class CBZUtils:
                 fu.remove(unzip_path)
                 return True
             ci.writeComicInfoXML(overlay=True)
-        result = False
-        return result
-
-    @classmethod
-    def nextCBZ(cls,list_img=None):
-        if list_img == None: list_img = Comic.getChapterImgs()
-        return not cls.verCBZComic(list_img=list_img)
\ No newline at end of file
+        is_update = False
+        return is_update
\ No newline at end of file
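isVerCBZComic now returns a boolean, so callers such as baseComic can branch on it directly. A rough, self-contained sketch of the kind of completeness check its signature (path, list_img, min_size) suggests; the project's actual checks may differ:

import zipfile

def is_cbz_complete(path, expected_images=None, min_size=300000):
    # Open the archive, compare the image count against the expected page list,
    # and require every image entry to reach a minimum byte size.
    try:
        with zipfile.ZipFile(path) as zf:
            entries = [i for i in zf.infolist() if not i.is_dir()]
            images = [i for i in entries if i.filename.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))]
            if expected_images is not None and len(images) < len(expected_images):
                return False
            return all(i.file_size >= min_size for i in images)
    except (FileNotFoundError, zipfile.BadZipFile):
        return False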
diff --git a/utils/FileUtils.py b/utils/FileUtils.py
index dbc0fc4..3bf8773 100644
--- a/utils/FileUtils.py
+++ b/utils/FileUtils.py
@@ -153,14 +153,9 @@ class imageUtils:
         #image.show()
         file_str = str(imgpath).split("=")
         #10_29.jpg
-        base_dir = file_str[0].replace("scramble","")
-        base_name = file_str[-1]
-        base_fn = base_name.split("_")
-        save_name = base_fn[1]
-        save_name_delesu = save_name.split(".")[0]
+        base_fn = file_str[-1].split("_")
         blocks = int(base_fn[0])
-        img_type = os.path.basename(imgpath).split('.')[-1]
-        save_path = os.path.join(os.path.dirname(imgpath),save_name_delesu+"."+img_type)
+        save_path = ComicPath.getFileScrambleImageSave(imgpath)
         # print(type(aid),type(img_name))
         if blocks:
             s = blocks # random value
@@ -324,7 +319,8 @@ class dbUtils:
         result = False
         db = cls.init_db(db_name)
         if db == None: return None
-        result = db.search(Query().name == name)
+        data = db.search(Query().name == name)
+        logger.info(f"result query= {data}")
         if progress != None:
             try:
                 if len(db.search((Query().name == name) & (Query().progress == progress))) != 0: result = True
diff --git a/utils/NetUtils.py b/utils/NetUtils.py
index 3c0f338..5a2fb2c 100644
--- a/utils/NetUtils.py
+++ b/utils/NetUtils.py
@@ -188,6 +188,13 @@ class downloadUtils:
         result = cls.getDownUrlDirFileType()
         if result == None: return None
         (file_url,dir,file,file_type) = [result[0],result[1],result[2],result[3]]
+        logger.debug(f"file_url={file_url}, dir={dir} , file={file}, file_type={file_type}")
+        en_scrabmle_file = ComicPath.getFileScrambleImageSave(file)
+        save_path = os.path.join(dir,en_scrabmle_file)
+        logger.debug(f"save_path= {save_path}")
+        if os.path.exists(save_path):
+            logger.info(f"文件已存在,跳过中... {save_path}")
+            return True
         if file_url == None:
             logger.error("common_down file_url 为空")
             raise NameError("common_down file_url为空")
@@ -197,7 +204,6 @@ class downloadUtils:
             "http": proxy_type + "://" + proxy,
             "https": proxy_type + "://" + proxy }
         response = None
-        save_path = os.path.join(dir,file)
         logger.debug(f"save_path {save_path}")
         if not os.path.exists(dir): os.makedirs(dir)
         temp_path = save_path+".downloads"
@@ -244,7 +250,5 @@ class downloadUtils:
         with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
             future_list = list()
             while not cls.QUEUE_DOWN.empty():
-                logger.debug("QUEUE_DOWN 不为空 准备下载中...")
-                future_list.append(executor.submit(
-                    cls.common_download,timeout, proxy_type, proxy))
+                future_list.append(executor.submit(cls.common_download,timeout=timeout, proxy_type=proxy_type, proxy=proxy))
             concurrent.futures.wait(future_list, timeout)
\ No newline at end of file
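For reference, a standalone sketch of the executor pattern start_downloads now uses: keyword arguments are forwarded through executor.submit, and concurrent.futures.wait bounds the total wait. The fetch function and jobs queue are illustrative placeholders:

import concurrent.futures
import queue

def fetch(url, timeout=8, proxy_type="http", proxy=None):
    # placeholder worker; the project submits downloadUtils.common_download here
    print(f"downloading {url} (timeout={timeout}, proxy={proxy_type}://{proxy})")

jobs = queue.Queue()
for url in ("001.jpg", "scramble=10_002.jpg"):
    jobs.put(url)

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    futures = []
    while not jobs.empty():
        # submit() forwards keyword arguments to the worker unchanged
        futures.append(executor.submit(fetch, jobs.get(), timeout=8, proxy_type="http", proxy=None))
    concurrent.futures.wait(futures, timeout=8)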