commit b264f61a18
parent e61beb42a8
Author: caiwx86
Date: 2023-04-07 20:30:43 +08:00
5 changed files with 49 additions and 39 deletions

View File

@@ -11,7 +11,6 @@ from common.ComicInfo import ComicInfo as ci
 from common.Comic import Comic
 from common.Comic import ListComic
 from common.Constant import ComicPath
-from utils.FileUtils import fileUtils
 #Core framework
 class baseComic:
@@ -70,9 +69,6 @@ class baseComic:
         #if ciUtils.isProgressDone() and fu.notExists(ComicPath.getNewCBZComicChapter("file")):
         #    ciUtils.setProgressFail()
         cbz_path = ComicPath.getPathCBZComicChapter()
-        #file_date = int(fileUtils.getModificationDate(cbz_path,"%Y%m%d%H",not_exists="2010010101"))
-        #if file_date > 2023040610 and file_date < 2023040710:
-        #    os.remove(cbz_path)
         if not os.path.exists(cbz_path):
             print(f"CBZ不存在, {cbz_path}")
             ciUtils.setProgressFail()
@@ -97,16 +93,7 @@ class baseComic:
             is_next = False
             #try:
             cls.Onechapter(chapter_url)
-            #Enter the next stage
-            #After all chapter images are downloaded, download the cover
-            comic_icon_path = ComicPath.getPathConfComicIcon()
-            chapter_icon_path = ComicPath.getPathCBZComicChapterIcon()
-            if ciUtils.isProgressDown and os.path.exists(comic_icon_path) and not os.path.exists(chapter_icon_path):
-                shutil.copy(comic_icon_path,chapter_icon_path)
-                ntfy.sendMsg(f"复制成功:{comic_icon_path} -> {chapter_icon_path}")
-                ciUtils.iconDB()
-            ciUtils.setProgressCBZ()
-            #Next stage
+            #Next stage
             if ciUtils.isProgressCBZ: is_next = CBZUtils.packAutoComicChapterCBZ()
             #except Exception as e: is_next = ntfy.sendMsg(f"{ci.getComicName()} 下载出错了",error=e)
             ntfy.sendMsg(f"预计总章节大小:{cls.count_chapter + 1} / "+ str(Comic.getLenChapters()),alert=True)
@@ -115,31 +102,16 @@ class baseComic:
     #Download all images for a chapter URL and descramble the images whose file names start with "scramble"
     @classmethod
-    def Onechapter(cls,chapter_url):
-        is_next = True
-        if not str(chapter_url).startswith("http"): chapter_url = ci.getBaseUrl() + chapter_url
+    def Onechapter(cls,url):
         #Download the images
-        is_next = cls.comicChapterDownload(chapter_url)
-        #After the download finishes, start descrambling the images
-        chapter_dir = ComicPath.getDirComicChapter()
-        if os.path.exists(chapter_dir): #Get the chapter image directory
-            dirs = os.listdir(chapter_dir)
-            for img in dirs:
-                if img.startswith("scramble="):
-                    imageUtils.encode_scramble_image(os.path.join(chapter_dir,img))
-        #Move to the next stage
-        ciUtils.setProgressDown()
-        return is_next
-    @classmethod
-    def comicChapterDownload(cls,url):
+        url = ComicPath.getUrl(url)
         is_next = True
         #Get the project HOME directory
         try:
             domains.setdomain(url)
         except:
             htmlUtils.remove_HtmlCache(url)
-            cls.comicChapterDownload(url)
+            cls.Onechapter(url)
         #Save the info
         ci.writeJson()
         #Verify whether the data already exists and is complete
@@ -153,5 +125,20 @@ class baseComic:
         ci.writeComicInfoXML()
         ComicPath.TIME_SLEEP = random.randint(5,15)
         downloadUtils.putDownImageUrlDirFile(Comic.getIcon(),ComicPath.getDirConfComic(),ComicPath.COMIC_ICON_FILE_NAME)
-        downloadUtils.start_downloads(timeout=15)
-        return is_next
+        downloadUtils.start_downloads(timeout=12.5,concurrency=10)
+        #After the download finishes, descramble the encrypted images under the comic chapter
+        imageUtils.deScrambleImagesByDir(ComicPath.getDirComicChapter())
+        #Move to the next stage
+        #After all chapter images are downloaded, download the cover
+        cls.iconDownload()
+        ciUtils.setProgressCBZ()
+    @classmethod
+    def iconDownload(cls):
+        comic_icon_path = ComicPath.getPathConfComicIcon()
+        chapter_icon_path = ComicPath.getPathCBZComicChapterIcon()
+        if ciUtils.isProgressDown and os.path.exists(comic_icon_path) and not os.path.exists(chapter_icon_path):
+            shutil.copy(comic_icon_path,chapter_icon_path)
+            ntfy.sendMsg(f"复制成功:{comic_icon_path} -> {chapter_icon_path}")
+            ciUtils.iconDB()
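
Note on this refactor: the old comicChapterDownload body is folded into Onechapter, which now resolves the URL via ComicPath.getUrl, downloads, descrambles, copies the cover through the new iconDownload, and advances the stage with setProgressCBZ. Two details worth flagging: the bare except retries by recursing into Onechapter with no depth limit, and isProgressDown / isProgressCBZ are referenced without parentheses, so if they are methods (as isProgressDone() above suggests) those conditions are always truthy. Below is a minimal sketch of the same retry step with a bounded loop instead of recursion; setdomain and remove_cache are stand-ins for domains.setdomain and htmlUtils.remove_HtmlCache, not the project's actual API:

    # Sketch only: bounded retry instead of unbounded recursion.
    def resolve_domain(url, setdomain, remove_cache, max_tries=3):
        for _ in range(max_tries):
            try:
                setdomain(url)
                return True
            except Exception:
                remove_cache(url)  # drop the cached HTML and try again
        return False  # give up instead of recursing forever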

View File

@@ -86,6 +86,12 @@ class ComicPath:
     COMIC_INFO_XML = "ComicInfo.xml"
     TIME_SLEEP = 0.5
     IS_UPDATE_COMIC = False
+    COMIC_SCRABMLE_COUNT = 0
+    @classmethod
+    def getUrl(cls,relative_url):
+        if not str(relative_url).startswith("http"): relative_url = pathStr.getBaseUrl(Comic.getHomePage())+relative_url
+        return relative_url
     #Top-level paths
     @classmethod
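
The new ComicPath.getUrl centralizes the relative-to-absolute URL conversion that Onechapter used to inline. A standalone sketch of the same behaviour, with a hypothetical home page and assuming pathStr.getBaseUrl returns the scheme plus host of the given page:

    from urllib.parse import urlsplit

    # Standalone sketch, not the project's code.
    def get_url(relative_url, home_page="https://example.com/comics/1"):
        if str(relative_url).startswith("http"):
            return relative_url              # already absolute
        parts = urlsplit(home_page)          # derive scheme://host, like pathStr.getBaseUrl
        return f"{parts.scheme}://{parts.netloc}" + relative_url

    print(get_url("/chapter/12"))                    # https://example.com/chapter/12
    print(get_url("https://cdn.example.com/a.jpg"))  # unchanged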

View File

@@ -32,6 +32,7 @@ class DomainDown:
             ntfy.sendMsg(f"未获取到章节图像 comic_name={Comic.getComicName()} chapter={Comic.getChapterName()}")
         count = 1
+        scramble_count = 0
         (files_name,images_url) = [[],[]]
         for image in images:
             (image_src,scramble) = [image.get("src"),image.get("scramble")]
@@ -42,9 +43,11 @@
                 de_str = str(image_src).split("/")[-1].replace(image_src_suffix,"==")
                 blocks_num = imageUtils.encodeImage(de_str)
                 image_file_name = ComicPath.getFileScrambleImageName(count=count_image,block=blocks_num,suffix=image_src_suffix)
+                scramble_count += 1
             files_name.append(image_file_name)
             images_url.append(image_src)
             downUtils.putDownImageUrlDirFile(image_src,ComicPath.getDirComicChapter(),image_file_name)
             count+=1
+        ComicPath.COMIC_SCRABMLE_COUNT = scramble_count
         Comic.setChapterImgs(images_url)
         Comic.setChapterFilesName(files_name)
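
The loop now tallies how many page images were flagged as scrambled and publishes the total in ComicPath.COMIC_SCRABMLE_COUNT (the misspelling is the identifier actually used in the source). A plausible use, though the commit does not show one, is cross-checking that every scrambled download was later descrambled; a sketch with hypothetical names:

    # Hypothetical check: COMIC_SCRABMLE_COUNT vs the count
    # returned by imageUtils.deScrambleImagesByDir.
    def verify_descramble(expected_count, descrambled_count):
        if descrambled_count != expected_count:
            raise RuntimeError(
                f"descrambled {descrambled_count} of {expected_count} scrambled images")

    verify_descramble(3, 3)  # passes; a mismatch would raise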

View File

@@ -7,6 +7,18 @@ from utils.Logger import logger
 class imageUtils:
+    @classmethod
+    def deScrambleImagesByDir(cls,chapter_dir):
+        scramble_count = 0
+        if os.path.exists(chapter_dir): #Get the chapter image directory
+            dirs = os.listdir(chapter_dir)
+            for img in dirs:
+                if img.startswith("scramble="):
+                    imageUtils.encode_scramble_image(os.path.join(chapter_dir,img))
+                    scramble_count += 1
+        logger.debug(f"scramble= {scramble_count}")
+        return scramble_count
     @classmethod
     def encodeImage(cls,str_en):
         #print("en",str_en)
@@ -195,6 +207,7 @@ class imageUtils:
             print("remove=",imgpath)
 class fileUtils:
+    SCRAMBLE_COUNT = 0
     @classmethod
     def getModificationDate(cls,file_path,format="%Y%m%d",not_exists=None):
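
deScrambleImagesByDir moves the directory scan out of baseComic.Onechapter: it walks the chapter directory, descrambles every file whose name starts with "scramble=", and returns the count (which pairs with the COMIC_SCRABMLE_COUNT tally above). A self-contained sketch of the scan, with descramble as a placeholder for imageUtils.encode_scramble_image:

    import os

    # Sketch of the walk; descramble=print makes it runnable without the project.
    def descramble_dir(chapter_dir, descramble=print):
        count = 0
        if os.path.exists(chapter_dir):
            for name in os.listdir(chapter_dir):
                if name.startswith("scramble="):
                    descramble(os.path.join(chapter_dir, name))
                    count += 1
        return count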

View File

@@ -183,7 +183,7 @@ class downloadUtils:
     def putDownImageUrlDirFile(cls,url,dir,file): cls.putDownUrlDirFileType(url,dir,file,cls.TYPE_IMG)
     @classmethod
-    def common_download(cls,file_url,dir,file,file_type,repair_max=15,timeout=10,proxy=None,proxy_type=None):
+    def common_download(cls,file_url,dir,file,file_type,repair_max=30,timeout=10,proxy=None,proxy_type=None):
         logger.debug(f"file_url={file_url}, dir={dir} , file={file}, file_type={file_type}")
         en_scrabmle_file = ComicPath.getFileScrambleImageSave(file)
         en_scrabmle_path = os.path.join(dir,en_scrabmle_file)
@@ -214,7 +214,7 @@ class downloadUtils:
                     raise NameError("下载异常")
                 with open(temp_path, 'wb') as f:
                     f.write(response.content)
-                time.sleep(0.7)
+                time.sleep(0.1)
                 response.close()
                 #Verify that it is an image
                 if fu.ver_file(temp_path,type=file_type):
@@ -225,10 +225,11 @@ class downloadUtils:
                     raise NameError("## Fail: {} {}".format(file_url, "图像损坏"))
             except Exception as e:
                 logger.warning(f'重试:第{repair_count}次 异常:{e} {file_url}')
+                time.sleep(1.5)
                 repair_count += 1
     @classmethod
-    def start_downloads(cls,repair_max=20,concurrency=None,timeout=20,proxy_type=None, proxy=None):
+    def start_downloads(cls,concurrency=None,timeout=20,proxy_type=None, proxy=None):
         """
         Download image according to given urls and automatically rename them in order.
         :param timeout:
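
The download changes shift throttling from the happy path to the failure path: the pause after a successful write drops from 0.7 s to 0.1 s, a new 1.5 s sleep separates failed attempts, the per-file retry budget doubles (repair_max 15 -> 30), and start_downloads drops its unused repair_max parameter. A minimal sketch of that retry shape, with fetch as a placeholder for the HTTP call:

    import time

    # Sketch of common_download's retry loop after this commit; fetch()
    # stands in for the request and should return bytes or raise.
    def download_with_retry(fetch, url, repair_max=30, retry_pause=1.5, success_pause=0.1):
        repair_count = 0
        while repair_count < repair_max:
            try:
                data = fetch(url)
                time.sleep(success_pause)   # light throttle on success (was 0.7s)
                return data
            except Exception as e:
                print(f"retry {repair_count}: {e} {url}")
                time.sleep(retry_pause)     # back off before the next attempt
                repair_count += 1
        raise RuntimeError(f"download failed after {repair_max} attempts: {url}")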