This commit is contained in:
caiwx86 2023-04-07 20:30:43 +08:00
parent e61beb42a8
commit b264f61a18
5 changed files with 49 additions and 39 deletions

View File

@@ -11,7 +11,6 @@ from common.ComicInfo import ComicInfo as ci
from common.Comic import Comic
from common.Comic import ListComic
from common.Constant import ComicPath
from utils.FileUtils import fileUtils
#中心框架
class baseComic:
@@ -70,9 +69,6 @@ class baseComic:
#if ciUtils.isProgressDone() and fu.notExists(ComicPath.getNewCBZComicChapter("file")):
# ciUtils.setProgressFail()
cbz_path = ComicPath.getPathCBZComicChapter()
#file_date = int(fileUtils.getModificationDate(cbz_path,"%Y%m%d%H",not_exists="2010010101"))
#if file_date > 2023040610 and file_date < 2023040710:
# os.remove(cbz_path)
if not os.path.exists(cbz_path):
print(f"CBZ不存在, {cbz_path}")
ciUtils.setProgressFail()
@@ -97,16 +93,7 @@ class baseComic:
is_next = False
#try:
cls.Onechapter(chapter_url)
#进入下个阶段
#章节图片全部下载后,调用下载封面
comic_icon_path = ComicPath.getPathConfComicIcon()
chapter_icon_path = ComicPath.getPathCBZComicChapterIcon()
if ciUtils.isProgressDown and os.path.exists(comic_icon_path) and not os.path.exists(chapter_icon_path):
shutil.copy(comic_icon_path,chapter_icon_path)
ntfy.sendMsg(f"复制成功:{comic_icon_path} -> {chapter_icon_path}")
ciUtils.iconDB()
ciUtils.setProgressCBZ()
#下个阶段
#下个阶段
if ciUtils.isProgressCBZ: is_next = CBZUtils.packAutoComicChapterCBZ()
#except Exception as e: is_next = ntfy.sendMsg(f"{ci.getComicName()} 下载出错了",error=e)
ntfy.sendMsg(f"预计总章节大小:{cls.count_chapter + 1} / "+ str(Comic.getLenChapters()),alert=True)
@@ -115,31 +102,16 @@ class baseComic:
#根据章节地址下载全部图片并将文件名scramble开头的图片进行解密
@classmethod
def Onechapter(cls,chapter_url):
is_next = True
if not str(chapter_url).startswith("http"): chapter_url = ci.getBaseUrl() + chapter_url
def Onechapter(cls,url):
#下载图片
is_next = cls.comicChapterDownload(chapter_url)
#下载完成后, 开始解密图片
chapter_dir = ComicPath.getDirComicChapter()
if os.path.exists(chapter_dir): #获取章节图片路径
dirs = os.listdir(chapter_dir)
for img in dirs:
if img.startswith("scramble="):
imageUtils.encode_scramble_image(os.path.join(chapter_dir,img))
#进入下一阶段
ciUtils.setProgressDown()
return is_next
@classmethod
def comicChapterDownload(cls,url):
url = ComicPath.getUrl(url)
is_next = True
#获取本次工程的HOME目录
try:
domains.setdomain(url)
except:
htmlUtils.remove_HtmlCache(url)
cls.comicChapterDownload(url)
cls.Onechapter(url)
#保存信息
ci.writeJson()
#验证数据是已存在且是否完整
@@ -153,5 +125,20 @@ class baseComic:
ci.writeComicInfoXML()
ComicPath.TIME_SLEEP = random.randint(5,15)
downloadUtils.putDownImageUrlDirFile(Comic.getIcon(),ComicPath.getDirConfComic(),ComicPath.COMIC_ICON_FILE_NAME)
downloadUtils.start_downloads(timeout=15)
return is_next
downloadUtils.start_downloads(timeout=12.5,concurrency=10)
#下载完成后, 开始解密漫画章节下的加密图片
imageUtils.deScrambleImagesByDir(ComicPath.getDirComicChapter())
#进入下一阶段
#章节图片全部下载后,调用下载封面
cls.iconDownload()
ciUtils.setProgressCBZ()
@classmethod
def iconDownload(cls):
    """Copy the comic's cover image into the chapter's CBZ folder as its icon.

    Runs only when the download stage is flagged done, the source cover file
    exists, and the chapter icon has not been created yet; after copying it
    sends an ntfy notification and records the icon via ciUtils.iconDB().
    """
    comic_icon_path = ComicPath.getPathConfComicIcon()
    chapter_icon_path = ComicPath.getPathCBZComicChapterIcon()
    # NOTE(review): `ciUtils.isProgressDown` is referenced without parentheses,
    # unlike `ciUtils.isProgressDone()` elsewhere in this codebase; if it is a
    # method, the bare attribute is always truthy — confirm whether `()` is missing.
    if ciUtils.isProgressDown and os.path.exists(comic_icon_path) and not os.path.exists(chapter_icon_path):
        shutil.copy(comic_icon_path,chapter_icon_path)
        ntfy.sendMsg(f"复制成功:{comic_icon_path} -> {chapter_icon_path}")
        ciUtils.iconDB()

View File

@@ -86,7 +86,13 @@ class ComicPath:
COMIC_INFO_XML = "ComicInfo.xml"
TIME_SLEEP = 0.5
IS_UPDATE_COMIC = False
COMIC_SCRABMLE_COUNT = 0
@classmethod
def getUrl(cls,relative_url):
    """Return *relative_url* as an absolute URL.

    A value that already starts with "http" is returned unchanged; any other
    value is prefixed with the base URL derived from the comic's home page.
    """
    if str(relative_url).startswith("http"):
        return relative_url
    return pathStr.getBaseUrl(Comic.getHomePage()) + relative_url
#顶级路径
@classmethod
def setJoinPathDir(cls,path,dir="",suffix=None,mkdir=False):

View File

@@ -32,6 +32,7 @@ class DomainDown:
ntfy.sendMsg(f"未获取到章节图像 comic_name={Comic.getComicName()} chapter={Comic.getChapterName()}")
count = 1
scramble_count = 0
(files_name,images_url) = [[],[]]
for image in images:
(image_src,scramble) = [image.get("src"),image.get("scramble")]
@@ -42,9 +43,11 @@ class DomainDown:
de_str = str(image_src).split("/")[-1].replace(image_src_suffix,"==")
blocks_num = imageUtils.encodeImage(de_str)
image_file_name = ComicPath.getFileScrambleImageName(count=count_image,block=blocks_num,suffix=image_src_suffix)
scramble_count += 1
files_name.append(image_file_name)
images_url.append(image_src)
downUtils.putDownImageUrlDirFile(image_src,ComicPath.getDirComicChapter(),image_file_name)
count+=1
ComicPath.COMIC_SCRABMLE_COUNT = scramble_count
Comic.setChapterImgs(images_url)
Comic.setChapterFilesName(files_name)

View File

@@ -6,7 +6,19 @@ from common.Constant import ComicPath
from utils.Logger import logger
class imageUtils:
@classmethod
def deScrambleImagesByDir(cls,chapter_dir):
    """Decode all scrambled images found directly inside *chapter_dir*.

    Files whose names start with the "scramble=" marker are decoded in place
    via encode_scramble_image; all other files are left untouched.

    :param chapter_dir: chapter image directory to scan (non-recursive)
    :return: number of scrambled images decoded; 0 when the directory
        does not exist
    """
    scramble_count = 0
    if os.path.exists(chapter_dir):  # list the chapter's image files
        dirs = os.listdir(chapter_dir)
        for img in dirs:
            if img.startswith("scramble="):
                # decode in place; only marker-prefixed files are scrambled
                imageUtils.encode_scramble_image(os.path.join(chapter_dir,img))
                scramble_count += 1
        logger.debug(f"scramble= {scramble_count}")
    return scramble_count
@classmethod
def encodeImage(cls,str_en):
#print("en",str_en)
@@ -195,7 +207,8 @@ class imageUtils:
print("remove=",imgpath)
class fileUtils:
SCRAMBLE_COUNT = 0
@classmethod
def getModificationDate(cls,file_path,format="%Y%m%d",not_exists=None):
if cls.notExists(file_path): return not_exists

View File

@@ -183,7 +183,7 @@ class downloadUtils:
def putDownImageUrlDirFile(cls,url,dir,file): cls.putDownUrlDirFileType(url,dir,file,cls.TYPE_IMG)
@classmethod
def common_download(cls,file_url,dir,file,file_type,repair_max=15,timeout=10,proxy=None,proxy_type=None):
def common_download(cls,file_url,dir,file,file_type,repair_max=30,timeout=10,proxy=None,proxy_type=None):
logger.debug(f"file_url={file_url}, dir={dir} , file={file}, file_type={file_type}")
en_scrabmle_file = ComicPath.getFileScrambleImageSave(file)
en_scrabmle_path = os.path.join(dir,en_scrabmle_file)
@@ -214,7 +214,7 @@ class downloadUtils:
raise NameError("下载异常")
with open(temp_path, 'wb') as f:
f.write(response.content)
time.sleep(0.7)
time.sleep(0.1)
response.close()
#验证是否是图像
if fu.ver_file(temp_path,type=file_type):
@@ -225,10 +225,11 @@ class downloadUtils:
raise NameError("## Fail: {} {}".format(file_url, "图像损坏"))
except Exception as e:
logger.warning(f'重试:第{repair_count}次 异常:{e} {file_url}')
time.sleep(1.5)
repair_count += 1
@classmethod
def start_downloads(cls,repair_max=20,concurrency=None,timeout=20,proxy_type=None, proxy=None):
def start_downloads(cls,concurrency=None,timeout=20,proxy_type=None, proxy=None):
"""
Download image according to given urls and automatically rename them in order.
:param timeout: