PyComicPackRouMan/domain/down/RouMan.py

from common.ComicInfo import ComicInfo as ci
from common.ComicInfo import Comic
from common.ComicInfo import ComicInfoUtils as ciUtils
from common.Constant import pathStr
from common.Constant import ComicPath
from utils.FileUtils import imageUtils
from utils.NetUtils import htmlUtils
from utils.ComicUtils import ntfy
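

# Chapter downloader for the RouMan source: scrapes a chapter page and downloads its images.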
class DomainDown:
    @classmethod
    def comicChapterDownload(cls, chapter_url):
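        # The chapter page is a Next.js app: read the embedded __NEXT_DATA__ JSON and keep props.pageProps.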
        str_xpath = '//script[@id="__NEXT_DATA__"]/text()'
        str_exec = "props.pageProps"
        book = htmlUtils.setXpathData(chapter_url, xpath=str_xpath, num=0, exec=str_exec, update=ComicPath.IS_UPDATE_COMIC)
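        # Comic name, chapter name and description come from the page state.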
        Comic.setComicName(book, "bookName")
        Comic.setChapterName(book, "chapterName")
        # alias = x.get("alias")
        Comic.setDep(book, "description")
        images = Comic.getValue(book, "images")
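        # Some chapters also expose a JSON API; when present, prefer its chapter name and image list.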
        chapter_api_url = ci.parseExec(book, "chapterAPIPath", start_add=pathStr.getBaseUrl(chapter_url))
        if chapter_api_url is not None:
            ntfy.sendMsg(f"chapterApiUrl= {chapter_api_url}", alert=False)
            data = htmlUtils.getJSON(chapter_api_url, update=True)
            if data is not None:
                Comic.setChapterName(data, "chapter.name")
                images = Comic.getValue(data, "chapter.images")
        if len(images) == 0:
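            # "未获取到章节图像" = "no chapter images were retrieved"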
ntfy.sendMsg(f"未获取到章节图像 comic_name={Comic.getComicName()} chapter={Comic.getChapterName()}")
        count = 1
        list_img, list_file_name = [], []
        for image in images:
            image_src, scramble = image.get("src"), image.get("scramble")
            count_image = "{:0>3d}".format(count)
            list_img.append(image_src)
            image_src_suffix = "." + str(image_src).split(".")[-1]
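            # Scrambled image: derive the scramble block number from the file name (extension swapped for "==")
            # and record it in the local file name prefix so the image can be descrambled later.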
            if scramble:
                de_str = str(image_src).split("/")[-1].replace(image_src_suffix, "==")
                blocks_num = imageUtils.encodeImage(de_str)
                count_image = "scramble=" + str(blocks_num) + "_" + count_image
            list_file_name.append(count_image + image_src_suffix)
            count += 1
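        # Hand the URL / file-name pairs to the shared chapter downloader.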
        ciUtils.comicChapterDownload(list_img, list_file_name)