from common.ComicInfo import ComicInfo as ci
from common.ComicInfo import Comic
from common.ComicInfo import ComicInfoUtils as ciUtils
from common.Constant import pathStr
from common.Constant import ComicPath
from utils.FileUtils import imageUtils
from utils.NetUtils import htmlUtils
from utils.ComicUtils import ntfy
from utils.NetUtils import downloadUtils as downUtils
from utils.Logger import logger


class DomainDown:
    """Chapter downloader for this comic domain.

    Extracts chapter metadata and the page-image list from the site's
    Next.js ``__NEXT_DATA__`` payload, then queues every image for download.
    """

    @classmethod
    def comicChapterDownload(cls, chapter_url):
        """Download all page images of one chapter.

        Parses the chapter page's ``__NEXT_DATA__`` script block for book /
        chapter names and the image list. When the page exposes a chapter
        API path, the image list is re-fetched from that API instead
        (presumably fresher data — the page payload is the fallback).
        Scrambled images get a special file name encoding the de-scramble
        block count. Results are recorded on ``Comic`` as side effects.

        :param chapter_url: URL of the chapter page to download.
        """
        str_xpath = '//script[@id="__NEXT_DATA__"]/text()'
        str_exec = "props.pageProps"
        book = htmlUtils.setXpathData(
            chapter_url, xpath=str_xpath, num=0, exec=str_exec,
            update=ComicPath.IS_UPDATE_COMIC)
        Comic.setComicName(book, "bookName")
        Comic.setChapterName(book, "chapterName")
        Comic.setDep(book, "description")
        images = Comic.getValue(book, "images")

        # Prefer the dedicated chapter API when the page advertises one.
        chapter_api_url = ci.parseExec(
            book, "chapterAPIPath", start_add=pathStr.getBaseUrl(chapter_url))
        if chapter_api_url is not None:  # was `!= None` — identity test per PEP 8
            ntfy.sendMsg(f"chapterApiUrl= {chapter_api_url}", alert=False)
            data = htmlUtils.getJSON(chapter_api_url, update=True)
            if data is not None:
                Comic.setChapterName(data, "chapter.name")
                images = Comic.getValue(data, "chapter.images")

        if not images:
            # Also covers images=None, which previously crashed len(); default
            # to an empty list so the loop below is a safe no-op.
            ntfy.sendMsg(f"未获取到章节图像 comic_name={Comic.getComicName()} chapter={Comic.getChapterName()}")
            images = []

        files_name = []
        images_url = []
        for count, image in enumerate(images, start=1):
            image_src = image.get("src")
            scramble = image.get("scramble")
            count_image = "{:0>3d}".format(count)  # zero-padded page number, e.g. "007"
            image_src_suffix = "." + str(image_src).split(".")[-1]
            image_file_name = count_image + image_src_suffix
            if scramble:
                # The de-scramble key is derived from the filename stem
                # (extension swapped for base64 padding) — TODO confirm
                # against imageUtils.encodeImage's expected input.
                de_str = str(image_src).split("/")[-1].replace(image_src_suffix, "==")
                blocks_num = imageUtils.encodeImage(de_str)
                image_file_name = ComicPath.getFileScrambleImageName(
                    count=count_image, block=blocks_num, suffix=image_src_suffix)
            files_name.append(image_file_name)
            images_url.append(image_src)
            downUtils.putDownImageUrlDirFile(
                image_src, ComicPath.getDirComicChapter(), image_file_name)

        Comic.setChapterImgs(images_url)
        Comic.setChapterFilesName(files_name)