This commit is contained in:
caiwx86 2023-04-06 19:33:08 +08:00
parent 1401b9734d
commit 4eaca38fe7
3 changed files with 29 additions and 29 deletions

View File

@@ -137,11 +137,10 @@ class baseComic:
#验证数据是已存在且是否完整
is_next = CBZUtils.isVerCBZComic()
#不存在ComicInfo.xml则生成
if is_next and fu.notExists(ComicPath.getPathComicInfoXML()): ci.writeComicInfoXML()
ci.writeComicInfoXML()
#图标
downloadUtils.putDownImageUrlDirFile(Comic.getIcon(),ComicPath.getDirConfComic(),ComicPath.COMIC_ICON_FILE_NAME)
if not is_next:
CBZUtils.isUpdateOldCBZ()
downloadUtils.start_downloads(timeout=8)
if is_next and fu.notExists(ComicPath.getPathComicInfoXML()): ci.writeComicInfoXML()
return is_next

View File

@@ -14,7 +14,7 @@ formatters:
handlers:
sh:
class: logging.StreamHandler
level: DEBUG
level: INFO
formatter: tostrout
stream: ext://sys.stdout

View File

@@ -184,10 +184,7 @@ class downloadUtils:
def putDownImageUrlDirFile(cls,url,dir,file): cls.putDownUrlDirFileType(url,dir,file,cls.TYPE_IMG)
@classmethod
def common_download(cls,repair_max=20,timeout=10,proxy=None,proxy_type=None):
result = cls.getDownUrlDirFileType()
if result == None: return None
(file_url,dir,file,file_type) = [result[0],result[1],result[2],result[3]]
def common_download(cls,file_url,dir,file,file_type,repair_max=15,timeout=10,proxy=None,proxy_type=None):
logger.debug(f"file_url={file_url}, dir={dir} , file={file}, file_type={file_type}")
en_scrabmle_file = ComicPath.getFileScrambleImageSave(file)
save_path = os.path.join(dir,en_scrabmle_file)
@@ -208,27 +205,27 @@ class downloadUtils:
if not os.path.exists(dir): os.makedirs(dir)
temp_path = save_path+".downloads"
repair_count = 1
try:
response = requests.get(
file_url, headers=cls.headers, timeout=timeout, proxies=proxies)
if response.status_code != 200 and repair_count <= repair_max:
logger.warning("下载异常")
raise NameError("下载异常")
with open(temp_path, 'wb') as f:
f.write(response.content)
time.sleep(0.7)
response.close()
#验证是否是图像
if fu.ver_file(temp_path,type=file_type):
shutil.move(temp_path, save_path)
logger.info("## OK: {} {}".format(save_path, file_url))
else:
logger.warning("## Fail: {} {}".format(file_url, "图像损坏"))
raise NameError("## Fail: {} {}".format(file_url, "图像损坏"))
except Exception as e:
logger.warning(f'重试:第{repair_count}次 异常:{e} {file_url}')
cls.putDownUrlDirFileType(file_url,dir,file,file_type)
repair_count += 1
while not os.path.exists(save_path) and repair_count <= repair_max:
try:
response = requests.get(
file_url, headers=cls.headers, timeout=timeout, proxies=proxies)
if response.status_code != 200 and repair_count <= repair_max:
logger.warning("下载异常")
raise NameError("下载异常")
with open(temp_path, 'wb') as f:
f.write(response.content)
time.sleep(0.7)
response.close()
#验证是否是图像
if fu.ver_file(temp_path,type=file_type):
shutil.move(temp_path, save_path)
logger.info("## OK: {} {}".format(save_path, file_url))
else:
logger.warning("## Fail: {} {}".format(file_url, "图像损坏"))
raise NameError("## Fail: {} {}".format(file_url, "图像损坏"))
except Exception as e:
logger.warning(f'重试:第{repair_count}次 异常:{e} {file_url}')
repair_count += 1
@classmethod
def start_downloads(cls,repair_max=20,concurrency=None,timeout=20,proxy_type=None, proxy=None):
@@ -249,5 +246,9 @@ class downloadUtils:
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
future_list = list()
while not cls.QUEUE_DOWN.empty():
future_list.append(executor.submit(cls.common_download,timeout=timeout, proxy_type=proxy_type, proxy=proxy))
result = cls.QUEUE_DOWN.get(False)
(file_url,dir,file,file_type) = [result[0],result[1],result[2],result[3]]
future_list.append(executor.submit(cls.common_download,
file_url,dir,file,file_type,
timeout=timeout, proxy_type=proxy_type, proxy=proxy))
concurrent.futures.wait(future_list, timeout)