import logging
import os

import scrapy

import skip
from Comics.items import ComicItem
from Comics.loader import ComicLoader
from Comics.utils import ComicPath
from Comics.utils import Conf
from Comics.utils import oldUtils
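# `skip` is a local project module; books_comic() below assumes it exposes
# `skip_comic`, a collection of comic names that should not be crawled.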


class RmComicSpider(scrapy.Spider):
    name = 'rm_comic'
    allowed_domains = ['rouman5.com']
    main_url = 'https://' + allowed_domains[0]
    start_urls = main_url + '/books'
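    # start_requests() below is overridden, so Scrapy never consumes start_urls
    # itself; it is kept here simply as the base URL of the /books listing.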

    # Walk through the site's paginated /books listing
    def start_requests(self):
        for page in range(70):
            yield scrapy.Request(f"{self.start_urls}?page={page}", callback=self.books_comic)

    # Collect the comics listed on one page
    def books_comic(self, response):
        comics = ComicLoader(item=ComicItem(), response=response)
        # Read the JSON inside //script[@id="__NEXT_DATA__"], take the
        # props.pageProps.books data and loop over every book it contains
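        # (__NEXT_DATA__ is the page-state JSON that Next.js embeds in every
        # server-rendered page; get_exec() is a project helper that presumably
        # evaluates the dotted path given in str_exec against that JSON.)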
        for book in comics.get_exec(comics.get_xpath('//script[@id="__NEXT_DATA__"]/text()')[0], str_exec="props.pageProps.books"):
            # Skip comics whose names are on the blocklist
            if book['name'] not in skip.skip_comic:
                yield scrapy.Request(url=self.start_urls + "/" + book['id'], callback=self.parse_comic)

    # Collect the data of a single comic
    # Once its chapter links are known, hand each one to the next stage
    def parse_comic(self, response):
        # Initialise the comic item; Conf() looks up the config file by the
        # spider name and fills the loader automatically
        comic_item = Conf().comic(self.name, ComicLoader(ComicItem(), response))
        path_comic = comic_item.load_item()
        cbz_dir = ComicPath(path_comic).file_path(result_type=ComicPath.MAPPING_CBZ_DIR)
        move_folder = ComicPath(path_comic).file_path(result_type=ComicPath.MAPPING_OLD_CBZ_DIR)
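        # (MAPPING_CBZ_DIR / MAPPING_OLD_CBZ_DIR appear to be project-defined
        # selectors for file_path(): the first resolves the directory holding the
        # finished .cbz files, the second the folder where superseded chapters go.)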
        # Chapter names and links were parsed and injected automatically from the
        # config file; work out which of them are new
        new_chapter = oldUtils().new_files(files=comic_item.get_chapters(), folder=cbz_dir)
        # Clean up chapters that are no longer listed
        oldUtils().clean_old_files(files=comic_item.get_chapters(), folder=cbz_dir, move_folder=move_folder)
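        # (new_files() and clean_old_files() are project helpers: the former is
        # expected to return only the chapters not yet present in cbz_dir, the
        # latter to move chapters that vanished from the listing into move_folder.)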
        for chapter, link in zip(comic_item.get_chapters(), comic_item.get_chapter_href()):
            if chapter in new_chapter:
                # Build and export the item data for this chapter
                item = comic_item.load_item(chapter=chapter)
                # Resolve the final path where the CBZ will be stored
                cbz_path = ComicPath(item=item).PATH_CBZ()
                # Check whether a CBZ already exists under the Traditional or
                # Simplified Chinese path
                # if not checkUtils().is_error(item) and os.path.exists(cbz_path):
                if cbz_path is not None and os.path.exists(cbz_path):
                    logging.info(f"Comic {cbz_path} already exists, skipping...")
                    yield item
                else:
                    # Request the chapter link; the item travels along in the
                    # request meta and is picked up again in self.parse_chapter
                    yield scrapy.Request(self.main_url + link, meta={'item': item}, callback=self.parse_chapter)

    # Read all images of a single chapter
    def parse_chapter(self, response):
        # Recover the comic item passed along in the request meta
        ci = ComicLoader(item=response.meta['item'], response=response)
        # Parse the __NEXT_DATA__ JSON of the chapter page and store the result on ci (ComicItem)
        item: ComicLoader = Conf().parse_chapter(item=ci, value=ci.get_xpath('//script[@id="__NEXT_DATA__"]/text()')[0])
        comic, chapter_api_url = item.load_item(), item.get_chapter_api()
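        # (get_chapter_api() appears to return the relative URL of the API that
        # serves the encrypted chapter data; an empty value means the chapter was
        # already complete in __NEXT_DATA__ and the item can be yielded as-is.)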
        if chapter_api_url:
            try:
                yield scrapy.Request(self.main_url + chapter_api_url, meta={'item': comic}, callback=self.parse_chapter_api)
            except Exception:
                logging.warning(f"Failed to build request for chapter API {self.main_url}{chapter_api_url} (item: {comic})")
        else:
            yield comic

    # Handle the response of the encrypted-data API
    def parse_chapter_api(self, response):
        comic_item = ComicLoader(item=response.meta['item'], response=response)
        return Conf().parse_chapter_api(item=comic_item, value=response.text).load_item()
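
    # parse() and error_parse() are deliberate stubs: every request in this spider
    # names an explicit callback, so Scrapy's default parse() entry point is never hit.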
    def parse(self, response):
        raise NotImplementedError

    def error_parse(self, response):
        raise NotImplementedError