Recommendation script + pagination handling

2025-11-20 16:09:29 +08:00
parent 078d86db6e
commit 97da821799
3 changed files with 133 additions and 38 deletions

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
新华网搜索爬虫命令行工具
用法: python RmrbSearch.py --key "关键词" --total 10 --type 0
"""
import argparse
import json
import sys
from pathlib import Path
import time
# Add project root directory to path to import crawler
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from crawler.xhw.XhwCrawler import XhwCrawler
from loguru import logger
def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(
        description='Xinhua Net news search tool',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
        """
    )
    parser.add_argument(
        '--output', '-o',
        type=str,
        help='Output file path'
    )
    args = parser.parse_args()
    output_file = args.output
    logger.info("Using direct parameter mode")
    try:
        crawler = XhwCrawler()
        # Give the browser session time to finish starting up
        time.sleep(5)
        result = crawler.commend()
        # Convert the result object into a JSON-serializable dict
        output = {
            "code": result.code,
            "message": result.message,
            "success": result.success,
            "data": None,
            "dataList": [item.model_dump() for item in result.dataList] if result.dataList else []
        }
        if output_file:
            output_path = Path(output_file)
            output_path.parent.mkdir(parents=True, exist_ok=True)
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(output, f, ensure_ascii=False, indent=2)
            logger.info(f"Result saved to: {output_file}")
        else:
            # No output file given: print the result to stdout instead
            print(json.dumps(output, ensure_ascii=False, indent=2))
        crawler.close()
        sys.exit(0 if result.success else 1)
    except Exception as e:
        logger.error(f"Execution failed: {str(e)}")
        error_output = {
            "code": 500,
            "message": f"Execution failed: {str(e)}",
            "success": False,
            "data": None,
            "dataList": []
        }
        print(json.dumps(error_output, ensure_ascii=False, indent=2))
        sys.exit(1)


if __name__ == "__main__":
    main()
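For downstream use, here is a minimal sketch of reading the saved result from another script. The output path and the "title" key are assumptions based on the dict built above, not confirmed fields of the item model:

import json
from pathlib import Path

# Load the file written by: python RmrbSearch.py --output output/output.json
data = json.loads(Path("output/output.json").read_text(encoding="utf-8"))
if data["success"]:
    for item in data["dataList"]:
        print(item.get("title"))  # "title" is an assumed field name
else:
    print(f"Crawl failed: {data['message']} (code {data['code']})")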

View File

@@ -348,11 +348,7 @@ class XhwCrawler(BaseCrawler):
     title = head_div.find_element(By.CSS_SELECTOR, "h1").text
     news_item.title = title
     # Content
-    try:
-        article_div = self.driver.find_element(By.CSS_SELECTOR, "div.main.clearfix")
-        content_div = article_div.find_element(By.CSS_SELECTOR, "div#detail span#detailContent")
+    def parse_content(content_div):
         children = content_div.find_elements(By.XPATH, "./*")
         for child in children:
             try:
@@ -401,6 +397,27 @@ class XhwCrawler(BaseCrawler):
            except Exception as e:
                logger.warning(f"Failed to parse paragraph: {e}")
                continue
    # Content
    try:
        article_div = self.driver.find_element(By.CSS_SELECTOR, "div.main.clearfix")
        content_div = article_div.find_element(By.CSS_SELECTOR, "div#detail span#detailContent")
        parse_content(content_div)
        # Pagination: multi-page articles expose their page links inside a
        # "center.xinhuaPager" element; collect them, then load and parse each page.
        # Using find_elements here avoids raising on single-page articles.
        page_divs = content_div.find_elements(By.CSS_SELECTOR, "center.xinhuaPager")
        page_urls = []
        if page_divs:
            page_as = page_divs[0].find_elements(By.CSS_SELECTOR, "span#xinhuaPagerBox > a")
            for page_a in page_as:
                page_url = page_a.get_attribute("href")
                if page_url:
                    if not page_url.startswith("http"):
                        page_url = self._normalize_url(page_url)
                    page_urls.append(page_url)
        for page_url in page_urls:
            self.driver.get(page_url)
            time.sleep(2)
            content_div = self.driver.find_element(By.CSS_SELECTOR, "div#detail span#detailContent")
            parse_content(content_div)
    except Exception:
        logger.warning(f"Failed to parse news content: {url}")
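Note that `_normalize_url` is referenced above but is not part of this diff. A minimal sketch of what such a helper might do, assuming relative pager links should be resolved against the page currently loaded in the driver (an assumption, not the project's confirmed implementation):

from urllib.parse import urljoin

def _normalize_url(self, href: str) -> str:
    # Resolve a relative href (e.g. a pager link like "c_123-2.htm")
    # against the URL of the page currently loaded in Selenium.
    return urljoin(self.driver.current_url, href)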