From 97da82179924cfc64a6999d6a651782bfeb27ebc Mon Sep 17 00:00:00 2001
From: wangys <3401275564@qq.com>
Date: Thu, 20 Nov 2025 16:09:29 +0800
Subject: [PATCH] =?UTF-8?q?=E6=8E=A8=E8=8D=90=E8=84=9A=E6=9C=AC+=E5=88=86?=
 =?UTF-8?q?=E9=A1=B5=E5=A4=84=E7=90=86?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 schoolNewsCrawler/crawler/xhw/XhwCommend.py | 88 +++++++++++++++++++++
 schoolNewsCrawler/crawler/xhw/XhwCrawler.py | 27 +++++--
 schoolNewsCrawler/test.ipynb                | 56 ++++++-------
 3 files changed, 133 insertions(+), 38 deletions(-)
 create mode 100644 schoolNewsCrawler/crawler/xhw/XhwCommend.py

diff --git a/schoolNewsCrawler/crawler/xhw/XhwCommend.py b/schoolNewsCrawler/crawler/xhw/XhwCommend.py
new file mode 100644
index 0000000..8af14f7
--- /dev/null
+++ b/schoolNewsCrawler/crawler/xhw/XhwCommend.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Command-line tool for the Xinhuanet (新华网) recommended-news crawler.
+Usage: python XhwCommend.py --output <path>
+"""
+
+import argparse
+import json
+import sys
+from pathlib import Path
+import time
+# Add project root directory to path to import crawler
+sys.path.insert(0, str(Path(__file__).parent.parent.parent))
+
+from crawler.xhw.XhwCrawler import XhwCrawler
+from loguru import logger
+
+
+def main():
+    """Main entry point."""
+    parser = argparse.ArgumentParser(
+        description='新华网新闻推荐工具',
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+        """
+    )
+
+
+    parser.add_argument(
+        '--output', '-o',
+        type=str,
+        help='输出文件路径'
+    )
+
+    args = parser.parse_args()
+
+
+    output_file = args.output
+
+    logger.info("使用直接参数模式")
+
+
+    try:
+        crawler = XhwCrawler()
+        time.sleep(5)
+        result = crawler.commend()
+        # print(result)
+        output = {
+            "code": result.code,
+            "message": result.message,
+            "success": result.success,
+            "data": None,
+            "dataList": [item.model_dump() for item in result.dataList] if result.dataList else []
+        }
+        # result = None
+        # with open("F:\Project\schoolNews\schoolNewsCrawler\output\output.json", "r", encoding="utf-8") as f:
+        #     result = json.load(f)
+        # print(result)
+        # output = result
+
+
+        if output_file:
+            output_path = Path(output_file)
+            output_path.parent.mkdir(parents=True, exist_ok=True)
+            with open(output_path, 'w', encoding='utf-8') as f:
+                json.dump(output, f, ensure_ascii=False, indent=2)
+            logger.info(f"结果已保存到: {output_file}")
+
+        crawler.close()
+        sys.exit(0 if result.success else 1)
+        # print(json.dumps(output, ensure_ascii=False, indent=2))
+        # sys.exit(0 if result["success"] else 1)
+    except Exception as e:
+        logger.error(f"执行失败: {str(e)}")
+        error_output = {
+            "code": 500,
+            "message": f"执行失败: {str(e)}",
+            "success": False,
+            "data": None,
+            "dataList": []
+        }
+        print(json.dumps(error_output, ensure_ascii=False, indent=2))
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/schoolNewsCrawler/crawler/xhw/XhwCrawler.py b/schoolNewsCrawler/crawler/xhw/XhwCrawler.py
index 707d864..0b34d79 100644
--- a/schoolNewsCrawler/crawler/xhw/XhwCrawler.py
+++ b/schoolNewsCrawler/crawler/xhw/XhwCrawler.py
@@ -348,11 +348,7 @@ class XhwCrawler(BaseCrawler):
             title = head_div.find_element(By.CSS_SELECTOR, "h1").text
             news_item.title = title
 
-        # 内容
-        try:
-            article_div = self.driver.find_element(By.CSS_SELECTOR, "div.main.clearfix")
-            content_div = article_div.find_element(By.CSS_SELECTOR, "div#detail span#detailContent")
+        def parse_content(content_div):
             children = content_div.find_elements(By.XPATH, "./*")
             for child in children:
                 try:
@@ -401,6 +397,27 @@ class XhwCrawler(BaseCrawler):
                 except Exception as e:
                     logger.warning(f"解析段落失败: {e}")
                     continue
+        # 内容
+        try:
+            article_div = self.driver.find_element(By.CSS_SELECTOR, "div.main.clearfix")
+            content_div = article_div.find_element(By.CSS_SELECTOR, "div#detail span#detailContent")
+            parse_content(content_div)
+
+            page_div = content_div.find_element(By.CSS_SELECTOR, "center.xinhuaPager")
+            page_urls = []
+            if page_div:
+                page_as = page_div.find_elements(By.CSS_SELECTOR, "span#xinhuaPagerBox > a")
+                for page_a in page_as:
+                    page_url = page_a.get_attribute("href")
+                    if page_url and not page_url.startswith("http"):
+                        page_url = self._normalize_url(page_url)
+                    page_urls.append(page_url)
+                for page_url in page_urls:
+                    self.driver.get(page_url)
+                    time.sleep(2)
+                    content_div = self.driver.find_element(By.CSS_SELECTOR, "div#detail span#detailContent")
+                    parse_content(content_div)
+
         except:
             logger.warning(f"新闻内容解析失败: {url}")
 
diff --git a/schoolNewsCrawler/test.ipynb b/schoolNewsCrawler/test.ipynb
index cd447f3..5a7ad3a 100644
--- a/schoolNewsCrawler/test.ipynb
+++ b/schoolNewsCrawler/test.ipynb
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 19,
    "id": "948be230",
    "metadata": {},
    "outputs": [
@@ -41,7 +41,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 20,
    "id": "31a8a0dd",
    "metadata": {},
    "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "\u001b[32m2025-11-20 15:39:21.410\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.BaseCrawler\u001b[0m:\u001b[36m__init__\u001b[0m:\u001b[36m71\u001b[0m - \u001b[1m初始化爬虫: XhwCrawler\u001b[0m\n",
-     "\u001b[32m2025-11-20 15:39:22.502\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36m_init_driver\u001b[0m:\u001b[36m20\u001b[0m - \u001b[1mChrome浏览器初始化成功\u001b[0m\n",
-     "\u001b[32m2025-11-20 15:39:22.502\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36m_init_driver\u001b[0m:\u001b[36m24\u001b[0m - \u001b[1m访问主页获取初始Cookie\u001b[0m\n"
+     "\u001b[32m2025-11-20 16:06:16.802\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.BaseCrawler\u001b[0m:\u001b[36m__init__\u001b[0m:\u001b[36m71\u001b[0m - \u001b[1m初始化爬虫: XhwCrawler\u001b[0m\n",
+     "\u001b[32m2025-11-20 16:06:17.899\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36m_init_driver\u001b[0m:\u001b[36m20\u001b[0m - \u001b[1mChrome浏览器初始化成功\u001b[0m\n",
+     "\u001b[32m2025-11-20 16:06:17.900\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36m_init_driver\u001b[0m:\u001b[36m24\u001b[0m - \u001b[1m访问主页获取初始Cookie\u001b[0m\n"
     ]
    }
   ],
   "source": [
@@ -61,7 +61,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 21,
    "id": "e5a6e91c",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -76,7 +76,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 22,
    "id": "7e0f56fa",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -86,45 +86,35 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 26,
    "id": "47327ebf",
    "metadata": {},
-   "outputs": [],
-   "source": [
-    "#crawler.parse_xh_news_detail(\"https://www.news.cn/politics/leaders/20250224/5384be3d47c643b3a68e3bb724656152/c.html\")\n",
-    "# crawler.parse_xh_news_detail(\"https://www.news.cn/politics/leaders/20240207/2819fe60663140eab9599581dcae8c1e/c.html\") #视频"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 17,
-   "id": "fa359d5b",
-   "metadata": {},
    "outputs": [
-    {
-     "name": "stderr",
"output_type": "stream", - "text": [ - "\u001b[32m2025-11-20 15:45:21.322\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36mcommend\u001b[0m:\u001b[36m44\u001b[0m - \u001b[1m轮播图新闻url: 5\u001b[0m\n", - "\u001b[32m2025-11-20 15:45:21.483\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36mcommend\u001b[0m:\u001b[36m54\u001b[0m - \u001b[1m聚焦新闻url: 21\u001b[0m\n", - "\u001b[32m2025-11-20 15:45:22.214\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36mcommend\u001b[0m:\u001b[36m44\u001b[0m - \u001b[1m轮播图新闻url: 7\u001b[0m\n", - "\u001b[32m2025-11-20 15:45:23.134\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36mcommend\u001b[0m:\u001b[36m54\u001b[0m - \u001b[1m聚焦新闻url: 124\u001b[0m\n", - "\u001b[32m2025-11-20 15:45:23.135\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcrawler.xhw.XhwCrawler\u001b[0m:\u001b[36mcommend\u001b[0m:\u001b[36m55\u001b[0m - \u001b[1m获取到新闻url:157\u001b[0m\n" - ] - }, { "data": { "text/plain": [ - "ResultDomain(code=0, message='', success=True, data=None, dataList=[])" + "NewsItem(title='全国精神文明建设表彰大会在京召开', contentRows=[{'tag': 'img', 'content': \"\"}, {'tag': 'p', 'content': '

\\u2003\\u20035月23日,全国精神文明建设表彰大会在北京召开。中央宣传思想文化工作领导小组决定,授予202个城市(区)全国文明城市(区)称号,授予3316个村镇全国文明村镇称号,授予4688个单位全国文明单位称号,授予798户家庭全国文明家庭称号,授予890所学校全国文明校园称号;授予60名(组)同志第九届全国道德模范荣誉称号,授予239名(组)同志第九届全国道德模范提名奖。

'}, {'tag': 'p', 'content': '

\\u2003\\u2003新华社记者 翟健岚 摄

'}, {'tag': 'img', 'content': \"\"}, {'tag': 'p', 'content': '

\\u2003\\u20035月23日,全国精神文明建设表彰大会在北京召开。中央宣传思想文化工作领导小组决定,授予202个城市(区)全国文明城市(区)称号,授予3316个村镇全国文明村镇称号,授予4688个单位全国文明单位称号,授予798户家庭全国文明家庭称号,授予890所学校全国文明校园称号;授予60名(组)同志第九届全国道德模范荣誉称号,授予239名(组)同志第九届全国道德模范提名奖。

'}, {'tag': 'p', 'content': '

\\u2003\\u2003新华社记者 翟健岚 摄

'}, {'tag': 'img', 'content': \"\"}, {'tag': 'p', 'content': '

\\u2003\\u20035月23日,全国精神文明建设表彰大会在北京召开。中央宣传思想文化工作领导小组决定,授予202个城市(区)全国文明城市(区)称号,授予3316个村镇全国文明村镇称号,授予4688个单位全国文明单位称号,授予798户家庭全国文明家庭称号,授予890所学校全国文明校园称号;授予60名(组)同志第九届全国道德模范荣誉称号,授予239名(组)同志第九届全国道德模范提名奖。

'}, {'tag': 'p', 'content': '

\\u2003\\u2003新华社记者 翟健岚 摄

'}, {'tag': 'img', 'content': \"\"}, {'tag': 'p', 'content': '

\\u2003\\u20035月23日,全国精神文明建设表彰大会在北京召开。中央宣传思想文化工作领导小组决定,授予202个城市(区)全国文明城市(区)称号,授予3316个村镇全国文明村镇称号,授予4688个单位全国文明单位称号,授予798户家庭全国文明家庭称号,授予890所学校全国文明校园称号;授予60名(组)同志第九届全国道德模范荣誉称号,授予239名(组)同志第九届全国道德模范提名奖。

'}, {'tag': 'p', 'content': '

\\u2003\\u2003新华社记者 翟健岚 摄

'}], url='https://www.news.cn/photo/20250523/9fc5e377b19047918dfe0eca2aad5c67/c.html', viewCount=None, publishTime='2025-05-23 18:33:10', author=None, source='新华网', category=None, executeStatus=0, executeMessage=None)" ] }, - "execution_count": 17, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "crawler.commend()" + "#crawler.parse_xh_news_detail(\"https://www.news.cn/politics/leaders/20250224/5384be3d47c643b3a68e3bb724656152/c.html\")\n", + "# crawler.parse_xh_news_detail(\"https://www.news.cn/politics/leaders/20240207/2819fe60663140eab9599581dcae8c1e/c.html\") #视频\n", + "crawler.parse_xh_news_detail(\"https://www.news.cn/photo/20250523/9fc5e377b19047918dfe0eca2aad5c67/c.html\") # 分页" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "fa359d5b", + "metadata": {}, + "outputs": [], + "source": [ + "# crawler.commend()" ] }, {