From 9f56f4fd2484445c43ab2a219f846f9d46a15da6 Mon Sep 17 00:00:00 2001
From: wangys <3401275564@qq.com>
Date: Thu, 20 Nov 2025 15:22:26 +0800
Subject: [PATCH] Xinhuanet hot points
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 schoolNewsCrawler/crawler/xhw/XhwCrawler.py  | 31 +++++++--
 schoolNewsCrawler/crawler/xhw/XhwHotPoint.py | 70 ++++++++++++++++++++
 schoolNewsCrawler/crawler/xhw/XhwSearch.py   |  6 ++--
 3 files changed, 97 insertions(+), 10 deletions(-)
 create mode 100644 schoolNewsCrawler/crawler/xhw/XhwHotPoint.py

diff --git a/schoolNewsCrawler/crawler/xhw/XhwCrawler.py b/schoolNewsCrawler/crawler/xhw/XhwCrawler.py
index d9a8b0a..d391006 100644
--- a/schoolNewsCrawler/crawler/xhw/XhwCrawler.py
+++ b/schoolNewsCrawler/crawler/xhw/XhwCrawler.py
@@ -120,10 +120,8 @@ class XhwCrawler(BaseCrawler):
 
         # Visit the homepage to pick up the initial cookies
         logger.info("Visiting homepage for initial cookies")
-        logger.info(f"About to visit URL: {self.config.base_url}")
         try:
             driver.get(self.config.base_url)
-            logger.info(f"Successfully visited URL: {self.config.base_url}")
         except Exception as e:
             logger.error(f"Failed to visit URL: {self.config.base_url}, error: {str(e)}")
             return None
@@ -537,22 +535,41 @@ class XhwCrawler(BaseCrawler):
             resultDomain.success = False
             return resultDomain
 
-
+        # Open the hot point list page
+        try:
+            self.driver.get(hot_point_config.url)
+            time.sleep(2)
+        except Exception as e:
+            logger.warning(f"Failed to open hot point page: {hot_point_config.url}, {e}")
+            return resultDomain
 
         try:
             # Collect the news URLs from the list page
             url_base_map = {}
-
-
-
+            news_div = self.driver.find_element(By.CSS_SELECTOR, "section.wrapper > div.page-news.center-1200")
+            hot_news_div = news_div.find_element(By.CSS_SELECTOR, "div.page-news-l")
+
+            news_items_div = hot_news_div.find_element(By.CSS_SELECTOR, "div.page-news-list")
+            news_items = news_items_div.find_elements(By.CSS_SELECTOR, "div.item")
+            for news in news_items:
+                a_tag = news.find_element(By.TAG_NAME, "a")
+                news_url = a_tag.get_attribute("href")
+                news_title = a_tag.text.strip()
+                url_base_map[news_url] = {"title": news_title}
+                news_urls.append(news_url)
+
+            # Fetch the detail page for each collected URL, keeping at most 5
+            count = 0
             for news_url in news_urls:
                 try:
                     news = self.parse_news_detail(news_url)
                     if news:
                         news.title = url_base_map.get(news_url, {}).get("title") or news.title
-                        news.publishTime = url_base_map.get(news_url, {}).get("date") or news.publishTime
                         news_list.append(news)
+                        count += 1
+                        if count >= 5:
+                            break
                 except Exception as e:
                     logger.warning(f"Failed to parse news item: {news_url}, {e}")
                     continue
 
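Reviewer note on the hot_point() traversal above: the nested find_element chain walks section.wrapper > div.page-news.center-1200, then div.page-news-l, div.page-news-list, and finally the div.item entries, after a fixed time.sleep(2). Below is a minimal standalone sketch of the same lookup, assuming the page has a single hot-news column; HOT_POINT_URL is a placeholder for hot_point_config.url (which this patch does not show), and the explicit WebDriverWait is a suggested alternative to the fixed sleep, not what the patch itself does.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# Placeholder for hot_point_config.url, which is configured elsewhere.
HOT_POINT_URL = "https://example.invalid/hot-point"

# Roughly equivalent to the nested find_element chain in the patch,
# assuming only one div.page-news-l column exists on the page.
ITEM_SELECTOR = ("section.wrapper > div.page-news.center-1200 "
                 "div.page-news-l div.page-news-list div.item")

driver = webdriver.Chrome()
try:
    driver.get(HOT_POINT_URL)
    # An explicit wait returns as soon as the list renders and raises a
    # TimeoutException with a clear message, unlike a fixed time.sleep(2).
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, ITEM_SELECTOR)))
    # Mirror the crawler's cap of 5 items.
    for item in driver.find_elements(By.CSS_SELECTOR, ITEM_SELECTOR)[:5]:
        a_tag = item.find_element(By.TAG_NAME, "a")
        print(a_tag.get_attribute("href"), a_tag.text.strip())
finally:
    driver.quit()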
"code": result.code, + "message": result.message, + "success": result.success, + "data": None, + "dataList": [item.model_dump() for item in result.dataList] if result.dataList else [] + } + # result = None + # with open("F:\Project\schoolNews\schoolNewsCrawler\output\output.json", "r", encoding="utf-8") as f: + # result = json.load(f) + # print(result) + # output = result + + + if output_file: + output_path = Path(output_file) + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, 'w', encoding='utf-8') as f: + json.dump(output, f, ensure_ascii=False, indent=2) + logger.info(f"结果已保存到: {output_file}") + + crawler.close() + sys.exit(0 if result.success else 1) + # print(json.dumps(output, ensure_ascii=False, indent=2)) + # sys.exit(0 if result["success"] else 1) + except Exception as e: + logger.error(f"执行失败: {str(e)}") + error_output = { + "code": 500, + "message": f"执行失败: {str(e)}", + "success": False, + "data": None, + "dataList": [] + } + print(json.dumps(error_output, ensure_ascii=False, indent=2)) + sys.exit(1) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/schoolNewsCrawler/crawler/xhw/XhwSearch.py b/schoolNewsCrawler/crawler/xhw/XhwSearch.py index c1e8de2..27900df 100644 --- a/schoolNewsCrawler/crawler/xhw/XhwSearch.py +++ b/schoolNewsCrawler/crawler/xhw/XhwSearch.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- """ -人民日报搜索爬虫命令行工具 +新华网搜索爬虫命令行工具 用法: python RmrbSearch.py --key "关键词" --total 10 --type 0 """ @@ -20,7 +20,7 @@ from loguru import logger def main(): """主函数""" parser = argparse.ArgumentParser( - description='人民日报新闻搜索工具', + description='新华网新闻搜索工具', formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" """