Xinhuanet hot topics
@@ -120,10 +120,8 @@ class XhwCrawler(BaseCrawler):

         # 访问主页获取初始Cookie
         logger.info("访问主页获取初始Cookie")
-        logger.info(f"准备访问URL: {self.config.base_url}")
         try:
             driver.get(self.config.base_url)
-            logger.info(f"成功访问URL: {self.config.base_url}")
         except Exception as e:
             logger.error(f"访问URL失败: {self.config.base_url}, 错误: {str(e)}")
             return None
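The hunk above visits the configured home page once so the Selenium session picks up the site's initial cookies before any further navigation. A minimal standalone sketch of that pattern follows; the ChromeDriver setup and the hard-coded URL are assumptions for illustration, not values from this repository (the crawler reads the address from self.config.base_url).

from selenium import webdriver

# Minimal sketch: load the home page once so later requests reuse its cookies.
driver = webdriver.Chrome()            # assumes a local ChromeDriver; the project may configure options differently
driver.get("https://www.news.cn")      # placeholder URL; the crawler uses self.config.base_url
cookies = driver.get_cookies()         # cookies set by the home page, available to subsequent page loads
print([c["name"] for c in cookies])
driver.quit()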
@@ -537,22 +535,41 @@ class XhwCrawler(BaseCrawler):
             resultDomain.success = False
             return resultDomain

+        # 访问搜索页
+        try:
+            self.driver.get(hot_point_config.url)
+            time.sleep(2)
+        except Exception as e:
+            logger.warning(f"访问搜索页失败: {hot_point_config.url}, {e}")
+            return resultDomain

         try:
             # 获取新闻url
             url_base_map = {}
+            news_div = self.driver.find_element(By.CSS_SELECTOR, "section.wrapper > div.page-news.center-1200")
+            hot_news_div = news_div.find_element(By.CSS_SELECTOR, "div.page-news-l")
+
+            news_items_div = hot_news_div.find_element(By.CSS_SELECTOR, "div.page-news-list")
+            news_items = news_items_div.find_elements(By.CSS_SELECTOR, "div.item")
+            for news in news_items:
+                a_tag = news.find_element(By.TAG_NAME, "a")
+                news_url = a_tag.get_attribute("href")
+                news_title = a_tag.text.strip()
+                url_base_map[news_url] = {"title": news_title}
+                news_urls.append(news_url)

             # 从新闻url中获取新闻详情
+            count = 0
             for news_url in news_urls:
                 try:
                     news = self.parse_news_detail(news_url)
                     if news:
                         news.title = url_base_map.get(news_url, {}).get("title") or news.title
-                        news.publishTime = url_base_map.get(news_url, {}).get("date") or news.publishTime
                         news_list.append(news)
+                        count += 1
+                        if count >= 5:
+                            break
                 except Exception as e:
                     logger.warning(f"解析新闻失败: {news_url}, {e}")
                     continue
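The new block above drives the hot-news listing page, walks a fixed CSS-selector chain to collect article links and titles, and then parses at most five of them in detail. A standalone sketch of the same selector walk, runnable outside the class, might look like the following; the URL is a placeholder (the crawler takes it from hot_point_config.url), while the selectors are copied from the diff.

import time
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://www.news.cn")      # placeholder; the real page comes from hot_point_config.url
time.sleep(2)                          # same fixed wait the crawler uses after loading the page

# Selector chain from the commit: outer wrapper -> left column -> news list -> items.
news_div = driver.find_element(By.CSS_SELECTOR, "section.wrapper > div.page-news.center-1200")
news_list = news_div.find_element(By.CSS_SELECTOR, "div.page-news-l div.page-news-list")
for item in news_list.find_elements(By.CSS_SELECTOR, "div.item")[:5]:   # the crawler stops after five parsed articles
    a_tag = item.find_element(By.TAG_NAME, "a")
    print(a_tag.get_attribute("href"), a_tag.text.strip())

driver.quit()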
schoolNewsCrawler/crawler/xhw/XhwHotPoint.py (new file, 88 lines added)
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+新华网搜索爬虫命令行工具
+用法: python RmrbSearch.py --key "关键词" --total 10 --type 0
+"""
+
+import argparse
+import json
+import sys
+from pathlib import Path
+import time
+# Add project root directory to path to import crawler
+sys.path.insert(0, str(Path(__file__).parent.parent.parent))
+
+from crawler.xhw.XhwCrawler import XhwCrawler
+from loguru import logger
+
+
+def main():
+    """主函数"""
+    parser = argparse.ArgumentParser(
+        description='新华网新闻搜索工具',
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+        """
+    )
+
+    parser.add_argument(
+        '--output', '-o',
+        type=str,
+        help='输出文件路径'
+    )
+
+    args = parser.parse_args()
+
+    output_file = args.output
+
+    logger.info("使用直接参数模式")
+
+    try:
+        crawler = XhwCrawler()
+        time.sleep(5)
+        result = crawler.hot_point()
+        # print(result)
+        output = {
+            "code": result.code,
+            "message": result.message,
+            "success": result.success,
+            "data": None,
+            "dataList": [item.model_dump() for item in result.dataList] if result.dataList else []
+        }
+        # result = None
+        # with open("F:\Project\schoolNews\schoolNewsCrawler\output\output.json", "r", encoding="utf-8") as f:
+        #     result = json.load(f)
+        # print(result)
+        # output = result
+
+        if output_file:
+            output_path = Path(output_file)
+            output_path.parent.mkdir(parents=True, exist_ok=True)
+            with open(output_path, 'w', encoding='utf-8') as f:
+                json.dump(output, f, ensure_ascii=False, indent=2)
+            logger.info(f"结果已保存到: {output_file}")
+
+        crawler.close()
+        sys.exit(0 if result.success else 1)
+        # print(json.dumps(output, ensure_ascii=False, indent=2))
+        # sys.exit(0 if result["success"] else 1)
+    except Exception as e:
+        logger.error(f"执行失败: {str(e)}")
+        error_output = {
+            "code": 500,
+            "message": f"执行失败: {str(e)}",
+            "success": False,
+            "data": None,
+            "dataList": []
+        }
+        print(json.dumps(error_output, ensure_ascii=False, indent=2))
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
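The new XhwHotPoint.py above takes only an --output path, runs XhwCrawler.hot_point(), and writes a result envelope with code, message, success, data and dataList. A hypothetical invocation from the repository root might look like the following sketch; the output path and working directory are assumptions, not values from the commit.

import json
import subprocess

# Run the CLI tool; it exits 0 on success and 1 on failure.
subprocess.run(
    ["python", "schoolNewsCrawler/crawler/xhw/XhwHotPoint.py", "--output", "output/hot_point.json"],
    check=False,
)

# The saved file mirrors the envelope built in main(): code, message, success, data, dataList.
with open("output/hot_point.json", encoding="utf-8") as f:
    result = json.load(f)
print(result["success"], len(result["dataList"]))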
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 """
-人民日报搜索爬虫命令行工具
+新华网搜索爬虫命令行工具
 用法: python RmrbSearch.py --key "关键词" --total 10 --type 0
 """
@@ -20,7 +20,7 @@ from loguru import logger
 def main():
     """主函数"""
     parser = argparse.ArgumentParser(
-        description='人民日报新闻搜索工具',
+        description='新华网新闻搜索工具',
         formatter_class=argparse.RawDescriptionHelpFormatter,
         epilog="""
         """