#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
People's Daily (Rmrb) search crawler command-line tool.

Usage:
    python RmrbSearch.py --key "keyword" --total 10 --type 0
"""

import argparse
import json
import sys
from pathlib import Path

# Add the parent directory to sys.path so the crawler package can be imported
sys.path.insert(0, str(Path(__file__).parent.parent))

from crawler.RmrbCrawler import RmrbCrawler
from loguru import logger


def main():
    """Entry point: parse arguments, run the search, and print the result as JSON."""
    parser = argparse.ArgumentParser(
        description="People's Daily news search tool",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    python RmrbSearch.py --key "教育改革" --total 20
    python RmrbSearch.py -k "科技创新" -t 15 -n 1

News types:
    0 - all types (default)
    1 - news
    2 - interactive
    3 - newspaper
    4 - images
    5 - video
"""
    )

    parser.add_argument(
        '--key', '-k',
        type=str,
        required=True,
        help='search keyword (required)'
    )
    parser.add_argument(
        '--total', '-t',
        type=int,
        default=10,
        help='total number of news items to fetch (default: 10)'
    )
    parser.add_argument(
        '--type', '-n',
        type=int,
        default=0,
        choices=[0, 1, 2, 3, 4, 5],
        help='news type: 0=all, 1=news, 2=interactive, 3=newspaper, 4=images, 5=video (default: 0)'
    )

    args = parser.parse_args()

    try:
        # Create the crawler instance
        logger.info(f"Starting search: key='{args.key}', total={args.total}, type={args.type}")
        crawler = RmrbCrawler()

        # Run the search
        result = crawler.search(key=args.key, total=args.total, news_type=args.type)

        # Print the result as JSON
        output = {
            "code": result.code,
            "message": result.message,
            "success": result.success,
            "data": None,
            "dataList": [item.dict() for item in result.dataList] if result.dataList else []
        }
        print(json.dumps(output, ensure_ascii=False, indent=2))

        # Release crawler resources
        crawler.close()

        # Exit code: 0 on success, 1 on failure
        sys.exit(0 if result.success else 1)

    except Exception as e:
        logger.error(f"Execution failed: {str(e)}")
        error_output = {
            "code": 500,
            "message": f"Execution failed: {str(e)}",
            "success": False,
            "data": None,
            "dataList": []
        }
        print(json.dumps(error_output, ensure_ascii=False, indent=2))
        sys.exit(1)


if __name__ == "__main__":
    main()
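
Because the script prints a single JSON object to stdout and signals success through its exit code (loguru's default sink is stderr, so log lines should not mix into the JSON), another program can drive it with `subprocess`. Below is a minimal sketch of such a caller, assuming `RmrbSearch.py` is in the current working directory and the `crawler` package it imports is resolvable; the keyword is purely illustrative.

```python
import json
import subprocess

# Run the CLI tool; capture_output=True collects stdout (JSON) and
# stderr (loguru logs) separately, text=True decodes them as strings.
proc = subprocess.run(
    ["python", "RmrbSearch.py", "--key", "教育改革", "--total", "5"],
    capture_output=True,
    text=True,
)

# Exit code 0 means result.success was True; anything else is a failure.
if proc.returncode == 0:
    result = json.loads(proc.stdout)
    for item in result["dataList"]:
        print(item)
else:
    print("search failed:", proc.stdout or proc.stderr)
```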