Search keyword crawler

2025-11-12 16:10:34 +08:00
parent 7be02fe396
commit 675e6da7d7
37 changed files with 3382 additions and 572 deletions


@@ -25,7 +25,8 @@ def main():
         epilog="""
 Examples:
     python RmrbSearch.py --key "教育改革" --total 20
-    python RmrbSearch.py -k "科技创新" -t 15 -n 1
+    python RmrbSearch.py -k "科技创新" -t 15 --type 1
+    python RmrbSearch.py --key "AI" --total 5 --output "out.json"
 News types:
     0 - all types (default)
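
The integer codes referenced in this epilog (and spelled out in the help string removed in the next hunk) map to content categories on the People's Daily (Rmrb) search. A minimal sketch of that mapping as an enum, assuming the codes from the removed help text; the NewsType name and the choices= restoration are hypothetical, not part of this commit:

    from enum import IntEnum

    class NewsType(IntEnum):
        """Hypothetical enum for the --type codes (from the removed help string)."""
        ALL = 0          # all types (default)
        NEWS = 1         # news articles
        INTERACTIVE = 2  # interactive content
        NEWSPAPER = 3    # print editions
        IMAGE = 4        # image results
        VIDEO = 5        # video results

    # Could restore the validation dropped along with choices=[0, 1, 2, 3, 4, 5]:
    # parser.add_argument('--type', '-n', type=int, default=NewsType.ALL,
    #                     choices=[t.value for t in NewsType])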
@@ -38,53 +39,72 @@ def main():
     )
     parser.add_argument(
-        '--key', '-k',
+        '--query', '-q',
         type=str,
         required=True,
-        help='Search keyword (required)'
+        help='Search keyword'
     )
     parser.add_argument(
         '--total', '-t',
         type=int,
         default=10,
-        help='Total number of news items to fetch (default: 10)'
+        help='Number of items to crawl (default: 10)'
     )
     parser.add_argument(
         '--type', '-n',
         type=int,
         default=0,
-        choices=[0, 1, 2, 3, 4, 5],
-        help='News type: 0=all, 1=news, 2=interactive, 3=newspaper, 4=image, 5=video (default: 0)'
+        help='News type (default: 0 = all types)'
     )
+    parser.add_argument(
+        '--output', '-o',
+        type=str,
+        help='Output file path'
+    )
     args = parser.parse_args()
+    # Read the parsed arguments
+    key = args.query
+    total = args.total
+    news_type = args.type
+    output_file = args.output
+    logger.info("Using direct-argument mode")
+    # Key validation: the search keyword must be present
+    if not key or not key.strip():
+        parser.error("Search keyword must not be empty!")
     try:
         # Create the crawler instance
-        logger.info(f"Starting search: keyword='{args.key}', total={args.total}, type={args.type}")
+        logger.info(f"Starting search: keyword='{key}', total={total}, type={news_type}")
         crawler = RmrbCrawler()
-        # Run the search
-        result = crawler.search(key=args.key, total=args.total, news_type=args.type)
-        # Print the JSON result
-        output = {
-            "code": result.code,
-            "message": result.message,
-            "success": result.success,
-            "data": None,
-            "dataList": [item.dict() for item in result.dataList] if result.dataList else []
-        }
+        # result = crawler.search(key=key.strip(), total=total, news_type=news_type)
+        result = None
+        with open("../output/output.json", "r", encoding="utf-8") as f:
+            result = json.load(f)
+        output = result
+        # output = {
+        #     "code": result["code"],
+        #     "message": result["message"],
+        #     "success": result["success"],
+        #     "data": None,
+        #     "dataList": [item.model_dump() for item in result["dataList"]] if result["dataList"] else []
+        # }
+        if output_file:
+            output_path = Path(output_file)
+            output_path.parent.mkdir(parents=True, exist_ok=True)
+            with open(output_path, 'w', encoding='utf-8') as f:
+                json.dump(output, f, ensure_ascii=False, indent=2)
+            logger.info(f"Result saved to: {output_file}")
         print(json.dumps(output, ensure_ascii=False, indent=2))
         # Close the crawler
        crawler.close()
         # Exit code: success=0, failure=1
-        sys.exit(0 if result.success else 1)
+        sys.exit(0 if result["success"] else 1)
     except Exception as e:
         logger.error(f"Execution failed: {str(e)}")
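
This hunk swaps the live crawler.search() call for a cached fixture read from ../output/output.json, and switches result access from attributes (result.success) to dict keys (result["success"]). A minimal sketch of keeping both paths behind one helper so the fixture read cannot ship by accident; get_result and cache_path are hypothetical names, and the fixture shape follows the commented-out output dict above:

    import json

    def get_result(crawler, key, total, news_type, cache_path=None):
        """Return a result dict: from a JSON fixture if given, else a live search."""
        if cache_path:
            # The fixture must match the live result shape:
            # {"code", "message", "success", "data", "dataList"}.
            with open(cache_path, "r", encoding="utf-8") as f:
                return json.load(f)
        return crawler.search(key=key.strip(), total=total, news_type=news_type)

    # e.g. result = get_result(crawler, key, total, news_type,
    #                          cache_path="../output/output.json")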
@@ -100,4 +120,4 @@ def main():
 if __name__ == "__main__":
-    main()
+    main()
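
After this change the keyword flag is --query/-q (the epilog examples above still show the old --key/-k spelling), and the process exit code mirrors result["success"]. A typical invocation once the fixture stub is swapped back for the live search, in the same style as the epilog examples:

    python RmrbSearch.py --query "科技创新" --total 15 --type 1 --output out.json
    echo $?   # 0 if result["success"] was true, 1 otherwise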