from typing import List, Dict, Any
import json


class ReportIntegrator:
    """Medical report integration analyzer.

    Combines the per-report analyses of one or more medical reports into a
    single structured assessment.  Delegates to an LLM backend (OpenAI or
    Ollama) when one is configured on ``llm_service`` and otherwise falls
    back to a deterministic mock result.
    """

    def __init__(self, llm_service):
        # llm_service is expected to provide: llm_type ("openai"/"ollama"/other),
        # model, _parse_llm_response(), and — depending on llm_type — either an
        # OpenAI-style `client` or an `ollama_host` URL.  (Inferred from the
        # usage in the backend methods below.)
        self.llm_service = llm_service

    def integrate_reports(self, reports: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Integrate multiple medical reports into one combined assessment.

        Args:
            reports: report dicts, each with "filename" and "analysis" keys.

        Returns:
            A dict with overall summary, health trends, priority concerns,
            recommendations, follow-up suggestions and the list of reports
            included; on failure, a structured error result
            (see ``_create_error_result``).
        """
        # Robustness fix: fail fast on empty input instead of sending a
        # meaningless zero-report prompt to an LLM backend.
        if not reports:
            return self._create_error_result("没有可分析的报告")

        # A single report needs no cross-report integration.
        if len(reports) == 1:
            return self._single_report_summary(reports[0])

        # Build the integration-analysis prompt.
        prompt = self._build_integration_prompt(reports)

        # Dispatch to the configured LLM backend; any unrecognized backend
        # falls back to the mock result.
        if self.llm_service.llm_type == "openai":
            result = self._call_openai_integration(prompt)
        elif self.llm_service.llm_type == "ollama":
            result = self._call_ollama_integration(prompt)
        else:
            result = self._mock_integration(reports)

        # Attach the list of source reports for traceability.
        result["reports_included"] = [
            {
                "filename": report["filename"],
                "summary": report["analysis"].get("summary", "无摘要"),
            }
            for report in reports
        ]
        return result

    def _build_integration_prompt(self, reports: List[Dict[str, Any]]) -> str:
        """Build the integration-analysis prompt sent to the LLM."""
        report_details = []
        for i, report in enumerate(reports, 1):
            analysis = report["analysis"]
            report_details.append(
                f"【报告{i}: {report['filename']}】\n摘要: {analysis.get('summary', '无')}"
            )

        # Join outside the f-string: backslashes are not allowed inside
        # f-string expressions before Python 3.12 (the original worked
        # around this with chr(10)).
        details_text = "\n".join(report_details)

        prompt = f"""你是专业医疗分析专家。整合以下{len(reports)}份报告,提供综合评估。

{details_text}

请以JSON格式返回:
{{"overall_summary": "整体摘要", "health_trends": ["趋势"], "priority_concerns": [{{"concern": "关注点", "severity": "低/中/高", "description": "描述"}}], "comprehensive_assessment": "综合评估", "integrated_recommendations": ["建议"], "follow_up_suggestions": ["后续建议"]}}"""
        return prompt

    def _call_openai_integration(self, prompt: str) -> Dict[str, Any]:
        """Run the integration prompt through the OpenAI chat-completions API.

        Returns the parsed LLM response, or a structured error result if the
        call or parsing fails.
        """
        try:
            response = self.llm_service.client.chat.completions.create(
                model=self.llm_service.model,
                messages=[
                    {"role": "system", "content": "你是医疗分析专家。"},
                    {"role": "user", "content": prompt},
                ],
                temperature=0.7,
                max_tokens=3000,
            )
            content = response.choices[0].message.content
            return self.llm_service._parse_llm_response(content)
        except Exception as e:
            # Service boundary: degrade to a structured error result
            # instead of propagating the exception to the caller.
            return self._create_error_result(f"OpenAI分析失败: {str(e)}")

    def _call_ollama_integration(self, prompt: str) -> Dict[str, Any]:
        """Run the integration prompt through a local Ollama server.

        Returns the parsed LLM response, or a structured error result on any
        HTTP or parsing failure.
        """
        try:
            # Local import keeps the module importable when the optional
            # third-party `requests` dependency is absent.
            import requests

            response = requests.post(
                f"{self.llm_service.ollama_host}/api/generate",
                json={
                    "model": self.llm_service.model,
                    "prompt": prompt,
                    "stream": False,
                },
                timeout=90,
            )
            if response.status_code == 200:
                return self.llm_service._parse_llm_response(
                    response.json().get("response", "")
                )
            raise Exception(f"Ollama错误: {response.status_code}")
        except Exception as e:
            return self._create_error_result(f"Ollama分析失败: {str(e)}")

    def _mock_integration(self, reports: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Produce a deterministic mock integration result (no LLM configured)."""
        total_abnormal = sum(
            len(report["analysis"].get("abnormal_items", [])) for report in reports
        )
        return {
            "overall_summary": f"综合分析了{len(reports)}份报告,发现{total_abnormal}项异常指标。整体健康状况良好。",
            "health_trends": ["各项指标整体稳定", "未发现明显恶化趋势", "建议持续监测"],
            "priority_concerns": (
                [{"concern": "定期体检", "severity": "低", "description": "建议保持定期体检"}]
                if total_abnormal == 0
                else [{"concern": "异常指标", "severity": "中", "description": f"发现{total_abnormal}项异常"}]
            ),
            "comprehensive_assessment": "整体健康状况可控,建议关注生活方式、定期复查。",
            "integrated_recommendations": ["保持均衡饮食", "坚持适量运动", "保证充足睡眠", "定期体检"],
            "follow_up_suggestions": ["3-6个月后复查关键指标", "如有不适及时就医", "保持健康记录"],
            "note": "这是模拟结果。实际使用请配置OpenAI或Ollama。",
        }

    def _single_report_summary(self, report: Dict[str, Any]) -> Dict[str, Any]:
        """Summarize a single report in the same shape as an integrated result."""
        analysis = report["analysis"]
        return {
            "overall_summary": f"单份报告分析:{analysis.get('summary', '无摘要')}",
            "reports_included": [
                {"filename": report["filename"], "summary": analysis.get("summary", "无")}
            ],
            "health_trends": analysis.get("key_findings", []),
            # Surface at most the first three abnormal items as concerns.
            "priority_concerns": [
                {"concern": item, "severity": "中", "description": "需关注"}
                for item in analysis.get("abnormal_items", [])[:3]
            ],
            "comprehensive_assessment": analysis.get("risk_assessment", "请查看详细分析"),
            "integrated_recommendations": analysis.get("recommendations", []),
            "follow_up_suggestions": ["定期复查", "咨询医生"],
        }

    def _create_error_result(self, error_msg: str) -> Dict[str, Any]:
        """Build a structured error result with the same keys as a success result."""
        return {
            "error": error_msg,
            "overall_summary": "分析失败",
            "health_trends": [],
            "priority_concerns": [],
            "comprehensive_assessment": "无法完成分析",
            "integrated_recommendations": [],
            "follow_up_suggestions": [],
        }