This commit is contained in:
2026-03-02 17:12:17 +08:00
parent b30af4aff8
commit 843146cdd7
2489 changed files with 7434 additions and 61841 deletions

View File

@@ -1,10 +0,0 @@
{
"permissions": {
"allow": [
"Bash(ls:*)",
"Bash(tree:*)",
"Bash(find:*)",
"Bash(dir:*)"
]
}
}

View File

@@ -1,46 +0,0 @@
# Docker 构建时排除的目录和文件
# 排除数据卷目录(PostgreSQL 等容器创建的数据)
**/volumes/
**/data/
docker/**/volumes/
# 排除日志
**/logs/
*.log
# 排除临时文件
*.tmp
*.swp
.tmp/
# 排除 git
.git/
**/.git/
# 排除 IDE 配置
.idea/
.vscode/
**/.vscode/
# 排除环境文件
.env
.env.local
**/.env.local
# 排除 node_modules(前端构建时会重新安装)
**/node_modules/
# 排除构建产物(保留 JAR 文件)
**/build/
# 排除 target 下的非 JAR 文件
**/target/classes/
**/target/generated-sources/
**/target/generated-test-sources/
**/target/maven-archiver/
**/target/maven-status/
**/target/test-classes/
**/target/*.original
# 排除导出的镜像
docker/urbanLifeline/images/

210
.gitignore vendored
View File

@@ -1,207 +1,7 @@
# ---> Java
# Compiled class file
*.class
# Log file
*.log
# BlueJ files
*.ctxt
# Mobile Tools for Java (J2ME)
.mtj.tmp/
# Package Files #
*.jar
*.war
*.nar
*.ear
*.zip
*.tar.gz
*.rar
.claude
.idea
.kiro
.tmp
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
replay_pid*
.trae
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
江西城市生命线-可交互原型/frontend/node_modules/*
THAI-Platform/*
urbanLifelineWeb/packages/wechat_demo/*
urbanLifelineWeb/packages/workcase_wechat/unpackage/*
docs/AI训练资料
tellhow-app2/
**/*.difypkg

5
.idea/.gitignore generated vendored
View File

@@ -1,5 +0,0 @@
# 默认忽略的文件
/shelf/
/workspace.xml
# 基于编辑器的 HTTP 客户端请求
/httpRequests/

6
.idea/misc.xml generated
View File

@@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager">
<output url="file://$PROJECT_DIR$/out" />
</component>
</project>

8
.idea/modules.xml generated
View File

@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/urbanLifeline.iml" filepath="$PROJECT_DIR$/.idea/urbanLifeline.iml" />
</modules>
</component>
</project>

View File

@@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

9
.idea/vcs.xml generated
View File

@@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
<mapping directory="$PROJECT_DIR$/ai-management-dify" vcs="Git" />
<mapping directory="$PROJECT_DIR$/ai-management-platform" vcs="Git" />
<mapping directory="$PROJECT_DIR$/pigx-ai-app" vcs="Git" />
</component>
</project>

View File

@@ -1,140 +0,0 @@
# UrbanLifeline 到 Pigx 迁移文档总览
## 📋 文档清单
本目录包含了 UrbanLifeline 系统迁移到 Pigx 平台所需的全部技术文档。
### 核心文档
| 文档名称 | 文件路径 | 说明 |
|---------|---------|------|
| **需求文档** | [requirements.md](./requirements.md) | 定义迁移需求,明确创建 pigx-dify 模块 |
| **设计文档** | [design.md](./design.md) | 技术设计方案,包含 pigx-dify 模块架构 |
| **任务清单** | [tasks.md](./tasks.md) | 详细的迁移任务列表,包含 AI 模块迁移步骤 |
### 技术指南
| 文档名称 | 文件路径 | 用途 |
|---------|---------|------|
| **权限映射表** | [permission-mapping.md](./permission-mapping.md) | 权限标识从 urbanLifeline 到 pigx 的映射对照 |
| **权限注解转换指南** | [permission-annotation-guide.md](./permission-annotation-guide.md) | @PreAuthorize 到 @pms.hasPermission 的转换方法 |
| **用户服务配置指南** | [security-config-guide.md](./security-config-guide.md) | SecurityUtils 和 RemoteUserService 的使用说明 |
| **数据库迁移脚本** | [database-migration-script.md](./database-migration-script.md) | PostgreSQL 到 MySQL 的完整迁移 SQL |
| **租户隔离指南** | [tenant-isolation-guide.md](./tenant-isolation-guide.md) | 多租户字段添加和隔离实现方案 |
| **Dify模块架构** | [pigx-dify-architecture.md](./pigx-dify-architecture.md) | 新建 pigx-dify 模块的详细架构设计 |
## 🎯 迁移要点总结
### 1. 核心变更
- ✅ 创建独立的 **pigx-dify** 模块承载 AI 功能
- ✅ 保留原有 Dify API 集成方式
- ✅ 权限体系完全适配 pigx(@pms.hasPermission)
- ✅ 所有业务表添加 tenant_id 实现多租户隔离
- ✅ 数据库从 PostgreSQL 迁移到 MySQL
### 2. 模块分布
| 模块 | 目标位置 | 状态 |
|------|---------|------|
| 工单 (workcase) | pigx-app-server-biz | 待迁移 |
| 招标 (bidding) | pigx-app-server-biz | 待迁移 |
| 平台管理 (platform) | pigx-app-server-biz | 待迁移 |
| AI功能 (ai) | **pigx-dify(新建)** | 待迁移 |
| 消息 (message) | pigx-app-server-biz | 待迁移 |
### 3. 关键技术适配
#### 权限转换
```java
// 原系统
@PreAuthorize("hasAuthority('workcase:ticket:create')")
// 新系统
@PreAuthorize("@pms.hasPermission('workcase_ticket_add')")
```
#### 用户信息获取
```java
// 原系统
JwtUtils.getUserId()
// 新系统
SecurityUtils.getUser().getId()
```
#### 响应格式
```java
// 原系统
ResultDomain.success(data)
// 新系统
R.ok(data)
```
## 📝 使用指南
### 第一步:理解需求和设计
1. 阅读 [requirements.md](./requirements.md) 了解迁移需求
2. 阅读 [design.md](./design.md) 理解技术方案
3. 查看 [pigx-dify-architecture.md](./pigx-dify-architecture.md) 了解 AI 模块设计
### 第二步:准备迁移
1. 使用 [tasks.md](./tasks.md) 作为任务清单
2. 参考 [permission-mapping.md](./permission-mapping.md) 准备权限映射
3. 阅读 [database-migration-script.md](./database-migration-script.md) 准备数据库
### 第三步:执行迁移
1. 按照 [permission-annotation-guide.md](./permission-annotation-guide.md) 转换权限注解
2. 根据 [security-config-guide.md](./security-config-guide.md) 配置用户服务
3. 使用 [tenant-isolation-guide.md](./tenant-isolation-guide.md) 实现租户隔离
### 第四步:验证测试
1. 验证权限控制正确性
2. 测试租户数据隔离
3. 确认 Dify 集成正常
4. 检查所有功能模块
## 🔧 工具和脚本
### 批量权限转换
```bash
# 权限注解批量替换
find . -name "*.java" -exec sed -i \
's/@PreAuthorize("hasAuthority(\x27\([^:]*\):\([^:]*\):\([^\x27]*\)\x27)")/@PreAuthorize("@pms.hasPermission(\x27\1_\2_\3\x27)")/g' {} \;
```
### 数据库迁移
```sql
-- 执行顺序
1. 创建 MySQL 数据库结构
2. 添加 tenant_id 字段
3. 迁移业务数据
4. 建立用户映射关系
5. 验证数据完整性
```
## 📊 迁移进度跟踪
使用 [tasks.md](./tasks.md) 中的任务清单跟踪进度:
- [ ] 基础设施准备
- [ ] 权限体系迁移
- [ ] 数据库迁移
- [ ] 后端代码迁移
- [ ] 前端页面迁移
- [ ] 集成测试
- [ ] 上线部署
## 🚨 重要提醒
1. **数据备份**:迁移前必须完整备份所有数据
2. **权限测试**:每个模块迁移后都要测试权限控制
3. **租户隔离**:确保所有查询都包含租户条件
4. **Dify配置**:保存好 Dify API Key 和配置信息
5. **回滚方案**:准备好回滚脚本和流程
## 📞 支持与反馈
如有问题,请参考相应的技术指南文档,或联系技术支持团队。
---
*最后更新时间:2024年*

View File

@@ -1,712 +0,0 @@
# PostgreSQL to MySQL 数据库迁移脚本
## 概述
本文档包含从 urbanLifeline (PostgreSQL) 到 pigx (MySQL) 的数据库迁移脚本。
## 迁移策略
1. PostgreSQL Schema → MySQL Database 或表前缀
2. 所有业务表添加 `tenant_id` 字段
3. 用户ID关联到 pigx 的 sys_user 表
4. 数据类型映射和语法适配
## 类型映射规则
| PostgreSQL | MySQL | 说明 |
|-----------|-------|------|
| SERIAL | INT AUTO_INCREMENT | 自增整数 |
| BIGSERIAL | BIGINT AUTO_INCREMENT | 自增大整数 |
| VARCHAR(n) | VARCHAR(n) | 可变长字符串 |
| TEXT | TEXT | 长文本 |
| TIMESTAMPTZ | DATETIME | 时间戳 |
| BOOLEAN | TINYINT(1) | 布尔值 |
| DECIMAL(m,n) | DECIMAL(m,n) | 十进制数 |
| INTEGER | INT | 整数 |
| BIGINT | BIGINT | 大整数 |
| JSONB | JSON | JSON数据 |
| VARCHAR(50)[] | JSON | 数组转JSON |
| TEXT[] | JSON | 文本数组转JSON |
## 1. 工单模块 (Workcase)
### 1.1 来客表(系统外部人员)
```sql
-- PostgreSQL 原表: sys.tb_guest
-- MySQL 目标表: tb_guest
CREATE TABLE IF NOT EXISTS `tb_guest` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`user_id` varchar(50) NOT NULL COMMENT '来客ID',
`name` varchar(50) NOT NULL COMMENT '姓名',
`phone` varchar(50) DEFAULT NULL COMMENT '电话',
`email` varchar(50) DEFAULT NULL COMMENT '邮箱',
`wechat_id` varchar(50) DEFAULT NULL COMMENT '微信号',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`user_id`),
UNIQUE KEY `uk_wechat_id` (`wechat_id`),
UNIQUE KEY `uk_phone` (`phone`),
UNIQUE KEY `uk_email` (`email`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='系统外部人员表';
### 1.2 聊天室表
CREATE TABLE IF NOT EXISTS `tb_chat_room` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`workcase_id` varchar(50) DEFAULT NULL COMMENT '关联工单ID',
`room_name` varchar(200) NOT NULL COMMENT '聊天室名称',
`room_type` varchar(20) NOT NULL DEFAULT 'workcase' COMMENT '聊天室类型',
`status` varchar(20) NOT NULL DEFAULT 'active' COMMENT '状态active-活跃 closed-已关闭 archived-已归档',
`guest_id` varchar(50) NOT NULL COMMENT '来客ID创建者',
`guest_name` varchar(100) NOT NULL COMMENT '来客姓名',
`ai_session_id` varchar(50) DEFAULT NULL COMMENT 'AI对话会话ID',
`message_count` int NOT NULL DEFAULT 0 COMMENT '消息总数',
`device_code` varchar(50) NOT NULL COMMENT '设备代码',
`last_message_time` datetime DEFAULT NULL COMMENT '最后消息时间',
`last_message` text DEFAULT NULL COMMENT '最后一条消息内容',
`comment_level` int DEFAULT 0 COMMENT '服务评分1-5',
`closed_by` varchar(50) DEFAULT NULL COMMENT '关闭人',
`closed_time` datetime DEFAULT NULL COMMENT '关闭时间',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`room_id`),
UNIQUE KEY `uk_workcase_id` (`workcase_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_guest_status` (`guest_id`, `status`),
KEY `idx_last_message_time` (`last_message_time` DESC),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='IM聊天室表一个工单对应一个聊天室';
### 1.3 聊天室成员表
CREATE TABLE IF NOT EXISTS `tb_chat_room_member` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`member_id` varchar(50) NOT NULL COMMENT '成员记录ID',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`user_id` varchar(50) NOT NULL COMMENT '用户ID来客ID或员工ID',
`user_type` varchar(20) NOT NULL COMMENT '用户类型guest-来客 staff-客服 ai-AI助手',
`user_name` varchar(100) NOT NULL COMMENT '用户名称',
`status` varchar(20) NOT NULL DEFAULT 'active' COMMENT '状态active-活跃 left-已离开 removed-被移除',
`unread_count` int NOT NULL DEFAULT 0 COMMENT '该成员的未读消息数',
`last_read_time` datetime DEFAULT NULL COMMENT '最后阅读时间',
`last_read_msg_id` varchar(50) DEFAULT NULL COMMENT '最后阅读的消息ID',
`join_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '加入时间',
`leave_time` datetime DEFAULT NULL COMMENT '离开时间',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`member_id`),
UNIQUE KEY `uk_room_user` (`room_id`, `user_id`),
KEY `idx_room_status` (`room_id`, `status`),
KEY `idx_user_status` (`user_id`, `user_type`, `status`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='聊天室成员表,记录来客和客服人员';
### 1.4 聊天室消息表
CREATE TABLE IF NOT EXISTS `tb_chat_room_message` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`message_id` varchar(50) NOT NULL COMMENT '消息ID',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`sender_id` varchar(50) NOT NULL COMMENT '发送者ID',
`sender_type` varchar(20) NOT NULL COMMENT '发送者类型guest-来客 agent-客服 ai-AI助手 system-系统消息',
`sender_name` varchar(100) NOT NULL COMMENT '发送者名称',
`message_type` varchar(20) NOT NULL DEFAULT 'text' COMMENT '消息类型text-文本 image-图片 file-文件 voice-语音 video-视频',
`content` text NOT NULL COMMENT '消息内容',
`files` json DEFAULT NULL COMMENT '附件文件ID数组',
`content_extra` json DEFAULT NULL COMMENT '扩展内容',
`reply_to_msg_id` varchar(50) DEFAULT NULL COMMENT '回复的消息ID',
`is_ai_message` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否AI消息',
`ai_message_id` varchar(50) DEFAULT NULL COMMENT 'AI原始消息ID',
`status` varchar(20) NOT NULL DEFAULT 'sent' COMMENT '状态sent-已发送 delivered-已送达 read-已读 failed-失败',
`read_count` int NOT NULL DEFAULT 0 COMMENT '已读人数',
`send_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '发送时间',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`message_id`),
KEY `idx_room_time` (`room_id`, `send_time` DESC),
KEY `idx_sender` (`sender_id`, `sender_type`),
KEY `idx_ai_message` (`ai_message_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='IM聊天消息表包含AI对话和人工客服消息';
### 1.5 聊天室总结表
CREATE TABLE IF NOT EXISTS `tb_chat_room_summary` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`summary_id` varchar(50) NOT NULL COMMENT '总结ID',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`question` text DEFAULT NULL COMMENT '核心问题',
`needs` json DEFAULT NULL COMMENT '核心诉求数组',
`answer` text DEFAULT NULL COMMENT '解决方案',
`workcloud` json DEFAULT NULL COMMENT '词云关键词数组',
`message_count` int DEFAULT 0 COMMENT '参与总结的消息数量',
`summary_time` datetime DEFAULT NULL COMMENT '总结生成时间',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`summary_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_room_time` (`room_id`, `summary_time` DESC),
KEY `idx_summary_time` (`summary_time` DESC),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='聊天室总结表保存AI生成的聊天总结分析';
### 1.6 视频会议表
CREATE TABLE IF NOT EXISTS `tb_video_meeting` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`meeting_id` varchar(50) NOT NULL COMMENT '会议ID也是Jitsi房间名',
`room_id` varchar(50) NOT NULL COMMENT '关联聊天室ID',
`workcase_id` varchar(50) NOT NULL COMMENT '关联工单ID',
`meeting_name` varchar(200) NOT NULL COMMENT '会议名称',
`meeting_password` varchar(50) DEFAULT NULL COMMENT '会议密码',
`description` varchar(500) DEFAULT NULL COMMENT '会议描述',
`jwt_token` text DEFAULT NULL COMMENT 'JWT Token',
`jitsi_room_name` varchar(200) NOT NULL COMMENT 'Jitsi房间名',
`jitsi_server_url` varchar(500) NOT NULL DEFAULT 'https://meet.jit.si' COMMENT 'Jitsi服务器地址',
`status` varchar(20) NOT NULL DEFAULT 'scheduled' COMMENT '状态',
`creator_type` varchar(20) NOT NULL COMMENT '创建者类型',
`creator_name` varchar(100) NOT NULL COMMENT '创建者名称',
`participant_count` int NOT NULL DEFAULT 0 COMMENT '参与人数',
`max_participants` int DEFAULT 10 COMMENT '最大参与人数',
`start_time` datetime NOT NULL COMMENT '会议开始时间',
`end_time` datetime NOT NULL COMMENT '会议结束时间',
`advance` int DEFAULT 5 COMMENT '提前入会时间(分钟)',
`actual_start_time` datetime DEFAULT NULL COMMENT '实际开始时间',
`actual_end_time` datetime DEFAULT NULL COMMENT '实际结束时间',
`duration_seconds` int DEFAULT 0 COMMENT '会议时长(秒)',
`iframe_url` text DEFAULT NULL COMMENT 'iframe嵌入URL',
`config` json DEFAULT NULL COMMENT 'Jitsi配置项',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`meeting_id`),
UNIQUE KEY `uk_jitsi_room_name` (`jitsi_room_name`),
KEY `idx_room_status` (`room_id`, `status`),
KEY `idx_workcase_status` (`workcase_id`, `status`),
KEY `idx_create_time` (`create_time` DESC),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='Jitsi Meet视频会议表';
### 1.7 客服人员配置表
CREATE TABLE IF NOT EXISTS `tb_customer_service` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`user_id` varchar(50) NOT NULL COMMENT '员工ID关联sys用户ID',
`username` varchar(100) NOT NULL COMMENT '员工姓名',
`user_code` varchar(50) DEFAULT NULL COMMENT '员工工号',
`status` varchar(20) NOT NULL DEFAULT 'offline' COMMENT '状态online-在线 busy-忙碌 offline-离线',
`skill_tags` json DEFAULT NULL COMMENT '技能标签',
`max_concurrent` int NOT NULL DEFAULT 5 COMMENT '最大并发接待数',
`avg_response_time` int DEFAULT NULL COMMENT '平均响应时间(秒)',
`satisfaction_score` decimal(3,2) DEFAULT NULL COMMENT '满意度评分0-5',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`user_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='客服人员配置表';
### 1.8 工单表
CREATE TABLE IF NOT EXISTS `tb_workcase` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`workcase_id` varchar(50) NOT NULL COMMENT '工单ID',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`user_id` varchar(50) NOT NULL COMMENT '来客ID',
`username` varchar(200) NOT NULL COMMENT '来客姓名',
`phone` varchar(20) NOT NULL COMMENT '来客电话',
`type` varchar(50) NOT NULL COMMENT '故障类型',
`device` varchar(50) DEFAULT NULL COMMENT '设备名称',
`device_code` varchar(50) DEFAULT NULL COMMENT '设备代码',
`device_name_plate` varchar(50) DEFAULT NULL COMMENT '设备名称牌',
`device_name_plate_img` varchar(50) NOT NULL COMMENT '设备名称牌图片',
`address` varchar(1000) DEFAULT NULL COMMENT '现场地址',
`description` varchar(1000) DEFAULT NULL COMMENT '故障描述',
`imgs` json DEFAULT NULL COMMENT '工单图片id数组',
`emergency` varchar(50) NOT NULL DEFAULT 'normal' COMMENT '紧急程度 normal-普通 emergency-紧急',
`status` varchar(50) NOT NULL DEFAULT 'pending' COMMENT '状态 pending-待处理 processing-处理中 done-已完成',
`processor` varchar(50) DEFAULT NULL COMMENT '处理人',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`workcase_id`),
UNIQUE KEY `uk_room_id` (`room_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='工单表';
### 1.9 工单处理过程表
CREATE TABLE IF NOT EXISTS `tb_workcase_process` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`workcase_id` varchar(50) NOT NULL COMMENT '工单ID',
`process_id` varchar(50) NOT NULL COMMENT '过程id',
`action` varchar(50) NOT NULL COMMENT '动作 info:记录,assign:指派,redeploy:转派,repeal:撤销,finish:完成',
`message` varchar(200) DEFAULT NULL COMMENT '消息',
`files` json DEFAULT NULL COMMENT '携带文件',
`processor` varchar(50) DEFAULT NULL COMMENT '处理人',
`remark` varchar(500) DEFAULT NULL COMMENT '备注',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '过程发起人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
PRIMARY KEY (`process_id`),
KEY `idx_workcase_id` (`workcase_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='工单处理过程表';
### 1.10 工单设备文件表
CREATE TABLE IF NOT EXISTS `tb_workcase_device` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`workcase_id` varchar(50) NOT NULL COMMENT '工单ID',
`device` varchar(50) NOT NULL COMMENT '设备名称',
`device_code` varchar(50) DEFAULT NULL COMMENT '设备代码',
`file_id` varchar(50) NOT NULL COMMENT '文件id',
`file_name` varchar(50) NOT NULL COMMENT '文件名',
`file_root_id` varchar(50) DEFAULT NULL COMMENT '文件根id',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
PRIMARY KEY(`workcase_id`, `file_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='工单设备文件表';
### 1.11 词云统计表
CREATE TABLE IF NOT EXISTS `tb_word_cloud` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`word_id` varchar(50) NOT NULL COMMENT '词条ID',
`word` varchar(100) NOT NULL COMMENT '词语',
`frequency` int NOT NULL DEFAULT 1 COMMENT '词频',
`source_type` varchar(20) NOT NULL COMMENT '来源类型 chat-聊天 workcase-工单 global-全局',
`source_id` varchar(50) DEFAULT NULL COMMENT '来源ID',
`category` varchar(50) DEFAULT NULL COMMENT '分类',
`stat_date` date NOT NULL COMMENT '统计日期',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`word_id`),
UNIQUE KEY `uk_word_source` (`word`, `source_type`, `source_id`, `stat_date`, `category`),
KEY `idx_source` (`source_type`, `source_id`, `stat_date`),
KEY `idx_category` (`category`, `stat_date`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='词云统计表';
```
## 2. AI模块 (Dify)
### 2.1 智能体配置表
```sql
CREATE TABLE IF NOT EXISTS `tb_agent` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`agent_id` varchar(50) NOT NULL COMMENT '智能体ID',
`name` varchar(50) NOT NULL COMMENT '智能体名称',
`description` varchar(500) DEFAULT NULL COMMENT '智能体描述',
`link` varchar(500) DEFAULT NULL COMMENT '智能体url',
`api_key` varchar(500) NOT NULL COMMENT 'dify智能体APIKEY',
`is_outer` tinyint(1) DEFAULT 0 COMMENT '是否是对外智能体,未登录可用',
`introduce` varchar(500) NOT NULL COMMENT '引导词',
`prompt_cards` json DEFAULT NULL COMMENT '提示卡片数组',
`category` varchar(50) NOT NULL COMMENT '分类',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) DEFAULT NULL COMMENT '创建者',
`updater` varchar(50) DEFAULT NULL COMMENT '更新者',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`agent_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
UNIQUE KEY `uk_api_key` (`api_key`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='AI智能体配置表';
### 2.2 AI对话表
CREATE TABLE IF NOT EXISTS `tb_chat` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`chat_id` varchar(50) NOT NULL COMMENT '对话ID',
`agent_id` varchar(50) NOT NULL COMMENT '智能体ID',
`user_id` varchar(50) NOT NULL COMMENT '用户ID',
`user_type` tinyint(1) NOT NULL DEFAULT 1 COMMENT '用户类型 1-系统内部人员 0-系统外部人员',
`title` varchar(500) NOT NULL COMMENT '对话标题',
`channel` varchar(50) DEFAULT 'agent' COMMENT '对话渠道 agent、wechat',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`chat_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_agent_id` (`agent_id`),
KEY `idx_user_id` (`user_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='AI对话表';
### 2.3 AI对话消息表
CREATE TABLE IF NOT EXISTS `tb_chat_message` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`message_id` varchar(50) NOT NULL COMMENT '消息ID',
`dify_message_id` varchar(100) DEFAULT NULL COMMENT 'Dify消息ID',
`chat_id` varchar(50) NOT NULL COMMENT '对话ID',
`role` varchar(50) NOT NULL COMMENT '角色user-用户/ai-智能体/recipient-来客',
`content` text NOT NULL COMMENT '消息内容',
`files` json DEFAULT NULL COMMENT '文件id数组',
`comment` varchar(50) DEFAULT NULL COMMENT '评价',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`message_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_chat_id` (`chat_id`),
KEY `idx_dify_message_id` (`dify_message_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='AI对话消息表';
### 2.4 知识库配置表
CREATE TABLE IF NOT EXISTS `tb_knowledge` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`knowledge_id` varchar(50) NOT NULL COMMENT '知识库ID',
`title` varchar(255) NOT NULL COMMENT '知识库标题',
`avatar` varchar(255) DEFAULT NULL COMMENT '知识库头像',
`description` varchar(500) DEFAULT NULL COMMENT '知识库描述',
`dify_dataset_id` varchar(100) DEFAULT NULL COMMENT 'Dify知识库ID',
`dify_indexing_technique` varchar(50) DEFAULT 'high_quality' COMMENT 'Dify索引方式',
`embedding_model` varchar(100) DEFAULT NULL COMMENT '向量模型名称',
`embedding_model_provider` varchar(100) DEFAULT NULL COMMENT '向量模型提供商',
`rerank_model` varchar(100) DEFAULT NULL COMMENT 'Rerank模型名称',
`rerank_model_provider` varchar(100) DEFAULT NULL COMMENT 'Rerank模型提供商',
`reranking_enable` tinyint(1) DEFAULT 0 COMMENT '是否启用Rerank',
`retrieval_top_k` int DEFAULT 2 COMMENT '检索Top K',
`retrieval_score_threshold` decimal(3,2) DEFAULT 0.00 COMMENT '检索分数阈值',
`document_count` int DEFAULT 0 COMMENT '文档数量',
`total_chunks` int DEFAULT 0 COMMENT '总分段数',
`service` varchar(50) DEFAULT NULL COMMENT '所属服务 workcase、bidding',
`project_id` varchar(50) DEFAULT NULL COMMENT 'bidding所属项目ID',
`category` varchar(50) DEFAULT NULL COMMENT '所属分类',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建者',
`dept_path` varchar(50) DEFAULT NULL COMMENT '创建者部门路径',
`updater` varchar(50) DEFAULT NULL COMMENT '更新者',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`optsn`),
UNIQUE KEY `uk_knowledge_id` (`knowledge_id`),
UNIQUE KEY `uk_dify_dataset_id` (`dify_dataset_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='知识库配置表';
### 2.5 知识库文件表
CREATE TABLE IF NOT EXISTS `tb_knowledge_file` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`knowledge_id` varchar(50) NOT NULL COMMENT '知识库ID',
`file_root_id` varchar(50) NOT NULL COMMENT '文件根ID',
`file_id` varchar(50) NOT NULL COMMENT '文件ID',
`dify_file_id` varchar(50) NOT NULL COMMENT 'dify文件ID',
`version` int NOT NULL DEFAULT 1 COMMENT '文件版本',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`optsn`),
UNIQUE KEY `uk_knowledge_file` (`knowledge_id`, `file_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='知识库文件表';
### 2.6 知识库文件日志表
CREATE TABLE IF NOT EXISTS `tb_knowledge_file_log` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`log_id` varchar(50) NOT NULL COMMENT '日志ID',
`knowledge_id` varchar(50) NOT NULL COMMENT '知识库ID',
`file_root_id` varchar(50) NOT NULL COMMENT '文件根ID',
`file_id` varchar(50) NOT NULL COMMENT '文件ID',
`file_name` varchar(100) NOT NULL COMMENT '文件名',
`service` varchar(50) NOT NULL COMMENT '所属服务 workcase、bidding',
`version` int NOT NULL DEFAULT 1 COMMENT '文件版本',
`action` varchar(50) NOT NULL COMMENT '操作类型 upload、update、delete',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建者',
`creator_name` varchar(100) NOT NULL COMMENT '创建者姓名',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
PRIMARY KEY (`optsn`),
UNIQUE KEY `uk_knowledge_file_log` (`knowledge_id`, `file_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='知识库文件日志表';
```
## 3. 招标模块 (Bidding)
### 3.1 招标项目表
```sql
CREATE TABLE IF NOT EXISTS `tb_bidding_project` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`project_id` varchar(50) NOT NULL COMMENT '项目ID',
`project_no` varchar(100) NOT NULL COMMENT '项目编号',
`project_name` varchar(500) NOT NULL COMMENT '项目名称',
`project_type` varchar(50) NOT NULL COMMENT '项目类型',
`industry` varchar(100) DEFAULT NULL COMMENT '所属行业',
`source_platform` varchar(100) DEFAULT NULL COMMENT '来源平台',
`source_url` varchar(500) DEFAULT NULL COMMENT '来源URL',
`publish_date` datetime DEFAULT NULL COMMENT '发布日期',
`deadline` datetime DEFAULT NULL COMMENT '投标截止日期',
`opening_date` datetime DEFAULT NULL COMMENT '开标日期',
`budget_amount` decimal(18,2) DEFAULT NULL COMMENT '预算金额',
`currency` varchar(10) DEFAULT 'CNY' COMMENT '货币单位',
`project_status` varchar(30) NOT NULL DEFAULT 'collecting' COMMENT '项目状态',
`winning_status` varchar(30) DEFAULT NULL COMMENT '中标状态',
`winning_amount` decimal(18,2) DEFAULT NULL COMMENT '中标金额',
`client_name` varchar(255) DEFAULT NULL COMMENT '客户名称',
`client_contact` varchar(100) DEFAULT NULL COMMENT '客户联系方式',
`contact_person` varchar(100) DEFAULT NULL COMMENT '联系人',
`project_location` varchar(500) DEFAULT NULL COMMENT '项目地点',
`description` text DEFAULT NULL COMMENT '项目描述',
`keywords` json DEFAULT NULL COMMENT '关键词数组',
`metadata` json DEFAULT NULL COMMENT '项目元数据',
`dept_path` varchar(255) DEFAULT NULL COMMENT '部门全路径',
`responsible_user` varchar(50) DEFAULT NULL COMMENT '负责人',
`team_members` json DEFAULT NULL COMMENT '团队成员数组',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) DEFAULT NULL COMMENT '创建者',
`updater` varchar(50) DEFAULT NULL COMMENT '更新者',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`project_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
UNIQUE KEY `uk_project_no` (`project_no`),
KEY `idx_project_status` (`project_status`),
KEY `idx_deadline` (`deadline`),
KEY `idx_dept_path` (`dept_path`),
KEY `idx_responsible_user` (`responsible_user`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='招标项目表';
```
### 3.2 招标文件表
```sql
CREATE TABLE IF NOT EXISTS `tb_bidding_document` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`doc_id` varchar(50) NOT NULL COMMENT '文档ID',
`project_id` varchar(50) NOT NULL COMMENT '所属项目ID',
`doc_type` varchar(50) NOT NULL COMMENT '文档类型',
`doc_name` varchar(500) NOT NULL COMMENT '文档名称',
`file_id` varchar(50) DEFAULT NULL COMMENT '关联文件表ID',
`file_path` varchar(500) DEFAULT NULL COMMENT '文件路径',
`file_size` bigint DEFAULT NULL COMMENT '文件大小',
`mime_type` varchar(100) DEFAULT NULL COMMENT 'MIME类型',
`version` int DEFAULT 1 COMMENT '版本号',
`language` varchar(20) DEFAULT 'zh-CN' COMMENT '语言',
`page_count` int DEFAULT NULL COMMENT '页数',
`parse_status` varchar(30) DEFAULT 'pending' COMMENT '解析状态',
`parse_result` json DEFAULT NULL COMMENT '解析结果',
`extraction_data` json DEFAULT NULL COMMENT '提取的结构化数据',
`ai_analysis` text DEFAULT NULL COMMENT 'AI分析结果',
`upload_date` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '上传日期',
`dept_path` varchar(255) DEFAULT NULL COMMENT '部门全路径',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) DEFAULT NULL COMMENT '创建者',
`updater` varchar(50) DEFAULT NULL COMMENT '更新者',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`doc_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_project_id` (`project_id`),
KEY `idx_doc_type` (`doc_type`),
KEY `idx_tenant_id` (`tenant_id`),
CONSTRAINT `fk_bidding_document_project` FOREIGN KEY (`project_id`) REFERENCES `tb_bidding_project`(`project_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='招标文件表';
```
## 4. 消息模块 (Message)
### 4.1 消息表
```sql
CREATE TABLE IF NOT EXISTS `tb_message` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`message_id` varchar(50) NOT NULL COMMENT '消息ID',
`title` varchar(255) NOT NULL COMMENT '消息标题',
`content` varchar(255) NOT NULL COMMENT '消息内容',
`type` varchar(50) NOT NULL COMMENT '消息类型',
`status` varchar(50) NOT NULL COMMENT '消息状态',
`service` varchar(50) NOT NULL COMMENT '服务类型',
`dept_path` varchar(255) DEFAULT NULL COMMENT '部门全路径',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL DEFAULT 'system' COMMENT '创建者',
`updater` varchar(50) DEFAULT NULL COMMENT '更新者',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`message_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='消息表';
```
### 4.2 消息发送范围表
```sql
CREATE TABLE IF NOT EXISTS `tb_message_range` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`message_id` varchar(50) NOT NULL COMMENT '消息ID',
`target_type` varchar(20) NOT NULL COMMENT '目标类型user/dept/role/all',
`target_id` varchar(50) DEFAULT NULL COMMENT '目标ID',
`channel` varchar(20) NOT NULL DEFAULT 'app' COMMENT '发送渠道',
`dept_path` varchar(255) DEFAULT NULL COMMENT '部门全路径',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`creator` varchar(50) NOT NULL DEFAULT 'system' COMMENT '创建者',
`updater` varchar(50) DEFAULT NULL COMMENT '更新者',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT 0 COMMENT '是否删除',
PRIMARY KEY (`optsn`),
UNIQUE KEY `uk_message_target` (`message_id`, `target_type`, `target_id`, `channel`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='消息发送范围定义表';
```
### 4.3 消息接收记录表
```sql
CREATE TABLE IF NOT EXISTS `tb_message_receiver` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`message_id` varchar(50) NOT NULL COMMENT '消息ID',
`user_id` varchar(50) NOT NULL COMMENT '用户ID',
`channel` varchar(20) DEFAULT 'app' COMMENT '接收渠道',
`status` varchar(20) NOT NULL DEFAULT 'unread' COMMENT '状态',
`read_time` datetime DEFAULT NULL COMMENT '阅读时间',
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`optsn`),
UNIQUE KEY `uk_message_user` (`message_id`, `user_id`, `channel`),
KEY `idx_user_status` (`user_id`, `status`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='用户消息接收记录表';
```
## 5. 平台管理模块 (Platform)
```sql
-- 平台管理的表通常比较简单,主要是配置和日志
-- 根据实际需求补充具体表结构
```
## 6. 数据迁移脚本
### 6.1 用户数据映射
```sql
-- 创建用户映射表(临时)
CREATE TABLE IF NOT EXISTS `temp_user_mapping` (
`old_user_id` varchar(50) NOT NULL COMMENT '原系统用户ID',
`new_user_id` bigint NOT NULL COMMENT 'pigx系统用户ID',
`user_type` varchar(20) NOT NULL COMMENT '用户类型',
PRIMARY KEY (`old_user_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='用户ID映射表临时';
-- 插入映射数据(示例)
-- INSERT INTO temp_user_mapping (old_user_id, new_user_id, user_type)
-- SELECT old_id, new_id, 'staff' FROM ...;
```
### 6.2 数据迁移存储过程(示例)
```sql
DELIMITER $$
CREATE PROCEDURE migrate_workcase_data()
BEGIN
DECLARE done INT DEFAULT FALSE;
DECLARE v_old_user_id VARCHAR(50);
DECLARE v_new_user_id BIGINT;
DECLARE cur CURSOR FOR
SELECT old_user_id, new_user_id FROM temp_user_mapping;
DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE;
-- 开始事务
START TRANSACTION;
-- 迁移工单数据
INSERT INTO tb_workcase (
optsn, workcase_id, room_id, user_id, username, phone,
type, device, device_code, device_name_plate, device_name_plate_img,
address, description, imgs, emergency, status, processor,
tenant_id, creator, create_time, update_time, delete_time, deleted
)
SELECT
optsn, workcase_id, room_id,
COALESCE(m.new_user_id, w.user_id) as user_id, -- 映射用户ID
username, phone,
type, device, device_code, device_name_plate, device_name_plate_img,
address, description,
CASE WHEN imgs IS NULL THEN NULL ELSE JSON_ARRAY(imgs) END, -- 数组转JSON
emergency, status, processor,
1 as tenant_id, -- 默认租户ID
creator, create_time, update_time, delete_time, deleted
FROM postgresql_workcase.tb_workcase w
LEFT JOIN temp_user_mapping m ON w.user_id = m.old_user_id;
COMMIT;
END$$
DELIMITER ;
```
## 7. 索引优化建议
```sql
-- 为查询性能添加复合索引
ALTER TABLE tb_workcase ADD INDEX idx_status_tenant (status, tenant_id);
ALTER TABLE tb_chat_room ADD INDEX idx_status_tenant (status, tenant_id);
ALTER TABLE tb_chat_message ADD INDEX idx_chat_tenant (chat_id, tenant_id);
ALTER TABLE tb_agent ADD INDEX idx_category_tenant (category, tenant_id);
```
## 8. 注意事项
1. **租户隔离**:所有业务表都添加了 `tenant_id` 字段,默认值为 1
2. **用户关联**需要建立原系统用户ID到pigx用户ID的映射关系
3. **数组处理**PostgreSQL的数组类型转换为MySQL的JSON类型
4. **时区处理**PostgreSQL的TIMESTAMPTZ转换为MySQL的DATETIME注意时区转换
5. **外键约束**:根据实际需求决定是否保留外键约束
6. **数据完整性**:迁移前做好数据备份,迁移后进行数据验证
## 9. 迁移后验证
```sql
-- 验证数据条数
SELECT 'tb_workcase' as table_name, COUNT(*) as record_count FROM tb_workcase
UNION ALL
SELECT 'tb_chat_room', COUNT(*) FROM tb_chat_room
UNION ALL
SELECT 'tb_agent', COUNT(*) FROM tb_agent
UNION ALL
SELECT 'tb_chat', COUNT(*) FROM tb_chat
UNION ALL
SELECT 'tb_knowledge', COUNT(*) FROM tb_knowledge;
-- 验证租户隔离
SELECT tenant_id, COUNT(*) as count
FROM tb_workcase
GROUP BY tenant_id;
-- 验证用户关联
SELECT COUNT(*) as unmapped_users
FROM tb_workcase w
LEFT JOIN sys_user u ON w.user_id = u.user_id
WHERE u.user_id IS NULL;
```

View File

@@ -1,571 +0,0 @@
# 数据库迁移脚本指南PostgreSQL → MySQL
## 1. 概述
本文档提供了将 urbanLifeline 数据库从 PostgreSQL 迁移到 MySQLpigx 平台)的完整脚本和指南。
## 2. 数据类型映射
| PostgreSQL | MySQL | 说明 |
|-----------|-------|------|
| VARCHAR(n) | VARCHAR(n) | 字符串 |
| TEXT | TEXT | 长文本 |
| INTEGER | INT | 整数 |
| BIGINT | BIGINT | 长整数 |
| BOOLEAN | TINYINT(1) | 布尔值 |
| TIMESTAMPTZ | DATETIME | 时间戳 |
| JSONB | JSON | JSON数据 |
| NUMERIC(p,s) | DECIMAL(p,s) | 小数 |
| VARCHAR(n)[] | JSON | 数组转JSON |
| SERIAL | INT AUTO_INCREMENT | 自增 |
## 3. pigx-dify 模块数据库脚本
### 3.1 智能体表 (tb_agent)
```sql
-- 智能体配置表
CREATE TABLE `tb_agent` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`agent_id` varchar(50) NOT NULL COMMENT '智能体ID',
`name` varchar(50) NOT NULL COMMENT '智能体名称',
`description` varchar(500) DEFAULT NULL COMMENT '智能体描述',
`link` varchar(500) DEFAULT NULL COMMENT '智能体url',
`api_key` varchar(500) NOT NULL COMMENT 'dify智能体APIKEY',
`is_outer` tinyint(1) DEFAULT '0' COMMENT '是否是对外智能体,未登录可用',
`introduce` varchar(500) NOT NULL COMMENT '引导词',
`prompt_cards` json DEFAULT NULL COMMENT '提示卡片数组 [{file_id:, prompt:}]',
`category` varchar(50) NOT NULL COMMENT '分类',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`creator` varchar(50) DEFAULT NULL COMMENT '创建者',
`updater` varchar(50) DEFAULT NULL COMMENT '更新者',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`agent_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
UNIQUE KEY `uk_api_key` (`api_key`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='AI智能体配置表';
```
### 3.2 对话表 (tb_chat)
```sql
-- AI智能体对话表
CREATE TABLE `tb_chat` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`chat_id` varchar(50) NOT NULL COMMENT '对话ID',
`agent_id` varchar(50) NOT NULL COMMENT '智能体ID',
`user_id` varchar(50) NOT NULL COMMENT '用户ID',
`user_type` tinyint(1) NOT NULL DEFAULT '1' COMMENT '用户类型 1-系统内部人员 0-系统外部人员',
`title` varchar(500) NOT NULL COMMENT '对话标题',
`channel` varchar(50) DEFAULT 'agent' COMMENT '对话渠道 agent、wechat',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`chat_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_agent_id` (`agent_id`),
KEY `idx_user_id` (`user_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='AI智能体对话表';
```
### 3.3 聊天消息表 (tb_chat_message)
```sql
-- AI智能体对话消息表
CREATE TABLE `tb_chat_message` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`message_id` varchar(50) NOT NULL COMMENT '消息ID',
`dify_message_id` varchar(100) DEFAULT NULL COMMENT 'Dify消息ID',
`chat_id` varchar(50) NOT NULL COMMENT '对话ID',
`role` varchar(50) NOT NULL COMMENT '角色user-用户/ai-智能体/recipient-来客',
`content` text NOT NULL COMMENT '消息内容',
`files` json DEFAULT NULL COMMENT '文件id数组',
`comment` varchar(50) DEFAULT NULL COMMENT '评价',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`message_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_chat_id` (`chat_id`),
KEY `idx_dify_message_id` (`dify_message_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='AI智能体对话消息表';
```
### 3.4 知识库表 (tb_knowledge)
```sql
-- 知识库配置表
CREATE TABLE `tb_knowledge` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`knowledge_id` varchar(50) NOT NULL COMMENT '知识库ID',
`title` varchar(255) NOT NULL COMMENT '知识库标题',
`avatar` varchar(255) DEFAULT NULL COMMENT '知识库头像',
`description` varchar(500) DEFAULT NULL COMMENT '知识库描述',
`dify_dataset_id` varchar(100) DEFAULT NULL COMMENT 'Dify知识库IDDataset ID',
`dify_indexing_technique` varchar(50) DEFAULT 'high_quality' COMMENT 'Dify索引方式high_quality/economy',
`embedding_model` varchar(100) DEFAULT NULL COMMENT '向量模型名称',
`embedding_model_provider` varchar(100) DEFAULT NULL COMMENT '向量模型提供商',
`rerank_model` varchar(100) DEFAULT NULL COMMENT 'Rerank模型名称',
`rerank_model_provider` varchar(100) DEFAULT NULL COMMENT 'Rerank模型提供商',
`reranking_enable` tinyint(1) DEFAULT '0' COMMENT '是否启用Rerank',
`retrieval_top_k` int DEFAULT '2' COMMENT '检索Top K返回前K个结果',
`retrieval_score_threshold` decimal(3,2) DEFAULT '0.00' COMMENT '检索分数阈值0.00-1.00',
`document_count` int DEFAULT '0' COMMENT '文档数量',
`total_chunks` int DEFAULT '0' COMMENT '总分段数',
`service` varchar(50) DEFAULT NULL COMMENT '所属服务 workcase、bidding',
`project_id` varchar(50) DEFAULT NULL COMMENT 'bidding所属项目ID',
`category` varchar(50) DEFAULT NULL COMMENT '所属分类',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建者用户ID',
`dept_path` varchar(50) DEFAULT NULL COMMENT '创建者部门路径',
`updater` varchar(50) DEFAULT NULL COMMENT '更新者',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`optsn`),
UNIQUE KEY `uk_knowledge_id` (`knowledge_id`),
UNIQUE KEY `uk_dify_dataset_id` (`dify_dataset_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='知识库配置表';
```
### 3.5 知识库文件表 (tb_knowledge_file)
```sql
-- 知识库文件表
CREATE TABLE `tb_knowledge_file` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`knowledge_id` varchar(50) NOT NULL COMMENT '知识库ID',
`file_root_id` varchar(50) NOT NULL COMMENT '文件根ID',
`file_id` varchar(50) NOT NULL COMMENT '文件ID',
`dify_file_id` varchar(50) NOT NULL COMMENT 'dify文件ID',
`version` int NOT NULL DEFAULT '1' COMMENT '文件版本',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`optsn`),
UNIQUE KEY `uk_knowledge_file` (`knowledge_id`, `file_id`),
KEY `idx_file_root_id` (`file_root_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='知识库文件表';
```
## 4. pigx-app-server 模块数据库脚本
### 4.1 工单表 (tb_workcase)
```sql
-- 工单表
CREATE TABLE `tb_workcase` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`workcase_id` varchar(50) NOT NULL COMMENT '工单ID',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`user_id` varchar(50) NOT NULL COMMENT '来客ID',
`username` varchar(200) NOT NULL COMMENT '来客姓名',
`phone` varchar(20) NOT NULL COMMENT '来客电话',
`type` varchar(50) NOT NULL COMMENT '故障类型',
`device` varchar(50) DEFAULT NULL COMMENT '设备名称',
`device_code` varchar(50) DEFAULT NULL COMMENT '设备代码',
`device_name_plate` varchar(50) DEFAULT NULL COMMENT '设备名称牌',
`device_name_plate_img` varchar(50) NOT NULL COMMENT '设备名称牌图片',
`address` varchar(1000) DEFAULT NULL COMMENT '现场地址',
`description` varchar(1000) DEFAULT NULL COMMENT '故障描述',
`imgs` json DEFAULT NULL COMMENT '工单图片id数组',
`emergency` varchar(50) NOT NULL DEFAULT 'normal' COMMENT '紧急程度 normal-普通 emergency-紧急',
`status` varchar(50) NOT NULL DEFAULT 'pending' COMMENT '状态 pending-待处理 processing-处理中 done-已完成',
`processor` varchar(50) DEFAULT NULL COMMENT '处理人',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`workcase_id`),
UNIQUE KEY `uk_room_id` (`room_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_user_id` (`user_id`),
KEY `idx_status` (`status`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='工单表';
```
### 4.2 聊天室表 (tb_chat_room)
```sql
-- IM聊天室表
CREATE TABLE `tb_chat_room` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`workcase_id` varchar(50) DEFAULT NULL COMMENT '关联工单ID',
`room_name` varchar(200) NOT NULL COMMENT '聊天室名称',
`room_type` varchar(20) NOT NULL DEFAULT 'workcase' COMMENT '聊天室类型workcase-工单客服',
`status` varchar(20) NOT NULL DEFAULT 'active' COMMENT '状态active-活跃 closed-已关闭 archived-已归档',
`guest_id` varchar(50) NOT NULL COMMENT '来客ID创建者',
`guest_name` varchar(100) NOT NULL COMMENT '来客姓名',
`ai_session_id` varchar(50) DEFAULT NULL COMMENT 'AI对话会话ID从ai.tb_chat同步',
`message_count` int NOT NULL DEFAULT '0' COMMENT '消息总数',
`device_code` varchar(50) NOT NULL COMMENT '设备代码',
`last_message_time` datetime DEFAULT NULL COMMENT '最后消息时间',
`last_message` text DEFAULT NULL COMMENT '最后一条消息内容',
`comment_level` int DEFAULT '0' COMMENT '服务评分1-5',
`closed_by` varchar(50) DEFAULT NULL COMMENT '关闭人',
`closed_time` datetime DEFAULT NULL COMMENT '关闭时间',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`room_id`),
UNIQUE KEY `uk_workcase_id` (`workcase_id`),
UNIQUE KEY `uk_optsn` (`optsn`),
KEY `idx_guest` (`guest_id`, `status`),
KEY `idx_last_message_time` (`last_message_time`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='IM聊天室表一个工单对应一个聊天室';
```
### 4.3 聊天室成员表 (tb_chat_room_member)
```sql
-- 聊天室成员表
CREATE TABLE `tb_chat_room_member` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`member_id` varchar(50) NOT NULL COMMENT '成员记录ID',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`user_id` varchar(50) NOT NULL COMMENT '用户ID来客ID或员工ID',
`user_type` varchar(20) NOT NULL COMMENT '用户类型guest-来客 staff-客服 ai-AI助手',
`user_name` varchar(100) NOT NULL COMMENT '用户名称',
`status` varchar(20) NOT NULL DEFAULT 'active' COMMENT '状态active-活跃 left-已离开 removed-被移除',
`unread_count` int NOT NULL DEFAULT '0' COMMENT '未读消息数',
`last_read_time` datetime DEFAULT NULL COMMENT '最后阅读时间',
`last_read_msg_id` varchar(50) DEFAULT NULL COMMENT '最后阅读的消息ID',
`join_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '加入时间',
`leave_time` datetime DEFAULT NULL COMMENT '离开时间',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`member_id`),
UNIQUE KEY `uk_room_user` (`room_id`, `user_id`),
KEY `idx_room_status` (`room_id`, `status`),
KEY `idx_user` (`user_id`, `user_type`, `status`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='聊天室成员表';
```
### 4.4 聊天室消息表 (tb_chat_room_message)
```sql
-- 聊天室消息表
CREATE TABLE `tb_chat_room_message` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`message_id` varchar(50) NOT NULL COMMENT '消息ID',
`room_id` varchar(50) NOT NULL COMMENT '聊天室ID',
`sender_id` varchar(50) NOT NULL COMMENT '发送者ID',
`sender_type` varchar(20) NOT NULL COMMENT '发送者类型guest-来客 agent-客服 ai-AI助手 system-系统消息',
`sender_name` varchar(100) NOT NULL COMMENT '发送者名称',
`message_type` varchar(20) NOT NULL DEFAULT 'text' COMMENT '消息类型',
`content` text NOT NULL COMMENT '消息内容',
`files` json DEFAULT NULL COMMENT '附件文件ID数组',
`content_extra` json DEFAULT NULL COMMENT '扩展内容(会议链接、引用信息等)',
`reply_to_msg_id` varchar(50) DEFAULT NULL COMMENT '回复的消息ID',
`is_ai_message` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否AI消息',
`ai_message_id` varchar(50) DEFAULT NULL COMMENT 'AI原始消息ID',
`status` varchar(20) NOT NULL DEFAULT 'sent' COMMENT '状态',
`read_count` int NOT NULL DEFAULT '0' COMMENT '已读人数',
`send_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '发送时间',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`message_id`),
KEY `idx_room_time` (`room_id`, `send_time`),
KEY `idx_sender` (`sender_id`, `sender_type`),
KEY `idx_ai_message` (`ai_message_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='IM聊天消息表';
```
### 4.5 视频会议表 (tb_video_meeting)
```sql
-- Jitsi Meet视频会议表
CREATE TABLE `tb_video_meeting` (
`optsn` varchar(50) NOT NULL COMMENT '流水号',
`meeting_id` varchar(50) NOT NULL COMMENT '会议ID也是Jitsi房间名',
`room_id` varchar(50) NOT NULL COMMENT '关联聊天室ID',
`workcase_id` varchar(50) NOT NULL COMMENT '关联工单ID',
`meeting_name` varchar(200) NOT NULL COMMENT '会议名称',
`meeting_password` varchar(50) DEFAULT NULL COMMENT '会议密码',
`description` varchar(500) DEFAULT NULL COMMENT '会议描述',
`jwt_token` text DEFAULT NULL COMMENT 'JWT Token用于身份验证',
`jitsi_room_name` varchar(200) NOT NULL COMMENT 'Jitsi房间名',
`jitsi_server_url` varchar(500) NOT NULL DEFAULT 'https://meet.jit.si' COMMENT 'Jitsi服务器地址',
`status` varchar(20) NOT NULL DEFAULT 'scheduled' COMMENT '状态',
`creator_type` varchar(20) NOT NULL COMMENT '创建者类型',
`creator_name` varchar(100) NOT NULL COMMENT '创建者名称',
`participant_count` int NOT NULL DEFAULT '0' COMMENT '参与人数',
`max_participants` int DEFAULT '10' COMMENT '最大参与人数',
`start_time` datetime NOT NULL COMMENT '定义会议开始时间',
`end_time` datetime NOT NULL COMMENT '定义会议结束时间',
`advance` int DEFAULT '5' COMMENT '提前入会时间(分钟)',
`actual_start_time` datetime DEFAULT NULL COMMENT '真正会议开始时间',
`actual_end_time` datetime DEFAULT NULL COMMENT '真正会议结束时间',
`duration_seconds` int DEFAULT '0' COMMENT '会议时长(秒)',
`iframe_url` text DEFAULT NULL COMMENT 'iframe嵌入URL',
`config` json DEFAULT NULL COMMENT 'Jitsi配置项',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`creator` varchar(50) NOT NULL COMMENT '创建人',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`meeting_id`),
UNIQUE KEY `uk_jitsi_room_name` (`jitsi_room_name`),
KEY `idx_room` (`room_id`, `status`),
KEY `idx_workcase` (`workcase_id`, `status`),
KEY `idx_create_time` (`create_time`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='Jitsi Meet视频会议表';
```
## 5. 数据迁移步骤
### 5.1 准备工作
```bash
# 1. 导出 PostgreSQL 数据
pg_dump -h localhost -p 5432 -U postgres -d urbanlifeline \
--data-only \
--column-inserts \
--no-owner \
--no-privileges \
--no-tablespaces \
> urbanlifeline_data.sql
# 2. 创建 MySQL 数据库
mysql -u root -p -e "CREATE DATABASE pigx_dify DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;"
```
### 5.2 执行表结构创建
```bash
# 执行所有建表语句
mysql -u root -p pigx_dify < create_tables.sql
```
### 5.3 数据转换脚本
```python
#!/usr/bin/env python3
# convert_data.py - PostgreSQL 到 MySQL 数据转换脚本
import re
import json
def convert_postgresql_to_mysql(input_file, output_file):
    """Convert a PostgreSQL data dump into MySQL-compatible SQL.

    Reads the whole dump from ``input_file``, applies regex substitutions
    for PostgreSQL-specific syntax (boolean literals, now(), array
    literals, schema prefixes), appends tenant_id columns to INSERTs via
    ``add_tenant_id``, and writes the result to ``output_file``.
    """
    with open(input_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # PostgreSQL-specific syntax replacements.
    replacements = [
        # Boolean literals -> TINYINT(1) values
        (r'\btrue\b', '1'),
        (r'\bfalse\b', '0'),
        # now() -> CURRENT_TIMESTAMP
        (r"now\(\)", "CURRENT_TIMESTAMP"),
        # PostgreSQL array literal '{a,b}'::type[] -> JSON array string
        (r"'{([^}]*)}'::\w+\[\]", lambda m: f"'{json.dumps(m.group(1).split(',') if m.group(1) else [])}'"),
        # Strip schema prefixes (e.g. workcase.tb_x -> tb_x).
        # BUGFIX: was r'...\\.' — in a raw string that is a literal
        # backslash followed by any character, so schema prefixes were
        # never stripped; r'\.' correctly matches the dot separator.
        (r'\b(workcase|ai|sys|message|bidding)\.', ''),
    ]
    for pattern, replacement in replacements:
        if callable(replacement):
            # re.sub does not accept flags together with a callable here;
            # the array pattern is case-sensitive by design.
            content = re.sub(pattern, replacement, content)
        else:
            content = re.sub(pattern, replacement, content, flags=re.IGNORECASE)
    # 添加租户ID到每个INSERT语句
    content = add_tenant_id(content)
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(content)
def add_tenant_id(content):
    """Append a tenant_id column (with literal value 1) to INSERT statements.

    Only INSERTs that target one of the tenant-scoped tables (tb_agent,
    tb_chat, tb_workcase, tb_chat_room) are rewritten; every other line
    passes through unchanged.
    """
    tenant_tables = ('tb_agent', 'tb_chat', 'tb_workcase', 'tb_chat_room')

    def rewrite(sql_line):
        if not sql_line.startswith('INSERT INTO'):
            return sql_line
        if not any(tbl in sql_line for tbl in tenant_tables):
            return sql_line
        # Append tenant_id to the first parenthesised group (column list)...
        sql_line = re.sub(r'(\([^)]+)\)', r'\1, tenant_id)', sql_line, count=1)
        # ...and the default tenant value 1 to the VALUES tuple.
        return re.sub(r'VALUES\s*\(([^)]+)\)', r'VALUES (\1, 1)', sql_line)

    return '\n'.join(rewrite(line) for line in content.split('\n'))
# Script entry point: convert the exported PostgreSQL dump into a
# MySQL-compatible script (see section 5.4 for the import command).
if __name__ == '__main__':
    convert_postgresql_to_mysql('urbanlifeline_data.sql', 'pigx_data.sql')
    print("数据转换完成!")
```
### 5.4 导入数据到 MySQL
```bash
# 导入转换后的数据
mysql -u root -p pigx_dify < pigx_data.sql
# 验证数据
mysql -u root -p pigx_dify -e "
SELECT COUNT(*) FROM tb_agent;
SELECT COUNT(*) FROM tb_chat;
SELECT COUNT(*) FROM tb_workcase;
"
```
## 6. 添加租户字段指南
### 6.1 需要添加 tenant_id 的表
所有业务表都需要添加 `tenant_id` 字段:
- tb_agent
- tb_chat
- tb_chat_message通过chat关联获取
- tb_knowledge
- tb_workcase
- tb_chat_room
- tb_video_meeting
### 6.2 添加租户字段的SQL模板
```sql
-- 为现有表添加租户字段(如果表已存在)
ALTER TABLE `tb_agent`
ADD COLUMN `tenant_id` BIGINT NOT NULL DEFAULT 1 COMMENT '租户ID' AFTER `category`,
ADD INDEX `idx_tenant_id` (`tenant_id`);
-- 更新现有数据的租户ID默认为1
UPDATE tb_agent SET tenant_id = 1 WHERE tenant_id IS NULL;
```
### 6.3 MyBatis-Plus 配置
```java
// 实体类添加租户字段
@TableField(fill = FieldFill.INSERT)
private Long tenantId;
// 自动填充处理器
// MyBatis-Plus insert-fill handler: stamps tenantId on every insert,
// taken from the currently authenticated pigx user via SecurityUtils.
// NOTE(review): SecurityUtils.getUser() presumably returns null outside
// a request context — confirm before using in async/scheduled code.
@Component
public class MybatisPlusMetaObjectHandler implements MetaObjectHandler {
@Override
public void insertFill(MetaObject metaObject) {
PigxUser user = SecurityUtils.getUser();
// strictInsertFill only sets the field when it is currently null.
this.strictInsertFill(metaObject, "tenantId", Long.class, user.getTenantId());
}
}
// 租户拦截器配置
@Bean
public MybatisPlusInterceptor mybatisPlusInterceptor() {
MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
interceptor.addInnerInterceptor(new TenantLineInnerInterceptor(new TenantLineHandler() {
@Override
public Expression getTenantId() {
return new LongValue(SecurityUtils.getUser().getTenantId());
}
@Override
public String getTenantIdColumn() {
return "tenant_id";
}
}));
return interceptor;
}
```
## 7. 验证和测试
### 7.1 数据完整性检查
```sql
-- 检查数据迁移完整性
SELECT
'tb_agent' AS table_name,
COUNT(*) AS record_count
FROM tb_agent
UNION ALL
SELECT 'tb_chat', COUNT(*) FROM tb_chat
UNION ALL
SELECT 'tb_workcase', COUNT(*) FROM tb_workcase;
-- 检查租户ID设置
SELECT
tenant_id,
COUNT(*) AS record_count
FROM tb_agent
GROUP BY tenant_id;
```
### 7.2 索引优化
```sql
-- 添加常用查询索引
ALTER TABLE tb_chat ADD INDEX idx_user_agent (user_id, agent_id);
ALTER TABLE tb_workcase ADD INDEX idx_create_time (create_time DESC);
ALTER TABLE tb_chat_message ADD INDEX idx_chat_time (chat_id, create_time DESC);
```
## 8. 回滚方案
如果迁移失败,可以使用以下方式回滚:
```bash
# 备份当前MySQL数据
mysqldump -u root -p pigx_dify > backup_before_migration.sql
# 如需回滚
mysql -u root -p -e "DROP DATABASE pigx_dify; CREATE DATABASE pigx_dify;"
mysql -u root -p pigx_dify < backup_before_migration.sql
```
## 9. 注意事项
1. **字符集**:确保 MySQL 使用 utf8mb4 字符集
2. **时区**:注意 PostgreSQL 的 TIMESTAMPTZ 转换为 MySQL DATETIME 可能有时区差异
3. **数组类型**PostgreSQL 的数组类型需要转换为 JSON
4. **事务**:大批量数据导入时注意事务大小
5. **权限**:确保 MySQL 用户有足够权限创建表和索引
6. **外键**:本脚本未创建外键约束,如需要可后续添加
## 10. 迁移清单
- [ ] 导出 PostgreSQL 数据
- [ ] 创建 MySQL 数据库和表结构
- [ ] 执行数据转换脚本
- [ ] 导入数据到 MySQL
- [ ] 添加租户字段和索引
- [ ] 验证数据完整性
- [ ] 测试查询性能
- [ ] 配置 MyBatis-Plus 租户拦截器
- [ ] 执行应用集成测试
- [ ] 准备回滚方案

View File

@@ -1,547 +0,0 @@
# Design Document
## Overview
本设计文档描述了将 urbanLifelineServ 和 urbanLifelineWeb 的**业务功能**迁移到 pigx-ai 平台的技术方案。
### 核心原则
- **只迁移业务代码**招标、工单、平台管理、AI、消息等业务功能
- **复用 pigx 基础设施**:人员、部门、权限、认证完全使用 pigx 原生实现
- **适配 pigx 规范**:使用 PigxUser、R<T> 响应格式、Feign 远程调用等
### 迁移范围
| 源模块 | 目标位置 | 说明 |
|-------|---------|------|
| bidding | pigx-app-server-biz | 招标业务 |
| workcase | pigx-app-server-biz | 工单业务 |
| platform | pigx-app-server-biz | 平台管理 |
| ai | pigx-dify新建模块 | AI对话/知识库/Dify集成 |
| message | pigx-app-server-biz | 消息通知 |
| file | 使用 pigx-common-oss | 文件服务 |
| crontab | pigx-visual/xxl-job | 定时任务 |
## Architecture
### 后端模块结构
```
pigx-app-server/
├── pigx-app-server-api/ # API接口定义
│ └── src/main/java/com/pig4cloud/pigx/app/api/
│ ├── entity/ # 业务实体
│ │ ├── bidding/ # 招标实体
│ │ ├── workcase/ # 工单实体
│ │ └── platform/ # 平台实体
│ ├── dto/ # 数据传输对象
│ ├── vo/ # 视图对象
│ └── feign/ # Feign接口
└── pigx-app-server-biz/ # 业务实现
└── src/main/java/com/pig4cloud/pigx/app/
├── controller/
│ ├── bidding/ # 招标控制器
│ ├── workcase/ # 工单控制器
│ ├── platform/ # 平台控制器
│ └── message/ # 消息控制器
├── service/
│ ├── bidding/
│ ├── workcase/
│ ├── platform/
│ └── message/
└── mapper/
├── bidding/
├── workcase/
├── platform/
└── message/
pigx-dify/ # 新建的AI模块
├── pigx-dify-api/ # API接口定义
│ └── src/main/java/com/pig4cloud/pigx/dify/api/
│ ├── entity/ # AI实体
│ │ ├── TbAgent.java # 智能体配置
│ │ ├── TbChat.java # 聊天会话
│ │ ├── TbChatMessage.java # 聊天消息
│ │ └── TbKnowledge.java # 知识库
│ ├── dto/ # 数据传输对象
│ └── feign/ # Feign接口
└── pigx-dify-biz/ # 业务实现
└── src/main/java/com/pig4cloud/pigx/dify/
├── controller/
│ ├── AgentController.java # 智能体管理
│ ├── ChatController.java # 对话管理
│ └── KnowledgeController.java # 知识库管理
├── service/
│ ├── AgentService.java
│ ├── ChatService.java
│ └── KnowledgeService.java
├── mapper/
│ ├── AgentMapper.java
│ ├── ChatMapper.java
│ └── KnowledgeMapper.java
└── client/
└── DifyApiClient.java # Dify API客户端
```
### 前端模块结构
```
pigx-ai-ui/src/
├── views/
│ ├── urban/ # 迁移的业务视图
│ │ ├── bidding/ # 招标页面
│ │ ├── workcase/ # 工单页面
│ │ └── platform/ # 平台管理页面
│ └── dify/ # AI功能页面新建
│ ├── agent/ # 智能体管理
│ ├── chat/ # 对话界面
│ └── knowledge/ # 知识库管理
├── components/
│ └── urban/ # 迁移的共享组件
└── api/
├── urban/ # 业务API定义
│ ├── bidding.ts
│ ├── workcase.ts
│ └── platform.ts
└── dify/ # AI API定义
├── agent.ts
├── chat.ts
└── knowledge.ts
```
## Components and Interfaces
### 1. 权限模型完全替换
**核心原则**: 不迁移任何用户、部门、角色、权限数据,完全使用 pigx 原生权限体系。
#### 权限注解适配
```java
// 源代码 (urbanLifelineServ 使用 @PreAuthorize)
@PreAuthorize("hasAuthority('workcase:ticket:create')")
@PostMapping
public ResultDomain<TbWorkcaseDTO> createWorkcase(@RequestBody TbWorkcaseDTO workcase) {
return ResultDomain.success(workcaseService.save(workcase));
}
// 目标代码 (pigx-app-server 使用 @HasPermission)
@HasPermission("workcase_ticket_add")
@PostMapping
public R<TbWorkcaseDTO> createWorkcase(@RequestBody TbWorkcaseDTO workcase) {
return R.ok(workcaseService.save(workcase));
}
```
#### 权限标识映射规则
| 源权限标识 | 目标权限标识 | 说明 |
|-----------|-------------|------|
| workcase:ticket:create | workcase_ticket_add | 工单创建 |
| workcase:ticket:update | workcase_ticket_edit | 工单编辑 |
| workcase:ticket:view | workcase_ticket_view | 工单查看 |
| workcase:ticket:delete | workcase_ticket_del | 工单删除 |
| bidding:project:create | bidding_project_add | 招标创建 |
| bidding:project:view | bidding_project_view | 招标查看 |
#### 用户信息获取适配
```java
// 源代码 (JWT 获取用户)
Long userId = JwtUtils.getUserId();
String username = JwtUtils.getUsername();
// 目标代码 (pigx SecurityUtils)
PigxUser user = SecurityUtils.getUser();
Long userId = user.getId();
String username = user.getUsername();
Long tenantId = user.getTenantId(); // 租户ID
Long deptId = user.getDeptId(); // 部门ID
```
#### 用户服务调用适配
```java
// 源代码 (直接调用 UserService)
@Autowired
private UserService userService;
User user = userService.getById(userId);
// 目标代码 (通过 Feign 调用 pigx-upms)
@Autowired
private RemoteUserService remoteUserService;
R<SysUser> result = remoteUserService.selectById(userId);
SysUser user = result.getData();
```
### 2. 菜单和权限配置
**不迁移源系统的菜单和权限数据**,在 pigx 中重新配置:
#### 菜单配置 (sys_menu 表)
```sql
-- 在 pigx 的 sys_menu 表中添加业务功能菜单
INSERT INTO sys_menu (menu_id, name, permission, path, parent_id, icon, sort, type, tenant_id) VALUES
(1000, '工单管理', 'workcase_menu', '/workcase', 0, 'workcase', 1, '0', 1),
(1001, '工单列表', 'workcase_ticket_view', '/workcase/list', 1000, '', 1, '1', 1),
(1002, '创建工单', 'workcase_ticket_add', '', 1000, '', 2, '2', 1),
(1003, '编辑工单', 'workcase_ticket_edit', '', 1000, '', 3, '2', 1),
(1004, '删除工单', 'workcase_ticket_del', '', 1000, '', 4, '2', 1),
(2000, '招标管理', 'bidding_menu', '/bidding', 0, 'bidding', 2, '0', 1),
(2001, '招标项目', 'bidding_project_view', '/bidding/project', 2000, '', 1, '1', 1),
(2002, '创建项目', 'bidding_project_add', '', 2000, '', 2, '2', 1),
(2003, '编辑项目', 'bidding_project_edit', '', 2000, '', 3, '2', 1);
```
#### 角色权限分配
使用 pigx 现有的角色管理功能,为角色分配新的业务权限:
```sql
-- 为管理员角色分配所有业务权限
INSERT INTO sys_role_menu (role_id, menu_id)
SELECT 1, menu_id FROM sys_menu WHERE permission LIKE 'workcase_%' OR permission LIKE 'bidding_%';
```
#### 数据权限适配
```java
// 源代码 (可能没有租户隔离)
@PostMapping("/list")
public ResultDomain<TbWorkcaseDTO> getWorkcaseList(@RequestBody TbWorkcaseDTO filter) {
return ResultDomain.success(workcaseService.list(filter));
}
// 目标代码 (自动添加租户和部门过滤)
@PostMapping("/list")
@HasPermission("workcase_ticket_view")
public R<List<TbWorkcaseDTO>> getWorkcaseList(@RequestBody TbWorkcaseDTO filter) {
// pigx 会自动根据用户的租户ID和数据权限过滤数据
return R.ok(workcaseService.list(filter));
}
```
```java
// 源代码 (urbanLifelineServ 使用 ResultDomain)
@GetMapping("/list")
public ResultDomain<Workcase> list() {
// ResultDomain 包含 dataList 字段
return ResultDomain.success(workcaseService.list());
}
// 目标代码 (使用 pigx R<T>)
@GetMapping("/list")
public R<List<Workcase>> list() {
return R.ok(workcaseService.list());
}
// 或者使用分页 (pigx IPage)
@GetMapping("/page")
public R<IPage<Workcase>> page(Page page) {
return R.ok(workcaseService.page(page));
}
```
**响应格式映射**:
| 源格式 (ResultDomain) | 目标格式 (R<T>) |
|---------------------|----------------|
| ResultDomain.success(data) | R.ok(data) |
| ResultDomain.fail(msg) | R.failed(msg) |
| dataList 字段 | data 字段 (直接返回List) |
| code/message | code/msg |
### 3. 响应格式适配
```java
// 源代码
Long userId = JwtUtils.getUserId();
// 目标代码 (使用 pigx SecurityUtils)
PigxUser user = SecurityUtils.getUser();
Long userId = user.getId();
Long tenantId = user.getTenantId();
Long deptId = user.getDeptId();
```
### 4. 文件上传适配
```java
// 源代码 (MinIO直接调用)
minioClient.putObject(bucket, objectName, inputStream);
// 目标代码 (使用 pigx OSS)
@Autowired
private OssTemplate ossTemplate;
ossTemplate.putObject(bucket, objectName, inputStream);
```
### 5. 前端 API 调用适配
```typescript
// 源代码 (urbanLifelineWeb)
import { request } from '@shared/utils/request'
export const getWorkcaseList = () => request.get('/workcase/list')
// 目标代码 (pigx-ai-ui)
import request from '/@/utils/request'
export const getWorkcaseList = () => request.get('/app/workcase/list')
```
### 6. pigx-dify 模块设计(新增)
#### 6.1 模块定位
pigx-dify 是独立的 AI 服务模块,保留原 urbanLifeline 的 AI 功能和 Dify 平台集成,不与 pigx-knowledge 混合使用。
#### 6.2 核心组件
**DifyApiClient 保留原有功能:**
```java
@Component
public class DifyApiClient {
// 知识库管理
public DatasetCreateResponse createDataset(DatasetCreateRequest request);
public DatasetListResponse listDatasets(int page, int limit);
// 对话功能(保留流式和阻塞两种模式)
public void streamChat(ChatRequest request, String apiKey, StreamCallback callback);
public ChatResponse blockingChat(ChatRequest request, String apiKey);
// 工作流调用
public WorkflowRunResponse runWorkflowBlocking(WorkflowRunRequest request, String apiKey);
}
```
**数据模型保持不变:**
```java
// 智能体配置
@TableName("tb_agent")
public class TbAgent {
private String agentId;
private String name;
private String description;
private String difyApiKey; // 保留 Dify API Key
private String difyAgentId; // 保留 Dify Agent ID
private Long tenantId; // 新增租户ID
}
// 聊天会话
@TableName("tb_chat")
public class TbChat {
private String chatId;
private String agentId;
private String userId; // 关联 pigx sys_user
private String conversationId; // Dify conversation ID
private Long tenantId; // 新增租户ID
}
// 聊天消息(保持原有结构)
@TableName("tb_chat_message")
public class TbChatMessage {
private String messageId;
private String chatId;
private String content;
private String role; // user/ai/recipient
private String difyMessageId; // 保留 Dify 消息ID
}
```
#### 6.3 权限适配
```java
@RestController
@RequestMapping("/dify")
public class ChatController {
@Autowired
private DifyApiClient difyClient;
// 权限注解适配
@HasPermission("dify_chat_create") // 原: @PreAuthorize("hasAuthority('ai:chat:create')")
@PostMapping("/chat/stream")
public SseEmitter streamChat(@RequestBody ChatRequest request) {
PigxUser user = SecurityUtils.getUser();
// 保留原有的流式响应逻辑
return chatService.streamChat(request, user);
}
}
```
#### 6.4 配置管理
```yaml
# application.yml
dify:
api:
base-url: ${DIFY_API_BASE_URL:https://api.dify.ai}
default-api-key: ${DIFY_DEFAULT_API_KEY}
enabled: true
```
## Data Models
### 数据库迁移策略
1. **表结构转换**: PostgreSQL DDL → MySQL DDL
2. **添加租户字段**: 所有业务表添加 `tenant_id` 字段
3. **用户关联**: `user_id` 关联到 pigx 的 `sys_user.user_id`
### 核心业务表
**工单模块 (workcase)**:
```sql
CREATE TABLE tb_workcase (
id BIGINT NOT NULL AUTO_INCREMENT COMMENT '主键',
title VARCHAR(255) NOT NULL COMMENT '工单标题',
content TEXT COMMENT '工单内容',
status TINYINT DEFAULT 0 COMMENT '状态',
creator_id BIGINT COMMENT '创建人ID(关联sys_user)',
assignee_id BIGINT COMMENT '处理人ID(关联sys_user)',
tenant_id BIGINT DEFAULT 1 COMMENT '租户ID',
create_time DATETIME DEFAULT CURRENT_TIMESTAMP,
update_time DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
del_flag CHAR(1) DEFAULT '0',
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='工单表';
```
**招标模块 (bidding)**:
```sql
CREATE TABLE tb_bidding_project (
id BIGINT NOT NULL AUTO_INCREMENT COMMENT '主键',
project_name VARCHAR(255) NOT NULL COMMENT '项目名称',
project_code VARCHAR(64) COMMENT '项目编号',
status TINYINT DEFAULT 0 COMMENT '状态',
creator_id BIGINT COMMENT '创建人ID',
tenant_id BIGINT DEFAULT 1 COMMENT '租户ID',
create_time DATETIME DEFAULT CURRENT_TIMESTAMP,
update_time DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
del_flag CHAR(1) DEFAULT '0',
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='招标项目表';
```
### 类型映射
| PostgreSQL | MySQL | 说明 |
|-----------|-------|------|
| SERIAL | INT AUTO_INCREMENT | 自增 |
| BIGSERIAL | BIGINT AUTO_INCREMENT | 大整数自增 |
| TEXT | TEXT | 文本 |
| JSONB | JSON | JSON数据 |
| BOOLEAN | TINYINT(1) | 布尔 |
| TIMESTAMP | DATETIME | 时间戳 |
## Correctness Properties
*A property is a characteristic or behavior that should hold true across all valid executions of a system — essentially, a formal statement about what the system should do.*
### Property 1: 后端代码迁移完整性
*For any* 源项目中的业务模块bidding, workcase, platform, message迁移后的 pigx-app-server 应该包含对应的 Controller、Service、Mapper 层代码,且代码可以通过编译。
**Validates: Requirements 1.1, 2.1, 3.1, 5.1**
### Property 2: 权限注解适配正确性
*For any* 迁移后的 Controller 方法,应该使用 pigx 的 @HasPermission 注解而非 @PreAuthorize,且权限标识符合 pigx 命名规范module_action 格式)。
**Validates: Requirements 1.2, 2.2, 11.1, 11.2**
### Property 3: 数据库迁移正确性
*For any* 源项目的 PostgreSQL 业务表,转换后的 MySQL DDL 应该:
- 语法正确,可在 MySQL 中执行
- 包含 tenant_id 租户字段
- 用户关联字段正确引用 sys_user.user_id
**Validates: Requirements 1.3, 2.4, 3.3, 4.4, 5.4, 8.1, 8.2, 8.3, 8.4**
### Property 4: 前端页面迁移完整性
*For any* 源项目前端包中的页面组件,迁移后应该存在于 pigx-ai-ui 的对应目录下,且组件可以被正确导入。
**Validates: Requirements 1.4, 2.5, 3.4, 4.5**
### Property 5: API调用适配正确性
*For any* 迁移后的前端 API 调用代码,应该:
- 使用 pigx 的 request 工具(从 /@/utils/request 导入)
- API 路径符合 pigx 网关路由规则(/app/* 前缀)
- 响应处理适配 R<T> 格式
**Validates: Requirements 1.5, 9.4**
### Property 6: 数据迁移完整性
*For any* 源数据库中的业务数据记录,迁移到目标数据库后,记录数量应该相等,关键字段值应该保持一致。
**Validates: Requirements 8.5**
### Property 8: 用户服务调用正确性
*For any* 迁移后的业务代码中涉及用户信息获取的地方,应该使用 SecurityUtils.getUser() 获取当前用户,或通过 RemoteUserService 进行 Feign 调用,而非原有的 UserService。
**Validates: Requirements 11.4, 11.5**
## Error Handling
### 迁移错误处理
1. **代码编译失败**: 记录编译错误,提供修复建议
2. **类型转换失败**: 标记不兼容类型,提供替代方案
3. **依赖缺失**: 自动添加缺失的 pigx 依赖
4. **数据迁移失败**: 支持断点续传,记录失败记录
### 回滚策略
- 代码迁移:保留源文件,支持回退
- 数据库迁移:生成回滚脚本
- 配置变更:版本化管理
## Testing Strategy
### 单元测试
- Service 层业务逻辑测试
- Mapper 层数据访问测试
- 工具类函数测试
### 集成测试
- API 端到端测试
- 用户认证流程测试
- 文件上传下载测试
### 属性测试
使用 Java 的 jqwik 进行属性测试:
```java
@Property
void tenantIsolation(@ForAll @From("businessEntity") BusinessEntity entity) {
// 验证查询结果只包含当前租户数据
List<BusinessEntity> results = service.list();
assertThat(results).allMatch(e -> e.getTenantId().equals(currentTenantId));
}
@Property
void ddlConversion(@ForAll @From("postgresTable") String pgDdl) {
String mysqlDdl = converter.convert(pgDdl);
assertThat(mysqlDdl).contains("tenant_id");
assertThat(mysqlDdl).canExecuteOnMysql();
}
```
### 测试配置
- 属性测试最少运行 100 次迭代
- 使用 Testcontainers 进行数据库测试
- 前端测试使用 Vitest
### Property 9: 租户隔离正确性
*For any* 迁移后的业务表查询,应该自动包含 tenant_id 条件,确保多租户数据隔离。
**Validates: Requirements 8.4, 11.6**

View File

@@ -1,288 +0,0 @@
# Workcase 前端迁移计划
## 一、依赖分析
### 1.1 对 shared 模块的依赖
#### API 依赖
- `shared/api` - 基础 axios 封装和 TokenManager
- `shared/api/file` - 文件上传下载 API (fileAPI)
- `shared/api/ai` - AI 相关 API (aiChatAPI, agentAPI, aiKnowledgeAPI)
#### 组件依赖
- `shared/components` - FileUpload, FileHistory, IframeView
- `shared/components/ai/knowledge` - DocumentSegment (知识库文档分段组件)
- `shared/layouts` - BlankLayout, SubSidebarLayout
#### 类型依赖
- `shared/types` - BaseDTO, BaseVO, ResultDomain, PageRequest, PageParam
- `shared/types` - TbSysFileDTO, MenuItem, TbSysViewDTO
- `shared/types` - AI 相关类型 (TbChat, TbKnowledge, TbKnowledgeFileLog, DifyFileInfo)
### 1.2 内部模块结构
#### Views 结构
```
views/
├── admin/ # 管理后台页面
│ ├── agent/ # AI 智能体管理 (→ dify 模块)
│ ├── customerChat/ # 客服对话管理
│ ├── knowledge/ # 知识库管理 (→ dify 模块)
│ ├── log/ # 日志管理
│ │ ├── knowledgeLog/ # 知识库日志 (→ dify 模块)
│ │ ├── systemLog/ # 系统日志
│ │ └── workcaseLog/ # 工单日志
│ ├── overview/ # 概览页面
│ └── workcase/ # 工单管理
└── public/ # 公共页面
├── AIChat/ # AI 对话 (→ dify 模块)
├── ChatRoom/ # 聊天室
├── JitsiMeeting/ # 视频会议
├── Login/ # 登录页 (pigx 已有)
└── workcase/ # 工单详情
```
#### Components 结构
```
components/
└── workcase/
└── WorkcaseAssign.vue # 工单指派组件
```
#### API 结构
```
api/
└── workcase/
├── workcase.ts # 工单 API
└── workcaseChat.ts # 聊天室 API
```
#### Types 结构
```
types/
└── workcase/
├── workcase.ts # 工单类型
├── chatRoom.ts # 聊天室类型
├── customer.ts # 客服类型
├── conversation.ts # 对话类型
└── wordCloud.ts # 词云类型
```
## 二、迁移策略
### 2.1 迁移顺序(按优先级)
#### 第一阶段:基础设施 (已完成)
- [x] API 定义迁移
- [x] 基础工单管理页面
#### 第二阶段:工单核心功能
1. **工单组件**
- WorkcaseAssign.vue (工单指派组件)
- WorkcaseDetail (工单详情组件)
2. **工单管理页面**
- admin/workcase/WorkcaseView.vue (工单列表)
- admin/overview/OverviewView.vue (概览页面)
- admin/log/workcaseLog/ (工单日志)
#### 第三阶段:聊天室功能
1. **聊天室核心**
- public/ChatRoom/chatRoom/ChatRoom.vue (聊天室主组件)
- public/ChatRoom/ChatMessage/ (消息组件)
- public/ChatRoom/ChatRoomView.vue (聊天室视图)
2. **客服管理**
- admin/customerChat/CustomerChatView.vue (客服对话管理)
3. **视频会议**
- public/JitsiMeeting/JitsiMeetingView.vue (视频会议)
- public/ChatRoom/MeetingCard/ (会议卡片)
- public/ChatRoom/MeetingCreate/ (创建会议)
#### 第四阶段AI 功能 (最后迁移,归入 dify 模块)
1. **AI 对话**
- public/AIChat/AIChatView.vue
- public/AIChat/components/
2. **智能体管理**
- admin/agent/AgentView.vue
3. **知识库管理**
- admin/knowledge/KnowLedgeView.vue
- admin/log/knowledgeLog/KnowledgeLogView.vue
### 2.2 共享依赖处理
#### pigx 已有的功能(直接使用)
- 登录认证 (Login)
- 用户管理
- 权限管理
- 文件上传下载 (Upload 组件)
- 基础布局 (Layout)
#### 需要适配的 shared 组件
1. **FileUpload** → 使用 pigx 的 `Upload/index.vue`
2. **FileHistory** → 需要迁移或使用 pigx 的文件管理
3. **IframeView** → 简单组件,可直接迁移
4. **DocumentSegment** → AI 知识库专用,归入 dify 模块
#### 需要适配的 API
1. **fileAPI** → 适配 pigx 的文件服务 API
2. **aiChatAPI, agentAPI, aiKnowledgeAPI** → 归入 dify 模块
### 2.3 目录结构映射
#### 源目录 → 目标目录
```
urbanLifelineWeb/packages/workcase/
├── src/api/workcase/ → pigx-ai-ui/src/api/workcase/
├── src/components/workcase/ → pigx-ai-ui/src/components/workcase/
├── src/views/admin/workcase/ → pigx-ai-ui/src/views/workcase/admin/
├── src/views/admin/customerChat/ → pigx-ai-ui/src/views/workcase/customerChat/
├── src/views/admin/overview/ → pigx-ai-ui/src/views/workcase/overview/
├── src/views/admin/log/ → pigx-ai-ui/src/views/workcase/log/
├── src/views/public/ChatRoom/ → pigx-ai-ui/src/views/workcase/chatRoom/
├── src/views/public/JitsiMeeting/ → pigx-ai-ui/src/views/workcase/meeting/
├── src/views/public/workcase/ → pigx-ai-ui/src/views/workcase/detail/
└── src/types/workcase/ → pigx-ai-ui/src/types/workcase/
# AI 相关 (归入 dify 模块)
├── src/views/admin/agent/ → pigx-ai-ui/src/views/dify/agent/
├── src/views/admin/knowledge/ → pigx-ai-ui/src/views/dify/knowledge/
├── src/views/public/AIChat/ → pigx-ai-ui/src/views/dify/chat/
└── src/views/admin/log/knowledgeLog/ → pigx-ai-ui/src/views/dify/log/
```
## 三、技术适配要点
### 3.1 API 调用适配
```typescript
// 源代码
import { api } from 'shared/api'
const res = await api.post('/urban-lifeline/workcase', data)
// 目标代码
import request from '@/utils/request'
const res = await request({
url: '/workcase/workcase',
method: 'post',
data
})
```
### 3.2 响应格式适配
```typescript
// 源代码 (ResultDomain)
interface ResultDomain<T> {
code: number
message: string
success: boolean
data?: T
dataList?: T[]
pageDomain?: PageDomain<T>
}
// 目标代码 (pigx R<T>)
// pigx 使用 code === 0 表示成功
if (res.code === 0) {
// res.data 包含数据
}
```
### 3.3 文件上传适配
```typescript
// 源代码
import { fileAPI } from 'shared/api/file'
await fileAPI.uploadFile({ file, module, optsn })
// 目标代码
import { uploadFile } from '@/api/admin/file'
await uploadFile(formData)
```
### 3.4 WebSocket 适配
```typescript
// 源代码 (SockJS + STOMP)
const wsUrl = `${protocol}//${host}/${API_BASE_URL}/urban-lifeline/workcase/ws/chat-sockjs`
stompClient = new Client({
webSocketFactory: () => new SockJS(wsUrl)
})
// 目标代码 (需要适配 pigx 的 WebSocket 配置)
// pigx 可能使用不同的 WebSocket 实现
```
### 3.5 组件库适配
- Element Plus 版本可能不同,需要检查 API 变化
- 图标库:源代码使用 lucide-vue-next,pigx 使用 Element Plus Icons
- 需要统一图标使用方式
## 四、迁移检查清单
### 4.1 功能完整性
- [ ] 工单 CRUD
- [ ] 工单指派/转派
- [ ] 工单流程记录
- [ ] 聊天室功能
- [ ] 实时消息推送 (WebSocket)
- [ ] 视频会议集成
- [ ] 客服管理
- [ ] 文件上传下载
- [ ] 词云统计
### 4.2 权限控制
- [ ] 页面访问权限
- [ ] 按钮操作权限
- [ ] 数据权限(租户隔离)
### 4.3 用户体验
- [ ] 响应式布局
- [ ] 加载状态
- [ ] 错误提示
- [ ] 空状态展示
- [ ] 分页功能
### 4.4 性能优化
- [ ] 列表虚拟滚动(如需要)
- [ ] 图片懒加载
- [ ] 防抖节流
- [ ] 请求缓存
## 五、注意事项
### 5.1 不要迁移的内容
- Login 页面pigx 已有完整的登录系统)
- 用户管理相关页面(使用 pigx 原生功能)
- 权限管理相关页面(使用 pigx 原生功能)
### 5.2 需要重点测试的功能
- WebSocket 实时通信
- 文件上传下载
- 视频会议集成
- 多租户数据隔离
- 权限控制
### 5.3 AI 功能迁移注意
- AI 对话、智能体、知识库功能最后迁移
- 这些功能归入 dify 模块,不放在 workcase 模块
- 需要与 Dify API 集成测试
## 六、当前进度
### 已完成
- [x] API 定义 (workcase.ts, chat.ts)
- [x] 类型定义 (workcase.ts, chatRoom.ts, customer.ts, conversation.ts, wordCloud.ts)
- [x] 基础工单列表页面 (index.vue)
- [x] 工单指派组件 (WorkcaseAssign.vue)
- [x] 工单详情组件 (WorkcaseDetail.vue)
- [x] 聊天室消息组件 (ChatMessage.vue)
### 进行中
- [ ] 管理后台页面 (overview, customerChat, log)
### 待开始
- [ ] 视频会议功能
- [ ] 客服管理
- [ ] 日志管理
- [ ] AI 功能(最后)

View File

@@ -1,391 +0,0 @@
# 权限注解转换指南
## 概述
本指南详细说明了如何将 urbanLifelineServ 的权限注解迁移到 pigx 平台的权限体系。
## 核心变更
### 1. 权限注解格式
| 特性 | urbanLifelineServ | pigx |
|------|------------------|------|
| 注解类 | @PreAuthorize | @PreAuthorize |
| 权限判断方法 | hasAuthority() | @pms.hasPermission() |
| 权限标识格式 | module:resource:action | module_resource_action |
| 分隔符 | 冒号 `:` | 下划线 `_` |
### 2. 用户信息获取
| 功能 | urbanLifelineServ | pigx |
|------|------------------|------|
| 获取用户ID | JwtUtils.getUserId() | SecurityUtils.getUser().getId() |
| 获取用户名 | JwtUtils.getUsername() | SecurityUtils.getUser().getUsername() |
| 获取租户ID | 不支持 | SecurityUtils.getUser().getTenantId() |
| 获取部门ID | 不支持 | SecurityUtils.getUser().getDeptId() |
### 3. 响应格式
| 特性 | urbanLifelineServ | pigx |
|------|------------------|------|
| 响应类 | ResultDomain<T> | R<T> |
| 成功响应 | ResultDomain.success(data) | R.ok(data) |
| 失败响应 | ResultDomain.fail(msg) | R.failed(msg) |
| 列表字段 | dataList | data |
## 转换步骤
### 步骤1权限注解转换
#### 1.1 基本转换
```java
// 转换前
@PreAuthorize("hasAuthority('workcase:ticket:create')")
public ResultDomain<TbWorkcaseDTO> createWorkcase(@RequestBody TbWorkcaseDTO workcase) {
return workcaseService.createWorkcase(workcase);
}
// 转换后
@PreAuthorize("@pms.hasPermission('workcase_ticket_add')")
public R<TbWorkcaseDTO> createWorkcase(@RequestBody TbWorkcaseDTO workcase) {
return R.ok(workcaseService.createWorkcase(workcase));
}
```
#### 1.2 多权限判断
```java
// 转换前
@PreAuthorize("hasAuthority('workcase:ticket:view') or hasAuthority('workcase:ticket:admin')")
public ResultDomain<List<TbWorkcaseDTO>> listWorkcase() {
return ResultDomain.success(workcaseService.list());
}
// 转换后
@PreAuthorize("@pms.hasPermission('workcase_ticket_view') or @pms.hasPermission('workcase_ticket_admin')")
public R<List<TbWorkcaseDTO>> listWorkcase() {
return R.ok(workcaseService.list());
}
```
### 步骤2用户信息获取转换
#### 2.1 Service层用户信息获取
```java
// 转换前
@Service
public class WorkcaseServiceImpl implements WorkcaseService {
public TbWorkcaseDTO createWorkcase(TbWorkcaseDTO workcase) {
Long userId = JwtUtils.getUserId();
String username = JwtUtils.getUsername();
workcase.setCreateBy(userId);
workcase.setCreateByName(username);
workcase.setCreateTime(new Date());
return workcaseMapper.insert(workcase);
}
}
// 转换后
@Service
public class WorkcaseServiceImpl implements WorkcaseService {
public TbWorkcaseDTO createWorkcase(TbWorkcaseDTO workcase) {
PigxUser user = SecurityUtils.getUser();
workcase.setCreateBy(user.getId());
workcase.setCreateByName(user.getUsername());
workcase.setTenantId(user.getTenantId()); // 新增:租户隔离
workcase.setDeptId(user.getDeptId()); // 新增:部门信息
workcase.setCreateTime(LocalDateTime.now());
return workcaseMapper.insert(workcase);
}
}
```
#### 2.2 远程用户服务调用
```java
// 转换前
@Service
public class WorkcaseServiceImpl {
@Autowired
private UserService userService;
public void assignWorkcase(String workcaseId, Long assigneeId) {
User assignee = userService.getById(assigneeId);
if (assignee == null) {
throw new BusinessException("用户不存在");
}
// 处理逻辑...
}
}
// 转换后
@Service
public class WorkcaseServiceImpl {
@Autowired
private RemoteUserService remoteUserService;
public void assignWorkcase(String workcaseId, Long assigneeId) {
R<SysUser> result = remoteUserService.selectById(assigneeId);
if (!result.isSuccess() || result.getData() == null) {
throw new BusinessException("用户不存在");
}
SysUser assignee = result.getData();
// 处理逻辑...
}
}
```
### 步骤3响应格式转换
#### 3.1 Controller响应转换
```java
// 转换前
@RestController
@RequestMapping("/api/workcase")
public class WorkcaseController {
// 单个对象返回
@GetMapping("/{id}")
public ResultDomain<TbWorkcaseDTO> getById(@PathVariable String id) {
TbWorkcaseDTO workcase = workcaseService.getById(id);
if (workcase == null) {
return ResultDomain.fail("工单不存在");
}
return ResultDomain.success(workcase);
}
// 列表返回
@GetMapping("/list")
public ResultDomain<List<TbWorkcaseDTO>> list() {
List<TbWorkcaseDTO> list = workcaseService.list();
ResultDomain<List<TbWorkcaseDTO>> result = ResultDomain.success();
result.setDataList(list); // 注意使用dataList字段
return result;
}
}
// 转换后
@RestController
@RequestMapping("/workcase")
public class WorkcaseController {
// 单个对象返回
@GetMapping("/{id}")
public R<TbWorkcaseDTO> getById(@PathVariable String id) {
TbWorkcaseDTO workcase = workcaseService.getById(id);
if (workcase == null) {
return R.failed("工单不存在");
}
return R.ok(workcase);
}
// 列表返回
@GetMapping("/list")
public R<List<TbWorkcaseDTO>> list() {
List<TbWorkcaseDTO> list = workcaseService.list();
return R.ok(list); // 直接返回列表不使用dataList
}
// 分页返回
@GetMapping("/page")
public R<IPage<TbWorkcaseDTO>> page(Page page) {
return R.ok(workcaseService.page(page));
}
}
```
### 步骤4数据权限适配
#### 4.1 添加租户隔离
```java
// 转换前 - 无租户隔离
@Service
public class WorkcaseServiceImpl {
public List<TbWorkcaseDTO> listMyWorkcase() {
Long userId = JwtUtils.getUserId();
return workcaseMapper.selectByUserId(userId);
}
}
// 转换后 - 支持租户隔离
@Service
public class WorkcaseServiceImpl {
public List<TbWorkcaseDTO> listMyWorkcase() {
PigxUser user = SecurityUtils.getUser();
QueryWrapper<TbWorkcaseDTO> wrapper = new QueryWrapper<>();
wrapper.eq("tenant_id", user.getTenantId()) // 租户隔离
.eq("create_by", user.getId());
return workcaseMapper.selectList(wrapper);
}
}
```
## 批量转换工具
### 使用IDE批量替换
#### IntelliJ IDEA 正则替换
1. **查找模式** (启用正则表达式):
```regex
@PreAuthorize\("hasAuthority\('([^:]+):([^:]+):([^']+)'\)"\)
```
2. **替换为**:
```regex
@PreAuthorize("@pms.hasPermission('$1_$2_$3')")
```
3. **动作映射** (执行第二次替换):
- 查找: `_create'` 替换为: `_add'`
- 查找: `_update'` 替换为: `_edit'`
- 查找: `_delete'` 替换为: `_del'`
### 命令行批量转换脚本
```bash
#!/bin/bash
# convert-permissions.sh
# 查找所有Java文件并转换权限注解
find . -name "*.java" -type f -exec sed -i.bak \
-e "s/@PreAuthorize(\"hasAuthority('\([^:]*\):\([^:]*\):\([^']*\)')\")/@PreAuthorize(\"@pms.hasPermission('\1_\2_\3')\")/g" \
-e "s/_create')/_add')/g" \
-e "s/_update')/_edit')/g" \
-e "s/_delete')/_del')/g" {} \;
# 转换JwtUtils为SecurityUtils
find . -name "*.java" -type f -exec sed -i.bak \
-e "s/JwtUtils.getUserId()/SecurityUtils.getUser().getId()/g" \
-e "s/JwtUtils.getUsername()/SecurityUtils.getUser().getUsername()/g" {} \;
# 转换ResultDomain为R
find . -name "*.java" -type f -exec sed -i.bak \
-e "s/ResultDomain.success(/R.ok(/g" \
-e "s/ResultDomain.fail(/R.failed(/g" \
-e "s/ResultDomain</R</g" {} \;
```
## 需要手工处理的情况
### 1. 复杂的权限逻辑
```java
// 需要手工审查的复杂权限
@PreAuthorize("hasAuthority('workcase:ticket:view') and #workcase.createBy == authentication.principal.userId")
public ResultDomain<TbWorkcaseDTO> getMyWorkcase(@PathVariable String id, TbWorkcaseDTO workcase) {
// 这种情况需要根据pigx的数据权限机制重新设计
}
```
### 2. 自定义权限判断
```java
// 转换前
if (SecurityContextHolder.getContext().getAuthentication().getAuthorities()
.contains(new SimpleGrantedAuthority("workcase:ticket:admin"))) {
// 管理员逻辑
}
// 转换后getAuthorities() 返回 GrantedAuthority 集合,不能直接 contains 字符串)
if (SecurityUtils.getUser().getAuthorities().stream()
        .anyMatch(a -> a.getAuthority().equals("workcase_ticket_admin"))) {
    // 管理员逻辑
}
```
### 3. 异步任务中的用户信息
```java
// 转换前
@Async
public void processAsync() {
Long userId = JwtUtils.getUserId(); // 异步线程中可能获取不到
}
// 转换后
@Async
public void processAsync() {
// 需要在调用异步方法前获取用户信息并传递
PigxUser user = SecurityUtils.getUser();
processAsyncWithUser(user);
}
```
## 测试验证清单
### 权限测试
- [ ] 所有 @PreAuthorize 注解已转换为 @pms.hasPermission 格式
- [ ] 权限标识符已从冒号改为下划线
- [ ] 动作已正确映射 (create→add, update→edit, delete→del)
- [ ] 多权限判断逻辑正确
### 用户信息测试
- [ ] SecurityUtils.getUser() 能正确获取用户信息
- [ ] 租户ID正确设置到业务数据
- [ ] RemoteUserService 调用正常
### 响应格式测试
- [ ] 所有接口返回 R<T> 格式
- [ ] 前端能正确解析新的响应格式
- [ ] 错误信息正确传递
### 数据权限测试
- [ ] 租户数据隔离正常
- [ ] 部门数据权限正常
- [ ] 个人数据权限正常
## 常见问题
### Q1: @pms.hasPermission 中的 @pms 是什么?
A: `@pms` 是 pigx 权限管理系统的 SpEL 表达式前缀,用于调用权限判断方法。这是 pigx 框架的固定写法,必须保留。
### Q2: 为什么要将 create 改为 add?
A: 这是 pigx 平台的命名规范,保持统一的动作命名有助于权限管理的标准化。常见映射:
- create → add (新增)
- update → edit (编辑)
- delete → del (删除)
- view → view (查看)
### Q3: 如何处理没有对应 pigx 用户的情况?
A: 所有业务用户必须在 pigx 的 sys_user 表中存在。如果是数据迁移,需要先创建对应的 pigx 用户,或建立用户映射关系。
### Q4: 租户ID是必须的吗?
A: 是的。pigx 是多租户系统,所有业务表都需要 tenant_id 字段。即使是单租户使用,也需要设置默认租户ID通常为 1
### Q5: 如何调试权限问题?
A: 可以通过以下方式调试:
1. 查看 pigx 日志中的权限判断记录
2. 使用 SecurityUtils.getUser().getAuthorities() 查看当前用户权限
3. 检查 sys_menu 表中的权限配置
4. 验证 sys_role_menu 表中的角色权限关联
## 相关文档
- [权限标识映射表](./permission-mapping.md)
- [数据库迁移指南](./database-migration.md)
- [前端适配指南](./frontend-migration.md)

View File

@@ -1,490 +0,0 @@
# 权限注解转换指南
## 目标
将 urbanLifeline 的权限体系完全迁移到 pigx 平台的权限模型。
## 核心概念对比
| 特性 | urbanLifeline | pigx |
|------|---------------|------|
| 权限注解 | `@PreAuthorize("hasAuthority()")` | `@PreAuthorize("@pms.hasPermission()")` |
| 权限格式 | `module:resource:action` | `module_resource_action` |
| 用户获取 | `JwtUtils.getUserId()` | `SecurityUtils.getUser()` |
| 用户服务 | `UserService` (本地) | `RemoteUserService` (Feign) |
| 响应格式 | `ResultDomain<T>` | `R<T>` |
| 租户支持 | 无 | 有 (tenant_id) |
## 转换步骤详解
### 步骤 1权限注解转换
#### 1.1 基本转换规则
```java
// ❌ 旧代码 (urbanLifeline)
@PreAuthorize("hasAuthority('workcase:ticket:create')")
public ResultDomain<TbWorkcaseDTO> createWorkcase(@RequestBody TbWorkcaseDTO dto) {
// ...
}
// ✅ 新代码 (pigx)
@PreAuthorize("@pms.hasPermission('workcase_ticket_add')")
public R<TbWorkcaseDTO> createWorkcase(@RequestBody TbWorkcaseDTO dto) {
// ...
}
```
#### 1.2 多权限组合
```java
// ❌ 旧代码 - OR 条件
@PreAuthorize("hasAuthority('workcase:ticket:update') or hasAuthority('workcase:ticket:admin')")
// ✅ 新代码 - OR 条件
@PreAuthorize("@pms.hasPermission('workcase_ticket_edit') or @pms.hasPermission('workcase_ticket_admin')")
// ❌ 旧代码 - AND 条件
@PreAuthorize("hasAuthority('workcase:ticket:view') and hasAuthority('workcase:export:data')")
// ✅ 新代码 - AND 条件
@PreAuthorize("@pms.hasPermission('workcase_ticket_view') and @pms.hasPermission('workcase_export_data')")
```
#### 1.3 动态权限检查
```java
// ❌ 旧代码
@Service
public class WorkcaseService {
public boolean canEdit(Long workcaseId) {
Authentication auth = SecurityContextHolder.getContext().getAuthentication();
return auth.getAuthorities().stream()
.anyMatch(a -> a.getAuthority().equals("workcase:ticket:update"));
}
}
// ✅ 新代码
@Service
public class WorkcaseService {
@Autowired
private PermissionService permissionService;
public boolean canEdit(Long workcaseId) {
return permissionService.hasPermission("workcase_ticket_edit");
}
}
```
### 步骤 2用户信息获取转换
#### 2.1 获取当前用户
```java
// ❌ 旧代码
Long userId = JwtUtils.getUserId();
String username = JwtUtils.getUsername();
String role = JwtUtils.getRole();
// ✅ 新代码
PigxUser user = SecurityUtils.getUser();
Long userId = user.getId();
String username = user.getUsername();
Long tenantId = user.getTenantId(); // 新增租户ID
Long deptId = user.getDeptId(); // 新增部门ID
List<String> roles = user.getRoles(); // 角色列表
```
#### 2.2 Service 层用户信息处理
```java
// ❌ 旧代码
@Service
public class WorkcaseServiceImpl implements WorkcaseService {
@Autowired
private UserService userService;
public void assignWorkcase(Long workcaseId, Long assigneeId) {
User assignee = userService.getById(assigneeId);
// 处理逻辑...
}
}
// ✅ 新代码
@Service
public class WorkcaseServiceImpl implements WorkcaseService {
@Autowired
private RemoteUserService remoteUserService;
public void assignWorkcase(Long workcaseId, Long assigneeId) {
// 使用 Feign 远程调用
R<SysUser> result = remoteUserService.selectById(assigneeId);
if (result.isSuccess() && result.getData() != null) {
SysUser assignee = result.getData();
// 处理逻辑...
} else {
throw new BusinessException("用户不存在");
}
}
}
```
### 步骤 3响应格式转换
#### 3.1 成功响应
```java
// ❌ 旧代码
return ResultDomain.success(data);
return ResultDomain.success(list, total);
return ResultDomain.success("操作成功", data);
// ✅ 新代码
return R.ok(data);
return R.ok(data, "操作成功");
// 分页场景请返回 IPage见下方 3.3 分页响应R.ok 没有 (list, total) 重载
```
#### 3.2 错误响应
```java
// ❌ 旧代码
return ResultDomain.failure("参数错误");
return ResultDomain.failure(ErrorCode.INVALID_PARAM);
throw new BusinessException("业务异常");
// ✅ 新代码
return R.failed("参数错误");
return R.failed(CommonConstants.FAIL, "参数错误");
throw new ServiceException("业务异常");
```
#### 3.3 分页响应
```java
// ❌ 旧代码
public ResultDomain<List<TbWorkcaseDTO>> list(PageParam param) {
Page<TbWorkcase> page = workcaseMapper.selectPage(param);
return ResultDomain.success(page.getRecords(), page.getTotal());
}
// ✅ 新代码
public R<IPage<TbWorkcaseDTO>> list(Page page, TbWorkcaseDTO query) {
IPage<TbWorkcaseDTO> result = workcaseMapper.selectPageVo(page, query);
return R.ok(result);
}
```
### 步骤 4租户隔离实现
#### 4.1 实体类添加租户字段
```java
@Data
@TableName("tb_workcase")
public class TbWorkcase {
@TableId
private Long id;
private String title;
// ✅ 新增租户字段
@TableField("tenant_id")
private Long tenantId;
// 其他字段...
}
```
#### 4.2 Service 层自动注入租户
```java
@Service
public class WorkcaseServiceImpl implements WorkcaseService {
@Override
public R<TbWorkcaseDTO> save(TbWorkcaseDTO dto) {
// ✅ 自动注入当前租户
PigxUser user = SecurityUtils.getUser();
dto.setTenantId(user.getTenantId());
dto.setCreateBy(user.getUsername());
dto.setCreateTime(LocalDateTime.now());
workcaseMapper.insert(dto);
return R.ok(dto);
}
@Override
public R<IPage<TbWorkcaseDTO>> page(Page page, TbWorkcaseDTO query) {
// ✅ 查询条件自动添加租户过滤
PigxUser user = SecurityUtils.getUser();
query.setTenantId(user.getTenantId());
return R.ok(workcaseMapper.selectPageVo(page, query));
}
}
```
#### 4.3 Mapper 层租户隔离
```xml
<!-- WorkcaseMapper.xml -->
<select id="selectPageVo" resultType="com.pig4cloud.pigx.app.api.dto.TbWorkcaseDTO">
SELECT * FROM tb_workcase
<where>
<!-- ✅ 租户隔离条件 -->
<if test="query.tenantId != null">
AND tenant_id = #{query.tenantId}
</if>
<if test="query.title != null and query.title != ''">
AND title LIKE CONCAT('%', #{query.title}, '%')
</if>
</where>
ORDER BY create_time DESC
</select>
```
### 步骤 5配置 RemoteUserService
#### 5.1 添加 Feign 客户端接口
```java
package com.pig4cloud.pigx.app.api.feign;
import com.pig4cloud.pigx.common.core.constant.ServiceNameConstants;
import com.pig4cloud.pigx.common.core.util.R;
import com.pig4cloud.pigx.upms.api.entity.SysUser;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.*;
@FeignClient(contextId = "remoteUserService",
value = ServiceNameConstants.UPMS_SERVICE)
public interface RemoteUserService {
/**
* 根据用户ID查询用户信息
*/
@GetMapping("/user/info/{id}")
R<SysUser> selectById(@PathVariable("id") Long id);
/**
* 根据用户名查询用户信息
*/
@GetMapping("/user/info")
R<SysUser> selectByUsername(@RequestParam("username") String username);
/**
* 批量查询用户信息
*/
@PostMapping("/user/list")
R<List<SysUser>> selectBatchIds(@RequestBody List<Long> ids);
}
```
#### 5.2 使用 RemoteUserService
```java
@Service
@RequiredArgsConstructor
public class WorkcaseServiceImpl implements WorkcaseService {
private final RemoteUserService remoteUserService;
public TbWorkcaseDTO getWorkcaseDetail(Long id) {
TbWorkcase workcase = workcaseMapper.selectById(id);
TbWorkcaseDTO dto = BeanUtil.copyProperties(workcase, TbWorkcaseDTO.class);
// 获取创建人信息
if (dto.getCreatorId() != null) {
R<SysUser> creatorResult = remoteUserService.selectById(dto.getCreatorId());
if (creatorResult.isSuccess() && creatorResult.getData() != null) {
dto.setCreatorName(creatorResult.getData().getUsername());
dto.setCreatorDeptName(creatorResult.getData().getDeptName());
}
}
// 获取处理人信息
if (dto.getAssigneeId() != null) {
R<SysUser> assigneeResult = remoteUserService.selectById(dto.getAssigneeId());
if (assigneeResult.isSuccess() && assigneeResult.getData() != null) {
dto.setAssigneeName(assigneeResult.getData().getUsername());
}
}
return dto;
}
}
```
## 批量转换工具
### 使用 IDE 批量替换(推荐)
#### IntelliJ IDEA
1. **权限注解替换**
- 查找:`@PreAuthorize\("hasAuthority\('([^:]+):([^:]+):([^']+)'\)"\)`
- 替换:`@PreAuthorize("@pms.hasPermission('$1_$2_$3')")`
- 选项:勾选 "Regex"
2. **响应格式替换**
- 查找:`ResultDomain\.success\((.*?)\)`
- 替换:`R.ok($1)`
3. **用户信息获取**
- 查找:`JwtUtils\.getUserId\(\)`
- 替换:`SecurityUtils.getUser().getId()`
#### VS Code
使用 Find and Replace (Ctrl+Shift+H),启用正则表达式模式。
### 使用脚本批量转换
创建 `convert-permissions.sh`
```bash
#!/bin/bash
# 转换权限注解
find ./src -name "*.java" -type f -exec sed -i \
  "s/@PreAuthorize(\"hasAuthority('\([^:]*\):\([^:]*\):\([^']*\)')\")/@PreAuthorize(\"@pms.hasPermission('\1_\2_\3')\")/g" {} \;
# 转换响应格式
find ./src -name "*.java" -type f -exec sed -i \
's/ResultDomain\.success(\(.*\))/R.ok(\1)/g' {} \;
# 转换用户信息获取
find ./src -name "*.java" -type f -exec sed -i \
's/JwtUtils\.getUserId()/SecurityUtils.getUser().getId()/g' {} \;
echo "转换完成!"
```
## 测试验证
### 1. 单元测试示例
```java
@SpringBootTest
@AutoConfigureMockMvc
public class WorkcaseControllerTest {
@Autowired
private MockMvc mockMvc;
@Test
@WithMockUser(username = "admin", authorities = {"workcase_ticket_add"})
public void testCreateWorkcase() throws Exception {
TbWorkcaseDTO dto = new TbWorkcaseDTO();
dto.setTitle("测试工单");
mockMvc.perform(post("/workcase")
.contentType(MediaType.APPLICATION_JSON)
.content(JSON.toJSONString(dto)))
.andExpect(status().isOk())
.andExpect(jsonPath("$.code").value(0))
.andExpect(jsonPath("$.data.title").value("测试工单"));
}
@Test
@WithMockUser(username = "user", authorities = {})
public void testCreateWorkcaseNoPermission() throws Exception {
mockMvc.perform(post("/workcase")
.contentType(MediaType.APPLICATION_JSON)
.content("{}"))
.andExpect(status().isForbidden());
}
}
```
### 2. 集成测试检查清单
- [ ] 权限注解正确转换
- [ ] 用户信息正确获取
- [ ] 租户数据正确隔离
- [ ] 响应格式符合规范
- [ ] RemoteUserService 调用成功
- [ ] 菜单权限正确配置
- [ ] 角色权限正确分配
## 常见问题解决
### Q1: @pms.hasPermission() 不生效
**原因**:没有正确配置 PermissionService Bean
**解决**
```java
@Configuration
public class SecurityConfig {
@Bean("pms")
public PermissionService permissionService() {
return new PermissionService();
}
}
```
### Q2: RemoteUserService 调用失败
**原因**Feign 客户端未正确配置
**解决**
1. 检查 `@EnableFeignClients` 注解
2. 确认服务名称正确
3. 添加熔断处理
```java
@FeignClient(contextId = "remoteUserService",
value = ServiceNameConstants.UPMS_SERVICE,
fallback = RemoteUserServiceFallback.class)
```
### Q3: 租户数据泄露
**原因**:查询时未添加租户过滤
**解决**
1. 使用 MyBatis-Plus 租户插件
2. 手动添加租户条件
```java
@Configuration
public class MybatisPlusConfig {
@Bean
public TenantLineInnerInterceptor tenantLineInnerInterceptor() {
return new TenantLineInnerInterceptor(new TenantLineHandler() {
@Override
public Expression getTenantId() {
PigxUser user = SecurityUtils.getUser();
return new LongValue(user.getTenantId());
}
@Override
public String getTenantIdColumn() {
return "tenant_id";
}
});
}
}
```
## 迁移验证
完成转换后,执行以下验证:
1. **编译检查**:确保所有代码编译通过
2. **启动检查**:应用能正常启动
3. **权限测试**:各接口权限控制正确
4. **数据隔离**:租户数据正确隔离
5. **功能测试**:业务功能正常运行
## 总结
权限迁移是整个系统迁移的核心部分,需要:
1. 仔细转换每个权限注解
2. 正确处理用户信息获取
3. 实现租户数据隔离
4. 充分测试验证
建议分模块逐步迁移,每完成一个模块就进行测试验证。

View File

@@ -1,253 +0,0 @@
# 权限标识映射表
## 概述
本文档定义了从 urbanLifelineServ 权限标识到 pigx 权限标识的映射规则。
## 映射规则
### 格式转换规则
- **源格式**: `module:resource:action` (使用冒号分隔)
- **目标格式**: `module_resource_action` (使用下划线分隔)
- **动作映射**:
- `create``add`
- `update``edit`
- `delete``del`
- `view``view`
- 其他保持不变
### 权限注解转换
- **源注解**: `@PreAuthorize("hasAuthority('module:resource:action')")`
- **目标注解**: `@PreAuthorize("@pms.hasPermission('module_resource_action')")`
## 权限映射表
### 工单模块 (workcase)
| 源权限标识 | 目标权限标识 | 说明 | 菜单类型 |
|-----------|-------------|------|----------|
| workcase:ticket:create | workcase_ticket_add | 创建工单 | 按钮 |
| workcase:ticket:update | workcase_ticket_edit | 更新工单 | 按钮 |
| workcase:ticket:view | workcase_ticket_view | 查看工单 | 菜单 |
| workcase:ticket:delete | workcase_ticket_del | 删除工单 | 按钮 |
| workcase:ticket:process | workcase_ticket_process | 处理工单 | 按钮 |
| workcase:ticket:device | workcase_ticket_device | 工单设备管理 | 按钮 |
| workcase:room:create | workcase_room_add | 创建聊天室 | 按钮 |
| workcase:room:update | workcase_room_edit | 更新聊天室 | 按钮 |
| workcase:room:close | workcase_room_close | 关闭聊天室 | 按钮 |
| workcase:room:view | workcase_room_view | 查看聊天室 | 菜单 |
### AI模块 (dify)
| 源权限标识 | 目标权限标识 | 说明 | 菜单类型 |
|-----------|-------------|------|----------|
| ai:agent:create | dify_agent_add | 创建智能体 | 按钮 |
| ai:agent:update | dify_agent_edit | 更新智能体 | 按钮 |
| ai:agent:delete | dify_agent_del | 删除智能体 | 按钮 |
| ai:agent:view | dify_agent_view | 查看智能体 | 菜单 |
| ai:knowledge:create | dify_knowledge_add | 创建知识库 | 按钮 |
| ai:knowledge:update | dify_knowledge_edit | 更新知识库 | 按钮 |
| ai:knowledge:delete | dify_knowledge_del | 删除知识库 | 按钮 |
| ai:knowledge:view | dify_knowledge_view | 查看知识库 | 菜单 |
| ai:knowledge:file:view | dify_knowledge_file_view | 查看知识库文件 | 按钮 |
| ai:knowledge:file:upload | dify_knowledge_file_upload | 上传知识库文件 | 按钮 |
| ai:knowledge:file:update | dify_knowledge_file_edit | 更新知识库文件 | 按钮 |
| ai:knowledge:file:delete | dify_knowledge_file_del | 删除知识库文件 | 按钮 |
| ai:dify:segment:view | dify_segment_view | 查看文档片段 | 按钮 |
| ai:dify:segment:create | dify_segment_add | 创建文档片段 | 按钮 |
| ai:dify:segment:update | dify_segment_edit | 更新文档片段 | 按钮 |
| ai:dify:segment:delete | dify_segment_del | 删除文档片段 | 按钮 |
| ai:dify:document:status | dify_document_status | 查看文档状态 | 按钮 |
| ai:chat:create | dify_chat_add | 创建对话 | 按钮 |
| ai:chat:view | dify_chat_view | 查看对话 | 菜单 |
| ai:chat:message | dify_chat_message | 发送消息 | 按钮 |
### 招标模块 (bidding)
| 源权限标识 | 目标权限标识 | 说明 | 菜单类型 |
|-----------|-------------|------|----------|
| bidding:project:create | bidding_project_add | 创建招标项目 | 按钮 |
| bidding:project:update | bidding_project_edit | 更新招标项目 | 按钮 |
| bidding:project:delete | bidding_project_del | 删除招标项目 | 按钮 |
| bidding:project:view | bidding_project_view | 查看招标项目 | 菜单 |
| bidding:bid:create | bidding_bid_add | 创建投标 | 按钮 |
| bidding:bid:update | bidding_bid_edit | 更新投标 | 按钮 |
| bidding:bid:view | bidding_bid_view | 查看投标 | 菜单 |
| bidding:document:view | bidding_document_view | 查看招标文件 | 按钮 |
| bidding:document:upload | bidding_document_upload | 上传招标文件 | 按钮 |
### 平台管理模块 (platform)
| 源权限标识 | 目标权限标识 | 说明 | 菜单类型 |
|-----------|-------------|------|----------|
| platform:config:view | platform_config_view | 查看配置 | 菜单 |
| platform:config:update | platform_config_edit | 更新配置 | 按钮 |
| platform:log:view | platform_log_view | 查看日志 | 菜单 |
| platform:monitor:view | platform_monitor_view | 查看监控 | 菜单 |
| platform:stat:view | platform_stat_view | 查看统计 | 菜单 |
### 消息模块 (message)
| 源权限标识 | 目标权限标识 | 说明 | 菜单类型 |
|-----------|-------------|------|----------|
| message:notification:create | message_notification_add | 创建通知 | 按钮 |
| message:notification:view | message_notification_view | 查看通知 | 菜单 |
| message:notification:send | message_notification_send | 发送通知 | 按钮 |
| message:template:create | message_template_add | 创建消息模板 | 按钮 |
| message:template:update | message_template_edit | 更新消息模板 | 按钮 |
| message:template:delete | message_template_del | 删除消息模板 | 按钮 |
| message:template:view | message_template_view | 查看消息模板 | 菜单 |
## 菜单配置SQL示例
```sql
-- 工单管理菜单
INSERT INTO sys_menu (menu_id, name, permission, path, parent_id, icon, sort, type, tenant_id) VALUES
(10000, '工单管理', NULL, '/workcase', 0, 'el-icon-tickets', 1, '0', 1),
(10001, '工单列表', 'workcase_ticket_view', '/workcase/list', 10000, '', 1, '1', 1),
(10002, '创建工单', 'workcase_ticket_add', NULL, 10001, '', 1, '2', 1),
(10003, '编辑工单', 'workcase_ticket_edit', NULL, 10001, '', 2, '2', 1),
(10004, '删除工单', 'workcase_ticket_del', NULL, 10001, '', 3, '2', 1),
(10005, '处理工单', 'workcase_ticket_process', NULL, 10001, '', 4, '2', 1),
(10006, '设备管理', 'workcase_ticket_device', NULL, 10001, '', 5, '2', 1),
(10010, '聊天室', 'workcase_room_view', '/workcase/room', 10000, '', 2, '1', 1),
(10011, '创建聊天室', 'workcase_room_add', NULL, 10010, '', 1, '2', 1),
(10012, '编辑聊天室', 'workcase_room_edit', NULL, 10010, '', 2, '2', 1),
(10013, '关闭聊天室', 'workcase_room_close', NULL, 10010, '', 3, '2', 1);
-- AI管理菜单Dify
INSERT INTO sys_menu (menu_id, name, permission, path, parent_id, icon, sort, type, tenant_id) VALUES
(11000, 'AI管理', NULL, '/dify', 0, 'el-icon-cpu', 2, '0', 1),
(11001, '智能体管理', 'dify_agent_view', '/dify/agent', 11000, '', 1, '1', 1),
(11002, '创建智能体', 'dify_agent_add', NULL, 11001, '', 1, '2', 1),
(11003, '编辑智能体', 'dify_agent_edit', NULL, 11001, '', 2, '2', 1),
(11004, '删除智能体', 'dify_agent_del', NULL, 11001, '', 3, '2', 1),
(11010, '知识库管理', 'dify_knowledge_view', '/dify/knowledge', 11000, '', 2, '1', 1),
(11011, '创建知识库', 'dify_knowledge_add', NULL, 11010, '', 1, '2', 1),
(11012, '编辑知识库', 'dify_knowledge_edit', NULL, 11010, '', 2, '2', 1),
(11013, '删除知识库', 'dify_knowledge_del', NULL, 11010, '', 3, '2', 1),
(11014, '上传文件', 'dify_knowledge_file_upload', NULL, 11010, '', 4, '2', 1),
(11020, 'AI对话', 'dify_chat_view', '/dify/chat', 11000, '', 3, '1', 1),
(11021, '创建对话', 'dify_chat_add', NULL, 11020, '', 1, '2', 1);
-- 招标管理菜单
INSERT INTO sys_menu (menu_id, name, permission, path, parent_id, icon, sort, type, tenant_id) VALUES
(12000, '招标管理', NULL, '/bidding', 0, 'el-icon-document', 3, '0', 1),
(12001, '招标项目', 'bidding_project_view', '/bidding/project', 12000, '', 1, '1', 1),
(12002, '创建项目', 'bidding_project_add', NULL, 12001, '', 1, '2', 1),
(12003, '编辑项目', 'bidding_project_edit', NULL, 12001, '', 2, '2', 1),
(12004, '删除项目', 'bidding_project_del', NULL, 12001, '', 3, '2', 1),
(12010, '投标管理', 'bidding_bid_view', '/bidding/bid', 12000, '', 2, '1', 1),
(12011, '创建投标', 'bidding_bid_add', NULL, 12010, '', 1, '2', 1),
(12012, '编辑投标', 'bidding_bid_edit', NULL, 12010, '', 2, '2', 1);
-- 平台管理菜单
INSERT INTO sys_menu (menu_id, name, permission, path, parent_id, icon, sort, type, tenant_id) VALUES
(13000, '平台管理', NULL, '/platform', 0, 'el-icon-setting', 4, '0', 1),
(13001, '系统配置', 'platform_config_view', '/platform/config', 13000, '', 1, '1', 1),
(13002, '编辑配置', 'platform_config_edit', NULL, 13001, '', 1, '2', 1),
(13010, '操作日志', 'platform_log_view', '/platform/log', 13000, '', 2, '1', 1),
(13020, '系统监控', 'platform_monitor_view', '/platform/monitor', 13000, '', 3, '1', 1),
(13030, '统计报表', 'platform_stat_view', '/platform/stat', 13000, '', 4, '1', 1);
-- 消息管理菜单
INSERT INTO sys_menu (menu_id, name, permission, path, parent_id, icon, sort, type, tenant_id) VALUES
(14000, '消息管理', NULL, '/message', 0, 'el-icon-message', 5, '0', 1),
(14001, '通知管理', 'message_notification_view', '/message/notification', 14000, '', 1, '1', 1),
(14002, '创建通知', 'message_notification_add', NULL, 14001, '', 1, '2', 1),
(14003, '发送通知', 'message_notification_send', NULL, 14001, '', 2, '2', 1),
(14010, '消息模板', 'message_template_view', '/message/template', 14000, '', 2, '1', 1),
(14011, '创建模板', 'message_template_add', NULL, 14010, '', 1, '2', 1),
(14012, '编辑模板', 'message_template_edit', NULL, 14010, '', 2, '2', 1),
(14013, '删除模板', 'message_template_del', NULL, 14010, '', 3, '2', 1);
```
## 角色权限分配示例
```sql
-- 为管理员角色分配所有业务权限
INSERT INTO sys_role_menu (role_id, menu_id)
SELECT 1, menu_id FROM sys_menu WHERE menu_id >= 10000 AND menu_id < 15000;
-- 为普通用户角色分配查看权限
INSERT INTO sys_role_menu (role_id, menu_id)
SELECT 2, menu_id FROM sys_menu
WHERE menu_id >= 10000 AND menu_id < 15000
AND (type = '0' OR type = '1' OR permission LIKE '%_view');
```
## 代码转换示例
### Java Controller 转换
```java
// 转换前 (urbanLifelineServ)
@RestController
@RequestMapping("/api/workcase")
public class WorkcaseController {
@PostMapping("/create")
@PreAuthorize("hasAuthority('workcase:ticket:create')")
public ResultDomain<TbWorkcaseDTO> createWorkcase(@RequestBody TbWorkcaseDTO workcase) {
return ResultDomain.success(workcaseService.save(workcase));
}
}
// 转换后 (pigx-app-server)
@RestController
@RequestMapping("/workcase")
public class WorkcaseController {
@PostMapping
@PreAuthorize("@pms.hasPermission('workcase_ticket_add')")
public R<TbWorkcaseDTO> createWorkcase(@RequestBody TbWorkcaseDTO workcase) {
return R.ok(workcaseService.save(workcase));
}
}
```
### 前端权限判断转换
```javascript
// 转换前 (urbanLifelineWeb)
if (hasPermission('workcase:ticket:create')) {
// 显示创建按钮
}
// 转换后 (pigx-ai-ui)
if (checkPermission(['workcase_ticket_add'])) {
// 显示创建按钮
}
```
## 注意事项
1. **权限格式严格**: 必须使用下划线 `_` 而不是冒号 `:`
2. **注解格式**: 必须包含 `@pms.` 前缀
3. **动作映射**: `create` 统一改为 `add``update` 改为 `edit``delete` 改为 `del`
4. **菜单类型**:
- type='0': 目录
- type='1': 菜单
- type='2': 按钮
5. **menu_id分配**:
- 10000-10999: 工单模块
- 11000-11999: AI模块(Dify)
- 12000-12999: 招标模块
- 13000-13999: 平台管理
- 14000-14999: 消息模块
## 批量转换脚本
可以使用以下正则表达式进行批量替换:
```regex
# 查找
@PreAuthorize\("hasAuthority\('([^:]+):([^:]+):([^']+)'\)"\)
# 替换为
@PreAuthorize("@pms.hasPermission('$1_$2_$3')")
# 特殊处理 create -> add
将 _create 替换为 _add
将 _update 替换为 _edit
将 _delete 替换为 _del
```

View File

@@ -1,756 +0,0 @@
# pigx-dify 模块架构设计
## 1. 模块概述
### 1.1 定位
pigx-dify 是 pigx 平台的 AI 服务模块,专门用于集成 Dify AI 平台,提供智能体管理、知识库管理和 AI 对话功能。
### 1.2 核心功能
- 智能体Agent管理
- 知识库Knowledge管理
- AI 对话Chat功能
- Dify API 集成
- 流式响应支持SSE
### 1.3 技术栈
- Spring Boot 3.5.8
- Spring Cloud 2025.0.0
- MyBatis-Plus 3.5.14
- MySQL 8.0
- Dify API Client
- SSE (Server-Sent Events)
## 2. 模块结构
### 2.1 Maven 项目结构
```xml
<!-- pigx-dify/pom.xml -->
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx</artifactId>
<version>6.4.0</version>
</parent>
<artifactId>pigx-dify</artifactId>
<packaging>pom</packaging>
<description>Dify AI integration module</description>
<modules>
<module>pigx-dify-api</module>
<module>pigx-dify-biz</module>
</modules>
</project>
```
### 2.2 pigx-dify-api 结构
```xml
<!-- pigx-dify-api/pom.xml -->
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0">
<parent>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-dify</artifactId>
<version>6.4.0</version>
</parent>
<artifactId>pigx-dify-api</artifactId>
<description>Dify API interfaces and entities</description>
<dependencies>
<dependency>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-common-core</artifactId>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-annotation</artifactId>
</dependency>
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-starter-webmvc-api</artifactId>
</dependency>
</dependencies>
</project>
```
### 2.3 pigx-dify-biz 结构
```xml
<!-- pigx-dify-biz/pom.xml -->
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0">
<parent>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-dify</artifactId>
<version>6.4.0</version>
</parent>
<artifactId>pigx-dify-biz</artifactId>
<description>Dify business implementation</description>
<dependencies>
<!-- pigx dependencies -->
<dependency>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-dify-api</artifactId>
</dependency>
<dependency>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-common-security</artifactId>
</dependency>
<dependency>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-common-log</artifactId>
</dependency>
<dependency>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-common-mybatis</artifactId>
</dependency>
<dependency>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-common-swagger</artifactId>
</dependency>
<!-- Spring Boot -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-undertow</artifactId>
</dependency>
<!-- Spring Cloud -->
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-nacos-config</artifactId>
</dependency>
<!-- HTTP Client for Dify API -->
<dependency>
<groupId>org.apache.httpcomponents.client5</groupId>
<artifactId>httpclient5</artifactId>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-webflux</artifactId>
</dependency>
<!-- SSE Support -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-webflux</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>
```
## 3. 包结构设计
### 3.1 pigx-dify-api 包结构
```
pigx-dify-api/
└── src/main/java/com/pig4cloud/pigx/dify/api/
├── entity/ # 实体类
│ ├── TbAgent.java # 智能体
│ ├── TbChat.java # 聊天会话
│ ├── TbChatMessage.java # 聊天消息
│ └── TbKnowledge.java # 知识库
├── dto/ # 数据传输对象
│ ├── AgentDTO.java
│ ├── ChatDTO.java
│ ├── ChatMessageDTO.java
│ └── KnowledgeDTO.java
├── vo/ # 视图对象
│ ├── AgentVO.java
│ ├── ChatVO.java
│ └── KnowledgeVO.java
├── feign/ # Feign接口
│ └── RemoteDifyService.java
└── constant/ # 常量定义
└── DifyConstant.java
```
### 3.2 pigx-dify-biz 包结构
```
pigx-dify-biz/
└── src/main/java/com/pig4cloud/pigx/dify/
├── DifyApplication.java # 启动类
├── config/ # 配置类
│ ├── DifyConfig.java # Dify配置
│ ├── WebConfig.java # Web配置
│ └── AsyncConfig.java # 异步配置
├── controller/ # 控制器
│ ├── AgentController.java # 智能体管理
│ ├── ChatController.java # 对话管理
│ └── KnowledgeController.java # 知识库管理
├── service/ # 服务层
│ ├── AgentService.java
│ ├── ChatService.java
│ ├── KnowledgeService.java
│ └── impl/
│ ├── AgentServiceImpl.java
│ ├── ChatServiceImpl.java
│ └── KnowledgeServiceImpl.java
├── mapper/ # 数据访问层
│ ├── AgentMapper.java
│ ├── ChatMapper.java
│ ├── ChatMessageMapper.java
│ └── KnowledgeMapper.java
├── client/ # 外部API客户端
│ ├── DifyApiClient.java # Dify API客户端
│ ├── dto/ # Dify API DTO
│ │ ├── DifyRequest.java
│ │ └── DifyResponse.java
│ └── callback/
│ └── StreamCallback.java # 流式回调
└── handler/ # 处理器
├── SseHandler.java # SSE处理
└── GlobalExceptionHandler.java # 全局异常处理
```
## 4. 核心代码设计
### 4.1 启动类
```java
package com.pig4cloud.pigx.dify;
import com.pig4cloud.pigx.common.feign.annotation.EnablePigxFeignClients;
import com.pig4cloud.pigx.common.security.annotation.EnablePigxResourceServer;
import com.pig4cloud.pigx.common.swagger.annotation.EnablePigxDoc;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
@EnablePigxDoc
@EnablePigxResourceServer
@EnablePigxFeignClients
@EnableDiscoveryClient
@SpringBootApplication
public class DifyApplication {
public static void main(String[] args) {
SpringApplication.run(DifyApplication.class, args);
}
}
```
### 4.2 配置类
```java
package com.pig4cloud.pigx.dify.config;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
@Data
@Configuration
@ConfigurationProperties(prefix = "dify")
public class DifyConfig {
/**
* Dify API基础URL
*/
private String apiBaseUrl = "https://api.dify.ai/v1";
/**
* 默认API Key可被智能体配置覆盖
*/
private String defaultApiKey;
/**
* 连接超时(毫秒)
*/
private Integer connectTimeout = 10000;
/**
* 读取超时(毫秒)
*/
private Integer readTimeout = 30000;
/**
* 流式响应超时(毫秒)
*/
private Integer streamTimeout = 60000;
/**
* 是否启用调试日志
*/
private Boolean debug = false;
}
```
### 4.3 实体设计
```java
package com.pig4cloud.pigx.dify.api.entity;
import com.baomidou.mybatisplus.annotation.*;
import com.baomidou.mybatisplus.extension.activerecord.Model;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.time.LocalDateTime;
@Data
@TableName("tb_agent")
@EqualsAndHashCode(callSuper = true)
public class TbAgent extends Model<TbAgent> {
@TableId(type = IdType.ASSIGN_UUID)
private String agentId;
private String name;
private String description;
private String difyApiKey;
private String difyAgentId;
private String config; // JSON配置
private String icon;
private Integer status; // 0:禁用 1:启用
@TableField(fill = FieldFill.INSERT)
private Long tenantId;
@TableField(fill = FieldFill.INSERT)
private String createBy;
@TableField(fill = FieldFill.INSERT)
private LocalDateTime createTime;
@TableField(fill = FieldFill.UPDATE)
private String updateBy;
@TableField(fill = FieldFill.UPDATE)
private LocalDateTime updateTime;
@TableLogic
private Integer delFlag;
}
```
### 4.4 Controller设计
```java
package com.pig4cloud.pigx.dify.controller;
import com.pig4cloud.pigx.common.core.util.R;
import com.pig4cloud.pigx.common.security.annotation.Inner;
import com.pig4cloud.pigx.common.security.util.SecurityUtils;
import com.pig4cloud.pigx.dify.api.dto.ChatDTO;
import com.pig4cloud.pigx.dify.service.ChatService;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.tags.Tag;
import lombok.RequiredArgsConstructor;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;
@RestController
@RequiredArgsConstructor
@RequestMapping("/chat")
@Tag(name = "对话管理")
public class ChatController {
private final ChatService chatService;
@Operation(summary = "创建对话")
@PostMapping
@PreAuthorize("@pms.hasPermission('dify_chat_add')")
public R<ChatDTO> createChat(@RequestBody ChatDTO chatDTO) {
chatDTO.setUserId(SecurityUtils.getUser().getId());
chatDTO.setTenantId(SecurityUtils.getUser().getTenantId());
return R.ok(chatService.createChat(chatDTO));
}
@Operation(summary = "流式对话")
@PostMapping("/stream/{chatId}")
@PreAuthorize("@pms.hasPermission('dify_chat_message')")
public SseEmitter streamChat(@PathVariable String chatId,
@RequestBody String message) {
return chatService.streamChat(chatId, message, SecurityUtils.getUser());
}
@Operation(summary = "获取对话历史")
@GetMapping("/{chatId}/messages")
@PreAuthorize("@pms.hasPermission('dify_chat_view')")
public R<?> getChatMessages(@PathVariable String chatId) {
return R.ok(chatService.getChatMessages(chatId));
}
}
```
## 5. 数据库设计
### 5.1 数据表DDL
```sql
-- 智能体表
CREATE TABLE `tb_agent` (
`agent_id` varchar(36) NOT NULL COMMENT '智能体ID',
`name` varchar(100) NOT NULL COMMENT '名称',
`description` varchar(500) DEFAULT NULL COMMENT '描述',
`dify_api_key` varchar(255) DEFAULT NULL COMMENT 'Dify API Key',
`dify_agent_id` varchar(100) DEFAULT NULL COMMENT 'Dify Agent ID',
`config` text COMMENT '配置信息(JSON)',
`icon` varchar(255) DEFAULT NULL COMMENT '图标',
`status` tinyint DEFAULT '1' COMMENT '状态 0:禁用 1:启用',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`create_by` varchar(64) DEFAULT NULL COMMENT '创建人',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_by` varchar(64) DEFAULT NULL COMMENT '更新人',
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`del_flag` char(1) DEFAULT '0' COMMENT '删除标记',
PRIMARY KEY (`agent_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='智能体表';
-- 聊天会话表
CREATE TABLE `tb_chat` (
`chat_id` varchar(36) NOT NULL COMMENT '会话ID',
`agent_id` varchar(36) NOT NULL COMMENT '智能体ID',
`user_id` bigint NOT NULL COMMENT '用户ID',
`title` varchar(200) DEFAULT NULL COMMENT '会话标题',
`conversation_id` varchar(100) DEFAULT NULL COMMENT 'Dify会话ID',
`status` tinyint DEFAULT '1' COMMENT '状态 0:关闭 1:活跃',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`del_flag` char(1) DEFAULT '0' COMMENT '删除标记',
PRIMARY KEY (`chat_id`),
KEY `idx_user_id` (`user_id`),
KEY `idx_agent_id` (`agent_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='聊天会话表';
-- 聊天消息表
CREATE TABLE `tb_chat_message` (
`message_id` varchar(36) NOT NULL COMMENT '消息ID',
`chat_id` varchar(36) NOT NULL COMMENT '会话ID',
`content` text NOT NULL COMMENT '消息内容',
`role` varchar(20) NOT NULL COMMENT '角色(user/ai/system)',
`dify_message_id` varchar(100) DEFAULT NULL COMMENT 'Dify消息ID',
`parent_message_id` varchar(36) DEFAULT NULL COMMENT '父消息ID',
`metadata` text COMMENT '元数据(JSON)',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
PRIMARY KEY (`message_id`),
KEY `idx_chat_id` (`chat_id`),
KEY `idx_create_time` (`create_time`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='聊天消息表';
-- 知识库表
CREATE TABLE `tb_knowledge` (
`knowledge_id` varchar(36) NOT NULL COMMENT '知识库ID',
`title` varchar(200) NOT NULL COMMENT '标题',
`description` text COMMENT '描述',
`dify_dataset_id` varchar(100) DEFAULT NULL COMMENT 'Dify数据集ID',
`status` tinyint DEFAULT '1' COMMENT '状态 0:禁用 1:启用',
`tenant_id` bigint NOT NULL DEFAULT '1' COMMENT '租户ID',
`create_by` varchar(64) DEFAULT NULL COMMENT '创建人',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_by` varchar(64) DEFAULT NULL COMMENT '更新人',
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
`del_flag` char(1) DEFAULT '0' COMMENT '删除标记',
PRIMARY KEY (`knowledge_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='知识库表';
```
## 6. 配置文件
### 6.1 bootstrap.yml
```yaml
server:
port: 9500
spring:
application:
name: @project.artifactId@
profiles:
active: @profiles.active@
cloud:
nacos:
discovery:
server-addr: ${NACOS_HOST:pigx-register}:${NACOS_PORT:8848}
config:
server-addr: ${spring.cloud.nacos.discovery.server-addr}
file-extension: yml
shared-configs:
- data-id: common.yml
refresh: true
- data-id: db.yml
refresh: true
```
### 6.2 application.yml
```yaml
# Dify配置
dify:
api-base-url: ${DIFY_API_BASE_URL:https://api.dify.ai/v1}
default-api-key: ${DIFY_DEFAULT_API_KEY:}
connect-timeout: 10000
read-timeout: 30000
stream-timeout: 60000
debug: false
# MyBatis-Plus配置
mybatis-plus:
mapper-locations: classpath:/mapper/*.xml
type-aliases-package: com.pig4cloud.pigx.dify.api.entity
configuration:
map-underscore-to-camel-case: true
# 安全配置
security:
oauth2:
resource:
ignore-urls:
- /actuator/**
- /v3/api-docs/**
```
## 7. 服务注册
### 7.1 路由配置
在 pigx-gateway 中添加路由:
```yaml
spring:
cloud:
gateway:
routes:
- id: pigx-dify
uri: lb://pigx-dify
predicates:
- Path=/dify/**
filters:
- StripPrefix=1
```
### 7.2 Feign配置
```java
package com.pig4cloud.pigx.dify.api.feign;
import com.pig4cloud.pigx.common.core.constant.ServiceNameConstants;
import com.pig4cloud.pigx.common.core.util.R;
import com.pig4cloud.pigx.dify.api.dto.ChatDTO;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
@FeignClient(contextId = "remoteDifyService",
value = ServiceNameConstants.DIFY_SERVICE)
public interface RemoteDifyService {
@GetMapping("/chat/{chatId}")
R<ChatDTO> getChatInfo(@PathVariable("chatId") String chatId);
}
```
## 8. 部署配置
### 8.1 Docker配置
```dockerfile
# Spring Boot 3.x 要求 JDK 17+,不能使用 Java 8 基础镜像
FROM pig4cloud/java:17-jre
RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
COPY target/pigx-dify-biz.jar /app.jar
ENTRYPOINT ["java", "-Djava.security.egd=file:/dev/./urandom", "-jar", "/app.jar"]
```
### 8.2 K8s部署
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: pigx-dify
namespace: pigx
spec:
replicas: 1
selector:
matchLabels:
app: pigx-dify
template:
metadata:
labels:
app: pigx-dify
spec:
containers:
- name: pigx-dify
image: pigx/pigx-dify:latest
ports:
- containerPort: 9500
env:
- name: NACOS_HOST
value: "pigx-register"
- name: DIFY_API_BASE_URL
value: "https://api.dify.ai/v1"
- name: DIFY_DEFAULT_API_KEY
valueFrom:
secretKeyRef:
name: dify-secret
key: api-key
```
## 9. 集成测试
### 9.1 单元测试
```java
@SpringBootTest
class ChatServiceTest {
@Autowired
private ChatService chatService;
@MockBean
private DifyApiClient difyApiClient;
@Test
void testCreateChat() {
// 测试创建对话
ChatDTO chatDTO = new ChatDTO();
chatDTO.setAgentId("test-agent");
chatDTO.setUserId(1L);
ChatDTO result = chatService.createChat(chatDTO);
assertNotNull(result.getChatId());
}
}
```
### 9.2 API测试
```http
### 创建对话
POST http://localhost:9999/dify/chat
Authorization: Bearer {{token}}
Content-Type: application/json
{
"agentId": "agent-001",
"title": "测试对话"
}
### 发送消息(流式)
POST http://localhost:9999/dify/chat/stream/{{chatId}}
Authorization: Bearer {{token}}
Content-Type: text/plain
你好,请介绍一下自己
```
## 10. 监控告警
### 10.1 健康检查
```java
@Component
public class DifyHealthIndicator implements HealthIndicator {
@Autowired
private DifyApiClient difyApiClient;
@Override
public Health health() {
try {
// 检查Dify API连通性
boolean isHealthy = difyApiClient.checkHealth();
if (isHealthy) {
return Health.up()
.withDetail("dify", "Available")
.build();
}
} catch (Exception e) {
return Health.down()
.withDetail("dify", "Unavailable")
.withException(e)
.build();
}
return Health.down().build();
}
}
```
### 10.2 日志配置
```xml
<!-- logback-spring.xml -->
<configuration>
<logger name="com.pig4cloud.pigx.dify" level="INFO"/>
<logger name="com.pig4cloud.pigx.dify.client" level="DEBUG"/>
</configuration>
```
## 11. 安全考虑
### 11.1 API Key管理
- API Key 加密存储
- 支持多租户隔离
- 定期轮换机制
### 11.2 数据隔离
- 租户级别数据隔离
- 用户权限验证
- 敏感信息脱敏
### 11.3 限流配置
```java
@Configuration
public class RateLimitConfig {
@Bean
public RedisRateLimiter redisRateLimiter() {
return new RedisRateLimiter(10, 20); // 10 requests per second
}
}
```
## 12. 迁移清单
- [ ] 创建 Maven 模块结构
- [ ] 迁移实体类和 Mapper
- [ ] 迁移 Service 层业务逻辑
- [ ] 迁移 Controller 层接口
- [ ] 适配权限注解
- [ ] 迁移 DifyApiClient
- [ ] 配置服务注册和发现
- [ ] 数据库表结构迁移
- [ ] 前端页面迁移
- [ ] 集成测试
- [ ] 部署配置

View File

@@ -1,151 +0,0 @@
# Requirements Document
## Introduction
本文档定义了将 urbanLifelineServ 和 urbanLifelineWeb 项目的**业务功能**迁移到 pigx-ai 和 pigx-ai-ui 平台的需求规范。
**核心原则**
- 只迁移业务功能代码招标、工单、平台管理、AI、消息等
- 人员、部门、权限、认证等基础设施**完全使用 pigx 原生实现**
- 数据库从 PostgreSQL 迁移到 MySQL
- 前端从微前端架构合并到 pigx-ai-ui 单体应用
## Glossary
- **Business_Module**: 需要迁移的业务模块bidding, workcase, platform, ai, message
- **pigx-app-server**: pigx 平台的业务服务模块,用于承载迁移的业务功能
- **pigx-dify**: 新建的 pigx 平台 AI 模块,用于承载原 urbanLifeline 的 AI 功能和 Dify 集成
- **pigx-knowledge**: pigx 平台原有的 AI 知识库模块(不使用)
- **PigxUser**: pigx 原生的用户实体,迁移后的业务代码需要使用此用户模型
- **R<T>**: pigx 统一响应格式
- **DifyApiClient**: 与 Dify 平台交互的客户端,保留在 pigx-dify 模块中
## Requirements
### Requirement 1: 招标模块迁移 (bidding)
**User Story:** As a 业务用户, I want to 在 pigx 平台上使用招标功能, so that 可以进行招标项目管理。
#### Acceptance Criteria
1. WHEN Business_Module 迁移 bidding 服务 THEN pigx-app-server SHALL 包含招标业务的 Controller、Service、Mapper 层代码
2. WHEN Business_Module 处理招标用户关联 THEN pigx-app-server SHALL 使用 PigxUser 替代原有的 User 实体
3. WHEN Business_Module 迁移招标数据表 THEN Database_Migrator SHALL 将 PostgreSQL 表结构转换为 MySQL 并添加 tenant_id 租户字段
4. WHEN Business_Module 迁移招标前端页面 THEN pigx-ai-ui SHALL 在 src/views/bidding 目录下包含所有招标页面组件
5. WHEN Business_Module 处理招标 API 调用 THEN pigx-ai-ui SHALL 使用 pigx 的 request 工具和统一响应格式
### Requirement 2: 工单模块迁移 (workcase)
**User Story:** As a 业务用户, I want to 在 pigx 平台上使用工单功能, so that 可以进行工单流转和处理。
#### Acceptance Criteria
1. WHEN Business_Module 迁移 workcase 服务 THEN pigx-app-server SHALL 包含工单业务的完整代码
2. WHEN Business_Module 处理工单分配 THEN pigx-app-server SHALL 通过 pigx-upms 的 RemoteUserService 获取用户信息
3. WHEN Business_Module 处理工单流程 THEN pigx-app-server SHALL 评估是否集成 pigx-flow 工作流引擎
4. WHEN Business_Module 迁移工单数据表 THEN Database_Migrator SHALL 将表结构转换为 MySQL 并关联 pigx 的 sys_user 表
5. WHEN Business_Module 迁移工单前端页面 THEN pigx-ai-ui SHALL 在 src/views/workcase 目录下包含所有工单页面
6. WHEN Business_Module 处理 Jitsi 视频会议集成 THEN pigx-app-server SHALL 保留视频会议功能并适配 pigx 认证
### Requirement 3: 平台管理模块迁移 (platform)
**User Story:** As a 管理员, I want to 在 pigx 平台上使用平台管理功能, so that 可以进行业务配置和管理。
#### Acceptance Criteria
1. WHEN Business_Module 迁移 platform 服务 THEN pigx-app-server SHALL 包含平台管理业务代码
2. WHEN Business_Module 处理平台配置 THEN pigx-app-server SHALL 使用 pigx 的配置管理机制
3. WHEN Business_Module 迁移平台数据表 THEN Database_Migrator SHALL 将表结构转换为 MySQL
4. WHEN Business_Module 迁移平台前端页面 THEN pigx-ai-ui SHALL 在 src/views/platform 目录下包含管理页面
### Requirement 4: AI 模块迁移 (创建新的 pigx-dify 模块)
**User Story:** As a 用户, I want to 在 pigx 平台上使用 AI 功能, so that 可以使用智能问答和知识库。
#### Acceptance Criteria
1. WHEN Business_Module 迁移 ai 服务 THEN 新建的 pigx-dify 模块 SHALL 承载所有 AI 业务逻辑
2. WHEN Business_Module 处理 AI 对话 THEN pigx-dify SHALL 保留原有的 Dify API 集成方式
3. WHEN Business_Module 处理 Dify 集成 THEN pigx-dify SHALL 包含 DifyApiClient 和相关配置管理
4. WHEN Business_Module 迁移 AI 数据表 THEN Database_Migrator SHALL 将 tb_agent、tb_chat、tb_chat_message、tb_knowledge 等表转换为 MySQL
5. WHEN Business_Module 迁移 AI 前端页面 THEN pigx-ai-ui SHALL 在 src/views/dify 目录下包含 AI 对话界面
6. WHEN Business_Module 处理聊天记录 THEN pigx-dify SHALL 保持原有的 tb_chat 和 tb_chat_message 表结构
7. WHEN Business_Module 处理知识库 THEN pigx-dify SHALL 保持与 Dify Dataset API 的集成
### Requirement 5: 消息模块迁移 (message)
**User Story:** As a 用户, I want to 在 pigx 平台上接收消息通知, so that 可以及时了解业务动态。
#### Acceptance Criteria
1. WHEN Business_Module 迁移 message 服务 THEN pigx-app-server SHALL 包含消息通知业务代码
2. WHEN Business_Module 处理消息推送 THEN pigx-app-server SHALL 使用 pigx-common-websocket 进行实时推送
3. WHEN Business_Module 处理微信通知 THEN pigx-app-server SHALL 保留微信消息推送功能
4. WHEN Business_Module 迁移消息数据表 THEN Database_Migrator SHALL 将表结构转换为 MySQL
### Requirement 6: 文件服务适配
**User Story:** As a 用户, I want to 上传和下载文件, so that 可以管理业务相关的文件资源。
#### Acceptance Criteria
1. WHEN Business_Module 处理文件上传 THEN pigx-app-server SHALL 使用 pigx-common-oss 进行文件存储
2. WHEN Business_Module 处理文件访问 THEN pigx-app-server SHALL 适配 pigx 的文件访问 URL 格式
3. IF Business_Module 有自定义文件处理逻辑 THEN pigx-app-server SHALL 在 OSS 基础上扩展实现
### Requirement 7: 定时任务适配
**User Story:** As a 系统管理员, I want to 管理定时任务, so that 可以执行周期性业务处理。
#### Acceptance Criteria
1. WHEN Business_Module 迁移 crontab 任务 THEN pigx-visual/xxl-job SHALL 包含迁移后的定时任务
2. WHEN Business_Module 处理任务调度 THEN XXL-Job SHALL 替代原有的调度机制
3. WHEN Business_Module 处理任务执行 THEN pigx-app-server SHALL 提供任务执行的 HTTP 接口
### Requirement 8: 数据库迁移
**User Story:** As a DBA, I want to 将业务数据迁移到 MySQL, so that 数据可以在 pigx 平台运行。
#### Acceptance Criteria
1. WHEN Database_Migrator 处理业务表 THEN Migration_System SHALL 生成 MySQL DDL 脚本
2. WHEN Database_Migrator 处理数据类型 THEN Migration_System SHALL 正确映射 PostgreSQL 类型到 MySQL
3. WHEN Database_Migrator 处理用户关联 THEN Migration_System SHALL 将原 user_id 映射到 pigx 的 sys_user.user_id
4. WHEN Database_Migrator 处理租户支持 THEN Migration_System SHALL 为业务表添加 tenant_id 字段
5. WHEN Database_Migrator 执行数据迁移 THEN Migration_System SHALL 保证业务数据完整性
### Requirement 9: 前端共享组件迁移
**User Story:** As a 前端开发者, I want to 迁移共享组件到 pigx-ai-ui, so that 业务页面可以复用这些组件。
#### Acceptance Criteria
1. WHEN Frontend_Migrator 处理 shared 组件 THEN pigx-ai-ui SHALL 在 src/components/urban 目录下包含迁移的组件
2. WHEN Frontend_Migrator 处理组件依赖 THEN pigx-ai-ui SHALL 更新导入路径使用 pigx 的工具函数
3. WHEN Frontend_Migrator 处理样式 THEN pigx-ai-ui SHALL 合并样式并避免与 pigx 样式冲突
4. WHEN Frontend_Migrator 处理 API 调用 THEN 组件 SHALL 使用 pigx 的 request 工具
### Requirement 10: 路由和菜单配置
**User Story:** As a 管理员, I want to 在 pigx 菜单中看到迁移的功能, so that 可以访问业务功能。
#### Acceptance Criteria
1. WHEN Frontend_Migrator 处理路由 THEN pigx-ai-ui SHALL 在路由配置中添加业务模块路由
2. WHEN Admin 配置菜单 THEN pigx-upms SHALL 在 sys_menu 表中添加业务功能菜单
3. WHEN Admin 配置权限 THEN pigx-upms SHALL 为业务功能配置相应的权限标识
### Requirement 11: 权限模型适配
**User Story:** As a 系统管理员, I want to 在 pigx 权限体系中配置业务功能权限, so that 可以控制用户对迁移功能的访问。
#### Acceptance Criteria
1. WHEN Business_Module 迁移权限注解 THEN pigx-app-server SHALL 将 @PreAuthorize("hasAuthority('...')") 替换为 @PreAuthorize("@pms.hasPermission('...')")
2. WHEN Business_Module 处理权限标识 THEN pigx-app-server SHALL 使用 pigx 权限命名规范 (module_resource_action 格式)
3. WHEN Admin 配置业务权限 THEN pigx-upms SHALL 在 sys_menu 表中添加对应的权限菜单项
4. WHEN Business_Module 获取用户信息 THEN pigx-app-server SHALL 使用 SecurityUtils.getUser() 获取 PigxUser
5. WHEN Business_Module 调用用户服务 THEN pigx-app-server SHALL 通过 RemoteUserService 进行 Feign 调用
6. IF Business_Module 需要数据权限控制 THEN pigx-app-server SHALL 利用 pigx 的租户和部门隔离机制

View File

@@ -1,663 +0,0 @@
# SecurityUtils 和 RemoteUserService 配置指南
## 1. 概述
本指南详细说明如何在 pigx 平台中配置和使用 SecurityUtils 和 RemoteUserService实现用户信息获取和远程用户服务调用。
## 2. SecurityUtils 使用指南
### 2.1 SecurityUtils 介绍
SecurityUtils 是 pigx 平台提供的安全工具类,用于获取当前登录用户信息、权限判断等安全相关操作。
### 2.2 Maven 依赖
```xml
<dependency>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-common-security</artifactId>
</dependency>
```
### 2.3 基本使用
#### 2.3.1 获取当前用户信息
```java
import com.pig4cloud.pigx.common.security.util.SecurityUtils;
import com.pig4cloud.pigx.common.security.service.PigxUser;
@Service
public class WorkcaseServiceImpl implements WorkcaseService {
public void example() {
// 获取完整用户对象
PigxUser pigxUser = SecurityUtils.getUser();
// 获取用户ID
Long userId = pigxUser.getId();
// 获取用户名
String username = pigxUser.getUsername();
// 获取租户ID重要多租户隔离
Long tenantId = pigxUser.getTenantId();
// 获取部门ID
Long deptId = pigxUser.getDeptId();
// 获取用户角色列表
List<Long> roles = pigxUser.getRoles();
// 获取用户权限列表
Collection<String> authorities = pigxUser.getAuthorities();
}
}
```
#### 2.3.2 在实体中自动填充用户信息
```java
import com.baomidou.mybatisplus.annotation.FieldFill;
import com.baomidou.mybatisplus.annotation.TableField;
@Data
@TableName("tb_workcase")
public class TbWorkcase {
private String workcaseId;
// 自动填充创建人
@TableField(fill = FieldFill.INSERT)
private String createBy;
// 自动填充更新人
@TableField(fill = FieldFill.UPDATE)
private String updateBy;
// 自动填充租户ID
@TableField(fill = FieldFill.INSERT)
private Long tenantId;
}
```
配置自动填充处理器:
```java
import com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;
import com.pig4cloud.pigx.common.security.util.SecurityUtils;
import org.apache.ibatis.reflection.MetaObject;
import org.springframework.stereotype.Component;
import java.time.LocalDateTime;
@Component
public class MybatisPlusMetaObjectHandler implements MetaObjectHandler {
@Override
public void insertFill(MetaObject metaObject) {
PigxUser user = SecurityUtils.getUser();
this.strictInsertFill(metaObject, "createTime", LocalDateTime.class, LocalDateTime.now());
this.strictInsertFill(metaObject, "createBy", String.class, user.getUsername());
this.strictInsertFill(metaObject, "tenantId", Long.class, user.getTenantId());
this.strictInsertFill(metaObject, "delFlag", String.class, "0");
}
@Override
public void updateFill(MetaObject metaObject) {
PigxUser user = SecurityUtils.getUser();
this.strictUpdateFill(metaObject, "updateTime", LocalDateTime.class, LocalDateTime.now());
this.strictUpdateFill(metaObject, "updateBy", String.class, user.getUsername());
}
}
```
### 2.4 权限判断
```java
@RestController
@RequestMapping("/workcase")
public class WorkcaseController {
// 方法级权限判断
@PreAuthorize("@pms.hasPermission('workcase_ticket_add')")
@PostMapping
public R<TbWorkcase> create(@RequestBody TbWorkcase workcase) {
return R.ok(workcaseService.save(workcase));
}
// 代码中权限判断
@GetMapping("/admin-only")
public R<?> adminFunction() {
PigxUser user = SecurityUtils.getUser();
// 检查是否有特定权限
if (!user.getAuthorities().contains("workcase_ticket_admin")) {
return R.failed("没有管理员权限");
}
// 执行管理员功能
return R.ok();
}
}
```
### 2.5 异步任务中使用
```java
@Service
public class AsyncService {
// 错误示例:异步线程中可能获取不到用户信息
@Async
public void wrongAsyncMethod() {
PigxUser user = SecurityUtils.getUser(); // 可能为null
}
// 正确示例:传递用户信息
@Async
public void correctAsyncMethod(PigxUser user) {
// 使用传入的用户信息
Long userId = user.getId();
Long tenantId = user.getTenantId();
// 执行异步逻辑
}
// 调用异步方法
public void callAsync() {
PigxUser user = SecurityUtils.getUser();
correctAsyncMethod(user);
}
}
```
## 3. RemoteUserService 配置和使用
### 3.1 RemoteUserService 介绍
RemoteUserService 是通过 Feign 调用 pigx-upms 服务获取用户信息的远程服务接口。
### 3.2 Maven 依赖
```xml
<dependency>
<groupId>com.pig4cloud</groupId>
<artifactId>pigx-upms-api</artifactId>
</dependency>
```
### 3.3 启用 Feign 客户端
在启动类或配置类上添加注解:
```java
import com.pig4cloud.pigx.common.feign.annotation.EnablePigxFeignClients;
@EnablePigxFeignClients
@SpringBootApplication
public class WorkcaseApplication {
public static void main(String[] args) {
SpringApplication.run(WorkcaseApplication.class, args);
}
}
```
### 3.4 基本使用
```java
import com.pig4cloud.pigx.admin.api.feign.RemoteUserService;
import com.pig4cloud.pigx.admin.api.entity.SysUser;
import com.pig4cloud.pigx.common.core.util.R;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@Service
public class WorkcaseServiceImpl implements WorkcaseService {
@Autowired
private RemoteUserService remoteUserService;
/**
* 根据用户ID获取用户信息
*/
public SysUser getUserById(Long userId) {
R<SysUser> result = remoteUserService.selectById(userId, SecurityConstants.FROM_IN);
if (result.isSuccess() && result.getData() != null) {
return result.getData();
}
throw new BusinessException("用户不存在");
}
/**
* 根据用户名获取用户信息
*/
public SysUser getUserByUsername(String username) {
R<UserInfo> result = remoteUserService.info(username, SecurityConstants.FROM_IN);
if (result.isSuccess() && result.getData() != null) {
return result.getData().getSysUser();
}
throw new BusinessException("用户不存在");
}
/**
* 批量获取用户信息
*/
public List<SysUser> getUsersByIds(List<Long> userIds) {
List<SysUser> users = new ArrayList<>();
for (Long userId : userIds) {
R<SysUser> result = remoteUserService.selectById(userId, SecurityConstants.FROM_IN);
if (result.isSuccess() && result.getData() != null) {
users.add(result.getData());
}
}
return users;
}
}
```
### 3.5 错误处理
```java
@Service
public class WorkcaseServiceImpl {
@Autowired
private RemoteUserService remoteUserService;
public void assignWorkcase(String workcaseId, Long assigneeId) {
try {
// 调用远程服务
R<SysUser> result = remoteUserService.selectById(assigneeId, SecurityConstants.FROM_IN);
// 检查调用是否成功
if (!result.isSuccess()) {
log.error("获取用户信息失败: {}", result.getMsg());
throw new BusinessException("获取用户信息失败");
}
// 检查数据是否存在
SysUser assignee = result.getData();
if (assignee == null) {
throw new BusinessException("用户不存在");
}
// 执行分配逻辑
doAssign(workcaseId, assignee);
} catch (FeignException e) {
// 处理Feign调用异常
log.error("远程服务调用失败", e);
throw new BusinessException("系统繁忙,请稍后重试");
}
}
}
```
### 3.6 配置熔断降级
```java
import com.pig4cloud.pigx.admin.api.feign.RemoteUserService;
import com.pig4cloud.pigx.admin.api.feign.factory.RemoteUserServiceFallbackFactory;
import org.springframework.stereotype.Component;
@Component
public class RemoteUserServiceFallbackImpl implements RemoteUserServiceFallbackFactory {
@Override
public RemoteUserService create(Throwable throwable) {
return new RemoteUserService() {
@Override
public R<SysUser> selectById(Long id, String from) {
log.error("调用用户服务失败", throwable);
return R.failed("用户服务暂时不可用");
}
@Override
public R<UserInfo> info(String username, String from) {
log.error("调用用户服务失败", throwable);
return R.failed("用户服务暂时不可用");
}
};
}
}
```
配置文件中启用熔断:
```yaml
feign:
sentinel:
enabled: true
client:
config:
default:
connectTimeout: 5000
readTimeout: 5000
```
## 4. 部门服务调用
```java
import com.pig4cloud.pigx.admin.api.feign.RemoteDeptService;
import com.pig4cloud.pigx.admin.api.entity.SysDept;
@Service
public class DeptRelatedService {
@Autowired
private RemoteDeptService remoteDeptService;
/**
* 获取部门信息
*/
public SysDept getDeptById(Long deptId) {
R<SysDept> result = remoteDeptService.selectById(deptId, SecurityConstants.FROM_IN);
if (result.isSuccess() && result.getData() != null) {
return result.getData();
}
return null;
}
/**
* 获取部门树
*/
public List<SysDept> getDeptTree() {
R<List<SysDept>> result = remoteDeptService.tree(SecurityConstants.FROM_IN);
if (result.isSuccess() && result.getData() != null) {
return result.getData();
}
return new ArrayList<>();
}
}
```
## 5. 最佳实践
### 5.1 缓存用户信息
```java
import org.springframework.cache.annotation.Cacheable;
@Service
public class UserCacheService {
@Autowired
private RemoteUserService remoteUserService;
@Cacheable(value = "user", key = "#userId")
public SysUser getUserById(Long userId) {
R<SysUser> result = remoteUserService.selectById(userId, SecurityConstants.FROM_IN);
if (result.isSuccess() && result.getData() != null) {
return result.getData();
}
return null;
}
}
```
### 5.2 批量查询优化
```java
@Service
public class BatchUserService {
@Autowired
private RemoteUserService remoteUserService;
/**
* 批量获取用户信息(优化版)
*/
public Map<Long, SysUser> getUserMap(List<Long> userIds) {
if (CollectionUtils.isEmpty(userIds)) {
return new HashMap<>();
}
// 使用并行流提高效率
return userIds.parallelStream()
.map(userId -> remoteUserService.selectById(userId, SecurityConstants.FROM_IN))
.filter(result -> result.isSuccess() && result.getData() != null)
.map(R::getData)
.collect(Collectors.toMap(SysUser::getUserId, Function.identity()));
}
}
```
### 5.3 租户隔离实现
```java
@Service
public class TenantIsolationService {
/**
* 查询时自动添加租户条件
*/
public List<TbWorkcase> listByTenant() {
PigxUser user = SecurityUtils.getUser();
QueryWrapper<TbWorkcase> wrapper = new QueryWrapper<>();
wrapper.eq("tenant_id", user.getTenantId());
return workcaseMapper.selectList(wrapper);
}
/**
* 保存时自动设置租户ID
*/
public void saveWithTenant(TbWorkcase workcase) {
PigxUser user = SecurityUtils.getUser();
workcase.setTenantId(user.getTenantId());
workcaseMapper.insert(workcase);
}
}
```
## 6. 常见问题
### Q1: SecurityUtils.getUser() 返回 null
**原因**
1. 未登录或 token 过期
2. 在异步线程中调用
3. 在定时任务中调用
**解决方案**
1. 检查 token 有效性
2. 在异步方法调用前获取用户信息并传递
3. 定时任务使用系统用户或指定用户
### Q2: RemoteUserService 调用超时
**原因**
1. 网络问题
2. pigx-upms 服务未启动
3. 配置的超时时间太短
**解决方案**
```yaml
feign:
client:
config:
default:
connectTimeout: 10000 # 连接超时10秒
readTimeout: 10000 # 读取超时10秒
```
### Q3: 多租户数据混乱
**原因**
1. 未正确设置 tenant_id
2. 查询时未添加租户条件
**解决方案**
1. 使用 MyBatis-Plus 的自动填充
2. 配置全局租户拦截器
```java
@Configuration
public class MybatisPlusConfig {
@Bean
public MybatisPlusInterceptor mybatisPlusInterceptor() {
MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
// 添加租户拦截器
TenantLineInnerInterceptor tenantInterceptor = new TenantLineInnerInterceptor();
tenantInterceptor.setTenantLineHandler(new TenantLineHandler() {
@Override
public Expression getTenantId() {
PigxUser user = SecurityUtils.getUser();
return new LongValue(user.getTenantId());
}
@Override
public String getTenantIdColumn() {
return "tenant_id";
}
@Override
public boolean ignoreTable(String tableName) {
// 忽略不需要租户隔离的表
return "sys_user".equals(tableName);
}
});
interceptor.addInnerInterceptor(tenantInterceptor);
return interceptor;
}
}
```
### Q4: 如何在没有用户上下文的情况下调用服务
```java
@Service
public class SystemService {
/**
* 使用内部调用标识
*/
public void systemCall() {
// 使用 FROM_IN 标识内部调用
R<SysUser> result = remoteUserService.selectById(1L, SecurityConstants.FROM_IN);
}
/**
* 模拟系统用户
*/
public void executeAsSystem() {
// 创建系统用户上下文
PigxUser systemUser = new PigxUser();
systemUser.setId(0L);
systemUser.setUsername("system");
systemUser.setTenantId(1L);
// 执行逻辑
doSystemWork(systemUser);
}
}
```
## 7. 迁移检查清单
- [ ] 所有 JwtUtils 替换为 SecurityUtils
- [ ] 所有 UserService 替换为 RemoteUserService
- [ ] 所有实体添加 tenant_id 字段
- [ ] 配置 MyBatis-Plus 自动填充
- [ ] 配置 Feign 客户端
- [ ] 添加错误处理和熔断降级
- [ ] 测试用户信息获取
- [ ] 测试远程服务调用
- [ ] 测试租户数据隔离
## 8. 参考代码示例
完整的 Service 实现示例:
```java
package com.pig4cloud.pigx.app.service.impl;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.pig4cloud.pigx.admin.api.entity.SysUser;
import com.pig4cloud.pigx.admin.api.feign.RemoteUserService;
import com.pig4cloud.pigx.app.entity.TbWorkcase;
import com.pig4cloud.pigx.app.mapper.WorkcaseMapper;
import com.pig4cloud.pigx.app.service.WorkcaseService;
import com.pig4cloud.pigx.common.core.constant.SecurityConstants;
import com.pig4cloud.pigx.common.core.util.R;
import com.pig4cloud.pigx.common.security.service.PigxUser;
import com.pig4cloud.pigx.common.security.util.SecurityUtils;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
@Slf4j
@Service
@RequiredArgsConstructor
public class WorkcaseServiceImpl extends ServiceImpl<WorkcaseMapper, TbWorkcase>
implements WorkcaseService {
private final RemoteUserService remoteUserService;
@Override
@Transactional(rollbackFor = Exception.class)
public Boolean createWorkcase(TbWorkcase workcase) {
// 获取当前用户
PigxUser currentUser = SecurityUtils.getUser();
// 设置创建人信息
workcase.setCreateBy(currentUser.getUsername());
workcase.setTenantId(currentUser.getTenantId());
workcase.setDeptId(currentUser.getDeptId());
// 保存工单
return this.save(workcase);
}
@Override
public Boolean assignWorkcase(String workcaseId, Long assigneeId) {
// 获取被分配人信息
R<SysUser> result = remoteUserService.selectById(assigneeId, SecurityConstants.FROM_IN);
if (!result.isSuccess() || result.getData() == null) {
throw new RuntimeException("用户不存在");
}
SysUser assignee = result.getData();
// 更新工单
TbWorkcase workcase = this.getById(workcaseId);
workcase.setAssigneeId(assigneeId);
workcase.setAssigneeName(assignee.getUsername());
return this.updateById(workcase);
}
@Override
public List<TbWorkcase> listMyWorkcase() {
PigxUser user = SecurityUtils.getUser();
QueryWrapper<TbWorkcase> wrapper = new QueryWrapper<>();
wrapper.eq("tenant_id", user.getTenantId())
.eq("create_by", user.getUsername())
.orderByDesc("create_time");
return this.list(wrapper);
}
}
```

View File

@@ -1,387 +0,0 @@
# Implementation Plan: urbanLifeline 业务功能迁移到 pigx-ai
## Overview
将 urbanLifelineServ 和 urbanLifelineWeb 的业务功能迁移到 pigx-ai 平台,完全使用 pigx 原生的用户权限体系,只迁移业务代码和数据。
## Tasks
- [x] 1. 数据库迁移准备
- 分析源项目 PostgreSQL 表结构
- 生成 MySQL DDL 转换脚本
- 为所有业务表添加 tenant_id 字段
- _Requirements: 8.1, 8.2, 8.4_
- [x] 1.1 编写数据库转换脚本
- 已完成 database-migration-script.md 文档
- 包含工单、AI、招标、消息模块的完整 MySQL DDL
- **Validates: Requirements 8.1, 8.2, 8.3, 8.4**
- [x] 2. 权限和菜单规划
- 设计业务功能的权限标识规范
- 规划前端路由路径
- 定义菜单层级结构
- _Requirements: 10.2, 11.3_
- [x] 2.1 创建权限标识映射表
- 已完成 permission-mapping.md 文档
- 包含完整的权限映射和菜单配置 SQL
- _Requirements: 11.1, 11.2_
- [x] 2.2 编写权限注解转换指南
- 已完成 permission-annotation-guide.md 文档
  - 包含 @PreAuthorize → @pms.hasPermission 转换规则
- _Requirements: 11.1, 11.2_
- [x] 2.3 配置 SecurityUtils 和 RemoteUserService
- 已完成 security-config-guide.md 文档
- 包含完整的使用示例和最佳实践
- _Requirements: 11.4, 11.5_
- [x] 2.4 配置业务功能菜单
- 在 pigx sys_menu 表中执行菜单配置 SQLID从10000开始
- 配置对应的权限标识和路由路径
- _Requirements: 10.2, 11.3_
- [x] 2.5 分配角色权限
- 已在 permission-mapping.md 中提供完整的角色权限分配 SQL
- 管理员角色分配所有业务权限menu_id 10000-15000
- 普通用户角色分配查看权限
- 执行时需在 MySQL 中运行 sys_role_menu 插入语句
- _Requirements: 10.3, 11.3_
- [x] 3. 后端基础架构搭建
- 创建 pigx-workcase、pigx-bidding、pigx-dify 三个独立模块
- 每个模块包含 api 和 biz 子模块
- 配置 Maven 依赖和模块引用
- _Requirements: 1.1, 2.1, 3.1, 5.1_
- [x] 4. 工单模块迁移 (pigx-workcase)
- [x] 4.1 迁移工单实体和 Mapper
- 已创建 TbWorkcase、TbChatRoom、TbChatRoomMessage、TbChatRoomMember、TbWorkcaseProcess、TbVideoMeeting 实体
- 使用 pigx 标准格式:@TenantTable、Model<T>、createBy/updateBy/delFlag
- 已创建对应的 Mapper 接口
- _Requirements: 2.1, 2.4_
- [x] 4.2 迁移工单 Service 层
- 已创建 TbWorkcaseService、TbChatRoomService、TbChatRoomMessageService、TbChatRoomMemberService、TbWorkcaseProcessService、TbVideoMeetingService、TbCustomerServiceService
- TbChatRoomService 包含完整业务逻辑:创建聊天室、关闭聊天室、成员管理、消息管理、客服分配、服务评分
- TbVideoMeetingService 包含完整业务逻辑:创建会议、加入会议、开始/结束会议、权限验证
- TbCustomerServiceService 包含客服管理:获取可用客服、更新状态、工作量管理
- 已创建对应的 ServiceImpl 实现类
- _Requirements: 2.1, 2.2_
- [x] 4.3 迁移工单 Controller 层
- 已创建 TbWorkcaseController、TbChatRoomController、TbVideoMeetingController、TbCustomerServiceController
- TbChatRoomController 包含完整的聊天室功能:成员管理、消息管理、客服分配、服务评分
- TbVideoMeetingController 包含完整的会议功能:创建、加入、开始、结束会议
- TbCustomerServiceController 包含客服人员管理功能
- 使用 @HasPermission 权限注解
- 响应格式使用 R<T>
- _Requirements: 2.1, 11.1, 11.4_
- [x] 4.4 迁移视频会议功能
- 已创建 TbVideoMeetingService 和 TbVideoMeetingServiceImpl
- 包含创建会议、加入会议、开始会议、结束会议、获取活跃会议等功能
- 会议访问权限验证(基于聊天室成员)
  - 注Jitsi JWT Token 生成需要后续配置 Jitsi 服务器参数
- _Requirements: 2.6_
- [x] 4.5 迁移聊天室 WebSocket 功能
- 已创建 ChatRoomWebSocketMessage、ChatRoomNotificationMessage 消息类
- 已创建 ChatRoomMessageHandler、ChatRoomNotificationHandler 消息处理器
- 已创建 ChatRoomWebSocketService 用于主动推送消息
- 使用 pigx-common-websocket 进行实时推送
- 配置 WebSocket 路径为 /ws/chat
- _Requirements: 2.1, 5.2_
- [x] 4.6 迁移 Jitsi JWT Token 生成功能
- 已创建 JitsiProperties 配置类
- 已创建 JitsiTokenService 接口和 JitsiTokenServiceImpl 实现
- 已创建 JitsiTokenController 提供 Token 生成、验证、URL 构建 API
- 支持一键生成会议信息(房间名+Token+URL
- 使用 jjwt 0.12.x 版本 API
- _Requirements: 2.6_
- [x] 4.7 迁移词云管理功能
- 已创建 TbWordCloud 实体类
- 已创建 TbWordCloudMapper 接口和 XML 映射文件
- 已创建 TbWordCloudService 接口和 TbWordCloudServiceImpl 实现
- 已创建 TbWordCloudController 提供词云 CRUD 和词频增加 API
- 支持词频自动累加(同一天、同一分类的相同词条)
- _Requirements: 2.1_
- [x] 5. AI 模块迁移pigx-dify 模块)
- [x] 5.1 创建 pigx-dify 模块结构
- 已创建 pigx-dify-api 和 pigx-dify-biz 子模块
- 已配置 Maven 依赖和模块引用
- 已创建启动类和配置文件
- _Requirements: 4.1_
- [x] 5.2 迁移 AI 实体和数据层
- 已创建 TbAgent、TbChat、TbChatMessage、TbKnowledge、TbKnowledgeFile、TbKnowledgeFileLog、PromptCard 实体
- 使用 pigx 标准格式
- 已创建对应的 Mapper 接口
- _Requirements: 4.4, 4.6_
- [x] 5.3 迁移 Dify API 客户端
- 已创建 DifyApiClient 完整功能(知识库管理、文档管理、对话、工作流、模型管理)
- 已创建 DifyProperties 配置类
- 已创建 DifyException 异常类
- 已创建 StreamCallback 回调接口
- 已创建所有 DTO 类ChatRequest/Response、Dataset*、Document*、Retrieval*、Workflow*、Conversation*、MessageHistory*、EmbeddingModel*、RerankModel*、DifyFileInfo
- 支持流式响应和阻塞调用两种模式
- 已配置 OkHttp 依赖
- 已更新 application.yml 添加 Dify 配置
- _Requirements: 4.2, 4.3_
- [x] 5.4 迁移 AI 业务逻辑
- 已创建 TbAgentService、TbChatService、TbKnowledgeService 等服务接口和实现类
- _Requirements: 4.1, 4.2_
- [x] 5.5 迁移 AI Controller 层
- 已创建 TbAgentController、TbChatController、TbKnowledgeController
- 使用 @HasPermission 权限注解
- _Requirements: 4.1, 11.1_
- [x] 5.6 配置 Dify 集成
- 已配置 DifyProperties 包含完整配置API地址、密钥、超时、上传、知识库
- 已在 application.yml 中添加 Dify 配置项
- 配置支持环境变量覆盖
- _Requirements: 4.3, 4.7_
- [x] 6. 招标模块迁移 (pigx-bidding)
- [x] 6.1 迁移招标实体和数据层
- 已创建 TbBiddingProject、TbBiddingDocument、TbBiddingRequirement、TbBidResponse、TbProcessNode 实体
- 已创建对应的 Mapper 接口
- _Requirements: 1.1, 1.3_
- [x] 6.2 迁移招标业务逻辑
- 已创建 TbBiddingProjectService、TbBiddingDocumentService 等服务接口和实现类
- _Requirements: 1.1, 1.2_
- [x] 6.3 迁移招标 API 接口
- 已创建 TbBiddingProjectController、TbBiddingDocumentController
- 使用 @HasPermission 权限注解
- _Requirements: 1.1, 1.5_
- [x] 7. 平台管理模块迁移 (platform)
- [x] 7.1 迁移平台管理功能
- 源项目 platform 模块只有启动类,无实际业务代码
- 平台配置功能可使用 pigx 的 sys_config 表和配置管理功能
- _Requirements: 3.1, 3.2_
- [x] 7.2 迁移平台数据表
- 无需迁移,使用 pigx 现有的配置管理表
- _Requirements: 3.3_
- [x] 8. 消息模块迁移 (message)
- [x] 8.1 迁移消息实体和数据层
- 源项目消息模块主要是邮件/短信发送功能,大部分方法为 TODO 状态
- pigx 已有完善的消息通知功能pigx-common-sms、pigx-common-mail
- 直接使用 pigx 现有的消息功能即可
- _Requirements: 5.1, 5.4_
- [x] 8.2 迁移消息通知功能
- 使用 pigx-common-websocket 进行实时推送(已在 pigx-workcase 中实现)
- 使用 pigx-common-sms 进行短信发送
- 使用 pigx-common-mail 进行邮件发送
- _Requirements: 5.1, 5.2_
- [x] 8.3 保留微信通知功能
- pigx 已有微信公众号/小程序消息推送功能
- 可通过 pigx-mp 模块实现微信消息推送
- _Requirements: 5.3_
- [x] 9. 文件服务适配
- [x] 9.1 替换文件上传逻辑
- 已在 pigx-workcase-biz、pigx-dify-biz、pigx-bidding-biz 中添加 pigx-common-oss 依赖
- 使用 pigx 的 OssTemplate 进行文件上传/下载
- 文件访问 URL 通过 pigx 网关统一管理
- _Requirements: 6.1, 6.2_
- [x] 10. 定时任务迁移
- [x] 10.1 迁移定时任务到 XXL-Job
- 源项目无定时任务需要迁移
- 如需添加定时任务,可使用 pigx-visual/xxl-job 进行配置
- _Requirements: 7.1, 7.2, 7.3_
- [ ] 11. 前端页面迁移
- [x] 11.1 迁移工单前端页面
- ✅ 已创建 API 层 (workcase.ts, chat.ts)
- ✅ 已创建类型定义 (workcase.ts, chatRoom.ts, customer.ts, conversation.ts, wordCloud.ts)
- ✅ 已创建工单列表页面 (views/workcase/index.vue)
- ✅ 已创建工单指派组件 (components/workcase/WorkcaseAssign.vue)
- ✅ 已创建工单详情组件 (views/workcase/detail/WorkcaseDetail.vue)
- ✅ 已创建聊天室消息组件 (views/workcase/chatRoom/ChatMessage.vue)
- 适配 pigx 的 request 工具和响应格式
- _Requirements: 2.5_
- [ ] 11.2 迁移 AI 前端页面
- 将 AI 相关页面迁移到 pigx-ai-ui/src/views/dify 目录
- 包括智能体管理、对话界面、知识库管理
- 适配 pigx 的 request 工具和响应格式
- _Requirements: 4.5_
- [ ] 11.3 迁移招标前端页面
- 将 bidding 页面迁移到 pigx-ai-ui/src/views/urban/bidding
- 适配 pigx 的 request 工具
- _Requirements: 1.4_
- [ ] 11.4 迁移平台管理前端页面
- 将 platform 页面迁移到 pigx-ai-ui/src/views/urban/platform
- _Requirements: 3.4_
- [ ] 11.5 迁移共享组件
- 将 shared 包组件迁移到 pigx-ai-ui/src/components/urban
- 更新导入路径使用 pigx 工具函数
- _Requirements: 9.1, 9.2_
- [ ] 11.6 适配 API 调用
- 创建 pigx-ai-ui/src/api/urban 目录
- 创建 workcase.ts、bidding.ts、platform.ts API 定义
- 创建 pigx-ai-ui/src/api/dify 目录
- 创建 agent.ts、chat.ts、knowledge.ts API 定义
- 使用 pigx 的 request 工具和 R<T> 响应格式
- 更新 API 路径为 pigx 网关规则
- _Requirements: 1.5, 9.4_
- [ ] 12. 数据迁移执行
- [ ] 12.1 执行数据库 DDL 脚本
- 在 MySQL 中执行 database-migration-script.md 中的建表语句
- 验证表结构正确性
- _Requirements: 8.1, 8.2_
- [ ] 12.2 执行业务数据迁移
- 运行数据迁移脚本
- 验证数据完整性
- _Requirements: 8.5_
- [ ] 13. 集成测试和验证
- [ ] 13.1 后端编译验证
- 确保所有迁移代码可以通过编译
- 验证 Maven 依赖正确
- _Requirements: 所有后端需求_
- [ ] 13.2 端到端功能测试
- 测试所有迁移功能的完整流程
- 验证权限控制正确性
- _Requirements: 所有需求_
- [ ] 13.3 租户隔离验证
- 验证多租户数据隔离正确性
- _Requirements: 8.4, 11.6_
- [ ] 14. 最终验收
- 确保所有功能正常运行
- 确认权限控制有效
- 验证多租户数据隔离
## Notes
- 任务 1、2.1、2.2、2.3 的文档已完成,可直接使用
- 重点关注权限适配和用户服务调用的正确性
- 已创建三个独立模块pigx-workcase、pigx-bidding、pigx-dify
- 每个模块包含 api 和 biz 子模块,遵循 pigx 架构规范
- 实体类使用 pigx 标准格式:@TenantTable、Model<T>、createBy/updateBy/delFlag
- 前端迁移需要适配 pigx-ai-ui 的技术栈Vue3 + TypeScript + Element Plus
- 数据库迁移脚本已在 database-migration-script.md 中准备好
## 已创建的文件
### pigx-ai-ui 前端已迁移文件
#### API 层
- `src/api/workcase/workcase.ts` - 工单管理 API
- `src/api/workcase/chat.ts` - 聊天室、客服、视频会议 API
#### 类型定义
- `src/types/workcase/workcase.ts` - 工单相关类型
- `src/types/workcase/chatRoom.ts` - 聊天室相关类型
- `src/types/workcase/customer.ts` - 客服相关类型
- `src/types/workcase/conversation.ts` - 对话相关类型
- `src/types/workcase/wordCloud.ts` - 词云相关类型
#### 组件
- `src/components/workcase/WorkcaseAssign.vue` - 工单指派组件
#### 页面
- `src/views/workcase/index.vue` - 工单列表页面
- `src/views/workcase/detail/WorkcaseDetail.vue` - 工单详情组件
- `src/views/workcase/detail/WorkcaseDetail.scss` - 工单详情样式
- `src/views/workcase/chatRoom/ChatMessage.vue` - 聊天室消息组件
#### 导出文件
- `src/views/workcase/chatRoom/index.ts`
- `src/views/workcase/detail/index.ts`
### pigx-workcase 模块
- Entity: TbWorkcase, TbChatRoom, TbChatRoomMessage, TbChatRoomMember, TbWorkcaseProcess, TbVideoMeeting, TbCustomerService, TbWordCloud
- Mapper: TbWorkcaseMapper, TbChatRoomMapper, TbChatRoomMessageMapper, TbChatRoomMemberMapper, TbWorkcaseProcessMapper, TbVideoMeetingMapper, TbCustomerServiceMapper, TbWordCloudMapper
- Service: TbWorkcaseService, TbChatRoomService含完整业务逻辑, TbChatRoomMessageService, TbChatRoomMemberService, TbWorkcaseProcessService, TbVideoMeetingService含完整业务逻辑, TbCustomerServiceService, TbWordCloudService, JitsiTokenService
- Controller: TbWorkcaseController, TbChatRoomController含成员/消息/客服分配API, TbVideoMeetingController含创建/加入/开始/结束会议API, TbCustomerServiceController, TbWordCloudController, JitsiTokenController
- WebSocket: ChatRoomWebSocketMessage, ChatRoomNotificationMessage, ChatRoomMessageHandler, ChatRoomNotificationHandler, ChatRoomWebSocketService
- Config: JitsiProperties, application.yml (port: 7070, WebSocket: /ws/chat)
- Application: PigxWorkcaseApplication
#### 聊天室功能已迁移的API
- 聊天室CRUD创建、查询、修改、关闭、删除
- 成员管理:添加成员、移除成员、获取成员列表、获取未读数、更新已读状态
- 消息管理:发送消息、分页查询消息、删除消息
- 客服分配:自动分配客服到聊天室
- 服务评分:提交聊天室服务评分
#### 视频会议功能已迁移的API
- 会议CRUD创建、查询、删除
- 会议操作:获取会议信息、加入会议、开始会议、结束会议
- 聊天室关联:获取聊天室当前活跃会议
#### 客服人员管理已迁移的API
- 客服CRUD新增、查询、修改、删除
- 状态管理:更新客服在线状态
- 可用客服:获取可接待客服列表
#### WebSocket 实时推送功能:
- 消息类型chat_message聊天消息、chat_notification通知消息
- 通知类型member_join成员加入、member_leave成员离开、typing正在输入、room_closed聊天室关闭
- ChatRoomWebSocketService主动推送消息、广播到聊天室、发送给指定用户
#### Jitsi JWT Token 功能:
- 生成 JWT Token支持主持人/普通成员角色
- 验证 Token检查 Token 有效性和过期时间
- 构建 iframe URL包含默认配置和自定义配置
- 生成房间名基于工单ID生成唯一房间名
- 一键生成会议信息:房间名+Token+URL
#### 词云管理功能:
- 词云CRUD新增、查询、修改、删除
- 词频累加:同一天、同一分类的相同词条自动累加词频
- 分页查询:支持按词语、分类、日期筛选
### pigx-dify 模块
- Entity: TbAgent, TbChat, TbChatMessage, TbKnowledge, TbKnowledgeFile, TbKnowledgeFileLog, PromptCard
- Mapper: TbAgentMapper, TbChatMapper, TbChatMessageMapper, TbKnowledgeMapper, TbKnowledgeFileMapper, TbKnowledgeFileLogMapper
- Service: TbAgentService, TbChatService, TbChatMessageService, TbKnowledgeService, TbKnowledgeFileService, TbKnowledgeFileLogService
- Controller: TbAgentController, TbChatController, TbKnowledgeController
- Client: DifyApiClient完整的 Dify API 客户端)
- Client DTO: ChatRequest, ChatResponse, DatasetCreateRequest, DatasetCreateResponse, DatasetDetailResponse, DatasetListResponse, DatasetUpdateRequest, DocumentListResponse, DocumentStatusResponse, DocumentUploadRequest, DocumentUploadResponse, RetrievalModel, RetrievalRequest, RetrievalResponse, WorkflowRunRequest, WorkflowRunResponse, ConversationListResponse, ConversationVariablesResponse, MessageHistoryResponse, EmbeddingModelResponse, RerankModelResponse, DifyFileInfo
- Callback: StreamCallback流式响应回调接口
- Exception: DifyException
- Config: DifyProperties, application.yml (port: 7080, Dify API 配置)
- Application: PigxDifyApplication
#### Dify API 客户端功能:
- 知识库管理:创建、查询、更新、删除知识库
- 文档管理:上传文档、查询文档列表、查询文档状态、删除文档
- 知识库检索:从知识库检索相关内容
- 对话功能:流式对话、阻塞式对话、停止对话、消息反馈
- 工作流:执行工作流(阻塞模式)
- 对话历史:获取消息历史、对话列表、对话变量
- 模型管理获取嵌入模型列表、Rerank模型列表
- 通用HTTPGET、POST、PATCH、DELETE 方法
### pigx-bidding 模块
- Entity: TbBiddingProject, TbBiddingDocument, TbBiddingRequirement, TbBidResponse, TbProcessNode
- Mapper: TbBiddingProjectMapper, TbBiddingDocumentMapper, TbBiddingRequirementMapper, TbBidResponseMapper, TbProcessNodeMapper
- Service: TbBiddingProjectService, TbBiddingDocumentService, TbBiddingRequirementService, TbBidResponseService, TbProcessNodeService
- Controller: TbBiddingProjectController, TbBiddingDocumentController
- Application: PigxBiddingApplication
- Config: application.yml (port: 7090)

View File

@@ -1,505 +0,0 @@
# 租户字段添加指南
## 概述
pigx 是一个多租户 SaaS 平台所有业务数据需要通过租户IDtenant_id进行隔离。本指南详细说明如何为所有业务表添加租户字段并实现租户隔离。
## 1. 租户字段规范
### 1.1 字段定义
```sql
`tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID'
```
### 1.2 字段特性
- **类型**BIGINT
- **非空**NOT NULL
- **默认值**1默认租户
- **索引**:建议添加索引以提高查询性能
- **注释**'租户ID'
## 2. 为现有表添加租户字段
### 2.1 通用 SQL 模板
```sql
-- 添加租户字段
ALTER TABLE `表名`
ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID'
AFTER `某个字段`;
-- 添加索引
ALTER TABLE `表名`
ADD INDEX `idx_tenant_id` (`tenant_id`);
-- 如果需要复合索引(常用查询条件+租户)
ALTER TABLE `表名`
ADD INDEX `idx_status_tenant` (`status`, `tenant_id`);
```
### 2.2 批量添加脚本
```sql
-- 工单模块
ALTER TABLE `tb_workcase` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_workcase` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_workcase_process` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_workcase_process` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_workcase_device` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_workcase_device` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_chat_room` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_chat_room` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_chat_room_member` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_chat_room_member` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_chat_room_message` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_chat_room_message` ADD INDEX `idx_tenant_id` (`tenant_id`);
-- AI模块
ALTER TABLE `tb_agent` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_agent` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_chat` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_chat` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_chat_message` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_chat_message` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_knowledge` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_knowledge` ADD INDEX `idx_tenant_id` (`tenant_id`);
-- 招标模块
ALTER TABLE `tb_bidding_project` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_bidding_project` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_bidding_document` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_bidding_document` ADD INDEX `idx_tenant_id` (`tenant_id`);
-- 消息模块
ALTER TABLE `tb_message` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_message` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_message_range` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_message_range` ADD INDEX `idx_tenant_id` (`tenant_id`);
ALTER TABLE `tb_message_receiver` ADD COLUMN `tenant_id` bigint NOT NULL DEFAULT 1 COMMENT '租户ID';
ALTER TABLE `tb_message_receiver` ADD INDEX `idx_tenant_id` (`tenant_id`);
```
## 3. 实体类添加租户字段
### 3.1 实体类模板
```java
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.FieldFill;
@Data
@TableName("tb_workcase")
public class TbWorkcase extends Model<TbWorkcase> {
// ... 其他字段 ...
/**
* 租户ID
*/
@TableField(fill = FieldFill.INSERT)
private Long tenantId;
}
```
### 3.2 MyBatis-Plus 自动填充配置
```java
import com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;
import com.pig4cloud.pigx.common.security.util.SecurityUtils;
import org.apache.ibatis.reflection.MetaObject;
import org.springframework.stereotype.Component;
@Component
public class MybatisPlusMetaObjectHandler implements MetaObjectHandler {
@Override
public void insertFill(MetaObject metaObject) {
// 获取当前用户的租户ID
PigxUser user = SecurityUtils.getUser();
if (user != null) {
// 自动填充租户ID
this.strictInsertFill(metaObject, "tenantId", Long.class, user.getTenantId());
} else {
// 默认租户ID
this.strictInsertFill(metaObject, "tenantId", Long.class, 1L);
}
// 填充其他字段
this.strictInsertFill(metaObject, "createTime", LocalDateTime.class, LocalDateTime.now());
this.strictInsertFill(metaObject, "createBy", String.class, user != null ? user.getUsername() : "system");
this.strictInsertFill(metaObject, "delFlag", String.class, "0");
}
@Override
public void updateFill(MetaObject metaObject) {
this.strictUpdateFill(metaObject, "updateTime", LocalDateTime.class, LocalDateTime.now());
PigxUser user = SecurityUtils.getUser();
if (user != null) {
this.strictUpdateFill(metaObject, "updateBy", String.class, user.getUsername());
}
}
}
```
## 4. 配置租户拦截器
### 4.1 MyBatis-Plus 租户拦截器配置
```java
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.TenantLineInnerInterceptor;
import com.pig4cloud.pigx.common.security.util.SecurityUtils;
import net.sf.jsqlparser.expression.Expression;
import net.sf.jsqlparser.expression.LongValue;
import net.sf.jsqlparser.schema.Column;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.util.Arrays;
import java.util.List;
@Configuration
public class MybatisPlusConfig {
@Bean
public MybatisPlusInterceptor mybatisPlusInterceptor() {
MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
// 添加租户拦截器
interceptor.addInnerInterceptor(tenantLineInnerInterceptor());
// 添加分页拦截器等其他拦截器
// interceptor.addInnerInterceptor(new PaginationInnerInterceptor());
return interceptor;
}
private TenantLineInnerInterceptor tenantLineInnerInterceptor() {
return new TenantLineInnerInterceptor(new TenantLineHandler() {
@Override
public Expression getTenantId() {
// 从当前用户获取租户ID
PigxUser user = SecurityUtils.getUser();
Long tenantId = user != null ? user.getTenantId() : 1L;
return new LongValue(tenantId);
}
@Override
public String getTenantIdColumn() {
// 租户ID字段名
return "tenant_id";
}
@Override
public boolean ignoreTable(String tableName) {
// 忽略不需要租户隔离的表
List<String> ignoreTables = Arrays.asList(
"sys_user", // 系统用户表
"sys_role", // 角色表
"sys_menu", // 菜单表
"sys_dict", // 字典表
"sys_log", // 日志表
"temp_user_mapping" // 临时映射表
);
return ignoreTables.contains(tableName);
}
});
}
}
```
## 5. Service 层实现租户隔离
### 5.1 查询时自动添加租户条件
```java
@Service
public class WorkcaseServiceImpl extends ServiceImpl<WorkcaseMapper, TbWorkcase>
implements WorkcaseService {
@Override
public List<TbWorkcase> listByStatus(String status) {
// 租户拦截器会自动添加 tenant_id 条件
return this.list(new QueryWrapper<TbWorkcase>()
.eq("status", status));
// 实际SQL: SELECT * FROM tb_workcase WHERE status = ? AND tenant_id = ?
}
@Override
public TbWorkcase getById(String id) {
// 自动添加租户条件,确保不会查询到其他租户的数据
return super.getById(id);
// 实际SQL: SELECT * FROM tb_workcase WHERE id = ? AND tenant_id = ?
}
}
```
### 5.2 保存时自动设置租户ID
```java
@Service
public class WorkcaseServiceImpl extends ServiceImpl<WorkcaseMapper, TbWorkcase>
implements WorkcaseService {
@Override
@Transactional(rollbackFor = Exception.class)
public Boolean createWorkcase(TbWorkcase workcase) {
// 通过 MetaObjectHandler 自动填充 tenant_id
// 不需要手动设置
return this.save(workcase);
}
// 如果需要手动设置(不推荐)
@Override
public Boolean createWorkcaseManual(TbWorkcase workcase) {
PigxUser user = SecurityUtils.getUser();
workcase.setTenantId(user.getTenantId());
workcase.setCreateBy(user.getUsername());
workcase.setCreateTime(LocalDateTime.now());
return this.save(workcase);
}
}
```
## 6. 特殊场景处理
### 6.1 跨租户查询(管理员功能)
```java
@Service
public class AdminService {
@Autowired
private WorkcaseMapper workcaseMapper;
/**
* 管理员查询所有租户的数据
* 需要特殊权限
*/
@PreAuthorize("@pms.hasPermission('admin_cross_tenant')")
public List<TbWorkcase> listAllTenants() {
// 使用 @InterceptorIgnore 注解忽略租户拦截
return workcaseMapper.selectAllWithoutTenant();
}
}
// Mapper 接口
@Mapper
public interface WorkcaseMapper extends BaseMapper<TbWorkcase> {
@InterceptorIgnore(tenantLine = "true")
@Select("SELECT * FROM tb_workcase")
List<TbWorkcase> selectAllWithoutTenant();
}
```
### 6.2 定时任务中的租户处理
```java
@Component
public class WorkcaseScheduledTask {
@Autowired
private WorkcaseService workcaseService;
/**
* 定时任务:为每个租户执行任务
*/
@Scheduled(cron = "0 0 2 * * ?")
public void processAllTenants() {
// 获取所有租户列表
List<Long> tenantIds = getTenantIds();
for (Long tenantId : tenantIds) {
// 设置当前租户上下文
TenantContextHolder.setTenantId(tenantId);
try {
// 执行业务逻辑
processForTenant(tenantId);
} finally {
// 清除租户上下文
TenantContextHolder.clear();
}
}
}
}
```
### 6.3 异步任务中的租户传递
```java
@Service
public class AsyncService {
/**
* 异步任务需要传递租户ID
*/
@Async
public void processAsync(Long tenantId, String workcaseId) {
// 在异步线程中设置租户ID
TenantContextHolder.setTenantId(tenantId);
try {
// 执行业务逻辑
doProcess(workcaseId);
} finally {
TenantContextHolder.clear();
}
}
// 调用异步方法前获取租户ID
public void callAsync(String workcaseId) {
PigxUser user = SecurityUtils.getUser();
Long tenantId = user.getTenantId();
processAsync(tenantId, workcaseId);
}
}
```
## 7. 数据迁移时的租户处理
### 7.1 历史数据添加租户ID
```sql
-- 为历史数据设置默认租户ID
UPDATE tb_workcase SET tenant_id = 1 WHERE tenant_id IS NULL;
UPDATE tb_chat_room SET tenant_id = 1 WHERE tenant_id IS NULL;
UPDATE tb_agent SET tenant_id = 1 WHERE tenant_id IS NULL;
```
### 7.2 根据部门映射租户
```sql
-- 如果有部门与租户的映射关系
UPDATE tb_workcase w
JOIN sys_dept d ON w.dept_id = d.dept_id
SET w.tenant_id = d.tenant_id
WHERE w.tenant_id = 1;
```
## 8. 测试验证
### 8.1 租户隔离测试
```java
@SpringBootTest
@RunWith(SpringRunner.class)
public class TenantIsolationTest {
@Autowired
private WorkcaseService workcaseService;
@Test
@WithMockUser(authorities = {"workcase_view"})
public void testTenantIsolation() {
// 模拟租户1的用户
mockTenant(1L);
List<TbWorkcase> tenant1List = workcaseService.list();
// 模拟租户2的用户
mockTenant(2L);
List<TbWorkcase> tenant2List = workcaseService.list();
// 验证数据隔离
assertNotEquals(tenant1List, tenant2List);
}
private void mockTenant(Long tenantId) {
PigxUser user = new PigxUser();
user.setTenantId(tenantId);
// 设置到安全上下文
SecurityContextHolder.getContext().setAuthentication(
new UsernamePasswordAuthenticationToken(user, null, user.getAuthorities())
);
}
}
```
### 8.2 SQL 验证
```sql
-- 验证所有表都有租户字段
SELECT
TABLE_NAME,
COLUMN_NAME
FROM INFORMATION_SCHEMA.COLUMNS
WHERE
TABLE_SCHEMA = 'your_database'
AND COLUMN_NAME = 'tenant_id'
ORDER BY TABLE_NAME;
-- 验证数据分布
SELECT
tenant_id,
COUNT(*) as record_count
FROM tb_workcase
GROUP BY tenant_id;
-- 验证索引
SHOW INDEX FROM tb_workcase WHERE Column_name = 'tenant_id';
```
## 9. 注意事项
### 9.1 性能考虑
- 租户ID字段必须建立索引
- 常用查询条件可以建立复合索引(如 status + tenant_id
- 大表可以考虑按租户分区
### 9.2 安全考虑
- 确保租户拦截器正确配置
- 敏感操作需要记录日志
- 跨租户操作需要特殊权限
### 9.3 开发规范
- 不要在代码中硬编码租户ID
- 始终从 SecurityUtils 获取当前租户
- 使用 MyBatis-Plus 的自动填充功能
### 9.4 数据备份
- 迁移前备份原始数据
- 记录租户ID映射关系
- 保留回滚方案
## 10. 故障排查
### 10.1 常见问题
**问题1查询不到数据**
- 检查租户ID是否正确
- 验证租户拦截器是否生效
- 查看生成的SQL是否包含tenant_id条件
**问题2插入数据失败**
- 检查tenant_id字段是否为NOT NULL
- 验证自动填充是否配置
- 确认当前用户有租户信息
**问题3跨租户数据泄露**
- 检查是否所有查询都经过租户拦截
- 验证忽略表配置是否正确
- 审查自定义SQL是否包含租户条件
### 10.2 调试建议
```yaml
# application.yml - 开启SQL日志
logging:
level:
com.pig4cloud.pigx.*.mapper: DEBUG
com.baomidou.mybatisplus: DEBUG
# 查看实际执行的SQL
mybatis-plus:
configuration:
log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
```
## 11. 迁移检查清单
- [ ] 所有业务表添加 tenant_id 字段
- [ ] 所有实体类添加 tenantId 属性
- [ ] 配置 MyBatis-Plus 自动填充
- [ ] 配置租户拦截器
- [ ] 配置忽略表列表
- [ ] 历史数据设置默认租户ID
- [ ] 建立租户ID索引
- [ ] 测试租户隔离功能
- [ ] 测试跨租户查询权限
- [ ] 文档更新和团队培训

27
.vscode/launch.json vendored Normal file
View File

@@ -0,0 +1,27 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Python: FastAPI Server",
"type": "python",
"request": "launch",
"program": "${workspaceFolder}/difyPlugin/main.py",
"console": "integratedTerminal",
"justMyCode": true,
"env": {
"PYTHONUNBUFFERED": "1"
},
"cwd": "${workspaceFolder}/difyPlugin",
"args": []
},
{
"name": "Python: Debug Plugin",
"type": "python",
"request": "launch",
"program": "${workspaceFolder}/difyPlugin/app/plugins/pdf/__init__.py",
"console": "integratedTerminal",
"justMyCode": true,
"cwd": "${workspaceFolder}/difyPlugin"
}
]
}

116
Makefile
View File

@@ -1,116 +0,0 @@
# Urban Lifeline 开发环境快捷命令
.PHONY: help up down logs restart clean build ps
# 默认目标
.DEFAULT_GOAL := help
# 帮助信息
help:
@echo "========================================="
@echo " 泰豪电源 Urban Lifeline 开发环境"
@echo "========================================="
@echo ""
@echo "可用命令:"
@echo " make up - 启动所有服务"
@echo " make down - 停止所有服务"
@echo " make restart - 重启所有服务"
@echo " make logs - 查看所有服务日志"
@echo " make ps - 查看服务状态"
@echo " make build - 重新构建镜像"
@echo " make clean - 清理所有数据(慎用)"
@echo ""
@echo "单独服务操作:"
@echo " make logs-portal - 查看主应用日志"
@echo " make logs-gateway - 查看网关日志"
@echo " make restart-portal - 重启主应用"
@echo " make shell-portal - 进入主应用容器"
@echo ""
# 启动所有服务
up:
@echo "启动开发环境..."
docker-compose -f docker-compose.dev.yml up -d
@echo "服务启动中,请稍候..."
@sleep 5
@echo ""
@echo "访问地址:"
@echo " 主应用: http://localhost"
@echo " 招投标: http://localhost/bidding"
@echo " 智能客服: http://localhost/customer-service"
@echo " Gateway: http://localhost/api"
@echo " Nacos: http://localhost/nacos"
@echo ""
# 停止所有服务
down:
@echo "停止所有服务..."
docker-compose -f docker-compose.dev.yml down
# 查看日志
logs:
docker-compose -f docker-compose.dev.yml logs -f
# 重启所有服务
restart:
@echo "重启所有服务..."
docker-compose -f docker-compose.dev.yml restart
# 查看服务状态
ps:
docker-compose -f docker-compose.dev.yml ps
# 重新构建镜像
build:
@echo "重新构建所有镜像..."
docker-compose -f docker-compose.dev.yml build --no-cache
# 清理所有数据(危险操作)
clean:
@echo "警告:此操作将删除所有容器、数据卷和镜像!"
@read -p "确认继续吗?(y/N): " confirm && [ "$$confirm" = "y" ] || exit 1
docker-compose -f docker-compose.dev.yml down -v
docker system prune -af
# ==================== 单独服务操作 ====================
# Portal 日志
logs-portal:
docker-compose -f docker-compose.dev.yml logs -f portal
# Gateway 日志
logs-gateway:
docker-compose -f docker-compose.dev.yml logs -f gateway
# Nginx 日志
logs-nginx:
docker-compose -f docker-compose.dev.yml logs -f nginx
# 共享包日志
logs-shared:
docker-compose -f docker-compose.dev.yml logs -f shared
# 重启 Portal
restart-portal:
docker-compose -f docker-compose.dev.yml restart portal
# 重启 Gateway
restart-gateway:
docker-compose -f docker-compose.dev.yml restart gateway
# 进入 Portal 容器
shell-portal:
docker-compose -f docker-compose.dev.yml exec portal sh
# 进入 Gateway 容器
shell-gateway:
docker-compose -f docker-compose.dev.yml exec gateway sh
# 进入数据库
db:
docker-compose -f docker-compose.dev.yml exec postgres psql -U postgres -d urban_lifeline
# 初始化数据库
db-init:
@echo "初始化数据库..."
docker-compose -f docker-compose.dev.yml exec postgres psql -U postgres -d urban_lifeline -f /docker-entrypoint-initdb.d/initAll.sql

227
README.md
View File

@@ -1,227 +0,0 @@
# 泰豪电源 AI 数智化平台
城市生命线智能管理系统 - Urban Lifeline
## 项目简介
基于 **"一个底座、多种智能体"** 架构的企业级 AI 数智化平台,包含四大核心业务模块:
1. **资料管理智能化** - 智能文档管理与检索
2. **招投标自动化** - 招投标全流程智能化
3. **售后客服智能化** - AI 驱动的智能客服系统
4. **企业内部知识协同** - 知识库管理与协作
## 技术架构
### 后端技术栈
- **Java 21** + **Spring Boot 3.5**
- **Spring Cloud Gateway** - API 网关
- **Nacos** - 服务注册与配置中心
- **PostgreSQL 16** - 主数据库
- **Redis 7** - 缓存
- **Dubbo 3.3** - RPC 框架
### 前端技术栈
- **Vue 3.5** (`<script setup>`)
- **TypeScript 5.7**
- **Vite 6.0** - 构建工具
- **Pinia 2.2** - 状态管理
- **Element Plus 2.9** - UI 组件库
- **pnpm 9.0 + Turborepo 2.0** - Monorepo 管理
### 核心特性:共享组件 HTTP 导入
采用 **Import Maps + ES Module** 方案,实现真正的运行时组件共享:
```vue
<script setup lang="ts">
// 直接从 HTTP URL 导入共享组件
import { UlTable } from '@shared/components' // → http://localhost/shared/components.js
import { http } from '@shared/utils' // → http://localhost/shared/utils.js
import { authApi } from '@shared/api' // → http://localhost/shared/api.js
</script>
```
**工作原理**
1. 共享包构建为 ES Module (`components.js`)
2. 部署到独立 HTTP 服务 (`http://localhost:5000`)
3. 通过 Nginx 统一代理 (`http://localhost/shared/`)
4. 浏览器通过 Import Maps 直接加载
## 快速开始
### 方式一Docker Compose推荐
```bash
# 1. 启动所有服务(自动构建)
make up
# 2. 等待服务启动约1-2分钟
make ps
# 3. 访问应用
open http://localhost
```
### 方式二:本地开发
```bash
# 1. 安装依赖
pnpm install
# 2. 启动后端服务(需要 Java 21
cd urbanLifelineServ
mvn spring-boot:run
# 3. 启动前端服务
cd urbanLifelineWeb
pnpm dev
# 4. 访问
open http://localhost:3000
```
## 项目结构
```
urbanLifeline/
├── urbanLifelineServ/ # 后端服务Spring Boot
│ ├── gateway/ # API 网关
│ ├── auth/ # 认证服务
│ ├── system/ # 系统服务
│ ├── file/ # 文件服务
│ └── common/ # 公共模块
├── urbanLifelineWeb/ # 前端应用Vue 3 Monorepo
│ ├── packages/
│ │ ├── shared/ # 共享组件库
│ │ │ ├── components/ # 公共组件
│ │ │ ├── utils/ # 工具函数
│ │ │ ├── api/ # API 封装
│ │ │ └── composables/ # 组合式函数
│ │ ├── portal/ # 主应用
│ │ ├── app-bidding/ # 招投标应用
│ │ └── app-knowledge/ # 知识协同应用
│ ├── pnpm-workspace.yaml
│ └── turbo.json
├── docker-compose.dev.yml # Docker 开发环境
├── Makefile # 快捷命令
└── docs/ # 文档
├── 前端完整指南.md # ⭐ 前端全流程文档
│ └── 数据库完整指南.md # ⭐ 数据库全流程文档
```
## 访问地址
| 应用 | 地址 | 说明 |
|------|------|------|
| **主应用** | http://localhost/ | Portal 统一入口 |
| **招投标** | http://localhost/bidding | 招投标智能体 |
| **智能客服** | http://localhost/customer-service | 客服系统 |
| **共享组件** | http://localhost/shared/components.js | ES Module |
| **API 网关** | http://localhost/api | Gateway |
| **Nacos** | http://localhost/nacos | 注册中心 |
## 常用命令
```bash
# Docker 环境
make up # 启动所有服务
make down # 停止所有服务
make logs # 查看日志
make ps # 查看服务状态
make restart # 重启服务
make clean # 清理所有数据
# 单独服务
make logs-portal # 查看主应用日志
make logs-gateway # 查看网关日志
make restart-portal # 重启主应用
make shell-portal # 进入容器
make db # 连接数据库
# 前端开发
pnpm dev # 启动所有应用
pnpm build # 构建所有应用
pnpm --filter portal dev # 启动单个应用
pnpm --filter portal build # 构建单个应用
```
## 文档索引
- **[前端完整指南](./docs/前端完整指南.md)** - ⭐ 架构、开发、部署全流程
- **[数据库完整指南](./docs/数据库完整指南.md)** - ⭐ Schema、表结构、优化方案
## 核心亮点
### 1. 共享组件运行时加载
```html
<!-- index.html -->
<script type="importmap">
{
"imports": {
"@shared/components": "http://localhost/shared/components.js",
"@shared/utils": "http://localhost/shared/utils.js"
}
}
</script>
```
```vue
<script setup lang="ts">
// 浏览器自动从 URL 加载,无需打包!
import { UlTable } from '@shared/components'
</script>
```
### 2. Monorepo + Turborepo
- 统一依赖管理
- 增量构建加速
- 任务并行执行
- 远程缓存
### 3. Docker 一键启动
- 所有服务容器化
- Nginx 统一网关
- 支持热更新HMR
- 数据持久化
### 4. TypeScript 全栈
- 类型安全
- 智能提示
- 重构友好
## 环境要求
### Docker 环境(推荐)
- Docker 20.10+
- Docker Compose 2.0+
### 本地开发
- Node.js 20+
- pnpm 9+
- Java 21+
- Maven 3.9+
- PostgreSQL 16+
## 开发规范
- **前端**ESLint + Prettier + Husky
- **提交规范**Conventional Commits
- **分支策略**Git Flow
- **代码审查**:必须 Code Review
## License
MIT
---
**Built with ❤️ by Taihao Team**

Submodule ai-management-dify updated: f1b32d51cb...9fffb6e421

Submodule ai-management-platform updated: bfea48fa67...199d8180a6

View File

View File

@@ -1,17 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "DifyPlugin: FastAPI",
"type": "debugpy",
"request": "launch",
"program": "run.py",
"cwd": "${workspaceFolder}",
"python": "F:\\Environment\\conda\\envs\\difyPlugin\\python.exe",
"env": {
"PYTHONPATH": "${workspaceFolder}/difyPlugin"
},
"jinja": true
}
]
}

View File

146
difyPlugin/DifyCLI.md Normal file
View File

@@ -0,0 +1,146 @@
> ## Documentation Index
> Fetch the complete documentation index at: https://docs.dify.ai/llms.txt
> Use this file to discover all available pages before exploring further.
# CLI
> Dify 插件开发命令行界面
<Note> ⚠️ 本文档由 AI 自动翻译。如有任何不准确之处,请参考[英文原版](/en/develop-plugin/getting-started/cli)。</Note>
使用命令行界面CLI设置和打包你的 Dify 插件。CLI 提供了一种简化的方式来管理你的插件开发工作流,从初始化到打包。
本指南将指导你如何使用 CLI 进行 Dify 插件开发。
## 前提条件
在开始之前,请确保已安装以下内容:
* Python 版本 ≥ 3.12
* Dify CLI
* Homebrew适用于 Mac 用户)
## 创建 Dify 插件项目
<Tabs>
<Tab title="Mac">
```bash theme={null}
brew tap langgenius/dify
brew install dify
```
</Tab>
<Tab title="Linux">
从 [Dify GitHub 发布页面](https://github.com/langgenius/dify-plugin-daemon/releases) 获取最新的 Dify CLI
```bash theme={null}
# 下载 dify-plugin-linux-amd64 后执行:
chmod +x dify-plugin-linux-amd64
mv dify-plugin-linux-amd64 dify
sudo mv dify /usr/local/bin/
```
</Tab>
</Tabs>
现在你已成功安装 Dify CLI。你可以通过运行以下命令来验证安装
```bash theme={null}
dify version
```
你可以使用以下命令创建一个新的 Dify 插件项目:
```bash theme={null}
dify plugin init
```
根据提示填写必填字段:
```bash theme={null}
Edit profile of the plugin
Plugin name (press Enter to next step): hello-world
Author (press Enter to next step): langgenius
Description (press Enter to next step): hello world example
Repository URL (Optional) (press Enter to next step): Repository URL (Optional)
Enable multilingual README: [✔] English is required by default
Languages to generate:
English: [✔] (required)
→ 简体中文 (Simplified Chinese): [✔]
日本語 (Japanese): [✘]
Português (Portuguese - Brazil): [✘]
Controls:
↑/↓ Navigate • Space/Tab Toggle selection • Enter Next step
```
选择 `python` 并按 Enter 继续使用 Python 插件模板。
```bash theme={null}
Select the type of plugin you want to create, and press `Enter` to continue
Before starting, here's some basic knowledge about Plugin types in Dify:
- Tool: Tool Providers like Google Search, Stable Diffusion, etc. Used to perform specific tasks.
- Model: Model Providers like OpenAI, Anthropic, etc. Use their models to enhance AI capabilities.
- Endpoint: Similar to Service API in Dify and Ingress in Kubernetes. Extend HTTP services as endpoints with custom logic.
- Agent Strategy: Implement your own agent strategies like Function Calling, ReAct, ToT, CoT, etc.
Based on the ability you want to extend, Plugins are divided into four types: Tool, Model, Extension, and Agent Strategy
- Tool: A tool provider that can also implement endpoints. For example, building a Discord Bot requires both Sending and Receiving Messages.
- Model: Strictly for model providers, no other extensions allowed.
- Extension: For simple HTTP services that extend functionality.
- Agent Strategy: Implement custom agent logic with a focused approach.
We've provided templates to help you get started. Choose one of the options below:
-> tool
agent-strategy
llm
text-embedding
rerank
tts
speech2text
moderation
extension
```
输入默认的 dify 版本,留空则使用最新版本:
```bash theme={null}
Edit minimal Dify version requirement, leave it blank by default
Minimal Dify version (press Enter to next step):
```
现在你已准备就绪CLI 将创建一个以你提供的插件名称命名的新目录,并为你的插件设置基本结构。
```bash theme={null}
cd hello-world
```
## 运行插件
确保你在 hello-world 目录中
```bash theme={null}
cp .env.example .env
```
编辑 `.env` 文件以设置插件的环境变量,例如 API 密钥或其他配置。你可以在 Dify 仪表板中找到这些变量。登录到你的 Dify 环境,点击右上角的"插件"图标,然后点击调试图标(或类似虫子的图标)。在弹出窗口中,复制"API Key"和"Host Address"。(请参考你本地对应的截图,其中显示了获取密钥和主机地址的界面)
```bash theme={null}
INSTALL_METHOD=remote
REMOTE_INSTALL_HOST=debug-plugin.dify.dev
REMOTE_INSTALL_PORT=5003
REMOTE_INSTALL_KEY=********-****-****-****-************
```
现在你可以使用以下命令在本地运行你的插件:
```bash theme={null}
pip install -r requirements.txt
python -m main
```
***
[编辑此页面](https://github.com/langgenius/dify-docs/edit/main/en/develop-plugin/getting-started/cli.mdx) | [报告问题](https://github.com/langgenius/dify-docs/issues/new?template=docs.yml)

184
difyPlugin/pdf/.difyignore Normal file
View File

@@ -0,0 +1,184 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
# Vscode
.vscode/
# Git
.git/
.gitignore
.github/
# Mac
.DS_Store
# Windows
Thumbs.db
# Dify plugin packages
# To prevent packaging repetitively
*.difypkg

View File

@@ -1,4 +1,3 @@
INSTALL_METHOD=remote
REMOTE_INSTALL_HOST=debug-plugin.dify.dev
REMOTE_INSTALL_PORT=5003
REMOTE_INSTALL_URL=debug.dify.ai:5003
REMOTE_INSTALL_KEY=********-****-****-****-************

View File

@@ -0,0 +1,109 @@
name: Plugin Publish Workflow
on:
release:
types: [published]
jobs:
publish:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Download CLI tool
run: |
mkdir -p $RUNNER_TEMP/bin
cd $RUNNER_TEMP/bin
wget https://github.com/langgenius/dify-plugin-daemon/releases/download/0.0.6/dify-plugin-linux-amd64
chmod +x dify-plugin-linux-amd64
echo "CLI tool location:"
pwd
ls -la dify-plugin-linux-amd64
- name: Get basic info from manifest
id: get_basic_info
run: |
PLUGIN_NAME=$(grep "^name:" manifest.yaml | cut -d' ' -f2)
echo "Plugin name: $PLUGIN_NAME"
echo "plugin_name=$PLUGIN_NAME" >> $GITHUB_OUTPUT
VERSION=$(grep "^version:" manifest.yaml | cut -d' ' -f2)
echo "Plugin version: $VERSION"
echo "version=$VERSION" >> $GITHUB_OUTPUT
# If the author's name is not your github username, you can change the author here
AUTHOR=$(grep "^author:" manifest.yaml | cut -d' ' -f2)
echo "Plugin author: $AUTHOR"
echo "author=$AUTHOR" >> $GITHUB_OUTPUT
- name: Package Plugin
id: package
run: |
cd $GITHUB_WORKSPACE
PACKAGE_NAME="${{ steps.get_basic_info.outputs.plugin_name }}-${{ steps.get_basic_info.outputs.version }}.difypkg"
$RUNNER_TEMP/bin/dify-plugin-linux-amd64 plugin package . -o "$PACKAGE_NAME"
echo "Package result:"
ls -la "$PACKAGE_NAME"
echo "package_name=$PACKAGE_NAME" >> $GITHUB_OUTPUT
echo "\nFull file path:"
pwd
echo "\nDirectory structure:"
tree || ls -R
- name: Checkout target repo
uses: actions/checkout@v3
with:
repository: ${{steps.get_basic_info.outputs.author}}/dify-plugins
path: dify-plugins
token: ${{ secrets.PLUGIN_ACTION }}
fetch-depth: 1
persist-credentials: true
- name: Prepare and create PR
run: |
PACKAGE_NAME="${{ steps.get_basic_info.outputs.plugin_name }}-${{ steps.get_basic_info.outputs.version }}.difypkg"
mkdir -p dify-plugins/${{ steps.get_basic_info.outputs.author }}/${{ steps.get_basic_info.outputs.plugin_name }}
mv "$PACKAGE_NAME" dify-plugins/${{ steps.get_basic_info.outputs.author }}/${{ steps.get_basic_info.outputs.plugin_name }}/
cd dify-plugins
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
git fetch origin main
git checkout main
git pull origin main
BRANCH_NAME="bump-${{ steps.get_basic_info.outputs.plugin_name }}-plugin-${{ steps.get_basic_info.outputs.version }}"
git checkout -b "$BRANCH_NAME"
git add .
git commit -m "bump ${{ steps.get_basic_info.outputs.plugin_name }} plugin to version ${{ steps.get_basic_info.outputs.version }}"
git push -u origin "$BRANCH_NAME" --force
git branch -a
echo "Waiting for branch to sync..."
sleep 10 # Wait 10 seconds for branch sync
- name: Create PR via GitHub API
env:
# How to config the token:
# 1. Profile -> Settings -> Developer settings -> Personal access tokens -> Generate new token (with repo scope) -> Copy the token
# 2. Go to the target repository -> Settings -> Secrets and variables -> Actions -> New repository secret -> Add the token as PLUGIN_ACTION
GH_TOKEN: ${{ secrets.PLUGIN_ACTION }}
run: |
gh pr create \
--repo langgenius/dify-plugins \
--head "${{ steps.get_basic_info.outputs.author }}:${{ steps.get_basic_info.outputs.plugin_name }}-${{ steps.get_basic_info.outputs.version }}" \
--base main \
--title "bump ${{ steps.get_basic_info.outputs.plugin_name }} plugin to version ${{ steps.get_basic_info.outputs.version }}" \
--body "bump ${{ steps.get_basic_info.outputs.plugin_name }} plugin package to version ${{ steps.get_basic_info.outputs.version }}
Changes:
- Updated plugin package file" || echo "PR already exists or creation skipped." # Handle cases where PR already exists

176
difyPlugin/pdf/.gitignore vendored Normal file
View File

@@ -0,0 +1,176 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
# Vscode
.vscode/
# macOS
.DS_Store
.AppleDouble
.LSOverride

137
difyPlugin/pdf/GUIDE.md Normal file
View File

@@ -0,0 +1,137 @@
# Dify Plugin Development Guide
Welcome to Dify plugin development! This guide will help you get started quickly.
## Plugin Types
Dify plugins extend three main capabilities:
| Type | Description | Example |
|------|-------------|---------|
| **Tool** | Perform specific tasks | Google Search, Stable Diffusion |
| **Model** | AI model integrations | OpenAI, Anthropic |
| **Endpoint** | HTTP services | Custom APIs, integrations |
You can create:
- **Tool**: Tool provider with optional endpoints (e.g., Discord bot)
- **Model**: Model provider only
- **Extension**: Simple HTTP service
## Setup
### Requirements
- Python 3.12+
- Dependencies: `pip install -r requirements.txt`
## Development Process
<details>
<summary><b>1. Manifest Structure</b></summary>
Edit `manifest.yaml` to describe your plugin:
```yaml
version: 0.1.0 # Required: Plugin version
type: plugin # Required: plugin or bundle
author: YourOrganization # Required: Organization name
label: # Required: Multi-language names
en_US: Plugin Name
zh_Hans: 插件名称
created_at: 2023-01-01T00:00:00Z # Required: Creation time (RFC3339)
icon: assets/icon.png # Required: Icon path
# Resources and permissions
resource:
memory: 268435456 # Max memory (bytes)
permission:
tool:
enabled: true # Tool permission
model:
enabled: true # Model permission
llm: true
text_embedding: false
# Other model types...
# Other permissions...
# Extensions definition
plugins:
tools:
- tools/my_tool.yaml # Tool definition files
models:
- models/my_model.yaml # Model definition files
endpoints:
- endpoints/my_api.yaml # Endpoint definition files
# Runtime metadata
meta:
version: 0.0.1 # Manifest format version
arch:
- amd64
- arm64
runner:
language: python
version: "3.12"
entrypoint: main
```
**Restrictions:**
- Cannot extend both tools and models
- Must have at least one extension
- Cannot extend both models and endpoints
- Limited to one supplier per extension type
</details>
<details>
<summary><b>2. Implementation Examples</b></summary>
Study these examples to understand plugin implementation:
- [OpenAI](https://github.com/langgenius/dify-plugin-sdks/tree/main/python/examples/openai) - Model provider
- [Google Search](https://github.com/langgenius/dify-plugin-sdks/tree/main/python/examples/google) - Tool provider
- [Neko](https://github.com/langgenius/dify-plugin-sdks/tree/main/python/examples/neko) - Endpoint group
</details>
<details>
<summary><b>3. Testing & Debugging</b></summary>
1. Copy `.env.example` to `.env` and configure:
```
INSTALL_METHOD=remote
REMOTE_INSTALL_URL=debug.dify.ai:5003
REMOTE_INSTALL_KEY=your-debug-key
```
2. Run your plugin:
```bash
python -m main
```
3. Refresh your Dify instance to see the plugin (marked as "debugging")
</details>
<details>
<summary><b>4. Publishing</b></summary>
#### Manual Packaging
```bash
dify-plugin plugin package ./YOUR_PLUGIN_DIR
```
#### Automated GitHub Workflow
Configure GitHub Actions to automate PR creation:
1. Create a Personal Access Token for your forked repository
2. Add it as `PLUGIN_ACTION` secret in your source repo
3. Create `.github/workflows/plugin-publish.yml`
When you create a release, the action will:
- Package your plugin
- Create a PR to your fork
[Detailed workflow documentation](https://docs.dify.ai/plugins/publish-plugins/plugin-auto-publish-pr)
</details>
## Privacy Policy
If publishing to the Marketplace, provide a privacy policy in [PRIVACY.md](PRIVACY.md).

View File

@@ -0,0 +1,3 @@
## Privacy
!!! Please fill in the privacy policy of the plugin.

10
difyPlugin/pdf/README.md Normal file
View File

@@ -0,0 +1,10 @@
## pdf
**Author:** yslg
**Version:** 0.0.1
**Type:** tool
### Description

View File

@@ -0,0 +1,55 @@
<!--
~ Dify Marketplace Template Icon
~ Dify 市场模板图标
~ Dify マーケットプレイステンプレートアイコン
~
~ WARNING / 警告 / 警告:
~
~ English: This is a TEMPLATE icon from Dify Marketplace only. You MUST NOT use this default icon in any way.
~ Please replace it with your own custom icon before submitting this plugin.
~
~ 中文: 这只是来自 Dify 市场的模板图标。您绝对不能以任何方式使用此默认图标。
~ 请在提交此插件之前将其替换为您自己的自定义图标。
~
~ 日本語: これは Dify マーケットプレイスのテンプレートアイコンです。このデフォルトアイコンをいかなる方法でも使用してはいけません。
~ このプラグインを提出する前に、独自のカスタムアイコンに置き換えてください。
~
~ DIFY_MARKETPLACE_TEMPLATE_ICON_DO_NOT_USE
-->
<svg width="40" height="40" viewBox="0 0 40 40" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_15253_95095)">
<rect width="40" height="40" fill="#0033FF"/>
<g filter="url(#filter0_n_15253_95095)">
<rect width="40" height="40" fill="url(#paint0_linear_15253_95095)"/>
</g>
<path d="M28 10C28.5523 10 29 10.4477 29 11V16C29 16.5523 28.5523 17 28 17H23V30C23 30.5523 22.5523 31 22 31H18C17.4477 31 17 30.5523 17 30V17H11.5C10.9477 17 10.5 16.5523 10.5 16V13.618C10.5 13.2393 10.714 12.893 11.0528 12.7236L16.5 10H28ZM23 12H16.9721L12.5 14.2361V15H19V29H21V15H23V12ZM27 12H25V15H27V12Z" fill="white"/>
</g>
<defs>
<filter id="filter0_n_15253_95095" x="0" y="0" width="40" height="40" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
<feBlend mode="normal" in="SourceGraphic" in2="BackgroundImageFix" result="shape"/>
<feTurbulence type="fractalNoise" baseFrequency="2 2" stitchTiles="stitch" numOctaves="3" result="noise" seed="8033" />
<feComponentTransfer in="noise" result="coloredNoise1">
<feFuncR type="linear" slope="2" intercept="-0.5" />
<feFuncG type="linear" slope="2" intercept="-0.5" />
<feFuncB type="linear" slope="2" intercept="-0.5" />
<feFuncA type="discrete" tableValues="1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 "/>
</feComponentTransfer>
<feComposite operator="in" in2="shape" in="coloredNoise1" result="noise1Clipped" />
<feComponentTransfer in="noise1Clipped" result="color1">
<feFuncA type="table" tableValues="0 0.06" />
</feComponentTransfer>
<feMerge result="effect1_noise_15253_95095">
<feMergeNode in="shape" />
<feMergeNode in="color1" />
</feMerge>
</filter>
<linearGradient id="paint0_linear_15253_95095" x1="0" y1="0" x2="40" y2="40" gradientUnits="userSpaceOnUse">
<stop stop-color="#1443FF"/>
<stop offset="1" stop-color="#0031F5"/>
</linearGradient>
<clipPath id="clip0_15253_95095">
<rect width="40" height="40" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 3.0 KiB

View File

@@ -0,0 +1,55 @@
<!--
~ Dify Marketplace Template Icon
~ Dify 市场模板图标
~ Dify マーケットプレイステンプレートアイコン
~
~ WARNING / 警告 / 警告:
~
~ English: This is a TEMPLATE icon from Dify Marketplace only. You MUST NOT use this default icon in any way.
~ Please replace it with your own custom icon before submitting this plugin.
~
~ 中文: 这只是来自 Dify 市场的模板图标。您绝对不能以任何方式使用此默认图标。
~ 请在提交此插件之前将其替换为您自己的自定义图标。
~
~ 日本語: これは Dify マーケットプレイスのテンプレートアイコンです。このデフォルトアイコンをいかなる方法でも使用してはいけません。
~ このプラグインを提出する前に、独自のカスタムアイコンに置き換えてください。
~
~ DIFY_MARKETPLACE_TEMPLATE_ICON_DO_NOT_USE
-->
<svg width="40" height="40" viewBox="0 0 40 40" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_15255_46435)">
<rect width="40" height="40" fill="#0033FF"/>
<g filter="url(#filter0_n_15255_46435)">
<rect width="40" height="40" fill="url(#paint0_linear_15255_46435)"/>
</g>
<path d="M28 10C28.5523 10 29 10.4477 29 11V16C29 16.5523 28.5523 17 28 17H23V30C23 30.5523 22.5523 31 22 31H18C17.4477 31 17 30.5523 17 30V17H11.5C10.9477 17 10.5 16.5523 10.5 16V13.618C10.5 13.2393 10.714 12.893 11.0528 12.7236L16.5 10H28ZM23 12H16.9721L12.5 14.2361V15H19V29H21V15H23V12ZM27 12H25V15H27V12Z" fill="white"/>
</g>
<defs>
<filter id="filter0_n_15255_46435" x="0" y="0" width="40" height="40" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
<feBlend mode="normal" in="SourceGraphic" in2="BackgroundImageFix" result="shape"/>
<feTurbulence type="fractalNoise" baseFrequency="2 2" stitchTiles="stitch" numOctaves="3" result="noise" seed="8033" />
<feComponentTransfer in="noise" result="coloredNoise1">
<feFuncR type="linear" slope="2" intercept="-0.5" />
<feFuncG type="linear" slope="2" intercept="-0.5" />
<feFuncB type="linear" slope="2" intercept="-0.5" />
<feFuncA type="discrete" tableValues="1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 "/>
</feComponentTransfer>
<feComposite operator="in" in2="shape" in="coloredNoise1" result="noise1Clipped" />
<feComponentTransfer in="noise1Clipped" result="color1">
<feFuncA type="table" tableValues="0 0.06" />
</feComponentTransfer>
<feMerge result="effect1_noise_15255_46435">
<feMergeNode in="shape" />
<feMergeNode in="color1" />
</feMerge>
</filter>
<linearGradient id="paint0_linear_15255_46435" x1="0" y1="0" x2="40" y2="40" gradientUnits="userSpaceOnUse">
<stop stop-color="#1F4CFF"/>
<stop offset="1" stop-color="#0033FF"/>
</linearGradient>
<clipPath id="clip0_15255_46435">
<rect width="40" height="40" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 3.0 KiB

6
difyPlugin/pdf/main.py Normal file
View File

@@ -0,0 +1,6 @@
# Entry point for the Dify PDF plugin: creates the plugin runtime and
# starts its event loop when executed as a script (python -m main).
from dify_plugin import Plugin, DifyPluginEnv
# MAX_REQUEST_TIMEOUT=120 allows long-running requests (e.g. large PDFs)
# up to 120 seconds before the runtime aborts them.
plugin = Plugin(DifyPluginEnv(MAX_REQUEST_TIMEOUT=120))
if __name__ == '__main__':
    plugin.run()

View File

@@ -0,0 +1,37 @@
version: 0.0.1
type: plugin
author: yslg
name: pdf
label:
en_US: pdf
ja_JP: pdf
zh_Hans: pdf
pt_BR: pdf
description:
en_US: pdfTools
ja_JP: pdfTools
zh_Hans: pdfTools
pt_BR: pdfTools
icon: icon.svg
icon_dark: icon-dark.svg
resource:
memory: 268435456
permission:
tool:
enabled: true
plugins:
tools:
- provider/pdf.yaml
meta:
version: 0.0.1
arch:
- amd64
- arm64
runner:
language: python
version: "3.12"
entrypoint: main
minimum_dify_version: null
created_at: 2026-03-02T13:21:03.2806864+08:00
privacy: PRIVACY.md
verified: false

View File

@@ -0,0 +1,64 @@
{
"name": "pdf-plugin",
"version": "1.0.0",
"description": "PDF plugin for analyzing table of contents and extracting text",
"author": "System",
"type": "tool",
"main": "main.py",
"requirements": "requirements.txt",
"icon": "https://neeko-copilot.bytedance.net/api/text2image?prompt=PDF%20document%20icon&size=square",
"settings": [
{
"key": "debug",
"type": "boolean",
"default": false,
"description": "Enable debug mode"
}
],
"functions": [
{
"name": "analyze_toc",
"description": "Analyze PDF and find table of contents",
"parameters": {
"type": "object",
"properties": {
"file": {
"type": "file",
"description": "PDF file to analyze",
"fileTypes": ["pdf"]
}
},
"required": ["file"]
}
},
{
"name": "extract_text",
"description": "Extract text from specified page range",
"parameters": {
"type": "object",
"properties": {
"file": {
"type": "file",
"description": "PDF file to extract text from",
"fileTypes": ["pdf"]
},
"page_range": {
"type": "object",
"properties": {
"start": {
"type": "integer",
"default": 0,
"description": "Start page index"
},
"end": {
"type": "integer",
"description": "End page index"
}
}
}
},
"required": ["file"]
}
}
]
}

View File

@@ -0,0 +1,53 @@
from typing import Any
from dify_plugin import ToolProvider
from dify_plugin.errors.tool import ToolProviderCredentialValidationError
class PdfProvider(ToolProvider):
    """Tool provider for the PDF plugin.

    Dify invokes ``_validate_credentials`` when a user saves credentials
    for this provider. The provider currently declares no credential
    schema, so the validation body is still the SDK template placeholder.
    """

    def _validate_credentials(self, credentials: dict[str, Any]) -> None:
        # NOTE(review): the try body contains only a placeholder docstring —
        # no actual validation is performed yet. Any exception raised by a
        # future implementation is rewrapped so Dify can surface it to the
        # user as a credential-validation failure.
        try:
            """
            IMPLEMENT YOUR VALIDATION HERE
            """
        except Exception as e:
            raise ToolProviderCredentialValidationError(str(e))

    #########################################################################################
    # If OAuth is supported, uncomment the following functions.
    # Warning: please make sure that the sdk version is 0.4.2 or higher.
    #########################################################################################
    # def _oauth_get_authorization_url(self, redirect_uri: str, system_credentials: Mapping[str, Any]) -> str:
    #     """
    #     Generate the authorization URL for pdf OAuth.
    #     """
    #     try:
    #         """
    #         IMPLEMENT YOUR AUTHORIZATION URL GENERATION HERE
    #         """
    #     except Exception as e:
    #         raise ToolProviderOAuthError(str(e))
    #     return ""
    # def _oauth_get_credentials(
    #     self, redirect_uri: str, system_credentials: Mapping[str, Any], request: Request
    # ) -> Mapping[str, Any]:
    #     """
    #     Exchange code for access_token.
    #     """
    #     try:
    #         """
    #         IMPLEMENT YOUR CREDENTIALS EXCHANGE HERE
    #         """
    #     except Exception as e:
    #         raise ToolProviderOAuthError(str(e))
    #     return dict()
    # def _oauth_refresh_credentials(
    #     self, redirect_uri: str, system_credentials: Mapping[str, Any], credentials: Mapping[str, Any]
    # ) -> OAuthCredentials:
    #     """
    #     Refresh the credentials
    #     """
    #     return OAuthCredentials(credentials=credentials, expires_at=-1)

View File

@@ -0,0 +1,63 @@
identity:
author: "yslg"
name: "pdf"
label:
en_US: "pdf"
zh_Hans: "pdf"
pt_BR: "pdf"
ja_JP: "pdf"
description:
en_US: "pdfTools"
zh_Hans: "pdfTools"
pt_BR: "pdfTools"
ja_JP: "pdfTools"
icon: "icon.svg"
#########################################################################################
# If you want to support OAuth, you can uncomment the following code.
#########################################################################################
# oauth_schema:
# client_schema:
# - name: "client_id"
# type: "secret-input"
# required: true
# url: https://example.com/oauth/authorize
# placeholder:
# en_US: "Please input your Client ID"
# zh_Hans: "请输入你的 Client ID"
# pt_BR: "Insira seu Client ID"
# help:
# en_US: "Client ID is used to authenticate requests to the example.com API."
# zh_Hans: "Client ID 用于认证请求到 example.com API。"
# pt_BR: "Client ID é usado para autenticar solicitações à API do example.com."
# label:
# zh_Hans: "Client ID"
# en_US: "Client ID"
# - name: "client_secret"
# type: "secret-input"
# required: true
# url: https://example.com/oauth/authorize
# placeholder:
# en_US: "Please input your Client Secret"
# zh_Hans: "请输入你的 Client Secret"
# pt_BR: "Insira seu Client Secret"
# help:
# en_US: "Client Secret is used to authenticate requests to the example.com API."
# zh_Hans: "Client Secret 用于认证请求到 example.com API。"
# pt_BR: "Client Secret é usado para autenticar solicitações à API do example.com."
# label:
# zh_Hans: "Client Secret"
# en_US: "Client Secret"
# credentials_schema:
# - name: "access_token"
# type: "secret-input"
# label:
# zh_Hans: "Access Token"
# en_US: "Access Token"
tools:
- tools/pdf.yaml
- tools/pdf_single_page.yaml
extra:
python:
source: provider/pdf.py

View File

@@ -0,0 +1,2 @@
dify_plugin>=0.4.0,<0.7.0
PyPDF2>=3.0.1

View File

@@ -0,0 +1,61 @@
import re
from collections.abc import Generator
from io import BytesIO
from typing import Any
import PyPDF2
from dify_plugin import Tool
from dify_plugin.entities.tool import ToolInvokeMessage
class PdfTool(Tool):
    """Locate a PDF's table-of-contents pages and return their text."""

    def _invoke(self, tool_parameters: dict[str, Any]) -> Generator[ToolInvokeMessage, None, None]:
        """Find the first contiguous run of TOC pages and extract their text.

        Parameters (tool_parameters):
            file: uploaded PDF file object; its ``blob`` attribute holds
                the raw PDF bytes.

        Yields:
            A text message when ``file`` is missing; otherwise a JSON
            message with ``start``/``end`` (0-based page indices of the
            TOC run, or null when no TOC heading was found) and ``pages``
            (extracted text of each page in that run).
        """
        # Fixed for Python 3.12 (the manifest's runner version):
        # collections.abc.Generator requires all three type parameters
        # before 3.13, so the annotation spells out [yield, send, return].
        file = tool_parameters.get("file")
        if not file:
            yield self.create_text_message("Error: file is required")
            return

        # file.blob returns bytes
        reader = PyPDF2.PdfReader(BytesIO(file.blob))
        num_pages = len(reader.pages)

        # Headings that typically mark a table-of-contents page
        # (Simplified Chinese, English, Japanese).
        toc_patterns = [
            r'目录',
            r'Table of Contents',
            r'Contents',
            r'目次'
        ]

        # Track the first contiguous run of pages matching any TOC heading.
        toc_start = None
        toc_end = None
        for page_num in range(num_pages):
            text = reader.pages[page_num].extract_text() or ""
            if any(re.search(pattern, text, re.IGNORECASE) for pattern in toc_patterns):
                if toc_start is None:
                    toc_start = page_num
                toc_end = page_num
            elif toc_start is not None:
                # The contiguous TOC run has ended; stop scanning.
                break

        if toc_start is None:
            # No page matched any heading pattern.
            yield self.create_json_message({
                "start": None,
                "end": None,
                "pages": []
            })
            return

        toc_pages = [
            reader.pages[page_num].extract_text() or ""
            for page_num in range(toc_start, toc_end + 1)
        ]
        yield self.create_json_message({
            "start": toc_start,
            "end": toc_end,
            "pages": toc_pages
        })

View File

@@ -0,0 +1,36 @@
identity:
name: "pdf"
author: "yslg"
label:
en_US: "Extract TOC Pages and Content"
zh_Hans: "提取目录页和内容"
pt_BR: "Extrair páginas de sumário e conteúdo"
ja_JP: "目次ページと内容を抽出"
description:
human:
en_US: "Extract table-of-contents page range and all page text in that range"
zh_Hans: "提取目录页范围以及该范围内所有页文本"
pt_BR: "Extrair intervalo de páginas de sumário e todo o texto nesse intervalo"
ja_JP: "目次ページ範囲とその範囲内の全ページテキストを抽出"
llm: "Extract table-of-contents page range and all page text in that range"
parameters:
- name: file
type: file
required: true
label:
en_US: PDF File
zh_Hans: PDF 文件
pt_BR: Arquivo PDF
ja_JP: PDFファイル
human_description:
en_US: "PDF file to process"
zh_Hans: "要处理的 PDF 文件"
pt_BR: "Arquivo PDF para processar"
ja_JP: "処理するPDFファイル"
llm_description: "PDF file to process, output contains start/end/pages"
form: llm
fileTypes:
- "pdf"
extra:
python:
source: tools/pdf.py

View File

@@ -0,0 +1,36 @@
from collections.abc import Generator
from io import BytesIO
from typing import Any
import PyPDF2
from dify_plugin import Tool
from dify_plugin.entities.tool import ToolInvokeMessage
class PdfSinglePageTool(Tool):
    """Extract the text of a single specified page from a PDF file."""

    def _invoke(self, tool_parameters: dict[str, Any]) -> Generator[ToolInvokeMessage, None, None]:
        """Extract text from one page, clamping the index into range.

        Parameters (tool_parameters):
            file: uploaded PDF file object; its ``blob`` attribute holds
                the raw PDF bytes.
            page: requested 0-based page index (defaults to 0); values
                outside the document are clamped to the nearest valid page.

        Yields:
            A text message on missing input or an empty document;
            otherwise a JSON message with ``start``/``end`` (both the
            resolved page index) and ``pages`` (a one-element list with
            the page's extracted text).
        """
        # Fixed for Python 3.12 (the manifest's runner version):
        # collections.abc.Generator requires all three type parameters
        # before 3.13, so the annotation spells out [yield, send, return].
        file = tool_parameters.get("file")
        page = tool_parameters.get("page", 0)
        if not file:
            yield self.create_text_message("Error: file is required")
            return

        reader = PyPDF2.PdfReader(BytesIO(file.blob))
        num_pages = len(reader.pages)
        if num_pages == 0:
            # Guard: clamping below would produce index -1 and
            # reader.pages[-1] would raise IndexError on an empty document.
            yield self.create_text_message("Error: PDF has no pages")
            return

        # Clamp the requested index into [0, num_pages - 1].
        page_index = min(max(int(page), 0), num_pages - 1)

        text = reader.pages[page_index].extract_text() or ""
        yield self.create_json_message({
            "start": page_index,
            "end": page_index,
            "pages": [text]
        })

View File

@@ -0,0 +1,52 @@
identity:
name: "pdf_single_page"
author: "yslg"
label:
en_US: "Extract Single-Page Text"
zh_Hans: "提取单页文字"
pt_BR: "Extrair texto de página única"
ja_JP: "単一ページのテキストを抽出"
description:
human:
en_US: "Extract text from one specified page"
zh_Hans: "提取指定单页文字"
pt_BR: "Extrair texto de uma página especificada"
ja_JP: "指定した1ページのテキストを抽出"
llm: "Extract text from one specified page"
parameters:
- name: file
type: file
required: true
label:
en_US: PDF File
zh_Hans: PDF 文件
pt_BR: Arquivo PDF
ja_JP: PDFファイル
human_description:
en_US: "PDF file to process"
zh_Hans: "要处理的 PDF 文件"
pt_BR: "Arquivo PDF para processar"
ja_JP: "処理するPDFファイル"
llm_description: "PDF file to process"
form: llm
fileTypes:
- "pdf"
- name: page
type: number
required: true
label:
en_US: Page Index
zh_Hans: 页码
pt_BR: Índice da Página
ja_JP: ページ番号
human_description:
en_US: "Single page index to extract"
zh_Hans: "要提取的单页页码"
pt_BR: "Índice da página única para extrair"
ja_JP: "抽出対象のページ番号"
llm_description: "Single page index to extract"
form: llm
default: 0
extra:
python:
source: tools/pdf_single_page.py

122
difyPlugin/需求文档.md Normal file
View File

@@ -0,0 +1,122 @@
# Dify 插件服务需求文档
## 1. 项目概述
开发一个基于 FastAPI 框架的 Dify 插件服务,实现与 Dify 平台的集成,支持多种插件的部署和管理,提供各种功能扩展。
## 2. 技术栈
- **框架**FastAPI
- **语言**Python 3.9+
- **依赖管理**Poetry 或 Pip
- **部署方式**Docker 容器化
## 3. 项目架构
### 3.1 架构设计
- **插件管理系统**:统一管理多个 Dify 插件
- **插件加载机制**:支持动态加载和热更新插件
- **插件隔离**:每个插件运行在独立的环境中
- **API 网关**:统一的 API 入口,路由到对应插件
### 3.2 目录结构
```
difyPlugin/
├── main.py # 应用入口
├── requirements.txt # 依赖管理
├── .env # 环境配置
├── app/
│ ├── api/ # API 路由
│ ├── core/ # 核心配置
│ ├── plugins/ # 插件目录
│ │ ├── plugin1/ # 插件1
│ │ ├── plugin2/ # 插件2
│ │ └── __init__.py # 插件加载器
│ └── services/ # 公共服务
└── tests/ # 测试目录
```
### 3.3 插件规范
- **插件结构**:每个插件包含独立的配置、逻辑和 API
- **插件接口**:统一的插件接口规范
- **插件注册**:自动发现和注册插件
- **插件生命周期**:支持插件的启动、停止和重启
## 4. 核心功能
### 4.1 基础功能
- **健康检查**:提供服务状态检查接口
- **版本管理**:支持插件版本控制
- **认证机制**:实现与 Dify 的安全认证
- **插件管理**:支持插件的注册、启动、停止和卸载
### 4.2 业务功能
- **数据处理**:支持各种数据格式的转换和处理
- **外部 API 集成**:对接第三方服务的 API
- **自定义逻辑**:支持用户自定义业务逻辑
- **事件处理**:响应 Dify 平台的事件触发
## 5. 接口设计
### 5.1 主要接口
- `GET /health`:健康检查
- `GET /api/v1/plugins`:获取插件列表
- `GET /api/v1/plugins/{plugin_id}`:获取插件详情
- `POST /api/v1/plugins/{plugin_id}/execute`:执行插件功能
- `GET /api/v1/plugins/{plugin_id}/metadata`:获取插件元数据
- `POST /api/v1/plugins/{plugin_id}/start`:启动插件
- `POST /api/v1/plugins/{plugin_id}/stop`:停止插件
### 5.2 请求/响应格式
- **请求格式**JSON
- **响应格式**JSON包含状态码和数据
## 6. 部署要求
- **环境变量**:支持通过环境变量配置服务参数
- **日志管理**:集成结构化日志
- **监控指标**:提供 Prometheus 指标接口
- **错误处理**:完善的错误处理和异常捕获
- **插件隔离**:支持插件的独立部署和隔离
## 7. 集成方式
- **Dify 插件注册**:按照 Dify 插件规范注册
- **Webhook 配置**:支持 Dify 平台的 Webhook 回调
- **事件订阅**:订阅 Dify 平台的事件
- **插件发现**:自动发现和注册新插件
## 8. 开发计划
### 8.1 阶段一:项目初始化
- 创建 FastAPI 项目结构
- 配置依赖管理
- 实现插件管理系统
### 8.2 阶段二:核心功能开发
- 实现插件加载机制
- 开发插件接口规范
- 实现数据处理功能
- 集成外部 API
### 8.3 阶段三:测试与部署
- 编写单元测试
- 集成测试
- 容器化部署
- 插件示例开发
## 9. 技术要求
- **代码质量**:遵循 PEP 8 编码规范
- **文档**:完善的 API 文档
- **性能**:优化响应速度和资源占用
- **安全**:实现安全的认证和授权机制
- **可扩展性**:支持插件的动态添加和移除
## 10. 交付物
- **源代码**:完整的项目代码
- **部署文档**:详细的部署步骤
- **API 文档**:自动生成的 API 文档
- **测试报告**:测试结果和覆盖率报告
- **插件开发指南**:插件开发和注册指南

View File

@@ -1,42 +0,0 @@
# ================================================
# Urban Lifeline - Docker 环境变量配置
# 复制此文件为 .env 并修改配置
# ================================================
# 镜像版本
IMAGE_VERSION=latest
# 数据存储根目录
DATA_ROOT=./volumes
LOG_ROOT=./volumes/logs
# ====================== MySQL 配置 ======================
MYSQL_HOST=host.docker.internal
MYSQL_PORT=3306
MYSQL_USER=root
MYSQL_PASSWORD=123456
# ====================== Nacos 配置 ======================
NACOS_SERVER_ADDR=nacos:8848
NACOS_NAMESPACE=
NACOS_AUTH_TOKEN=ZlRkR2ZxR3BvZ1F0a3JxY2V6RUx2cUh1Rkx6V1ZQbE9kUVd1R1VOcWFFS2t3dG5hS0E9PQ==
# ====================== MinIO 配置 ======================
MINIO_ROOT_USER=minioadmin
MINIO_ROOT_PASSWORD=minioadmin123
MINIO_ENDPOINT=http://minio:9000
# ====================== Jitsi 配置 ======================
JITSI_PUBLIC_URL=https://org.xyzh.yslg.jitsi
JVB_HOST_ADDRESS=192.168.0.253
JWT_APP_ID=urbanLifeline
JWT_APP_SECRET=urbanLifeline-jitsi-secret-key-2025-production-safe-hs256
# ====================== Dify 配置 ======================
DIFY_API_URL=http://dify-api:5001
# ====================== Spring 配置 ======================
SPRING_PROFILES_ACTIVE=prod
# ====================== API 配置 ======================
API_BASE_URL=http://gateway:8080

View File

@@ -1,318 +0,0 @@
# ================================================
# Urban Lifeline - Docker 构建管理 Makefile
# ================================================
# 使用方法:
# make help - 显示帮助信息
# make build-all - 构建所有镜像
# make build-serv - 构建所有后端服务镜像
# make build-gateway - 构建单个服务镜像
# make save-all - 导出所有镜像
# make up - 启动所有服务
# make down - 停止所有服务
# ================================================
# 项目根目录docker 目录的上级)
PROJECT_ROOT := $(shell cd .. && pwd)
DOCKER_DIR := $(shell pwd)
# 镜像版本
IMAGE_VERSION ?= $(shell date +%Y%m%d_%H%M%S)
IMAGE_REGISTRY ?=
# 服务列表
SERV_SERVICES := gateway system auth file ai workcase
WEB_SERVICES := platform workcase-web
# 服务端口映射
PORT_gateway := 8080
PORT_system := 8082
PORT_auth := 8081
PORT_file := 8084
PORT_ai := 8090
PORT_workcase := 8088
# 输出目录
BUILD_OUTPUT := $(DOCKER_DIR)/output
# 颜色定义
COLOR_RESET := \033[0m
COLOR_GREEN := \033[0;32m
COLOR_YELLOW := \033[0;33m
COLOR_BLUE := \033[0;34m
COLOR_RED := \033[0;31m
# ================================================
# 帮助信息
# ================================================
.PHONY: help
help:
@echo "$(COLOR_BLUE)=============================================$(COLOR_RESET)"
@echo "$(COLOR_BLUE) Urban Lifeline Docker 构建管理$(COLOR_RESET)"
@echo "$(COLOR_BLUE)=============================================$(COLOR_RESET)"
@echo ""
@echo "$(COLOR_GREEN)构建命令:$(COLOR_RESET)"
@echo " make build-base - 构建基础镜像"
@echo " make build-all - 构建所有镜像"
@echo " make build-serv - 构建所有后端服务镜像"
@echo " make build-web - 构建前端镜像"
@echo " make build-<service> - 构建单个服务 (gateway/system/auth/file/ai/workcase)"
@echo ""
@echo "$(COLOR_GREEN)编译命令:$(COLOR_RESET)"
@echo " make compile-serv - 编译所有后端服务"
@echo " make compile-<service> - 编译单个后端服务"
@echo " make compile-web - 编译所有前端项目"
@echo ""
@echo "$(COLOR_GREEN)导出命令:$(COLOR_RESET)"
@echo " make save-all - 导出所有镜像到 tar 文件"
@echo " make save-serv - 导出所有后端镜像"
@echo " make save-<service> - 导出单个服务镜像"
@echo ""
@echo "$(COLOR_GREEN)运行命令:$(COLOR_RESET)"
@echo " make up - 启动所有服务"
@echo " make up-infra - 启动基础设施 (nacos/minio/jitsi)"
@echo " make up-serv - 启动后端服务"
@echo " make up-web - 启动前端服务"
@echo " make down - 停止所有服务"
@echo " make logs - 查看所有日志"
@echo " make status - 查看服务状态"
@echo ""
@echo "$(COLOR_GREEN)清理命令:$(COLOR_RESET)"
@echo " make clean - 清理构建产物"
@echo " make clean-images - 清理 Docker 镜像"
@echo ""
@echo "$(COLOR_YELLOW)当前版本: $(IMAGE_VERSION)$(COLOR_RESET)"
@echo ""
# ================================================
# 初始化
# ================================================
.PHONY: init
init:
@mkdir -p $(BUILD_OUTPUT)
@mkdir -p $(DOCKER_DIR)/volumes/logs
# ================================================
# 编译后端服务
# ================================================
.PHONY: compile-serv
compile-serv:
@echo "$(COLOR_YELLOW)编译所有后端服务...$(COLOR_RESET)"
@cd $(PROJECT_ROOT)/urbanLifelineServ && mvn clean package -DskipTests
@echo "$(COLOR_GREEN)✓ 后端服务编译完成$(COLOR_RESET)"
.PHONY: $(addprefix compile-,$(SERV_SERVICES))
$(addprefix compile-,$(SERV_SERVICES)): compile-%:
@echo "$(COLOR_YELLOW)编译 $* 服务...$(COLOR_RESET)"
@cd $(PROJECT_ROOT)/urbanLifelineServ && mvn clean package -DskipTests -pl $* -am
@echo "$(COLOR_GREEN)$* 服务编译完成$(COLOR_RESET)"
# ================================================
# 编译前端项目
# ================================================
.PHONY: compile-web
compile-web:
@echo "$(COLOR_YELLOW)编译前端项目...$(COLOR_RESET)"
@cd $(PROJECT_ROOT)/urbanLifelineWeb && pnpm install && pnpm run build:all
@echo "$(COLOR_GREEN)✓ 前端项目编译完成$(COLOR_RESET)"
.PHONY: compile-platform
compile-platform:
@echo "$(COLOR_YELLOW)编译 platform 项目...$(COLOR_RESET)"
@cd $(PROJECT_ROOT)/urbanLifelineWeb/packages/platform && pnpm run build
@echo "$(COLOR_GREEN)✓ platform 编译完成$(COLOR_RESET)"
.PHONY: compile-workcase
compile-workcase-web:
@echo "$(COLOR_YELLOW)编译 workcase 项目...$(COLOR_RESET)"
@cd $(PROJECT_ROOT)/urbanLifelineWeb/packages/workcase && pnpm run build
@echo "$(COLOR_GREEN)✓ workcase 编译完成$(COLOR_RESET)"
# ================================================
# 构建 Docker 镜像
# ================================================
.PHONY: build-base
build-base:
@echo "$(COLOR_YELLOW)构建基础镜像...$(COLOR_RESET)"
@cd $(PROJECT_ROOT) && docker build \
-t urban-lifeline-base-serv:$(IMAGE_VERSION) \
-t urban-lifeline-base-serv:latest \
-f docker/urbanLifeline/serv/Dockerfile.base .
@echo "$(COLOR_GREEN)✓ 基础镜像构建完成$(COLOR_RESET)"
.PHONY: build-all
build-all: build-base build-serv build-web
@echo "$(COLOR_GREEN)✓ 所有镜像构建完成$(COLOR_RESET)"
.PHONY: build-serv
build-serv: $(addprefix build-,$(SERV_SERVICES))
@echo "$(COLOR_GREEN)✓ 所有后端服务镜像构建完成$(COLOR_RESET)"
# 构建单个后端服务镜像
.PHONY: $(addprefix build-,$(SERV_SERVICES))
$(addprefix build-,$(SERV_SERVICES)): build-%:
@echo "$(COLOR_YELLOW)构建 $* 服务镜像...$(COLOR_RESET)"
@if [ ! -f "$(PROJECT_ROOT)/urbanLifelineServ/$*/target/$*-*.jar" ] && \
[ ! -f "$(PROJECT_ROOT)/urbanLifelineServ/$*/target/*.jar" ]; then \
echo "$(COLOR_RED)✗ JAR 包不存在,请先执行 make compile-$*$(COLOR_RESET)"; \
exit 1; \
fi
@cd $(PROJECT_ROOT) && docker build \
--build-arg SERVICE_NAME=$* \
--build-arg SERVICE_PORT=$(PORT_$*) \
-t urban-lifeline-$*:$(IMAGE_VERSION) \
-t urban-lifeline-$*:latest \
-f docker/urbanLifeline/serv/Dockerfile.template .
@echo "$(COLOR_GREEN)$* 镜像构建完成: urban-lifeline-$*:$(IMAGE_VERSION)$(COLOR_RESET)"
.PHONY: build-web
build-web: build-platform build-workcase-web
@echo "$(COLOR_GREEN)✓ 所有前端镜像构建完成$(COLOR_RESET)"
.PHONY: build-platform
build-platform:
@echo "$(COLOR_YELLOW)构建 platform 镜像...$(COLOR_RESET)"
@if [ ! -d "$(PROJECT_ROOT)/urbanLifelineWeb/packages/platform/dist" ]; then \
echo "$(COLOR_RED)✗ platform dist 不存在,请先执行 make compile-web$(COLOR_RESET)"; \
exit 1; \
fi
@cd $(PROJECT_ROOT) && docker build \
--build-arg WEB_NAME=platform \
-t urban-lifeline-platform:$(IMAGE_VERSION) \
-t urban-lifeline-platform:latest \
-f docker/urbanLifeline/web/Dockerfile .
@echo "$(COLOR_GREEN)✓ platform 镜像构建完成$(COLOR_RESET)"
.PHONY: build-workcase-web
build-workcase-web:
@echo "$(COLOR_YELLOW)构建 workcase-web 镜像...$(COLOR_RESET)"
@if [ ! -d "$(PROJECT_ROOT)/urbanLifelineWeb/packages/workcase/dist" ]; then \
echo "$(COLOR_RED)✗ workcase dist 不存在,请先执行 make compile-web$(COLOR_RESET)"; \
exit 1; \
fi
@cd $(PROJECT_ROOT) && docker build \
--build-arg WEB_NAME=workcase \
-t urban-lifeline-workcase-web:$(IMAGE_VERSION) \
-t urban-lifeline-workcase-web:latest \
-f docker/urbanLifeline/web/Dockerfile .
@echo "$(COLOR_GREEN)✓ workcase-web 镜像构建完成$(COLOR_RESET)"
# ================================================
# 导出镜像
# ================================================
.PHONY: save-all
save-all: init save-serv save-web
@echo "$(COLOR_GREEN)✓ 所有镜像导出完成$(COLOR_RESET)"
@echo "输出目录: $(BUILD_OUTPUT)"
@ls -lh $(BUILD_OUTPUT)/*.tar 2>/dev/null || true
.PHONY: save-serv
save-serv: $(addprefix save-,$(SERV_SERVICES))
@echo "$(COLOR_GREEN)✓ 所有后端镜像导出完成$(COLOR_RESET)"
.PHONY: $(addprefix save-,$(SERV_SERVICES))
$(addprefix save-,$(SERV_SERVICES)): save-%: init
@echo "$(COLOR_YELLOW)导出 $* 镜像...$(COLOR_RESET)"
@docker save -o $(BUILD_OUTPUT)/urban-lifeline-$*_$(IMAGE_VERSION).tar \
urban-lifeline-$*:$(IMAGE_VERSION)
@echo "$(COLOR_GREEN)$* 镜像已导出: $(BUILD_OUTPUT)/urban-lifeline-$*_$(IMAGE_VERSION).tar$(COLOR_RESET)"
.PHONY: save-web
save-web: save-platform save-workcase-web
@echo "$(COLOR_GREEN)✓ 所有前端镜像导出完成$(COLOR_RESET)"
.PHONY: save-platform
save-platform: init
@echo "$(COLOR_YELLOW)导出 platform 镜像...$(COLOR_RESET)"
@docker save -o $(BUILD_OUTPUT)/urban-lifeline-platform_$(IMAGE_VERSION).tar \
urban-lifeline-platform:$(IMAGE_VERSION)
@echo "$(COLOR_GREEN)✓ platform 镜像已导出$(COLOR_RESET)"
.PHONY: save-workcase-web
save-workcase-web: init
@echo "$(COLOR_YELLOW)导出 workcase-web 镜像...$(COLOR_RESET)"
@docker save -o $(BUILD_OUTPUT)/urban-lifeline-workcase-web_$(IMAGE_VERSION).tar \
urban-lifeline-workcase-web:$(IMAGE_VERSION)
@echo "$(COLOR_GREEN)✓ workcase-web 镜像已导出$(COLOR_RESET)"
.PHONY: save-base
save-base: init
@echo "$(COLOR_YELLOW)导出基础镜像...$(COLOR_RESET)"
@docker save -o $(BUILD_OUTPUT)/urban-lifeline-base-serv_$(IMAGE_VERSION).tar \
urban-lifeline-base-serv:$(IMAGE_VERSION)
@echo "$(COLOR_GREEN)✓ 基础镜像已导出$(COLOR_RESET)"
# ================================================
# Docker Compose 操作
# ================================================
.PHONY: up
up:
@echo "$(COLOR_YELLOW)启动所有服务...$(COLOR_RESET)"
@docker compose --profile all up -d
@echo "$(COLOR_GREEN)✓ 所有服务已启动$(COLOR_RESET)"
.PHONY: up-infra
up-infra:
@echo "$(COLOR_YELLOW)启动基础设施...$(COLOR_RESET)"
@docker compose --profile infra up -d
@echo "$(COLOR_GREEN)✓ 基础设施已启动$(COLOR_RESET)"
.PHONY: up-serv
up-serv:
@echo "$(COLOR_YELLOW)启动后端服务...$(COLOR_RESET)"
@docker compose --profile serv up -d
@echo "$(COLOR_GREEN)✓ 后端服务已启动$(COLOR_RESET)"
.PHONY: up-web
up-web:
@echo "$(COLOR_YELLOW)前端已集成到 Nginx (infra),请使用 make up-infra$(COLOR_RESET)"
.PHONY: down
down:
@echo "$(COLOR_YELLOW)停止所有服务...$(COLOR_RESET)"
@docker compose --profile all down
@echo "$(COLOR_GREEN)✓ 所有服务已停止$(COLOR_RESET)"
.PHONY: restart
restart: down up
@echo "$(COLOR_GREEN)✓ 所有服务已重启$(COLOR_RESET)"
.PHONY: logs
logs:
@docker compose logs -f
.PHONY: status
status:
@echo "$(COLOR_BLUE)=============================================$(COLOR_RESET)"
@echo "$(COLOR_BLUE) 服务运行状态$(COLOR_RESET)"
@echo "$(COLOR_BLUE)=============================================$(COLOR_RESET)"
@docker compose ps
# ================================================
# 清理
# ================================================
.PHONY: clean
clean:
@echo "$(COLOR_YELLOW)清理构建产物...$(COLOR_RESET)"
@rm -rf $(BUILD_OUTPUT)
@echo "$(COLOR_GREEN)✓ 清理完成$(COLOR_RESET)"
.PHONY: clean-images
clean-images:
@echo "$(COLOR_YELLOW)清理 Docker 镜像...$(COLOR_RESET)"
@docker images | grep "urban-lifeline" | awk '{print $$3}' | xargs -r docker rmi -f
@echo "$(COLOR_GREEN)✓ 镜像清理完成$(COLOR_RESET)"
# ================================================
# 快捷命令
# ================================================
.PHONY: full-build
full-build: compile-serv compile-web build-all
@echo "$(COLOR_GREEN)✓ 完整构建完成$(COLOR_RESET)"
.PHONY: full-deploy
full-deploy: full-build save-all
@echo "$(COLOR_GREEN)✓ 完整部署包准备完成$(COLOR_RESET)"
@echo "输出目录: $(BUILD_OUTPUT)"
# 默认目标
.DEFAULT_GOAL := help

View File

@@ -1,188 +0,0 @@
# Urban Lifeline Docker 部署指南
## 目录结构
```
docker/
├── docker-compose.yml # 总控 compose 文件
├── Makefile # 构建管理脚本
├── .env.example # 环境变量模板
├── README.md # 本文档
├── infra/ # Level 1: 基础设施
│ └── docker-compose.yml # Nacos, MinIO, Jitsi Meet
├── urbanLifeline/
│ ├── serv/ # Level 2: 后端服务
│ │ ├── docker-compose.yml
│ │ ├── Dockerfile.base # 基础镜像
│ │ ├── Dockerfile.template # 服务镜像模板
│ │ └── start.sh # 启动脚本
│ │
│ └── web/ # Level 3: 前端服务
│ ├── docker-compose.yml
│ ├── Dockerfile
│ └── nginx/ # Nginx 配置
│ ├── nginx.conf
│ └── conf.d/
├── dify/ # Dify AI 平台(独立管理)
│ └── docker-compose.yaml
├── volumes/ # 数据卷目录
│ ├── nacos/
│ ├── minio/
│ ├── jitsi/
│ └── logs/
└── output/ # 镜像导出目录
```
## 快速开始
### 1. 准备环境
```bash
# 复制环境变量配置
cp .env.example .env
# 编辑配置(根据实际环境修改)
vim .env
```
### 2. 编译项目
```bash
# 编译所有后端服务
make compile-serv
# 编译所有前端项目
make compile-web
# 或者一次性编译所有
make full-build
```
### 3. 构建镜像
```bash
# 构建基础镜像(首次需要)
make build-base
# 构建所有镜像
make build-all
# 或者单独构建
make build-gateway
make build-system
make build-web
```
### 4. 启动服务
```bash
# 启动所有服务
make up
# 或者分层启动
make up-infra # 先启动基础设施
make up-serv # 再启动后端服务
make up-web # 最后启动前端
```
### 5. 查看状态
```bash
# 查看服务状态
make status
# 查看日志
make logs
# 查看特定服务日志
docker compose logs -f gateway
```
## 服务层级
### Level 1: 基础设施 (infra)
- **Nginx**: 反向代理 + 前端静态资源 (80/443)
- **Nacos**: 服务注册与配置中心 (8848)
- **MinIO**: 对象存储服务 (9000/9001)
- **Jitsi Meet**: 视频会议服务 (8280)
### Level 2: 后端服务 (serv)
启动顺序: gateway → system → file → auth → ai → workcase
| 服务 | 端口 | 说明 |
|------|------|------|
| gateway | 8080 | API 网关 |
| system | 8082 | 系统服务 |
| file | 8084 | 文件服务 |
| auth | 8081 | 认证服务 |
| ai | 8090 | AI 服务 |
| workcase | 8088 | 工单服务 |
### Level 3: 前端应用 (web)
前端静态资源已打包到 Nginx 镜像中,由 infra 层的 Nginx 服务提供:
- /platform - 管理平台
- /workcase - 工单系统
## 常用命令
```bash
# 构建
make build-all # 构建所有镜像
make build-gateway # 构建单个服务
# 运行
make up # 启动所有
make up-infra # 只启动基础设施
make down # 停止所有
# 导出
make save-all # 导出所有镜像
make save-gateway # 导出单个镜像
# 清理
make clean # 清理构建产物
make clean-images # 清理 Docker 镜像
```
## 单独管理子服务
每个子目录都有独立的 docker-compose.yml可以单独管理
```bash
# 只管理基础设施
cd infra
docker compose up -d nacos
docker compose logs -f nacos
# 只管理后端服务
cd urbanLifeline/serv
docker compose up -d gateway
docker compose restart system
```
## 镜像导出与部署
```bash
# 导出所有镜像
make save-all
# 镜像文件在 output/ 目录
ls -la output/
# 在目标服务器加载镜像
docker load -i urban-lifeline-gateway_20251228_120000.tar
docker load -i urban-lifeline-web_20251228_120000.tar
```
## 注意事项
1. **首次部署**需要先构建基础镜像: `make build-base`
2. **MySQL** 需要在宿主机或单独容器中运行,配置 `MYSQL_HOST`
3. **Dify** 有独立的 docker-compose`dify/` 目录单独管理
4. 修改 `.env` 后需要重启服务才能生效
5. 生产环境建议配置 SSL 证书

View File

@@ -1,381 +0,0 @@
#!/bin/bash
##############################################
# Urban Lifeline - build and packaging script
#
# Features:
#   1. Compile the backend/frontend projects
#   2. Build Docker images
#   3. Export images to tar files
#
# Usage:
#   ./build.sh [target] [options]
#
# Targets:
#   base     - build the base image
#   serv     - build all backend service images
#   web      - build the frontend images
#   all      - build all images (default)
#   gateway  - build a single backend service
#   system/auth/file/ai/workcase - same as above
#
# Options:
#   compile       - compile the code before building images
#   save          - export images to tar files after building
#   save=VERSION  - export images tagged with the given version
#
# Examples:
#   ./build.sh all compile save   # compile + build + export everything
#   ./build.sh gateway compile    # compile + build gateway
#   ./build.sh serv save          # build + export all backend services
#   ./build.sh web compile save   # compile + build + export the frontend
##############################################
set -e
# ANSI color codes used by the log helpers below
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Leveled log helpers: colored tag followed by the message
log_info() { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_step() { echo -e "${BLUE}[STEP]${NC} $1"; }
# Project paths, resolved relative to this script's own location
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
PROJECT_ROOT=$(cd "${SCRIPT_DIR}/.." && pwd)
DOCKER_DIR="${SCRIPT_DIR}"
BUILD_OUTPUT="${DOCKER_DIR}/output"
# Version tag derived from the build timestamp
IMAGE_VERSION=$(date +%Y%m%d_%H%M%S)
# Backend service list (space-separated; iterated via word splitting)
SERV_SERVICES="gateway system auth file ai workcase"
# Service -> exposed port map (associative array; requires bash 4+)
declare -A SERVICE_PORTS=(
["gateway"]=8080
["system"]=8082
["auth"]=8081
["file"]=8084
["ai"]=8090
["workcase"]=8088
)
# Parse arguments: the first positional argument is the build target
BUILD_TARGET="${1:-all}"
shift || true
DO_COMPILE=false
DO_SAVE=false
SAVE_VERSION=""
# Remaining arguments toggle the compile/save behavior
for arg in "$@"; do
case $arg in
compile)
DO_COMPILE=true
;;
save)
DO_SAVE=true
;;
save=*)
DO_SAVE=true
SAVE_VERSION="${arg#save=}"
;;
esac
done
# Default the save version to the freshly generated build version
if [ "${DO_SAVE}" = true ] && [ -z "${SAVE_VERSION}" ]; then
SAVE_VERSION="${IMAGE_VERSION}"
fi
echo "=========================================="
echo "Urban Lifeline - 构建脚本"
echo "=========================================="
log_info "构建目标: ${BUILD_TARGET}"
log_info "构建版本: ${IMAGE_VERSION}"
log_info "编译代码: ${DO_COMPILE}"
log_info "保存镜像: ${DO_SAVE}"
[ "${DO_SAVE}" = true ] && log_info "保存版本: ${SAVE_VERSION}"
echo "=========================================="
echo ""
mkdir -p "${BUILD_OUTPUT}"
# ================================================
# 编译函数
# ================================================
compile_serv_all() {
    # Compile every backend service module with Maven (tests skipped).
    local serv_root="${PROJECT_ROOT}/urbanLifelineServ"
    log_step "编译所有后端服务"
    cd "${serv_root}"
    mvn clean package -DskipTests
    log_info "✅ 后端服务编译完成"
}
compile_serv_single() {
    # Compile one backend service module, plus its reactor dependencies (-am).
    # $1: service module name (gateway/system/auth/file/ai/workcase)
    local service=$1
    log_step "编译 ${service} 服务"
    cd "${PROJECT_ROOT}/urbanLifelineServ"
    # Quote the module name so an empty/odd value fails loudly instead of
    # silently changing the maven invocation.
    mvn clean package -DskipTests -pl "${service}" -am
    # "✅" prefix added for consistency with the other compile/build log lines.
    log_info "✅ ${service} 服务编译完成"
}
compile_web() {
    # Compile the frontend monorepo. Prefers pnpm and falls back to npm.
    # If the workspace has no "build:all" script, each package is built in
    # dependency order (shared -> platform -> workcase).
    log_step "编译前端项目"
    cd "${PROJECT_ROOT}/urbanLifelineWeb"
    # Pick the package manager once instead of duplicating both branches.
    local pm="npm"
    if command -v pnpm &> /dev/null; then
        pm="pnpm"
    fi
    "${pm}" install
    "${pm}" run build:all 2>/dev/null || {
        # No build:all script: build the packages one by one. Each build runs
        # in a subshell so a failed build can no longer leave the script
        # stranded in the wrong working directory (the old
        # `cd pkg && build && cd ../..` chain skipped the final cd on error).
        log_info "分别构建各前端项目..."
        (cd packages/shared && "${pm}" run build)
        (cd packages/platform && "${pm}" run build)
        (cd packages/workcase && "${pm}" run build)
    }
    log_info "✅ 前端项目编译完成"
}
# ================================================
# 构建函数
# ================================================
build_base() {
    # Build the shared base image all backend service images derive from.
    log_step "构建基础镜像"
    cd "${PROJECT_ROOT}"
    local image="urban-lifeline-base-serv"
    docker build \
        -t "${image}:${IMAGE_VERSION}" \
        -t "${image}:latest" \
        -f docker/urbanLifeline/serv/Dockerfile.base .
    log_info "✅ 基础镜像构建完成: urban-lifeline-base-serv:${IMAGE_VERSION}"
}
build_serv_single() {
    # Build the Docker image for one backend service.
    # $1: service name; its exposed port is looked up in SERVICE_PORTS.
    local service=$1
    local port=${SERVICE_PORTS[$service]}
    log_step "构建 ${service} 服务镜像"
    # Refuse to build if the service JAR has not been compiled yet.
    # Declare and assign separately so `local` cannot mask find's exit status.
    local jar_file
    jar_file=$(find "${PROJECT_ROOT}/urbanLifelineServ/${service}/target" -name "*.jar" -type f 2>/dev/null | head -1)
    if [ -z "$jar_file" ]; then
        log_error "JAR 包不存在,请先编译: ./build.sh ${service} compile"
        exit 1
    fi
    cd "${PROJECT_ROOT}"
    # Quoted build args so empty values fail visibly instead of being dropped.
    docker build \
        --build-arg "SERVICE_NAME=${service}" \
        --build-arg "SERVICE_PORT=${port}" \
        -t "urban-lifeline-${service}:${IMAGE_VERSION}" \
        -t "urban-lifeline-${service}:latest" \
        -f docker/urbanLifeline/serv/Dockerfile.template .
    # "✅" prefix added for consistency with the other build log lines.
    log_info "✅ ${service} 镜像构建完成: urban-lifeline-${service}:${IMAGE_VERSION}"
}
build_serv_all() {
    # Build every backend service image in turn.
    local svc
    for svc in ${SERV_SERVICES}; do
        build_serv_single "${svc}"
    done
    log_info "✅ 所有后端服务镜像构建完成"
}
build_web() {
    # Build both frontend images: the admin platform and the workcase app.
    build_platform
    build_workcase_web
    log_info "✅ 所有前端镜像构建完成"
}
build_platform() {
    # Build the "platform" frontend image from its pre-built dist directory.
    log_step "构建 platform 镜像"
    local dist_dir="${PROJECT_ROOT}/urbanLifelineWeb/packages/platform/dist"
    if [ ! -d "${dist_dir}" ]; then
        log_error "platform dist 不存在,请先编译: ./build.sh platform compile"
        exit 1
    fi
    cd "${PROJECT_ROOT}"
    docker build \
        --build-arg WEB_NAME=platform \
        -t "urban-lifeline-platform:${IMAGE_VERSION}" \
        -t "urban-lifeline-platform:latest" \
        -f docker/urbanLifeline/web/Dockerfile .
    log_info "✅ platform 镜像构建完成: urban-lifeline-platform:${IMAGE_VERSION}"
}
build_workcase_web() {
    # Build the "workcase" frontend image from its pre-built dist directory.
    log_step "构建 workcase-web 镜像"
    local dist_dir="${PROJECT_ROOT}/urbanLifelineWeb/packages/workcase/dist"
    if [ ! -d "${dist_dir}" ]; then
        log_error "workcase dist 不存在,请先编译: ./build.sh workcase-web compile"
        exit 1
    fi
    cd "${PROJECT_ROOT}"
    docker build \
        --build-arg WEB_NAME=workcase \
        -t "urban-lifeline-workcase-web:${IMAGE_VERSION}" \
        -t "urban-lifeline-workcase-web:latest" \
        -f docker/urbanLifeline/web/Dockerfile .
    log_info "✅ workcase-web 镜像构建完成: urban-lifeline-workcase-web:${IMAGE_VERSION}"
}
# ================================================
# 导出函数
# ================================================
save_image() {
    # Export a Docker image to ${BUILD_OUTPUT}/<name>_<version>.tar.
    # $1: image name (without tag); the tag comes from SAVE_VERSION.
    # Returns 1 (without exporting) when the image does not exist locally.
    local image_name=$1
    local version=${SAVE_VERSION}
    local output_file="${BUILD_OUTPUT}/${image_name}_${version}.tar"
    log_info "导出镜像: ${image_name}:${version}"
    # Use `docker image inspect` for an exact existence check; the previous
    # `docker images | grep -q "name.*version"` was a regex substring match
    # that could accidentally match an unrelated repository/tag combination.
    if ! docker image inspect "${image_name}:${version}" >/dev/null 2>&1; then
        log_error "镜像不存在: ${image_name}:${version}"
        return 1
    fi
    docker save -o "${output_file}" "${image_name}:${version}"
    # Declare and assign separately so `local` cannot mask du's exit status.
    local size
    size=$(du -h "${output_file}" | cut -f1)
    log_info "✅ 镜像已导出: ${output_file} (${size})"
}
save_serv_all() {
    # Export every backend service image to a tar file.
    local svc
    for svc in ${SERV_SERVICES}; do
        save_image "urban-lifeline-${svc}"
    done
}
# ================================================
# 主流程
# ================================================
main() {
# Main flow: phase 1 compile (optional) -> phase 2 build images ->
# phase 3 export to tar (optional) -> show resulting images.
# Phase 1: optionally compile sources for the selected target
if [ "${DO_COMPILE}" = true ]; then
case ${BUILD_TARGET} in
base)
# The base image contains no project code, so nothing to compile
;;
serv)
compile_serv_all
;;
web)
compile_web
;;
all)
compile_serv_all
compile_web
;;
gateway|system|auth|file|ai|workcase)
compile_serv_single ${BUILD_TARGET}
;;
*)
log_error "未知目标: ${BUILD_TARGET}"
exit 1
;;
esac
fi
# Phase 2: build the Docker image(s) for the selected target
case ${BUILD_TARGET} in
base)
build_base
;;
serv)
build_serv_all
;;
web)
build_web
;;
platform)
build_platform
;;
workcase-web)
build_workcase_web
;;
all)
# Ensure the base image exists before building the service images
if ! docker images | grep -q "urban-lifeline-base-serv.*latest"; then
log_warn "基础镜像不存在,先构建基础镜像"
build_base
fi
build_serv_all
build_web
;;
gateway|system|auth|file|ai|workcase)
build_serv_single ${BUILD_TARGET}
;;
*)
log_error "未知目标: ${BUILD_TARGET}"
echo ""
echo "可用目标: base, serv, web, all, gateway, system, auth, file, ai, workcase, platform, workcase-web"
exit 1
;;
esac
# Phase 3: optionally export the built image(s) to tar files
if [ "${DO_SAVE}" = true ]; then
case ${BUILD_TARGET} in
base)
save_image "urban-lifeline-base-serv"
;;
serv)
save_serv_all
;;
web)
save_image "urban-lifeline-platform"
save_image "urban-lifeline-workcase-web"
;;
platform)
save_image "urban-lifeline-platform"
;;
workcase-web)
save_image "urban-lifeline-workcase-web"
;;
all)
save_image "urban-lifeline-base-serv"
save_serv_all
save_image "urban-lifeline-platform"
save_image "urban-lifeline-workcase-web"
;;
gateway|system|auth|file|ai|workcase)
save_image "urban-lifeline-${BUILD_TARGET}"
;;
esac
echo ""
log_info "导出文件列表:"
ls -lh "${BUILD_OUTPUT}"/*.tar 2>/dev/null || true
fi
# Show the resulting urban-lifeline images as a quick sanity check
echo ""
log_info "Docker 镜像列表:"
docker images | grep "urban-lifeline" | head -20
echo ""
echo "=========================================="
log_info "✅ 构建完成!"
echo "=========================================="
}
main

File diff suppressed because it is too large Load Diff

View File

@@ -1,119 +0,0 @@
## README for docker Deployment
Welcome to the new `docker` directory for deploying Dify using Docker Compose. This README outlines the updates, deployment instructions, and migration details for existing users.
### What's Updated
- **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.\
For more information, refer to `docker/certbot/README.md`.
- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
> What is `.env`? </br> </br>
> The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments.
- **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
### How to Deploy Dify with `docker-compose.yaml`
1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system.
1. **Environment Setup**:
- Navigate to the `docker` directory.
- Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
- Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
1. **Running the Services**:
- Execute `docker compose up` from the `docker` directory to start the services.
- To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
1. **SSL Certificate Setup**:
- Refer to `docker/certbot/README.md` to set up SSL certificates using Certbot.
1. **OpenTelemetry Collector Setup**:
- Change `ENABLE_OTEL` to `true` in `.env`.
- Configure `OTLP_BASE_ENDPOINT` properly.
### How to Deploy Middleware for Developing Dify
1. **Middleware Setup**:
- Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches.
- Navigate to the `docker` directory.
- Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file).
1. **Running Middleware Services**:
- Navigate to the `docker` directory.
- Execute `docker compose --env-file middleware.env -f docker-compose.middleware.yaml -p dify up -d` to start PostgreSQL/MySQL (per `DB_TYPE`) plus the bundled Weaviate instance.
> Compose automatically loads `COMPOSE_PROFILES=${DB_TYPE:-postgresql},weaviate` from `middleware.env`, so no extra `--profile` flags are needed. Adjust variables in `middleware.env` if you want a different combination of services.
### Migration for Existing Users
For users migrating from the `docker-legacy` setup:
1. **Review Changes**: Familiarize yourself with the new `.env` configuration and Docker Compose setup.
1. **Transfer Customizations**:
- If you have customized configurations such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, you will need to reflect these changes in the `.env` file you create.
1. **Data Migration**:
- Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.
### Overview of `.env`
#### Key Modules and Customization
- **Vector Database Services**: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
- **Storage Services**: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
- **API and Web Services**: Users can define URLs and other settings that affect how the API and web frontend operate.
#### Other notable variables
The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables:
1. **Common Variables**:
- `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services.
- `APP_WEB_URL`: Frontend application URL.
- `FILES_URL`: Base URL for file downloads and previews.
1. **Server Configuration**:
- `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings.
- `SECRET_KEY`: A key for encrypting session cookies and other sensitive data.
1. **Database Configuration**:
- `DB_USERNAME`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_DATABASE`: PostgreSQL database credentials and connection details.
1. **Redis Configuration**:
- `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`: Redis server connection settings.
1. **Celery Configuration**:
- `CELERY_BROKER_URL`: Configuration for Celery message broker.
1. **Storage Configuration**:
- `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc.
1. **Vector Database Configuration**:
- `VECTOR_STORE`: Type of vector database (e.g., `weaviate`, `milvus`).
- Specific settings for each vector store like `WEAVIATE_ENDPOINT`, `MILVUS_URI`.
1. **CORS Configuration**:
- `WEB_API_CORS_ALLOW_ORIGINS`, `CONSOLE_CORS_ALLOW_ORIGINS`: Settings for cross-origin resource sharing.
1. **OpenTelemetry Configuration**:
- `ENABLE_OTEL`: Enable OpenTelemetry collector in api.
- `OTLP_BASE_ENDPOINT`: Endpoint for your OTLP exporter.
1. **Other Service-Specific Environment Variables**:
- Each service like `nginx`, `redis`, `db`, and vector databases have specific environment variables that are directly referenced in the `docker-compose.yaml`.
### Additional Information
- **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.
- **Support**: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory.
This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.

View File

@@ -1,76 +0,0 @@
# Launching new servers with SSL certificates
## Short description
Docker Compose certbot configuration with backward compatibility (it still works without the certbot container).\
Use `docker compose --profile certbot up` to use this features.
## The simplest way for launching new servers with SSL certificates
1. Get letsencrypt certs\
set `.env` values
```properties
NGINX_SSL_CERT_FILENAME=fullchain.pem
NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
NGINX_ENABLE_CERTBOT_CHALLENGE=true
CERTBOT_DOMAIN=your_domain.com
CERTBOT_EMAIL=example@your_domain.com
```
execute command:
```shell
docker network prune
docker compose --profile certbot up --force-recreate -d
```
then after the containers launched:
```shell
docker compose exec -it certbot /bin/sh /update-cert.sh
```
1. Edit `.env` file and `docker compose --profile certbot up` again.\
set `.env` value additionally
```properties
NGINX_HTTPS_ENABLED=true
```
execute command:
```shell
docker compose --profile certbot up -d --no-deps --force-recreate nginx
```
Then you can access your server over HTTPS.\
[https://your_domain.com](https://your_domain.com)
## SSL certificates renewal
For SSL certificates renewal, execute commands below:
```shell
docker compose exec -it certbot /bin/sh /update-cert.sh
docker compose exec nginx nginx -s reload
```
## Options for certbot
`CERTBOT_OPTIONS` key might be helpful for testing. i.e.,
```properties
CERTBOT_OPTIONS=--dry-run
```
To apply changes to `CERTBOT_OPTIONS`, regenerate the certbot container before updating the certificates.
```shell
docker compose --profile certbot up -d --no-deps --force-recreate certbot
docker compose exec -it certbot /bin/sh /update-cert.sh
```
Then, reload the nginx container if necessary.
```shell
docker compose exec nginx nginx -s reload
```
## For legacy servers
To use cert files dir `nginx/ssl` as before, simply launch containers WITHOUT `--profile certbot` option.
```shell
docker compose up -d
```

View File

@@ -1,30 +0,0 @@
#!/bin/sh
# Entrypoint for the certbot container: prints the relevant environment,
# checks the expected bind mounts exist, renders /update-cert.sh from its
# template, then execs the container command.
set -e
printf '%s\n' "Docker entrypoint script is running"
# NOTE: the '%s' conversion does NOT interpret backslash escapes in its
# argument, so the blank-line separators must live in the format string —
# the previous version printed a literal "\n" before each heading.
printf '\n%s\n' "Checking specific environment variables:"
printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"
printf '\n%s\n' "Checking mounted directories:"
for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
    if [ -d "$dir" ]; then
        printf '%s\n' "$dir exists. Contents:"
        ls -la "$dir"
    else
        printf '%s\n' "$dir does not exist."
    fi
done
printf '\n%s\n' "Generating update-cert.sh from template"
# Substitute the CERTBOT_* values into the certificate-renewal script.
sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
    -e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
    -e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
    /update-cert.template.txt > /update-cert.sh
chmod +x /update-cert.sh
printf '\n'
printf '%s\n' "Executing command:" "$@"
exec "$@"

View File

@@ -1,19 +0,0 @@
#!/bin/bash
# Obtain or renew the Let's Encrypt certificate for ${CERTBOT_DOMAIN}.
# Runs inside the certbot container; rendered from update-cert.template.txt
# by docker-entrypoint.sh.
set -e
DOMAIN="${CERTBOT_DOMAIN}"
EMAIL="${CERTBOT_EMAIL}"
OPTIONS="${CERTBOT_OPTIONS}"
# Use the domain itself as the certificate (lineage) name.
CERT_NAME="${DOMAIN}"
# A renewal config file exists only after a certificate has been issued once.
if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
    echo "Certificate exists. Attempting to renew..."
    # ${OPTIONS} stays unquoted on purpose: it may carry several flags
    # (e.g. "--dry-run") that must undergo word splitting. The single-value
    # variables are quoted so empty/odd values fail loudly.
    certbot renew --noninteractive --cert-name "${CERT_NAME}" --webroot --webroot-path=/var/www/html --email "${EMAIL}" --agree-tos --no-eff-email ${OPTIONS}
else
    echo "Certificate does not exist. Obtaining a new certificate..."
    certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email "${EMAIL}" --agree-tos --no-eff-email -d "${DOMAIN}" ${OPTIONS}
fi
echo "Certificate operation successful"
# Note: Nginx reload should be handled outside this container
echo "Please ensure to reload Nginx to apply any certificate changes."

View File

@@ -1,4 +0,0 @@
# Couchbase Server image with a cluster-initialization helper baked in.
FROM couchbase/server:latest AS stage_base
# FROM couchbase:latest AS stage_base
# Install the init script that creates the cluster and bucket on first start
# (invoked as the compose command; see init-cbserver.sh).
COPY init-cbserver.sh /opt/couchbase/init/
RUN chmod +x /opt/couchbase/init/init-cbserver.sh

View File

@@ -1,44 +0,0 @@
#!/bin/bash
# Start Couchbase Server and, on first run only, initialize the cluster and
# create the application bucket from environment variables.
#
# docker compose only allows one startup command, so we launch couchbase the
# same way the standard couchbase Dockerfile would, in the background:
# https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88
/entrypoint.sh couchbase-server &
# Marker file recording that setup already ran, so restarts skip it.
FILE=/opt/couchbase/init/setupComplete.txt
if ! [ -f "$FILE" ]; then
    # Automatically create the cluster from environment variables:
    # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html
    echo "$COUCHBASE_ADMINISTRATOR_USERNAME" ":" "$COUCHBASE_ADMINISTRATOR_PASSWORD"
    # Give the server time to come up before using the CLI.
    sleep 20s
    # All env-var arguments are quoted so an unset/odd value makes the CLI
    # fail loudly instead of silently shifting the argument list.
    /opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \
        --cluster-username "$COUCHBASE_ADMINISTRATOR_USERNAME" \
        --cluster-password "$COUCHBASE_ADMINISTRATOR_PASSWORD" \
        --services data,index,query,fts \
        --cluster-ramsize "$COUCHBASE_RAM_SIZE" \
        --cluster-index-ramsize "$COUCHBASE_INDEX_RAM_SIZE" \
        --cluster-eventing-ramsize "$COUCHBASE_EVENTING_RAM_SIZE" \
        --cluster-fts-ramsize "$COUCHBASE_FTS_RAM_SIZE" \
        --index-storage-setting default
    sleep 2s
    # Auto-create the bucket from environment variables:
    # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html
    /opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \
        --username "$COUCHBASE_ADMINISTRATOR_USERNAME" \
        --password "$COUCHBASE_ADMINISTRATOR_PASSWORD" \
        --bucket "$COUCHBASE_BUCKET" \
        --bucket-ramsize "$COUCHBASE_BUCKET_RAMSIZE" \
        --bucket-type couchbase
    # Create the marker so cluster setup does not run again on restart.
    touch "$FILE"
fi
# Keep the container alive: docker compose would otherwise stop it once this
# script exits (known issue and standard workaround).
tail -f /dev/null

View File

@@ -1,907 +0,0 @@
x-shared-env: &shared-api-worker-env
services:
# Init container to fix permissions
init_permissions:
image: busybox:latest
command:
- sh
- -c
- |
FLAG_FILE="/app/api/storage/.init_permissions"
if [ -f "$${FLAG_FILE}" ]; then
echo "Permissions already initialized. Exiting."
exit 0
fi
echo "Initializing permissions for /app/api/storage"
chown -R 1001:1001 /app/api/storage && touch "$${FLAG_FILE}"
echo "Permissions initialized. Exiting."
volumes:
- ./volumes/app/storage:/app/api/storage
restart: "no"
# API service
api:
image: langgenius/dify-api:1.11.1
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'api' starts the API server.
MODE: api
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
depends_on:
init_permissions:
condition: service_completed_successfully
db_postgres:
condition: service_healthy
required: false
db_mysql:
condition: service_healthy
required: false
oceanbase:
condition: service_healthy
required: false
seekdb:
condition: service_healthy
required: false
redis:
condition: service_started
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
networks:
- ssrf_proxy_network
- default
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
image: langgenius/dify-api:1.11.1
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'worker' starts the Celery worker for processing all queues.
MODE: worker
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
depends_on:
init_permissions:
condition: service_completed_successfully
db_postgres:
condition: service_healthy
required: false
db_mysql:
condition: service_healthy
required: false
oceanbase:
condition: service_healthy
required: false
seekdb:
condition: service_healthy
required: false
redis:
condition: service_started
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
networks:
- ssrf_proxy_network
- default
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.11.1
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks.
MODE: beat
depends_on:
init_permissions:
condition: service_completed_successfully
db_postgres:
condition: service_healthy
required: false
db_mysql:
condition: service_healthy
required: false
oceanbase:
condition: service_healthy
required: false
seekdb:
condition: service_healthy
required: false
redis:
condition: service_started
networks:
- ssrf_proxy_network
- default
# Frontend web application.
web:
image: langgenius/dify-web:1.11.1
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-}
NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-}
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
CSP_WHITELIST: ${CSP_WHITELIST:-}
ALLOW_EMBED: ${ALLOW_EMBED:-false}
ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
PM2_INSTANCES: ${PM2_INSTANCES:-2}
LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
# The PostgreSQL database.
db_postgres:
image: postgres:15-alpine
profiles:
- postgresql
restart: always
environment:
POSTGRES_USER: ${DB_USERNAME:-postgres}
POSTGRES_PASSWORD: ${DB_PASSWORD:-difyai123456}
POSTGRES_DB: ${DB_DATABASE:-dify}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
command: >
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
-c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
-c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
-c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
healthcheck:
test:
[
"CMD",
"pg_isready",
"-h",
"db_postgres",
"-U",
"${DB_USERNAME:-postgres}",
"-d",
"${DB_DATABASE:-dify}",
]
interval: 1s
timeout: 3s
retries: 60
# The mysql database.
db_mysql:
image: mysql:8.0
profiles:
- mysql
restart: always
environment:
MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456}
MYSQL_DATABASE: ${DB_DATABASE:-dify}
command: >
--max_connections=1000
--innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
--innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
--innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
volumes:
- ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
healthcheck:
test:
[
"CMD",
"mysqladmin",
"ping",
"-u",
"root",
"-p${DB_PASSWORD:-difyai123456}",
]
interval: 1s
timeout: 3s
retries: 30
# The redis cache.
redis:
image: redis:6-alpine
restart: always
environment:
REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
volumes:
# Mount the redis data directory to the container.
- ./volumes/redis/data:/data
# Set the redis password when startup redis server.
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
healthcheck:
test:
[
"CMD-SHELL",
"redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
]
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.12
restart: always
environment:
# The DifySandbox configurations
# Make sure you are changing this key for your deployment with a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
volumes:
- ./volumes/sandbox/dependencies:/dependencies
- ./volumes/sandbox/conf:/conf
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
networks:
- ssrf_proxy_network
# plugin daemon
plugin_daemon:
image: langgenius/dify-plugin-daemon:0.5.1-local
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024}
PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
AWS_REGION: ${PLUGIN_AWS_REGION:-}
AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false}
SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-}
ports:
- "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
volumes:
- ./volumes/plugin_daemon:/app/storage
depends_on:
db_postgres:
condition: service_healthy
required: false
db_mysql:
condition: service_healthy
required: false
oceanbase:
condition: service_healthy
required: false
seekdb:
condition: service_healthy
required: false
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
ssrf_proxy:
image: ubuntu/squid:latest
restart: always
volumes:
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
entrypoint:
[
"sh",
"-c",
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
environment:
# pls clearly modify the squid env vars to fit your network environment.
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
networks:
- ssrf_proxy_network
- default
# Certbot service
# use `docker-compose --profile certbot up` to start the certbot service.
certbot:
image: certbot/certbot
profiles:
- certbot
volumes:
- ./volumes/certbot/conf:/etc/letsencrypt
- ./volumes/certbot/www:/var/www/html
- ./volumes/certbot/logs:/var/log/letsencrypt
- ./volumes/certbot/conf/live:/etc/letsencrypt/live
- ./certbot/update-cert.template.txt:/update-cert.template.txt
- ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
environment:
- CERTBOT_EMAIL=${CERTBOT_EMAIL}
- CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
- CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
entrypoint: ["/docker-entrypoint.sh"]
command: ["tail", "-f", "/dev/null"]
# The nginx reverse proxy.
# used for reverse proxying the API service and Web service.
nginx:
image: nginx:latest
restart: always
volumes:
- ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
- ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
- ./nginx/https.conf.template:/etc/nginx/https.conf.template
- ./nginx/conf.d:/etc/nginx/conf.d
- ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
- ./nginx/ssl:/etc/ssl # cert dir (legacy)
- ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
- ./volumes/certbot/conf:/etc/letsencrypt
- ./volumes/certbot/www:/var/www/html
entrypoint:
[
"sh",
"-c",
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
environment:
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
NGINX_PORT: ${NGINX_PORT:-80}
# You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
# and modify the env vars below in .env if NGINX_HTTPS_ENABLED is true.
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
depends_on:
- api
- web
ports:
- "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}"
- "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}"
# The Weaviate vector store.
weaviate:
image: semitechnologies/weaviate:1.27.0
profiles:
- weaviate
restart: always
volumes:
# Mount the Weaviate data directory to the container.
- ./volumes/weaviate:/var/lib/weaviate
environment:
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false}
ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false}
ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false}
# OceanBase vector database
oceanbase:
image: oceanbase/oceanbase-ce:4.3.5-lts
container_name: oceanbase
profiles:
- oceanbase
restart: always
volumes:
- ./volumes/oceanbase/data:/root/ob
- ./volumes/oceanbase/conf:/root/.obd/cluster
- ./volumes/oceanbase/init.d:/root/boot/init.d
environment:
OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
OB_SERVER_IP: 127.0.0.1
MODE: mini
LANG: en_US.UTF-8
ports:
- "${OCEANBASE_VECTOR_PORT:-2881}:2881"
healthcheck:
test:
[
"CMD-SHELL",
'obclient -h127.0.0.1 -P2881 -uroot@test -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
]
interval: 10s
retries: 30
start_period: 30s
timeout: 10s
# seekdb vector database
seekdb:
image: oceanbase/seekdb:latest
container_name: seekdb
profiles:
- seekdb
restart: always
volumes:
- ./volumes/seekdb:/var/lib/oceanbase
environment:
ROOT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G}
REPORTER: dify-ai-seekdb
ports:
- "${OCEANBASE_VECTOR_PORT:-2881}:2881"
healthcheck:
test:
[
"CMD-SHELL",
'mysql -h127.0.0.1 -P2881 -uroot -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
]
interval: 5s
retries: 60
timeout: 5s
# Qdrant vector store.
# (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
qdrant:
image: langgenius/qdrant:v1.8.3
profiles:
- qdrant
restart: always
volumes:
- ./volumes/qdrant:/qdrant/storage
environment:
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
# The Couchbase vector store.
couchbase-server:
build: ./couchbase-server
profiles:
- couchbase
restart: always
environment:
- CLUSTER_NAME=dify_search
- COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
- COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
- COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
- COUCHBASE_BUCKET_RAMSIZE=512
- COUCHBASE_RAM_SIZE=2048
- COUCHBASE_EVENTING_RAM_SIZE=512
- COUCHBASE_INDEX_RAM_SIZE=512
- COUCHBASE_FTS_RAM_SIZE=1024
hostname: couchbase-server
container_name: couchbase-server
working_dir: /opt/couchbase
stdin_open: true
tty: true
entrypoint: [""]
command: sh -c "/opt/couchbase/init/init-cbserver.sh"
volumes:
- ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
healthcheck:
# ensure bucket was created before proceeding
test:
[
"CMD-SHELL",
"curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1",
]
interval: 10s
retries: 10
start_period: 30s
timeout: 10s
# The pgvector vector database.
pgvector:
image: pgvector/pgvector:pg16
profiles:
- pgvector
restart: always
environment:
PGUSER: ${PGVECTOR_PGUSER:-postgres}
# The password for the default postgres user.
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
# The name of the default postgres database.
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
# postgres data directory
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
# pg_bigm module for full text search
PG_BIGM: ${PGVECTOR_PG_BIGM:-false}
PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606}
volumes:
- ./volumes/pgvector/data:/var/lib/postgresql/data
- ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
entrypoint: ["/docker-entrypoint.sh"]
healthcheck:
test: ["CMD", "pg_isready"]
interval: 1s
timeout: 3s
retries: 30
# get image from https://www.vastdata.com.cn/
vastbase:
image: vastdata/vastbase-vector
profiles:
- vastbase
restart: always
environment:
- VB_DBCOMPATIBILITY=PG
- VB_DB=dify
- VB_USERNAME=dify
- VB_PASSWORD=Difyai123456
ports:
- "5434:5432"
volumes:
- ./vastbase/lic:/home/vastbase/vastbase/lic
- ./vastbase/data:/home/vastbase/data
- ./vastbase/backup:/home/vastbase/backup
- ./vastbase/backup_log:/home/vastbase/backup_log
healthcheck:
test: ["CMD", "pg_isready"]
interval: 1s
timeout: 3s
retries: 30
# pgvecto-rs vector store
pgvecto-rs:
image: tensorchord/pgvecto-rs:pg16-v0.3.0
profiles:
- pgvecto-rs
restart: always
environment:
PGUSER: ${PGVECTOR_PGUSER:-postgres}
# The password for the default postgres user.
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
# The name of the default postgres database.
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
# postgres data directory
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
volumes:
- ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
healthcheck:
test: ["CMD", "pg_isready"]
interval: 1s
timeout: 3s
retries: 30
# Chroma vector database
chroma:
image: ghcr.io/chroma-core/chroma:0.5.20
profiles:
- chroma
restart: always
volumes:
- ./volumes/chroma:/chroma/chroma
environment:
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
# Oracle vector database
oracle:
image: container-registry.oracle.com/database/free:latest
profiles:
- oracle
restart: always
volumes:
- source: oradata
type: volume
target: /opt/oracle/oradata
- ./startupscripts:/opt/oracle/scripts/startup
environment:
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
# Milvus vector database services
etcd:
container_name: milvus-etcd
image: quay.io/coreos/etcd:v3.5.5
profiles:
- milvus
environment:
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
volumes:
- ./volumes/milvus/etcd:/etcd
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck:
test: ["CMD", "etcdctl", "endpoint", "health"]
interval: 30s
timeout: 20s
retries: 3
networks:
- milvus
minio:
container_name: milvus-minio
image: minio/minio:RELEASE.2023-03-20T20-16-18Z
profiles:
- milvus
environment:
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
volumes:
- ./volumes/milvus/minio:/minio_data
command: minio server /minio_data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
networks:
- milvus
milvus-standalone:
container_name: milvus-standalone
image: milvusdb/milvus:v2.6.3
profiles:
- milvus
command: ["milvus", "run", "standalone"]
environment:
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
volumes:
- ./volumes/milvus/milvus:/var/lib/milvus
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
interval: 30s
start_period: 90s
timeout: 20s
retries: 3
depends_on:
- etcd
- minio
ports:
- 19530:19530
- 9091:9091
networks:
- milvus
# Opensearch vector database
opensearch:
container_name: opensearch
image: opensearchproject/opensearch:latest
profiles:
- opensearch
environment:
discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
ulimits:
memlock:
soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
nofile:
soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
hard: ${OPENSEARCH_NOFILE_HARD:-65536}
volumes:
- ./volumes/opensearch/data:/usr/share/opensearch/data
networks:
- opensearch-net
opensearch-dashboards:
container_name: opensearch-dashboards
image: opensearchproject/opensearch-dashboards:latest
profiles:
- opensearch
environment:
OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
volumes:
- ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
networks:
- opensearch-net
depends_on:
- opensearch
# opengauss vector database.
opengauss:
image: opengauss/opengauss:7.0.0-RC1
profiles:
- opengauss
privileged: true
restart: always
environment:
GS_USERNAME: ${OPENGAUSS_USER:-postgres}
GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
GS_PORT: ${OPENGAUSS_PORT:-6600}
GS_DB: ${OPENGAUSS_DATABASE:-dify}
volumes:
- ./volumes/opengauss/data:/var/lib/opengauss/data
healthcheck:
test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"]
interval: 10s
timeout: 10s
retries: 10
ports:
- ${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600}
# MyScale vector database
myscale:
container_name: myscale
image: myscale/myscaledb:1.6.4
profiles:
- myscale
restart: always
tty: true
volumes:
- ./volumes/myscale/data:/var/lib/clickhouse
- ./volumes/myscale/log:/var/log/clickhouse-server
- ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
ports:
- ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
# Matrixone vector store.
matrixone:
hostname: matrixone
image: matrixorigin/matrixone:2.1.1
profiles:
- matrixone
restart: always
volumes:
- ./volumes/matrixone/data:/mo-data
ports:
- ${MATRIXONE_PORT:-6001}:${MATRIXONE_PORT:-6001}
# https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
# https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
container_name: elasticsearch
profiles:
- elasticsearch
- elasticsearch-ja
restart: always
volumes:
- ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
- dify_es01_data:/usr/share/elasticsearch/data
environment:
ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
VECTOR_STORE: ${VECTOR_STORE:-}
cluster.name: dify-es-cluster
node.name: dify-es0
discovery.type: single-node
xpack.license.self_generated.type: basic
xpack.security.enabled: "true"
xpack.security.enrollment.enabled: "false"
xpack.security.http.ssl.enabled: "false"
ports:
- ${ELASTICSEARCH_PORT:-9200}:9200
deploy:
resources:
limits:
memory: 2g
entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"]
healthcheck:
test:
["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"]
interval: 30s
timeout: 10s
retries: 50
# https://www.elastic.co/guide/en/kibana/current/docker.html
# https://www.elastic.co/guide/en/kibana/current/settings.html
kibana:
image: docker.elastic.co/kibana/kibana:8.14.3
container_name: kibana
profiles:
- elasticsearch
depends_on:
- elasticsearch
restart: always
environment:
XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
XPACK_SECURITY_ENABLED: "true"
XPACK_SECURITY_ENROLLMENT_ENABLED: "false"
XPACK_SECURITY_HTTP_SSL_ENABLED: "false"
XPACK_FLEET_ISAIRGAPPED: "true"
I18N_LOCALE: zh-CN
SERVER_PORT: "5601"
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
ports:
- ${KIBANA_PORT:-5601}:5601
healthcheck:
test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"]
interval: 30s
timeout: 10s
retries: 3
# The unstructured document-parsing service.
# (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
unstructured:
image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
profiles:
- unstructured
restart: always
volumes:
- ./volumes/unstructured:/app/data
networks:
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
ssrf_proxy_network:
driver: bridge
internal: true
milvus:
driver: bridge
opensearch-net:
driver: bridge
internal: true
volumes:
oradata:
dify_es01_data:

View File

@@ -1,250 +0,0 @@
services:
# The postgres database.
db_postgres:
image: postgres:15-alpine
profiles:
- ""
- postgresql
restart: always
env_file:
- ./middleware.env
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD:-difyai123456}
POSTGRES_DB: ${DB_DATABASE:-dify}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
command: >
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
-c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
-c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
-c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
volumes:
- ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data
ports:
- "${EXPOSE_POSTGRES_PORT:-5432}:5432"
healthcheck:
test:
[
"CMD",
"pg_isready",
"-h",
"db_postgres",
"-U",
"${DB_USERNAME:-postgres}",
"-d",
"${DB_DATABASE:-dify}",
]
interval: 1s
timeout: 3s
retries: 30
db_mysql:
image: mysql:8.0
profiles:
- mysql
restart: always
env_file:
- ./middleware.env
environment:
MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456}
MYSQL_DATABASE: ${DB_DATABASE:-dify}
command: >
--max_connections=1000
--innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
--innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
--innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
volumes:
- ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
ports:
- "${EXPOSE_MYSQL_PORT:-3306}:3306"
healthcheck:
test:
[
"CMD",
"mysqladmin",
"ping",
"-u",
"root",
"-p${DB_PASSWORD:-difyai123456}",
]
interval: 1s
timeout: 3s
retries: 30
# The redis cache.
redis:
image: redis:6-alpine
restart: always
env_file:
- ./middleware.env
environment:
REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
volumes:
# Mount the redis data directory to the container.
- ${REDIS_HOST_VOLUME:-./volumes/redis/data}:/data
# Set the redis password when startup redis server.
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
ports:
- "${EXPOSE_REDIS_PORT:-6379}:6379"
healthcheck:
test:
[
"CMD-SHELL",
"redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
]
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.12
restart: always
env_file:
- ./middleware.env
environment:
# The DifySandbox configurations
# Make sure you are changing this key for your deployment with a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
volumes:
- ./volumes/sandbox/dependencies:/dependencies
- ./volumes/sandbox/conf:/conf
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
networks:
- ssrf_proxy_network
# plugin daemon
plugin_daemon:
image: langgenius/dify-plugin-daemon:0.5.1-local
restart: always
env_file:
- ./middleware.env
environment:
# Use the shared environment variables.
DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
REDIS_HOST: ${REDIS_HOST:-redis}
REDIS_PORT: ${REDIS_PORT:-6379}
REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://host.docker.internal:5001}
DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
AWS_REGION: ${PLUGIN_AWS_REGION:-}
AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
THIRD_PARTY_SIGNATURE_VERIFICATION_ENABLED: true
THIRD_PARTY_SIGNATURE_VERIFICATION_PUBLIC_KEYS: /app/keys/publickey.pem
FORCE_VERIFYING_SIGNATURE: false
ports:
- "${EXPOSE_PLUGIN_DAEMON_PORT:-5002}:${PLUGIN_DAEMON_PORT:-5002}"
- "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
volumes:
- ./volumes/plugin_daemon:/app/storage
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
ssrf_proxy:
image: ubuntu/squid:latest
restart: always
volumes:
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
entrypoint:
[
"sh",
"-c",
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
env_file:
- ./middleware.env
environment:
# Please modify the squid env vars below to fit your network environment.
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
ports:
- "${EXPOSE_SSRF_PROXY_PORT:-3128}:${SSRF_HTTP_PORT:-3128}"
- "${EXPOSE_SANDBOX_PORT:-8194}:${SANDBOX_PORT:-8194}"
networks:
- ssrf_proxy_network
- default
# The Weaviate vector store.
weaviate:
image: semitechnologies/weaviate:1.27.0
profiles:
- ""
- weaviate
restart: always
volumes:
# Mount the Weaviate data directory to the container.
- ${WEAVIATE_HOST_VOLUME:-./volumes/weaviate}:/var/lib/weaviate
env_file:
- ./middleware.env
environment:
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
ports:
- "${EXPOSE_WEAVIATE_PORT:-8080}:8080"
- "${EXPOSE_WEAVIATE_GRPC_PORT:-50051}:50051"
networks:
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
ssrf_proxy_network:
driver: bridge
internal: true

Binary file not shown.

Before

Width:  |  Height:  |  Size: 170 KiB

File diff suppressed because it is too large Load Diff

View File

@@ -1,25 +0,0 @@
#!/bin/bash
set -e

# Install an Elasticsearch plugin by name if it is not already present.
# Exits non-zero (after a diagnostic message) when installation fails.
install_plugin_if_missing() {
  plugin_name="$1"
  description="$2"
  if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q "${plugin_name}"; then
    printf '%s\n' "Installing the ${description}"
    if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install "${plugin_name}"; then
      printf '%s\n' "Failed to install the ${description}"
      exit 1
    fi
  fi
}

# Japanese-enabled deployments need the ICU and Kuromoji analysis plugins.
if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then
  install_plugin_if_missing analysis-icu "ICU tokenizer plugin"
  install_plugin_if_missing analysis-kuromoji "Japanese language analyzer plugin"
fi

# Hand off to the stock Elasticsearch entrypoint under tini.
exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh

View File

@@ -1,112 +0,0 @@
#!/usr/bin/env python3
import os
import re
import sys
def parse_env_example(file_path):
    """
    Parse a .env.example file into a dict of variable name -> default value.

    Blank lines and lines starting with '#' are skipped. Each remaining line
    is matched as KEY=VALUE; one matching pair of surrounding single or
    double quotes is stripped from the value. Lines that do not match the
    KEY=VALUE shape produce a warning on stdout and are ignored.
    """
    result = {}
    with open(file_path, "r") as handle:
        for idx, raw in enumerate(handle, 1):
            stripped = raw.strip()
            # Skip blanks and comments.
            if not stripped or stripped.startswith("#"):
                continue
            parsed = re.match(r"^([^=]+)=(.*)$", stripped)
            if parsed is None:
                print(f"Warning: Unable to parse line {idx}: {stripped}")
                continue
            name = parsed.group(1).strip()
            default = parsed.group(2).strip()
            # Strip one matching pair of surrounding quotes, if present.
            for quote in ('"', "'"):
                if default.startswith(quote) and default.endswith(quote):
                    default = default[1:-1]
                    break
            result[name] = default
    return result
def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"):
    """
    Render the shared environment variables as a YAML anchor block.

    Each entry becomes ``KEY: ${KEY:-default}`` so docker compose falls back
    to the .env.example default when the variable is unset in the
    environment. COMPOSE_PROFILES is excluded because it configures compose
    itself rather than the services.

    :param env_vars: mapping of variable name -> default value string
    :param anchor_name: name of the YAML anchor to emit
    :return: the rendered YAML block as a single newline-joined string
    """
    lines = [f"x-shared-env: &{anchor_name}"]
    for key, default in env_vars.items():
        if key == "COMPOSE_PROFILES":
            continue
        if default == "":
            # No default: interpolate to an empty string when unset.
            lines.append(f"  {key}: ${{{key}:-}}")
        elif re.search(r"[:\s]", default):
            # Defaults containing ':' or whitespace (e.g. "TLSv1.1 TLSv1.2")
            # would be mis-parsed as bare YAML scalars, so quote the whole
            # value. (The previous wrapping via f"{default}" was a no-op bug.)
            lines.append(f'  {key}: "${{{key}:-{default}}}"')
        else:
            lines.append(f"  {key}: ${{{key}:-{default}}}")
    return "\n".join(lines)
def insert_shared_env(template_path, output_path, shared_env_block, header_comments):
    """
    Compose the final docker-compose.yaml file.

    Reads the template, removes any pre-existing
    ``x-shared-env: &shared-api-worker-env`` anchor lines, then writes the
    header comments, the freshly generated shared env block, and the
    remaining template content to ``output_path``.
    """
    with open(template_path, "r") as src:
        body = src.read()
    # Drop stale anchors so the block is not duplicated on regeneration.
    body = re.sub(
        r"^x-shared-env: &shared-api-worker-env\s*\n?",
        "",
        body,
        flags=re.MULTILINE,
    )
    rendered = f"{header_comments}\n{shared_env_block}\n\n{body}"
    with open(output_path, "w") as dst:
        dst.write(rendered)
    print(f"Generated {output_path}")
def main():
    """Generate docker-compose.yaml from .env.example and the compose template."""
    env_example_path = ".env.example"
    template_path = "docker-compose-template.yaml"
    output_path = "docker-compose.yaml"
    anchor_name = "shared-api-worker-env"  # Can be modified as needed

    # Banner warning readers that the output file is generated, not hand-edited.
    header_comments = (
        "# ==================================================================\n"
        "# WARNING: This file is auto-generated by generate_docker_compose\n"
        "# Do not modify this file directly. Instead, update the .env.example\n"
        "# or docker-compose-template.yaml and regenerate this file.\n"
        "# ==================================================================\n"
    )

    # Bail out early if either input file is missing.
    missing = [p for p in (env_example_path, template_path) if not os.path.isfile(p)]
    if missing:
        print(f"Error: File {missing[0]} does not exist.")
        sys.exit(1)

    env_vars = parse_env_example(env_example_path)
    if not env_vars:
        print("Warning: No environment variables found in .env.example.")

    shared_env_block = generate_shared_env_block(env_vars, anchor_name)
    insert_shared_env(template_path, output_path, shared_env_block, header_comments)

View File

@@ -1,215 +0,0 @@
# ------------------------------
# Environment Variables for db Service
# ------------------------------
# Database Configuration
# Database type, supported values are `postgresql` and `mysql`
DB_TYPE=postgresql
# For MySQL, only `root` user is supported for now
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=db_postgres
DB_PORT=5432
DB_DATABASE=dify
# PostgreSQL Configuration
# postgres data directory
PGDATA=/var/lib/postgresql/data/pgdata
PGDATA_HOST_VOLUME=./volumes/db/data
# Maximum number of connections to the database
# Default is 100
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
POSTGRES_MAX_CONNECTIONS=100
# Sets the amount of shared memory used for postgres's shared buffers.
# Default is 128MB
# Recommended value: 25% of available memory
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
POSTGRES_SHARED_BUFFERS=128MB
# Sets the amount of memory used by each database worker for working space.
# Default is 4MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
POSTGRES_WORK_MEM=4MB
# Sets the amount of memory reserved for maintenance activities.
# Default is 64MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
POSTGRES_MAINTENANCE_WORK_MEM=64MB
# Sets the planner's assumption about the effective cache size.
# Default is 4096MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
# Sets the maximum allowed duration of any statement before termination.
# Default is 0 (no timeout).
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT
# A value of 0 prevents the server from timing out statements.
POSTGRES_STATEMENT_TIMEOUT=0
# Sets the maximum allowed duration of any idle in-transaction session before termination.
# Default is 0 (no timeout).
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT
# A value of 0 prevents the server from terminating idle sessions.
POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0
# MySQL Configuration
# MySQL data directory host volume
MYSQL_HOST_VOLUME=./volumes/mysql/data
# MySQL Performance Configuration
# Maximum number of connections to MySQL
# Default is 1000
MYSQL_MAX_CONNECTIONS=1000
# InnoDB buffer pool size
# Default is 512M
# Recommended value: 70-80% of available memory for dedicated MySQL server
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size
MYSQL_INNODB_BUFFER_POOL_SIZE=512M
# InnoDB log file size
# Default is 128M
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size
MYSQL_INNODB_LOG_FILE_SIZE=128M
# InnoDB flush log at transaction commit
# Default is 2 (flush to OS cache, sync every second)
# Options: 0 (no flush), 1 (flush and sync), 2 (flush to OS cache)
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit
MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2
# -----------------------------
# Environment Variables for redis Service
# -----------------------------
REDIS_HOST_VOLUME=./volumes/redis/data
REDIS_PASSWORD=difyai123456
# ------------------------------
# Environment Variables for sandbox Service
# ------------------------------
SANDBOX_API_KEY=dify-sandbox
SANDBOX_GIN_MODE=release
SANDBOX_WORKER_TIMEOUT=15
SANDBOX_ENABLE_NETWORK=true
SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
SANDBOX_PORT=8194
# ------------------------------
# Environment Variables for ssrf_proxy Service
# ------------------------------
SSRF_HTTP_PORT=3128
SSRF_COREDUMP_DIR=/var/spool/squid
SSRF_REVERSE_PROXY_PORT=8194
SSRF_SANDBOX_HOST=sandbox
# ------------------------------
# Environment Variables for weaviate Service
# ------------------------------
WEAVIATE_QUERY_DEFAULTS_LIMIT=25
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
WEAVIATE_CLUSTER_HOSTNAME=node1
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
WEAVIATE_DISABLE_TELEMETRY=false
WEAVIATE_HOST_VOLUME=./volumes/weaviate
# ------------------------------
# Docker Compose profile configuration
# ------------------------------
# Loaded automatically when running `docker compose --env-file middleware.env ...`.
# Controls which DB/vector services start, so no extra `--profile` flag is needed.
COMPOSE_PROFILES=${DB_TYPE:-postgresql},weaviate
# ------------------------------
# Docker Compose Service Expose Host Port Configurations
# ------------------------------
EXPOSE_POSTGRES_PORT=5432
EXPOSE_MYSQL_PORT=3306
EXPOSE_REDIS_PORT=6379
EXPOSE_SANDBOX_PORT=8194
EXPOSE_SSRF_PROXY_PORT=3128
EXPOSE_WEAVIATE_PORT=8080
# ------------------------------
# Plugin Daemon Configuration
# ------------------------------
DB_PLUGIN_DATABASE=dify_plugin
EXPOSE_PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DAEMON_URL=http://host.docker.internal:5002
PLUGIN_MAX_PACKAGE_SIZE=52428800
PLUGIN_PPROF_ENABLED=false
PLUGIN_WORKING_PATH=/app/storage/cwd
ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}
PLUGIN_DEBUGGING_PORT=5003
PLUGIN_DEBUGGING_HOST=0.0.0.0
EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
EXPOSE_PLUGIN_DEBUGGING_PORT=5003
PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
PLUGIN_DIFY_INNER_API_URL=http://host.docker.internal:5001
MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai
FORCE_VERIFYING_SIGNATURE=true
PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120
PLUGIN_MAX_EXECUTION_TIMEOUT=600
# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple
PIP_MIRROR_URL=
# https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example
# Plugin storage type, local aws_s3 tencent_cos azure_blob
PLUGIN_STORAGE_TYPE=local
PLUGIN_STORAGE_LOCAL_ROOT=/app/storage
PLUGIN_WORKING_PATH=/app/storage/cwd
PLUGIN_INSTALLED_PATH=plugin
PLUGIN_PACKAGE_CACHE_PATH=plugin_packages
PLUGIN_MEDIA_CACHE_PATH=assets
# Plugin oss bucket
PLUGIN_STORAGE_OSS_BUCKET=
# Plugin oss s3 credentials
PLUGIN_S3_USE_AWS_MANAGED_IAM=false
PLUGIN_S3_USE_AWS=false
PLUGIN_S3_ENDPOINT=
PLUGIN_S3_USE_PATH_STYLE=false
PLUGIN_AWS_ACCESS_KEY=
PLUGIN_AWS_SECRET_KEY=
PLUGIN_AWS_REGION=
# Plugin oss azure blob
PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME=
PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING=
# Plugin oss tencent cos
PLUGIN_TENCENT_COS_SECRET_KEY=
PLUGIN_TENCENT_COS_SECRET_ID=
PLUGIN_TENCENT_COS_REGION=
# Plugin oss aliyun oss
PLUGIN_ALIYUN_OSS_REGION=
PLUGIN_ALIYUN_OSS_ENDPOINT=
PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID=
PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET=
PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4
PLUGIN_ALIYUN_OSS_PATH=
# Plugin oss volcengine tos
PLUGIN_VOLCENGINE_TOS_ENDPOINT=
PLUGIN_VOLCENGINE_TOS_ACCESS_KEY=
PLUGIN_VOLCENGINE_TOS_SECRET_KEY=
PLUGIN_VOLCENGINE_TOS_REGION=

View File

@@ -1,58 +0,0 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
server {
listen 80;
server_name _;
location /console/api {
proxy_pass http://api:5001;
include proxy.conf;
}
location /api {
proxy_pass http://api:5001;
include proxy.conf;
}
location /v1 {
proxy_pass http://api:5001;
include proxy.conf;
}
location /files {
proxy_pass http://api:5001;
include proxy.conf;
}
location /explore {
proxy_pass http://web:3000;
include proxy.conf;
}
location /e/ {
proxy_pass http://plugin_daemon:5002;
proxy_set_header Dify-Hook-Url $scheme://$host$request_uri;
include proxy.conf;
}
location / {
proxy_pass http://web:3000;
include proxy.conf;
}
location /mcp {
proxy_pass http://api:5001;
include proxy.conf;
}
location /triggers {
proxy_pass http://api:5001;
include proxy.conf;
}
# placeholder for acme challenge location
# placeholder for https config defined in https.conf.template
}

View File

@@ -1,58 +0,0 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
server {
listen ${NGINX_PORT};
server_name ${NGINX_SERVER_NAME};
location /console/api {
proxy_pass http://api:5001;
include proxy.conf;
}
location /api {
proxy_pass http://api:5001;
include proxy.conf;
}
location /v1 {
proxy_pass http://api:5001;
include proxy.conf;
}
location /files {
proxy_pass http://api:5001;
include proxy.conf;
}
location /explore {
proxy_pass http://web:3000;
include proxy.conf;
}
location /e/ {
proxy_pass http://plugin_daemon:5002;
proxy_set_header Dify-Hook-Url $scheme://$host$request_uri;
include proxy.conf;
}
location / {
proxy_pass http://web:3000;
include proxy.conf;
}
location /mcp {
proxy_pass http://api:5001;
include proxy.conf;
}
location /triggers {
proxy_pass http://api:5001;
include proxy.conf;
}
# placeholder for acme challenge location
${ACME_CHALLENGE_LOCATION}
# placeholder for https config defined in https.conf.template
${HTTPS_CONFIG}
}

View File

@@ -1,42 +0,0 @@
#!/bin/bash
HTTPS_CONFIG=''
if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then
# Check if the certificate and key files for the specified domain exist
if [ -n "${CERTBOT_DOMAIN}" ] && \
[ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \
[ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then
SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}"
SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}"
else
SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}"
SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}"
fi
export SSL_CERTIFICATE_PATH
export SSL_CERTIFICATE_KEY_PATH
# set the HTTPS_CONFIG environment variable to the content of the https.conf.template
HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
export HTTPS_CONFIG
# Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template
envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
fi
export HTTPS_CONFIG
if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then
ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }'
else
ACME_CHALLENGE_LOCATION=''
fi
export ACME_CHALLENGE_LOCATION
env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)
envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf
envsubst "$env_vars" < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
# Start Nginx using the default entrypoint
exec nginx -g 'daemon off;'

View File

@@ -1,9 +0,0 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
listen ${NGINX_SSL_PORT} ssl;
ssl_certificate ${SSL_CERTIFICATE_PATH};
ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH};
ssl_protocols ${NGINX_SSL_PROTOCOLS};
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;

View File

@@ -1,34 +0,0 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
user nginx;
worker_processes ${NGINX_WORKER_PROCESSES};
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};
#gzip on;
client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};
include /etc/nginx/conf.d/*.conf;
}

View File

@@ -1,11 +0,0 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};

View File

@@ -1,24 +0,0 @@
#!/bin/bash
PG_MAJOR=16
if [ "${PG_BIGM}" = "true" ]; then
# install pg_bigm
apt-get update
apt-get install -y curl make gcc postgresql-server-dev-${PG_MAJOR}
curl -LO https://github.com/pgbigm/pg_bigm/archive/refs/tags/v${PG_BIGM_VERSION}.tar.gz
tar xf v${PG_BIGM_VERSION}.tar.gz
cd pg_bigm-${PG_BIGM_VERSION} || exit 1
make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config
make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config install
cd - || exit 1
rm -rf v${PG_BIGM_VERSION}.tar.gz pg_bigm-${PG_BIGM_VERSION}
# enable pg_bigm
sed -i -e 's/^#\s*shared_preload_libraries.*/shared_preload_libraries = '\''pg_bigm'\''/' /var/lib/postgresql/data/pgdata/postgresql.conf
fi
# Run the original entrypoint script
exec /usr/local/bin/docker-entrypoint.sh postgres

View File

@@ -1,42 +0,0 @@
#!/bin/bash
# Modified based on Squid OCI image entrypoint
# This entrypoint aims to forward the squid logs to stdout to assist users of
# common container related tooling (e.g., kubernetes, docker-compose, etc) to
# access the service logs.
# Moreover, it invokes the squid binary, leaving all the desired parameters to
# be provided by the "command" passed to the spawned container. If no command
# is provided by the user, the default behavior (as per the CMD statement in
# the Dockerfile) will be to use Ubuntu's default configuration [1] and run
# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
# systemd unit.
# [1] The default configuration is changed in the Dockerfile to allow local
# network connections. See the Dockerfile for further information.
echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
/usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
fi
tail -F /var/log/squid/access.log 2>/dev/null &
tail -F /var/log/squid/error.log 2>/dev/null &
tail -F /var/log/squid/store.log 2>/dev/null &
tail -F /var/log/squid/cache.log 2>/dev/null &
# Replace environment variables in the template and output to the squid.conf
echo "[ENTRYPOINT] replacing environment variables in the template"
awk '{
while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
var = substr($0, RSTART+2, RLENGTH-3)
val = ENVIRON[var]
$0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
}
print
}' /etc/squid/squid.conf.template > /etc/squid/squid.conf
/usr/sbin/squid -Nz
echo "[ENTRYPOINT] starting squid"
/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1

View File

@@ -1,56 +0,0 @@
acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
acl localnet src fc00::/7 # RFC 4193 local private network range
acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
acl SSL_ports port 443
# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792
acl Safe_ports port 80 # http
acl Safe_ports port 21 # ftp
acl Safe_ports port 443 # https
acl Safe_ports port 70 # gopher
acl Safe_ports port 210 # wais
acl Safe_ports port 1025-65535 # unregistered ports
acl Safe_ports port 280 # http-mgmt
acl Safe_ports port 488 # gss-http
acl Safe_ports port 591 # filemaker
acl Safe_ports port 777 # multiling http
acl CONNECT method CONNECT
acl allowed_domains dstdomain .marketplace.dify.ai
http_access allow allowed_domains
http_access deny !Safe_ports
http_access deny CONNECT !SSL_ports
http_access allow localhost manager
http_access deny manager
http_access allow localhost
include /etc/squid/conf.d/*.conf
http_access deny all
################################## Proxy Server ################################
http_port ${HTTP_PORT}
coredump_dir ${COREDUMP_DIR}
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern . 0 20% 4320
# cache_dir ufs /var/spool/squid 100 16 256
# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
################################## Reverse Proxy To Sandbox ################################
http_port ${REVERSE_PROXY_PORT} accel vhost
cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
acl src_all src all
http_access allow src_all
# Unless the option's size is increased, an error will occur when uploading more than two files.
client_request_buffer_max_size 100 MB

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
DB_INITIALIZED="/opt/oracle/oradata/dbinit"
#[ -f ${DB_INITIALIZED} ] && exit
#touch ${DB_INITIALIZED}
if [ -f ${DB_INITIALIZED} ]; then
echo 'File exists. Standards for have been Init'
exit
else
echo 'File does not exist. Standards for first time Start up this DB'
"$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script";
touch ${DB_INITIALIZED}
fi

View File

@@ -1,10 +0,0 @@
show pdbs;
ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
alter session set container= freepdb1;
create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
grant DB_DEVELOPER_ROLE to dify;
BEGIN
CTX_DDL.CREATE_PREFERENCE('dify.world_lexer','WORLD_LEXER');
END;
/

View File

@@ -1,4 +0,0 @@
# PD Configuration File reference:
# https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file
[replication]
max-replicas = 1

View File

@@ -1,13 +0,0 @@
# TiFlash tiflash-learner.toml Configuration File reference:
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file
log-file = "/logs/tiflash_tikv.log"
[server]
engine-addr = "tiflash:4030"
addr = "0.0.0.0:20280"
advertise-addr = "tiflash:20280"
status-addr = "tiflash:20292"
[storage]
data-dir = "/data/flash"

View File

@@ -1,19 +0,0 @@
# TiFlash tiflash.toml Configuration File reference:
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file
listen_host = "0.0.0.0"
path = "/data"
[flash]
tidb_status_addr = "tidb:10080"
service_addr = "tiflash:4030"
[flash.proxy]
config = "/tiflash-learner.toml"
[logger]
errorlog = "/logs/tiflash_error.log"
log = "/logs/tiflash.log"
[raft]
pd_addr = "pd0:2379"

View File

@@ -1,63 +0,0 @@
services:
pd0:
image: pingcap/pd:v8.5.1
# ports:
# - "2379"
volumes:
- ./config/pd.toml:/pd.toml:ro
- ./volumes/data:/data
- ./volumes/logs:/logs
command:
- --name=pd0
- --client-urls=http://0.0.0.0:2379
- --peer-urls=http://0.0.0.0:2380
- --advertise-client-urls=http://pd0:2379
- --advertise-peer-urls=http://pd0:2380
- --initial-cluster=pd0=http://pd0:2380
- --data-dir=/data/pd
- --config=/pd.toml
- --log-file=/logs/pd.log
restart: on-failure
tikv:
image: pingcap/tikv:v8.5.1
volumes:
- ./volumes/data:/data
- ./volumes/logs:/logs
command:
- --addr=0.0.0.0:20160
- --advertise-addr=tikv:20160
- --status-addr=tikv:20180
- --data-dir=/data/tikv
- --pd=pd0:2379
- --log-file=/logs/tikv.log
depends_on:
- "pd0"
restart: on-failure
tidb:
image: pingcap/tidb:v8.5.1
# ports:
# - "4000:4000"
volumes:
- ./volumes/logs:/logs
command:
- --advertise-address=tidb
- --store=tikv
- --path=pd0:2379
- --log-file=/logs/tidb.log
depends_on:
- "tikv"
restart: on-failure
tiflash:
image: pingcap/tiflash:v8.5.1
volumes:
- ./config/tiflash.toml:/tiflash.toml:ro
- ./config/tiflash-learner.toml:/tiflash-learner.toml:ro
- ./volumes/data:/data
- ./volumes/logs:/logs
command:
- server
- --config-file=/tiflash.toml
depends_on:
- "tikv"
- "tidb"
restart: on-failure

View File

@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAzviset7ubYZgOjpGZTLgI01lkJ1b54EZCctU+KKd3VJ9B/su
cCTZ1ql7Fs5B4shZUu2MQOlO2twDC3gxW8ENj+yyU+r8toukxar2Iie3Qsp5tcJH
mWsq7Imo5RmUGLLwDV8AU0xRc4bRD5YGwiwZcRPODpgQBpnsKKoPNyvCeudEPr2S
gs63aQCDyv1Ny9jLTjvvHr2k531M5wChzT5miu0r0R+Hjrah0XnbRlQUubqHJOLH
FFp+XJowd0KAN26fjpEbFY2FwRa5POlbc1e2PEu+s3WJa0Iz5GQbhcOJ7ppA9Rft
k2CEAoNHfkbZcAxsySa++8+1Nq4hLFJubDYnWwIDAQABAoIBABEJJ5acKj86SUoM
U+ZZMAPCh3Kbe94WootLgOdPk0jiqkdG5dqgiIxXujmf/oz+dVkTE8agAuKW1hVS
mQAhaCQJlTVnvnypX9r5UyBv6Rtb88qsfseM/6UlQOaimq2xxM/aRaTq6jq6b74l
ria7Xy4PNZhpQcsZTkvxO3wMuQmelkkuhqbQF2fpNTTIZfbEehJatOTWIcWWkqRo
mZkKd4pHluaXDvczFFo31jSJ4jzrJS11Tx1s21rM4Iq2W+3B6nEVfvHaFqWMHve7
+22ZWUUiMJbIpr7hy0bSBjY78HdR/EcQqPqT7uEawFaft8ybMP5AkuLEG9I6VtWq
gfPX/ckCgYEA3XbHuDP6Yzlc9tXapQEulM82B3mDtKAX72AqYGEpvZInrge6Fjwq
SdMGQHP/VsLq7t78g9negYQjT6J0qJ2V6s/1iVyr6uU3IQ06Q7+8xp6AQPSFShJk
o2nnODuvVMwwwH94ga1ntfPdzhYKfJ2E9Cbbn+NUZB/uxnsAxTTGGNcCgYEA7z9S
/M8eUfCOEBEC9EGuto8/hZ7Zmm0fuAdKx0zZzSVsKxeHUQoQJ1YQfnjKdlxNv1FI
K9g7wcMEvkUv/CMx7pQ/R4eyvbGlxGRAy1jUhgx1hxifBeYKd5j5BMAQ8n3/uRLR
3czW7i8Xzy0Z97kfy9ZHllbnbZQvYceDQY2ngR0CgYEA272Wz4PrhnaNcnGg/6Yb
lRXA80uzzg4nkaGminj+FJYnfvs1vgg2oYecJWmzSmAfAaMJhL4xk+sLqmF0RTYP
Yay0YXYtMuX6fHYGgOCHq7/rXAgDFQsJBkf3X4mBH693FBmm9WycgiKVy319LEaT
fDnuI0hKHQq+PfwgOIxznoMCgYBGj9WpDGkz34/2ux207p5Z55vji/s/6YleXIA4
kbj3IV9qwjC9vQsQnviUZInKKOUhVRbFH6xYNG10tLE3WyYB6KVVMneMLRXn+22E
y/k6Y/hR1Pgp5DQYG/zt4rvc2T6P1jYhhaWqcCdEilfLUUVIeDV66Lyu+/KRkydq
MR5DHQKBgHn3HNAiHfLVef7+unoYu8ak6uqj5V74IJZdvcNZ4jUz7onk2o3h7pj0
9xzWoakzv1ypK6rVBYr+2U4Mn728kz90rR9VV05YwbMQBcBH0gmBfGdbUjI9PH/+
iUoxHK0G55ArzmZFtRGUpmEFak4xTpM48Wn5VApe9kMt3yX+FBbD
-----END RSA PRIVATE KEY-----

View File

@@ -1,4 +0,0 @@
http://localhost/platform/admin/platform
http://localhost:5173/datacenter

View File

@@ -1 +0,0 @@
15

Some files were not shown because too many files have changed in this diff Show More