Compare commits
52 Commits
difyPlugin
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| 12d9294f3d | |||
| 8ab6107f25 | |||
| 0bf7361672 | |||
| 1b02067d50 | |||
| 6921ac7e66 | |||
| 0972d504ab | |||
| f4b7337210 | |||
| bfd06dd8f6 | |||
| 0ae5343b11 | |||
| 8073a95f74 | |||
| f948362d1b | |||
| 54599f28d1 | |||
| 16e714730d | |||
| b0ddabd819 | |||
| b1e7aeca43 | |||
| 419b6ffec5 | |||
| 12d43e912c | |||
| 8e86f244c6 | |||
| 5190a0cc9c | |||
| 4784971d97 | |||
| 2e159ec318 | |||
| 23f08dec09 | |||
| 5bad18dd33 | |||
| 86bd6613b4 | |||
| c7c18d4dc3 | |||
| 2e84282424 | |||
| 72cea2935d | |||
| ec61f134a8 | |||
| 75877db4f9 | |||
| 576c7e9ed2 | |||
| b7378867c0 | |||
| 2d19ee784b | |||
| 5d54ac1cd4 | |||
| 97724c8c8d | |||
| 73badc175d | |||
| 89bc8bf1d4 | |||
| 19026c1b30 | |||
| e15305df85 | |||
| 4b6d7d04ec | |||
| 05c76fa3ec | |||
| b53faca120 | |||
| eb15706ccc | |||
| 4e373e6d2c | |||
| a07daa715a | |||
| 87baa347b7 | |||
| 48da0a4c81 | |||
| c0cbb059fe | |||
| 8cb8692b84 | |||
| 1bb1dba4d6 | |||
| 4f0eeede37 | |||
| 1ef1b32f5f | |||
| 154ac7f61c |
46
.dockerignore
Normal file
46
.dockerignore
Normal file
@@ -0,0 +1,46 @@
|
||||
# Docker 构建时排除的目录和文件
|
||||
|
||||
# 排除数据卷目录(PostgreSQL 等容器创建的数据)
|
||||
**/volumes/
|
||||
**/data/
|
||||
docker/**/volumes/
|
||||
|
||||
# 排除日志
|
||||
**/logs/
|
||||
*.log
|
||||
|
||||
# 排除临时文件
|
||||
*.tmp
|
||||
*.swp
|
||||
.tmp/
|
||||
|
||||
# 排除 git
|
||||
.git/
|
||||
**/.git/
|
||||
|
||||
# 排除 IDE 配置
|
||||
.idea/
|
||||
.vscode/
|
||||
**/.vscode/
|
||||
|
||||
# 排除环境文件
|
||||
.env
|
||||
.env.local
|
||||
**/.env.local
|
||||
|
||||
# 排除 node_modules(前端构建时会重新安装)
|
||||
**/node_modules/
|
||||
|
||||
# 排除构建产物(保留 JAR 文件)
|
||||
**/build/
|
||||
# 排除 target 下的非 JAR 文件
|
||||
**/target/classes/
|
||||
**/target/generated-sources/
|
||||
**/target/generated-test-sources/
|
||||
**/target/maven-archiver/
|
||||
**/target/maven-status/
|
||||
**/target/test-classes/
|
||||
**/target/*.original
|
||||
|
||||
# 排除导出的镜像
|
||||
docker/urbanLifeline/images/
|
||||
@@ -1,16 +0,0 @@
|
||||
# 应用配置
|
||||
APP_NAME=DifyPlugin
|
||||
APP_VERSION=1.0.0
|
||||
DEBUG=false
|
||||
|
||||
# API配置
|
||||
API_V1_PREFIX=/api/v1
|
||||
|
||||
# 跨域配置
|
||||
CORS_ORIGINS=["*"]
|
||||
|
||||
# Redis配置
|
||||
REDIS_HOST=localhost
|
||||
REDIS_PORT=6379
|
||||
REDIS_PASSWORD=
|
||||
REDIS_DB=0
|
||||
27
difyPlugin/.gitignore
vendored
27
difyPlugin/.gitignore
vendored
@@ -1,27 +0,0 @@
|
||||
# Python
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
*.so
|
||||
.Python
|
||||
venv/
|
||||
.venv/
|
||||
ENV/
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# 环境配置
|
||||
.env
|
||||
|
||||
# 日志
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# 测试
|
||||
.pytest_cache/
|
||||
.coverage
|
||||
htmlcov/
|
||||
@@ -1,38 +0,0 @@
|
||||
# DifyPlugin
|
||||
|
||||
Dify插件服务 - 基于FastAPI构建
|
||||
|
||||
## 快速开始
|
||||
|
||||
### 安装依赖
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### 运行服务
|
||||
|
||||
```bash
|
||||
uvicorn app.main:app --reload --host 0.0.0.0 --port 8000
|
||||
```
|
||||
|
||||
### API文档
|
||||
|
||||
- Swagger UI: http://localhost:8000/docs
|
||||
- ReDoc: http://localhost:8000/redoc
|
||||
|
||||
## 项目结构
|
||||
|
||||
```
|
||||
difyPlugin/
|
||||
├── app/
|
||||
│ ├── main.py # 应用入口
|
||||
│ ├── config.py # 配置管理
|
||||
│ ├── api/v1/ # API路由
|
||||
│ ├── schemas/ # Pydantic数据模型
|
||||
│ ├── services/ # 业务逻辑
|
||||
│ ├── core/ # 核心功能
|
||||
│ └── utils/ # 工具函数
|
||||
├── requirements.txt
|
||||
└── README.md
|
||||
```
|
||||
@@ -1 +0,0 @@
|
||||
# DifyPlugin FastAPI Application
|
||||
@@ -1,16 +0,0 @@
|
||||
# API模块
|
||||
from fastapi import APIRouter
|
||||
|
||||
from app.api.workcase import router as workcase_router
|
||||
from app.api.bidding import router as bidding_router
|
||||
from app.api.test import router as test_router
|
||||
|
||||
# 创建主路由器
|
||||
router = APIRouter()
|
||||
|
||||
# 注册所有子路由
|
||||
router.include_router(workcase_router, prefix="/workcase", tags=["工单相关服务"])
|
||||
router.include_router(bidding_router, prefix="/bidding", tags=["招标相关服务"])
|
||||
router.include_router(test_router, prefix="/test", tags=["招标相关服务"])
|
||||
|
||||
__all__ = ["router"]
|
||||
@@ -1,38 +0,0 @@
|
||||
"""文件读取相关接口"""
|
||||
from fastapi import APIRouter
|
||||
|
||||
from app.schemas import ResultDomain
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post(
|
||||
"/read",
|
||||
response_model=ResultDomain[dict],
|
||||
summary="读取文件",
|
||||
description="读取指定路径的文件内容"
|
||||
)
|
||||
async def read_file(file_path: str) -> ResultDomain[dict]:
|
||||
"""
|
||||
读取文件内容
|
||||
|
||||
- **file_path**: 文件路径
|
||||
"""
|
||||
# TODO: 实现文件读取逻辑
|
||||
return ResultDomain.success(message="读取成功", data={"content": ""})
|
||||
|
||||
|
||||
@router.post(
|
||||
"/parse",
|
||||
response_model=ResultDomain[dict],
|
||||
summary="解析文件",
|
||||
description="解析招标文件内容"
|
||||
)
|
||||
async def parse_file(file_path: str) -> ResultDomain[dict]:
|
||||
"""
|
||||
解析招标文件
|
||||
|
||||
- **file_path**: 文件路径
|
||||
"""
|
||||
# TODO: 实现文件解析逻辑
|
||||
return ResultDomain.success(message="解析成功", data={"result": {}})
|
||||
@@ -1,13 +0,0 @@
|
||||
# API模块
|
||||
from fastapi import APIRouter
|
||||
|
||||
from .ReadFileAPI import router as readfile_router
|
||||
|
||||
|
||||
# 创建主路由器
|
||||
router = APIRouter()
|
||||
|
||||
# 注册所有子路由
|
||||
router.include_router(readfile_router, prefix="/readfile", tags=["文件读取相关服务"])
|
||||
|
||||
__all__ = ["router"]
|
||||
@@ -1,28 +0,0 @@
|
||||
"""测试相关接口"""
|
||||
from fastapi import APIRouter
|
||||
|
||||
from app.schemas.base import ResultDomain
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/world",
|
||||
response_model=ResultDomain[str],
|
||||
summary="Hello World",
|
||||
description="测试接口连通性"
|
||||
)
|
||||
async def hello_word() -> ResultDomain[str]:
|
||||
"""Hello World 测试接口"""
|
||||
return ResultDomain.ok(message="Hello World", data="Hello World")
|
||||
|
||||
|
||||
@router.get(
|
||||
"/ping",
|
||||
response_model=ResultDomain[str],
|
||||
summary="Ping测试",
|
||||
description="测试服务是否正常运行"
|
||||
)
|
||||
async def ping() -> ResultDomain[str]:
|
||||
"""Ping 测试接口"""
|
||||
return ResultDomain.ok(message="pong", data="pong")
|
||||
@@ -1,13 +0,0 @@
|
||||
# API模块
|
||||
from fastapi import APIRouter
|
||||
|
||||
from .HelloWordAPI import router as hello_router
|
||||
|
||||
|
||||
# 创建主路由器
|
||||
router = APIRouter()
|
||||
|
||||
# 注册所有子路由
|
||||
router.include_router(hello_router, prefix="/hello", tags=["测试服务"])
|
||||
|
||||
__all__ = ["router"]
|
||||
@@ -1,150 +0,0 @@
|
||||
"""二维码相关接口 - API层"""
|
||||
from fastapi import APIRouter, File, UploadFile
|
||||
|
||||
from app.schemas import ResultDomain
|
||||
from app.services.workcase.qrcode import QrCodeService
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
# 初始化服务
|
||||
qrcode_service = QrCodeService()
|
||||
|
||||
|
||||
@router.post(
|
||||
"/generate",
|
||||
response_model=ResultDomain[dict],
|
||||
summary="生成二维码",
|
||||
description="根据内容生成二维码"
|
||||
)
|
||||
async def generate_qrcode(
|
||||
content: str,
|
||||
size: int = 300,
|
||||
error_correction: str = "H"
|
||||
) -> ResultDomain[dict]:
|
||||
"""
|
||||
生成二维码
|
||||
|
||||
- **content**: 二维码内容
|
||||
- **size**: 图片大小(像素,100-2000)
|
||||
- **error_correction**: 纠错级别
|
||||
- L: 7% 容错
|
||||
- M: 15% 容错
|
||||
- Q: 25% 容错
|
||||
- H: 30% 容错 (推荐)
|
||||
"""
|
||||
result = await qrcode_service.generate_qrcode(
|
||||
content=content,
|
||||
size=size,
|
||||
error_correction=error_correction
|
||||
)
|
||||
|
||||
if result["success"]:
|
||||
return ResultDomain.success(message="生成成功", data=result)
|
||||
else:
|
||||
return ResultDomain.fail(message=result.get("error", "生成失败"))
|
||||
|
||||
|
||||
@router.post(
|
||||
"/parse",
|
||||
response_model=ResultDomain[dict],
|
||||
summary="解析二维码",
|
||||
description="解析二维码图片内容(支持URL、base64)"
|
||||
)
|
||||
async def parse_qrcode(
|
||||
image_source: str,
|
||||
strategy: str = "auto"
|
||||
) -> ResultDomain[dict]:
|
||||
"""
|
||||
解析二维码
|
||||
|
||||
- **image_source**: 图片来源
|
||||
- URL: http://... 或 https://...
|
||||
- base64: data:image/...;base64,...
|
||||
- 本地路径: /path/to/image.png
|
||||
- **strategy**: 预处理策略
|
||||
- basic: 基础模式,仅尝试原图和灰度图
|
||||
- auto: 自动模式,尝试多种预处理方法 (推荐)
|
||||
- enhanced: 增强模式,使用更多预处理技术
|
||||
- all: 全部模式,尝试所有可能的预处理方法(包括多尺度)
|
||||
"""
|
||||
result = await qrcode_service.parse_qrcode(
|
||||
image_source=image_source,
|
||||
strategy=strategy
|
||||
)
|
||||
|
||||
if result["success"]:
|
||||
return ResultDomain.success(message="解析成功", data=result)
|
||||
else:
|
||||
return ResultDomain.fail(
|
||||
message=result.get("error", "解析失败"),
|
||||
data={"total_attempts": result.get("total_attempts", 0)}
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/parse-file",
|
||||
response_model=ResultDomain[dict],
|
||||
summary="解析二维码文件",
|
||||
description="通过文件上传解析二维码"
|
||||
)
|
||||
async def parse_qrcode_file(
|
||||
file: UploadFile = File(...),
|
||||
strategy: str = "auto"
|
||||
) -> ResultDomain[dict]:
|
||||
"""
|
||||
解析二维码文件上传
|
||||
|
||||
- **file**: 二维码图片文件(支持 png/jpg/jpeg/bmp 等格式)
|
||||
- **strategy**: 预处理策略 (basic/auto/enhanced/all)
|
||||
"""
|
||||
# 读取文件内容
|
||||
content = await file.read()
|
||||
|
||||
# 提取文件类型
|
||||
if file.content_type:
|
||||
file_type = file.content_type.split("/")[-1]
|
||||
else:
|
||||
# 从文件名提取扩展名
|
||||
file_type = file.filename.split(".")[-1] if file.filename else "png"
|
||||
|
||||
# 调用服务
|
||||
result = await qrcode_service.parse_qrcode_from_file(
|
||||
file_content=content,
|
||||
file_type=file_type,
|
||||
strategy=strategy
|
||||
)
|
||||
|
||||
if result["success"]:
|
||||
return ResultDomain.success(message="解析成功", data=result)
|
||||
else:
|
||||
return ResultDomain.fail(
|
||||
message=result.get("error", "解析失败"),
|
||||
data={"total_attempts": result.get("total_attempts", 0)}
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/validate",
|
||||
response_model=ResultDomain[dict],
|
||||
summary="验证二维码内容",
|
||||
description="验证内容是否适合生成二维码"
|
||||
)
|
||||
async def validate_qrcode_content(
|
||||
content: str,
|
||||
max_length: int = 2953
|
||||
) -> ResultDomain[dict]:
|
||||
"""
|
||||
验证二维码内容
|
||||
|
||||
- **content**: 要验证的内容
|
||||
- **max_length**: 最大长度(字节)
|
||||
"""
|
||||
result = qrcode_service.validate_qrcode_content(content, max_length)
|
||||
|
||||
if result["valid"]:
|
||||
return ResultDomain.success(
|
||||
message="内容有效",
|
||||
data={"length": result["length"]}
|
||||
)
|
||||
else:
|
||||
return ResultDomain.fail(message=result.get("error", "内容无效"))
|
||||
@@ -1,13 +0,0 @@
|
||||
# API模块
|
||||
from fastapi import APIRouter
|
||||
|
||||
from .QrCodeAPI import router as qrcode_router
|
||||
|
||||
|
||||
# 创建主路由器
|
||||
router = APIRouter()
|
||||
|
||||
# 注册所有子路由
|
||||
router.include_router(qrcode_router, prefix="/qrcode", tags=["二维码相关服务"])
|
||||
|
||||
__all__ = ["router"]
|
||||
@@ -1,38 +0,0 @@
|
||||
"""应用配置管理"""
|
||||
from pydantic_settings import BaseSettings
|
||||
from functools import lru_cache
|
||||
|
||||
|
||||
class Settings(BaseSettings):
|
||||
"""应用配置"""
|
||||
# 应用基础配置
|
||||
APP_NAME: str = "DifyPlugin"
|
||||
APP_VERSION: str = "1.0.0"
|
||||
DEBUG: bool = False
|
||||
|
||||
# API配置
|
||||
API_V1_PREFIX: str = "/api/v1"
|
||||
HOST: str = "0.0.0.0"
|
||||
API_HOST: str = "localhost" # OpenAPI servers 显示的地址
|
||||
PORT: int = 8380
|
||||
|
||||
# 跨域配置
|
||||
CORS_ORIGINS: list[str] = ["*"]
|
||||
|
||||
# Redis配置
|
||||
REDIS_HOST: str = "localhost"
|
||||
REDIS_PORT: int = 6379
|
||||
REDIS_PASSWORD: str = "123456"
|
||||
REDIS_DB: int = 0
|
||||
|
||||
class Config:
|
||||
env_file = ".env"
|
||||
case_sensitive = True
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def get_settings() -> Settings:
|
||||
return Settings()
|
||||
|
||||
|
||||
settings = get_settings()
|
||||
@@ -1 +0,0 @@
|
||||
# Core模块
|
||||
@@ -1,42 +0,0 @@
|
||||
"""自定义异常和异常处理器"""
|
||||
from fastapi import FastAPI, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from app.schemas.base import ResultDomain
|
||||
|
||||
|
||||
class BusinessException(Exception):
|
||||
"""业务异常"""
|
||||
def __init__(self, code: int = 500, message: str = "业务异常"):
|
||||
self.code = code
|
||||
self.message = message
|
||||
|
||||
|
||||
class NotFoundException(BusinessException):
|
||||
"""资源不存在异常"""
|
||||
def __init__(self, message: str = "资源不存在"):
|
||||
super().__init__(code=404, message=message)
|
||||
|
||||
|
||||
class ValidationException(BusinessException):
|
||||
"""参数校验异常"""
|
||||
def __init__(self, message: str = "参数校验失败"):
|
||||
super().__init__(code=400, message=message)
|
||||
|
||||
|
||||
def register_exception_handlers(app: FastAPI):
|
||||
"""注册全局异常处理器"""
|
||||
|
||||
@app.exception_handler(BusinessException)
|
||||
async def business_exception_handler(request: Request, exc: BusinessException):
|
||||
return JSONResponse(
|
||||
status_code=200,
|
||||
content=ResultDomain.fail(message=exc.message, code=exc.code).model_dump()
|
||||
)
|
||||
|
||||
@app.exception_handler(Exception)
|
||||
async def global_exception_handler(request: Request, exc: Exception):
|
||||
return JSONResponse(
|
||||
status_code=500,
|
||||
content=ResultDomain.fail(message=str(exc), code=500).model_dump()
|
||||
)
|
||||
@@ -1,26 +0,0 @@
|
||||
"""中间件定义"""
|
||||
import time
|
||||
import logging
|
||||
from fastapi import Request
|
||||
from starlette.middleware.base import BaseHTTPMiddleware
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RequestLoggingMiddleware(BaseHTTPMiddleware):
|
||||
"""请求日志中间件"""
|
||||
|
||||
async def dispatch(self, request: Request, call_next):
|
||||
start_time = time.time()
|
||||
|
||||
response = await call_next(request)
|
||||
|
||||
process_time = time.time() - start_time
|
||||
logger.info(
|
||||
f"{request.method} {request.url.path} "
|
||||
f"- Status: {response.status_code} "
|
||||
f"- Time: {process_time:.3f}s"
|
||||
)
|
||||
|
||||
response.headers["X-Process-Time"] = str(process_time)
|
||||
return response
|
||||
@@ -1,128 +0,0 @@
|
||||
"""Redis 服务"""
|
||||
import json
|
||||
from typing import Any, Optional, Union
|
||||
|
||||
import redis.asyncio as redis
|
||||
from redis.asyncio import Redis
|
||||
|
||||
from app.config import settings
|
||||
|
||||
|
||||
class RedisService:
|
||||
"""Redis 服务类"""
|
||||
|
||||
_client: Optional[Redis] = None
|
||||
|
||||
@classmethod
|
||||
async def init(cls) -> None:
|
||||
"""初始化 Redis 连接"""
|
||||
cls._client = redis.Redis(
|
||||
host=settings.REDIS_HOST,
|
||||
port=settings.REDIS_PORT,
|
||||
password=settings.REDIS_PASSWORD or None,
|
||||
db=settings.REDIS_DB,
|
||||
decode_responses=True
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def close(cls) -> None:
|
||||
"""关闭 Redis 连接"""
|
||||
if cls._client:
|
||||
await cls._client.close()
|
||||
cls._client = None
|
||||
|
||||
@classmethod
|
||||
def get_client(cls) -> Redis:
|
||||
"""获取 Redis 客户端"""
|
||||
if not cls._client:
|
||||
raise RuntimeError("Redis 未初始化,请先调用 init()")
|
||||
return cls._client
|
||||
|
||||
# ==================== String 操作 ====================
|
||||
|
||||
@classmethod
|
||||
async def get(cls, key: str) -> Optional[str]:
|
||||
"""获取值"""
|
||||
return await cls.get_client().get(key)
|
||||
|
||||
@classmethod
|
||||
async def set(cls, key: str, value: Union[str, int, float], expire: Optional[int] = None) -> bool:
|
||||
"""设置值"""
|
||||
return await cls.get_client().set(key, value, ex=expire)
|
||||
|
||||
@classmethod
|
||||
async def delete(cls, *keys: str) -> int:
|
||||
"""删除键"""
|
||||
return await cls.get_client().delete(*keys)
|
||||
|
||||
@classmethod
|
||||
async def exists(cls, key: str) -> bool:
|
||||
"""判断键是否存在"""
|
||||
return await cls.get_client().exists(key) > 0
|
||||
|
||||
@classmethod
|
||||
async def expire(cls, key: str, seconds: int) -> bool:
|
||||
"""设置过期时间"""
|
||||
return await cls.get_client().expire(key, seconds)
|
||||
|
||||
@classmethod
|
||||
async def ttl(cls, key: str) -> int:
|
||||
"""获取剩余过期时间"""
|
||||
return await cls.get_client().ttl(key)
|
||||
|
||||
# ==================== JSON 操作 ====================
|
||||
|
||||
@classmethod
|
||||
async def get_json(cls, key: str) -> Optional[Any]:
|
||||
"""获取 JSON 值"""
|
||||
value = await cls.get(key)
|
||||
return json.loads(value) if value else None
|
||||
|
||||
@classmethod
|
||||
async def set_json(cls, key: str, value: Any, expire: Optional[int] = None) -> bool:
|
||||
"""设置 JSON 值"""
|
||||
return await cls.set(key, json.dumps(value, ensure_ascii=False), expire)
|
||||
|
||||
# ==================== Hash 操作 ====================
|
||||
|
||||
@classmethod
|
||||
async def hget(cls, name: str, key: str) -> Optional[str]:
|
||||
"""获取 Hash 字段值"""
|
||||
return await cls.get_client().hget(name, key)
|
||||
|
||||
@classmethod
|
||||
async def hset(cls, name: str, key: str, value: str) -> int:
|
||||
"""设置 Hash 字段值"""
|
||||
return await cls.get_client().hset(name, key, value)
|
||||
|
||||
@classmethod
|
||||
async def hgetall(cls, name: str) -> dict:
|
||||
"""获取 Hash 所有字段"""
|
||||
return await cls.get_client().hgetall(name)
|
||||
|
||||
@classmethod
|
||||
async def hdel(cls, name: str, *keys: str) -> int:
|
||||
"""删除 Hash 字段"""
|
||||
return await cls.get_client().hdel(name, *keys)
|
||||
|
||||
# ==================== List 操作 ====================
|
||||
|
||||
@classmethod
|
||||
async def lpush(cls, key: str, *values: str) -> int:
|
||||
"""左侧插入列表"""
|
||||
return await cls.get_client().lpush(key, *values)
|
||||
|
||||
@classmethod
|
||||
async def rpush(cls, key: str, *values: str) -> int:
|
||||
"""右侧插入列表"""
|
||||
return await cls.get_client().rpush(key, *values)
|
||||
|
||||
@classmethod
|
||||
async def lrange(cls, key: str, start: int = 0, end: int = -1) -> list:
|
||||
"""获取列表范围"""
|
||||
return await cls.get_client().lrange(key, start, end)
|
||||
|
||||
@classmethod
|
||||
async def llen(cls, key: str) -> int:
|
||||
"""获取列表长度"""
|
||||
return await cls.get_client().llen(key)
|
||||
@@ -1,79 +0,0 @@
|
||||
"""FastAPI 应用入口"""
|
||||
from contextlib import asynccontextmanager
|
||||
import os
|
||||
import sys
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
|
||||
from app.config import settings
|
||||
from app.api import router as api_router
|
||||
from app.core.exceptions import register_exception_handlers
|
||||
from app.core.redis import RedisService
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI):
|
||||
"""应用生命周期管理"""
|
||||
# 启动时初始化
|
||||
await RedisService.init()
|
||||
yield
|
||||
# 关闭时清理
|
||||
await RedisService.close()
|
||||
|
||||
|
||||
def create_app() -> FastAPI:
|
||||
"""创建FastAPI应用实例"""
|
||||
app = FastAPI(
|
||||
title=settings.APP_NAME,
|
||||
version=settings.APP_VERSION,
|
||||
description="Dify插件服务API",
|
||||
openapi_url=f"{settings.API_V1_PREFIX}/openapi.json",
|
||||
docs_url="/docs",
|
||||
redoc_url="/redoc",
|
||||
lifespan=lifespan,
|
||||
servers=[
|
||||
{"url": f"http://{settings.API_HOST}:{settings.PORT}", "description": "API服务器"},
|
||||
],
|
||||
)
|
||||
|
||||
# 注册CORS中间件
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=settings.CORS_ORIGINS,
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
# 注册异常处理器
|
||||
register_exception_handlers(app)
|
||||
|
||||
# 注册路由
|
||||
app.include_router(api_router, prefix=settings.API_V1_PREFIX)
|
||||
|
||||
return app
|
||||
|
||||
|
||||
app = create_app()
|
||||
|
||||
|
||||
def print_routes(app: FastAPI):
|
||||
"""打印所有注册的路由"""
|
||||
print("\n" + "=" * 60)
|
||||
print("Registered Routes:")
|
||||
print("=" * 60)
|
||||
for route in app.routes:
|
||||
if hasattr(route, "methods"):
|
||||
methods = ", ".join(route.methods - {"HEAD", "OPTIONS"})
|
||||
print(f" {methods:8} {route.path}")
|
||||
print("=" * 60 + "\n")
|
||||
|
||||
|
||||
# 启动时打印路由
|
||||
print_routes(app)
|
||||
|
||||
|
||||
@app.get("/health", tags=["健康检查"], summary="健康检查接口")
|
||||
async def health_check():
|
||||
"""服务健康检查"""
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
from app.schemas.base import ResultDomain
|
||||
from app.schemas.plugin import PluginRequest, PluginResponse
|
||||
|
||||
__all__ = ["ResultDomain", "PluginRequest", "PluginResponse"]
|
||||
@@ -1,52 +0,0 @@
|
||||
"""统一返回类型定义"""
|
||||
from typing import TypeVar, Generic, Optional, List, Any
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
T = TypeVar('T')
|
||||
|
||||
|
||||
class PageDomain(BaseModel, Generic[T]):
|
||||
"""分页数据模型"""
|
||||
page: int = Field(default=1, description="当前页码")
|
||||
pageSize: int = Field(default=10, description="每页大小")
|
||||
total: int = Field(default=0, description="总记录数")
|
||||
dataList: Optional[List[T]] = Field(default=None, description="数据列表")
|
||||
|
||||
|
||||
class ResultDomain(BaseModel, Generic[T]):
|
||||
"""统一返回类型"""
|
||||
code: Optional[int] = Field(default=None, description="状态码")
|
||||
success: Optional[bool] = Field(default=None, description="是否成功")
|
||||
message: Optional[str] = Field(default=None, description="返回消息")
|
||||
data: Optional[T] = Field(default=None, description="单条数据")
|
||||
dataList: Optional[List[T]] = Field(default=None, description="数据列表")
|
||||
pageDomain: Optional[PageDomain[T]] = Field(default=None, description="分页数据")
|
||||
|
||||
@staticmethod
|
||||
def ok(message: str = "success", data: Any = None) -> "ResultDomain":
|
||||
"""成功返回 - 单条数据"""
|
||||
return ResultDomain(code=200, success=True, message=message, data=data)
|
||||
|
||||
@staticmethod
|
||||
def ok_list(message: str = "success", data_list: List[Any] = None) -> "ResultDomain":
|
||||
"""成功返回 - 数据列表"""
|
||||
return ResultDomain(code=200, success=True, message=message, dataList=data_list)
|
||||
|
||||
@staticmethod
|
||||
def ok_page(message: str = "success", page_domain: "PageDomain" = None) -> "ResultDomain":
|
||||
"""成功返回 - 分页数据"""
|
||||
result = ResultDomain(code=200, success=True, message=message, pageDomain=page_domain)
|
||||
if page_domain:
|
||||
result.dataList = page_domain.dataList
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def fail(message: str = "failure", code: int = 500) -> "ResultDomain":
|
||||
"""失败返回"""
|
||||
return ResultDomain(code=code, success=False, message=message)
|
||||
|
||||
model_config = {
|
||||
"json_schema_extra": {
|
||||
"examples": [{"code": 200, "success": True, "message": "操作成功"}]
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
"""插件相关数据模型"""
|
||||
from typing import Optional, Dict, Any
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class PluginRequest(BaseModel):
|
||||
"""
|
||||
插件请求模型
|
||||
|
||||
Attributes:
|
||||
plugin_id: 插件ID
|
||||
action: 执行动作
|
||||
params: 请求参数
|
||||
"""
|
||||
plugin_id: str = Field(..., description="插件ID", examples=["plugin_001"])
|
||||
action: str = Field(..., description="执行动作", examples=["execute"])
|
||||
params: Optional[Dict[str, Any]] = Field(default=None, description="请求参数")
|
||||
|
||||
model_config = {
|
||||
"json_schema_extra": {
|
||||
"examples": [
|
||||
{
|
||||
"plugin_id": "plugin_001",
|
||||
"action": "execute",
|
||||
"params": {"key": "value"}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class PluginResponse(BaseModel):
|
||||
"""
|
||||
插件响应模型
|
||||
|
||||
Attributes:
|
||||
plugin_id: 插件ID
|
||||
result: 执行结果
|
||||
status: 执行状态
|
||||
"""
|
||||
plugin_id: str = Field(..., description="插件ID")
|
||||
result: Optional[Dict[str, Any]] = Field(default=None, description="执行结果")
|
||||
status: str = Field(default="success", description="执行状态")
|
||||
@@ -1,2 +0,0 @@
|
||||
|
||||
__all__ = []
|
||||
@@ -1,298 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""二维码处理核心类 - 基于 OpenCV QRCodeDetector
|
||||
|
||||
本模块使用 OpenCV 的 QRCodeDetector 进行二维码识别,
|
||||
配合多种图像预处理策略,确保高识别率和跨平台兼容性。
|
||||
"""
|
||||
import base64
|
||||
import io
|
||||
from typing import Optional, Callable, Tuple
|
||||
|
||||
import cv2
|
||||
import httpx
|
||||
import numpy as np
|
||||
import qrcode
|
||||
from PIL import Image
|
||||
|
||||
|
||||
class QRCodeProcessor:
|
||||
"""二维码处理器 - 负责二维码的生成、解析和图像预处理"""
|
||||
|
||||
# 预处理策略映射
|
||||
PREPROCESSING_STRATEGIES = {
|
||||
"original": ("原图", lambda img, gray: img),
|
||||
"grayscale": ("灰度图", lambda img, gray: cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)),
|
||||
"clahe": ("CLAHE增强", lambda img, gray: cv2.cvtColor(
|
||||
cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(gray),
|
||||
cv2.COLOR_GRAY2BGR
|
||||
)),
|
||||
"adaptive_threshold": ("自适应二值化", lambda img, gray: cv2.cvtColor(
|
||||
cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2),
|
||||
cv2.COLOR_GRAY2BGR
|
||||
)),
|
||||
"otsu": ("Otsu二值化", lambda img, gray: cv2.cvtColor(
|
||||
cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1],
|
||||
cv2.COLOR_GRAY2BGR
|
||||
)),
|
||||
"denoise": ("去噪+二值化", lambda img, gray: cv2.cvtColor(
|
||||
cv2.threshold(
|
||||
cv2.fastNlMeansDenoising(gray, None, 10, 7, 21),
|
||||
0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
|
||||
)[1],
|
||||
cv2.COLOR_GRAY2BGR
|
||||
)),
|
||||
"sharpen": ("锐化", lambda img, gray: cv2.cvtColor(
|
||||
cv2.filter2D(gray, -1, np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])),
|
||||
cv2.COLOR_GRAY2BGR
|
||||
)),
|
||||
"morphology": ("形态学处理", lambda img, gray: cv2.cvtColor(
|
||||
cv2.morphologyEx(
|
||||
cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2),
|
||||
cv2.MORPH_CLOSE,
|
||||
cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
|
||||
),
|
||||
cv2.COLOR_GRAY2BGR
|
||||
)),
|
||||
"scale_0.5": ("0.5x缩放", lambda img, gray: cv2.resize(
|
||||
img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA
|
||||
)),
|
||||
"scale_1.5": ("1.5x缩放", lambda img, gray: cv2.resize(
|
||||
img, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC
|
||||
)),
|
||||
"scale_2.0": ("2.0x缩放", lambda img, gray: cv2.resize(
|
||||
img, None, fx=2.0, fy=2.0, interpolation=cv2.INTER_CUBIC
|
||||
)),
|
||||
}
|
||||
|
||||
# 策略组合映射
|
||||
STRATEGY_MAP = {
|
||||
"basic": ["original", "grayscale"],
|
||||
"enhanced": ["original", "grayscale", "clahe", "adaptive_threshold", "otsu", "denoise", "sharpen", "morphology"],
|
||||
"all": ["original", "grayscale", "clahe", "adaptive_threshold", "otsu", "denoise", "sharpen", "morphology", "scale_0.5", "scale_1.5", "scale_2.0"],
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def generate(
|
||||
content: str,
|
||||
size: int = 300,
|
||||
error_correction: str = "H",
|
||||
box_size: int = 10,
|
||||
border: int = 4
|
||||
) -> str:
|
||||
"""
|
||||
生成二维码
|
||||
|
||||
Args:
|
||||
content: 二维码内容
|
||||
size: 图片大小(像素)
|
||||
error_correction: 纠错级别 (L/M/Q/H)
|
||||
box_size: 每个格子的像素大小
|
||||
border: 边框大小(格子数)
|
||||
|
||||
Returns:
|
||||
base64编码的图片数据 (data:image/png;base64,...)
|
||||
|
||||
Raises:
|
||||
ValueError: 参数错误时抛出异常
|
||||
"""
|
||||
# 验证纠错级别
|
||||
error_levels = {
|
||||
"L": qrcode.constants.ERROR_CORRECT_L, # 7% 容错
|
||||
"M": qrcode.constants.ERROR_CORRECT_M, # 15% 容错
|
||||
"Q": qrcode.constants.ERROR_CORRECT_Q, # 25% 容错
|
||||
"H": qrcode.constants.ERROR_CORRECT_H, # 30% 容错
|
||||
}
|
||||
|
||||
if error_correction not in error_levels:
|
||||
raise ValueError(f"无效的纠错级别: {error_correction},支持: L/M/Q/H")
|
||||
|
||||
# 创建二维码对象
|
||||
qr = qrcode.QRCode(
|
||||
version=None, # 自动确定版本
|
||||
error_correction=error_levels[error_correction],
|
||||
box_size=box_size,
|
||||
border=border,
|
||||
)
|
||||
|
||||
# 添加数据并生成
|
||||
qr.add_data(content)
|
||||
qr.make(fit=True)
|
||||
|
||||
# 生成图片
|
||||
img = qr.make_image(fill_color="black", back_color="white")
|
||||
|
||||
# 调整到指定大小
|
||||
img = img.resize((size, size), Image.Resampling.LANCZOS)
|
||||
|
||||
# 转换为base64
|
||||
buffer = io.BytesIO()
|
||||
img.save(buffer, format="PNG")
|
||||
img_base64 = base64.b64encode(buffer.getvalue()).decode()
|
||||
|
||||
return f"data:image/png;base64,{img_base64}"
|
||||
|
||||
@staticmethod
|
||||
async def load_image(image_source: str) -> np.ndarray:
|
||||
"""
|
||||
加载图片(支持URL、base64、本地路径)
|
||||
|
||||
Args:
|
||||
image_source: 图片来源
|
||||
- URL: http://... 或 https://...
|
||||
- base64: data:image/...;base64,...
|
||||
- 本地路径: /path/to/image.png
|
||||
|
||||
Returns:
|
||||
OpenCV图片对象 (BGR格式)
|
||||
|
||||
Raises:
|
||||
ValueError: 图片加载失败时抛出异常
|
||||
"""
|
||||
try:
|
||||
# 检查是否为base64
|
||||
if image_source.startswith("data:image"):
|
||||
# 提取base64数据
|
||||
base64_data = image_source.split(",")[1]
|
||||
img_data = base64.b64decode(base64_data)
|
||||
img_array = np.frombuffer(img_data, np.uint8)
|
||||
img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
|
||||
|
||||
elif image_source.startswith("http://") or image_source.startswith("https://"):
|
||||
# 下载图片
|
||||
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||
response = await client.get(image_source)
|
||||
response.raise_for_status()
|
||||
img_array = np.frombuffer(response.content, np.uint8)
|
||||
img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
|
||||
|
||||
else:
|
||||
# 本地文件
|
||||
img = cv2.imread(image_source)
|
||||
|
||||
if img is None:
|
||||
raise ValueError("无法解析图片数据")
|
||||
|
||||
return img
|
||||
|
||||
except Exception as e:
|
||||
raise ValueError(f"图片加载失败: {str(e)}")
|
||||
|
||||
@staticmethod
|
||||
async def search_qrcode(img: np.ndarray)-> list[np.ndarray]:
|
||||
"""
|
||||
搜索二维码,只搜索不解析
|
||||
|
||||
Args:
|
||||
img: OpenCV图像对象
|
||||
|
||||
Returns:
|
||||
二维码列表
|
||||
"""
|
||||
detector = cv2.QRCodeDetector()
|
||||
imgs = detector.detect(img)
|
||||
|
||||
@staticmethod
|
||||
def decode(img: np.ndarray) -> Optional[str]:
|
||||
"""
|
||||
使用 OpenCV QRCodeDetector 解码二维码
|
||||
|
||||
Args:
|
||||
img: OpenCV图像对象
|
||||
|
||||
Returns:
|
||||
二维码内容,如果没有检测到返回None
|
||||
"""
|
||||
detector = cv2.QRCodeDetector()
|
||||
data, bbox, _ = detector.detectAndDecode(img)
|
||||
|
||||
if data:
|
||||
return data
|
||||
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
async def parse(
|
||||
image_source: str,
|
||||
strategy: str = "auto"
|
||||
) -> dict:
|
||||
"""
|
||||
解析二维码(使用 OpenCV + 按需预处理策略)
|
||||
|
||||
解码策略:
|
||||
1. 根据 strategy 参数选择预处理步骤列表
|
||||
2. 按需应用每种预处理算法并立即尝试解码
|
||||
3. 一旦成功立即返回,避免不必要的计算
|
||||
|
||||
Args:
|
||||
image_source: 图片来源(URL/base64/本地路径)
|
||||
strategy: 预处理策略
|
||||
- basic: 原图 + 灰度图(2种)
|
||||
- enhanced: basic + 6种增强算法(8种)
|
||||
- all: auto + 多尺度(11种)
|
||||
|
||||
Returns:
|
||||
解析结果字典:
|
||||
{
|
||||
"success": bool,
|
||||
"content": str or None,
|
||||
"strategy_used": str, # 使用的预处理策略名称
|
||||
"preprocessing_index": int, # 预处理索引
|
||||
"total_attempts": int,
|
||||
"message": str # 仅失败时有
|
||||
}
|
||||
"""
|
||||
# 验证策略参数
|
||||
if strategy not in QRCodeProcessor.STRATEGY_MAP:
|
||||
raise ValueError(f"无效的策略: {strategy},支持: {list(QRCodeProcessor.STRATEGY_MAP.keys())}")
|
||||
|
||||
# 加载原始图片
|
||||
img = await QRCodeProcessor.load_image(image_source)
|
||||
|
||||
# 预先生成灰度图(很多预处理都需要)
|
||||
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
|
||||
# 获取该策略对应的预处理步骤列表
|
||||
preprocessing_steps = QRCodeProcessor.STRATEGY_MAP[strategy]
|
||||
|
||||
# 依次应用每种预处理并尝试解码
|
||||
for idx, step_key in enumerate(preprocessing_steps):
|
||||
strategy_name, preprocess_func = QRCodeProcessor.PREPROCESSING_STRATEGIES[step_key]
|
||||
|
||||
try:
|
||||
# 按需处理图像
|
||||
processed_img = preprocess_func(img, gray)
|
||||
|
||||
# 立即尝试解码
|
||||
result = QRCodeProcessor.decode(processed_img)
|
||||
|
||||
if result:
|
||||
# 解码成功,立即返回
|
||||
return {
|
||||
"success": True,
|
||||
"content": result,
|
||||
"strategy_used": f"opencv_{strategy_name}",
|
||||
"preprocessing_index": idx,
|
||||
"total_attempts": idx + 1
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
# 某个预处理步骤失败,继续尝试下一个
|
||||
continue
|
||||
|
||||
# 所有预处理方法都失败
|
||||
return {
|
||||
"success": False,
|
||||
"content": None,
|
||||
"message": f"未检测到二维码或二维码损坏(已尝试 {len(preprocessing_steps)} 种预处理)",
|
||||
"total_attempts": len(preprocessing_steps)
|
||||
}
|
||||
|
||||
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        """Ad-hoc manual check: decode a local QR image with the 'enhanced' strategy."""
        decoded = await QRCodeProcessor.parse(
            "F:/Project/urbanLifeline/docs/qrcode.png", "enhanced"
        )
        print(decoded)

    asyncio.run(_demo())
|
||||
@@ -1,201 +0,0 @@
|
||||
"""二维码服务层 - 提供统一的业务逻辑接口"""
|
||||
import base64
|
||||
from typing import Optional
|
||||
|
||||
from .QrCode import QRCodeProcessor
|
||||
|
||||
|
||||
class QrCodeService:
    """QR code service - business-logic layer.

    Thin validation/error-shaping wrapper around ``QRCodeProcessor``: every
    public method returns a plain dict carrying a ``success``/``valid`` flag
    instead of raising, so callers (e.g. API handlers) can serialize the
    result directly.
    """

    # Preprocessing strategies accepted by the parse APIs (single source of
    # truth instead of a literal list repeated inside each method).
    _VALID_STRATEGIES = ("basic", "auto", "enhanced", "all")

    def __init__(self):
        """Initialize the service with its underlying processor."""
        self.processor = QRCodeProcessor()

    async def generate_qrcode(
        self,
        content: str,
        size: int = 300,
        error_correction: str = "H"
    ) -> dict:
        """Generate a QR code image.

        Args:
            content: payload to encode; must be non-empty.
            size: image size in pixels, 100-2000.
            error_correction: error-correction level, one of L/M/Q/H.

        Returns:
            {
                "success": bool,
                "image": str,             # base64-encoded image (success only)
                "content": str,
                "size": int,
                "error_correction": str,
                "error": str              # failure only
            }
        """
        try:
            # Fail fast with a structured error instead of raising.
            if not content:
                return {
                    "success": False,
                    "error": "内容不能为空"
                }

            if size < 100 or size > 2000:
                return {
                    "success": False,
                    "error": "尺寸必须在100-2000之间"
                }

            if error_correction not in ["L", "M", "Q", "H"]:
                return {
                    "success": False,
                    "error": "纠错级别必须是 L/M/Q/H 之一"
                }

            # Delegate the actual rendering to the processor.
            img_base64 = self.processor.generate(
                content=content,
                size=size,
                error_correction=error_correction
            )

            return {
                "success": True,
                "image": img_base64,
                "content": content,
                "size": size,
                "error_correction": error_correction
            }

        except Exception as e:
            # Convert unexpected processor failures into a structured error.
            return {
                "success": False,
                "error": f"生成失败: {str(e)}"
            }

    async def parse_qrcode(
        self,
        image_source: str,
        strategy: str = "auto"
    ) -> dict:
        """Decode a QR code from an image.

        Args:
            image_source: image location (URL / base64 data URI / local path).
            strategy: preprocessing strategy (basic/auto/enhanced/all).

        Returns:
            {
                "success": bool,
                "content": str or None,
                "strategy_used": str,     # success only
                "total_attempts": int,
                "error": str              # failure only
            }
        """
        try:
            if not image_source:
                return {
                    "success": False,
                    "error": "图片来源不能为空"
                }

            if strategy not in self._VALID_STRATEGIES:
                return {
                    "success": False,
                    "error": "策略必须是 basic/auto/enhanced/all 之一"
                }

            result = await self.processor.parse(image_source, strategy)

            if result["success"]:
                return result

            # Normalize the processor's "message" field to this layer's "error".
            return {
                "success": False,
                "content": None,
                "error": result.get("message", "解析失败"),
                "total_attempts": result.get("total_attempts", 0)
            }

        except Exception as e:
            return {
                "success": False,
                "content": None,
                "error": f"解析失败: {str(e)}"
            }

    async def parse_qrcode_from_file(
        self,
        file_content: bytes,
        file_type: str = "png",
        strategy: str = "auto"
    ) -> dict:
        """Decode a QR code from raw file bytes.

        Args:
            file_content: binary content of the uploaded image file.
            file_type: image type used in the data URI (png/jpg/jpeg/...).
            strategy: preprocessing strategy.

        Returns:
            Same shape as :meth:`parse_qrcode`.
        """
        try:
            # Wrap the bytes in a data URI so parse_qrcode can handle them.
            img_base64 = base64.b64encode(file_content).decode()
            image_source = f"data:image/{file_type};base64,{img_base64}"

            return await self.parse_qrcode(image_source, strategy)

        except Exception as e:
            return {
                "success": False,
                "content": None,
                "error": f"文件解析失败: {str(e)}"
            }

    def validate_qrcode_content(self, content: str, max_length: int = 2953) -> dict:
        """Check whether *content* fits into a QR code.

        Args:
            content: candidate payload.
            max_length: maximum UTF-8 byte length (default 2953: version 40
                with error-correction level L).

        Returns:
            {
                "valid": bool,
                "length": int,            # UTF-8 byte length (0 when empty)
                "error": str              # invalid only
            }
        """
        if not content:
            # Fix: include "length" here as well, as the documented contract
            # promises a length field in every result.
            return {
                "valid": False,
                "length": 0,
                "error": "内容不能为空"
            }

        length = len(content.encode("utf-8"))

        if length > max_length:
            return {
                "valid": False,
                "length": length,
                "error": f"内容过长,当前{length}字节,最大支持{max_length}字节"
            }

        return {
            "valid": True,
            "length": length
        }
|
||||
@@ -1,4 +0,0 @@
|
||||
"""二维码服务模块"""
|
||||
from .QrCodeService import QrCodeService
|
||||
|
||||
__all__ = ["QrCodeService"]
|
||||
@@ -1,324 +0,0 @@
|
||||
"""二维码服务测试脚本
|
||||
|
||||
使用方法:
|
||||
python test_qrcode.py
|
||||
|
||||
测试内容:
|
||||
1. 生成二维码
|
||||
2. 解析生成的二维码
|
||||
3. 测试不同的预处理策略
|
||||
4. 测试错误处理
|
||||
"""
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# 添加项目根目录到Python路径
|
||||
project_root = Path(__file__).parent.parent.parent.parent
|
||||
sys.path.insert(0, str(project_root))
|
||||
|
||||
from app.services.workcase.qrcode import QrCodeService
|
||||
|
||||
|
||||
class QRCodeTester:
    """QR code service test suite.

    Exercises QrCodeService end to end (generate, parse, validate,
    round-trip integration, error handling) and prints a human-readable
    report. Results accumulate in ``self.test_results`` as
    ``(test_name, passed)`` pairs for the final summary.
    """

    def __init__(self):
        # Service under test.
        self.service = QrCodeService()
        # Accumulated (test_name, passed) pairs, consumed by print_summary().
        self.test_results = []

    def print_header(self, title: str):
        """Print a banner-style header for a test section."""
        print("\n" + "=" * 60)
        print(f" {title}")
        print("=" * 60)

    def print_result(self, test_name: str, success: bool, details: str = ""):
        """Print one test outcome and record it for the summary."""
        status = "✓ 通过" if success else "✗ 失败"
        print(f"\n{status} - {test_name}")
        if details:
            print(f" 详情: {details}")
        self.test_results.append((test_name, success))

    async def test_generate_qrcode(self):
        """Generation tests: happy path, all EC levels, input validation."""
        self.print_header("测试1: 生成二维码")

        # 1.1 basic generation
        result = await self.service.generate_qrcode(
            content="https://github.com",
            size=300,
            error_correction="H"
        )

        if result["success"] and "image" in result:
            self.print_result(
                "1.1 基本生成",
                True,
                f"内容长度: {len(result['image'])} 字符"
            )
            # Keep the generated image so the parse tests can round-trip it.
            self.generated_image = result["image"]
        else:
            self.print_result("1.1 基本生成", False, result.get("error", "未知错误"))
            self.generated_image = None

        # 1.2 every supported error-correction level
        for level in ["L", "M", "Q", "H"]:
            result = await self.service.generate_qrcode(
                content="测试内容",
                size=200,
                error_correction=level
            )
            self.print_result(
                f"1.2 纠错级别 {level}",
                result["success"],
                f"图片大小: {result.get('size', 'N/A')}"
            )

        # 1.3 parameter validation: empty content must be rejected
        result = await self.service.generate_qrcode(
            content="",
            size=300,
            error_correction="H"
        )
        self.print_result(
            "1.3 空内容验证",
            not result["success"],
            result.get("error", "")
        )

        # 1.4 invalid size must be rejected
        result = await self.service.generate_qrcode(
            content="test",
            size=50,  # below the 100px minimum
            error_correction="H"
        )
        self.print_result(
            "1.4 无效尺寸验证",
            not result["success"],
            result.get("error", "")
        )

    async def test_parse_qrcode(self):
        """Parse tests: round-trip decode, strategies, input validation."""
        self.print_header("测试2: 解析二维码")

        # Parsing needs the image produced by test_generate_qrcode.
        if not self.generated_image:
            print("⚠ 跳过解析测试(没有生成的图片)")
            return

        # 2.1 decode the QR code we just generated
        result = await self.service.parse_qrcode(
            image_source=self.generated_image,
            strategy="auto"
        )

        if result["success"]:
            self.print_result(
                "2.1 解析生成的二维码",
                True,
                f"内容: {result['content']}, 尝试次数: {result.get('total_attempts', 0)}"
            )
        else:
            self.print_result(
                "2.1 解析生成的二维码",
                False,
                result.get("error", "未知错误")
            )

        # 2.2 each preprocessing strategy
        for strategy in ["basic", "auto", "enhanced"]:
            result = await self.service.parse_qrcode(
                image_source=self.generated_image,
                strategy=strategy
            )
            self.print_result(
                f"2.2 策略 {strategy}",
                result["success"],
                f"尝试次数: {result.get('total_attempts', 0)}"
            )

        # 2.3 empty image source must be rejected
        result = await self.service.parse_qrcode(
            image_source="",
            strategy="auto"
        )
        self.print_result(
            "2.3 空图片源验证",
            not result["success"],
            result.get("error", "")
        )

    async def test_validate_content(self):
        """Content-validation tests: normal, empty, oversized, multi-byte."""
        self.print_header("测试3: 内容验证")

        # 3.1 normal content
        result = self.service.validate_qrcode_content("https://example.com")
        self.print_result(
            "3.1 正常内容",
            result["valid"],
            f"长度: {result.get('length', 0)} 字节"
        )

        # 3.2 empty content
        result = self.service.validate_qrcode_content("")
        self.print_result(
            "3.2 空内容",
            not result["valid"],
            result.get("error", "")
        )

        # 3.3 oversized content (3000 bytes > default 2953 limit)
        long_content = "a" * 3000
        result = self.service.validate_qrcode_content(long_content)
        self.print_result(
            "3.3 超长内容",
            not result["valid"],
            result.get("error", "")
        )

        # 3.4 multi-byte (Chinese) content — length is counted in UTF-8 bytes
        result = self.service.validate_qrcode_content("这是一段中文测试内容")
        self.print_result(
            "3.4 中文内容",
            result["valid"],
            f"长度: {result.get('length', 0)} 字节"
        )

    async def test_integration(self):
        """Integration: generate -> parse -> compare payloads byte-for-byte."""
        self.print_header("测试4: 集成测试")

        test_contents = [
            "https://github.com",
            "简单文本",
            "{'key': 'value', 'number': 123}",  # JSON-like text
            "mailto:test@example.com",
            "tel:+86-123-4567-8900"
        ]

        for idx, content in enumerate(test_contents, 1):
            # generate
            gen_result = await self.service.generate_qrcode(
                content=content,
                size=300,
                error_correction="H"
            )

            if not gen_result["success"]:
                self.print_result(
                    f"4.{idx} 集成测试: {content[:20]}...",
                    False,
                    "生成失败"
                )
                continue

            # parse the image we just produced
            parse_result = await self.service.parse_qrcode(
                image_source=gen_result["image"],
                strategy="auto"
            )

            # the decoded payload must round-trip exactly
            success = (
                parse_result["success"] and
                parse_result.get("content") == content
            )

            self.print_result(
                f"4.{idx} {content[:30]}",
                success,
                f"原始: {content[:20]}... | 解析: {parse_result.get('content', '')[:20]}..."
            )

    async def test_error_handling(self):
        """Error handling: unreachable URL, bad base64, bad EC level."""
        self.print_header("测试5: 错误处理")

        # 5.1 unreachable image URL
        result = await self.service.parse_qrcode(
            image_source="https://invalid-url-that-does-not-exist.com/image.png",
            strategy="auto"
        )
        self.print_result(
            "5.1 无效URL处理",
            not result["success"],
            result.get("error", "")[:50]
        )

        # 5.2 malformed base64 payload
        result = await self.service.parse_qrcode(
            image_source="data:image/png;base64,invalid_base64",
            strategy="auto"
        )
        self.print_result(
            "5.2 无效base64处理",
            not result["success"],
            result.get("error", "")[:50]
        )

        # 5.3 invalid error-correction level
        result = await self.service.generate_qrcode(
            content="test",
            size=300,
            error_correction="X"  # not one of L/M/Q/H
        )
        self.print_result(
            "5.3 无效纠错级别",
            not result["success"],
            result.get("error", "")
        )

    def print_summary(self):
        """Print the aggregate pass/fail summary and list the failures."""
        self.print_header("测试总结")

        total = len(self.test_results)
        passed = sum(1 for _, success in self.test_results if success)
        failed = total - passed

        print(f"\n总测试数: {total}")
        print(f"通过: {passed} (✓)")
        print(f"失败: {failed} (✗)")
        print(f"成功率: {passed/total*100:.1f}%\n")

        if failed > 0:
            print("失败的测试:")
            for name, success in self.test_results:
                if not success:
                    print(f" - {name}")

    async def run_all_tests(self):
        """Run every test group in order, then print the summary."""
        print("\n" + "=" * 60)
        print(" 二维码服务测试套件")
        print("=" * 60)

        try:
            await self.test_generate_qrcode()
            await self.test_parse_qrcode()
            await self.test_validate_content()
            await self.test_integration()
            await self.test_error_handling()

            self.print_summary()

        except Exception as e:
            # A crash in any group aborts the run; show the traceback.
            print(f"\n❌ 测试过程中出现错误: {str(e)}")
            import traceback
            traceback.print_exc()
|
||||
|
||||
|
||||
async def main():
    """Entry point: build the tester and execute the full suite."""
    runner = QRCodeTester()
    await runner.run_all_tests()


if __name__ == "__main__":
    # Execute the whole test suite when invoked as a script.
    asyncio.run(main())
|
||||
@@ -1 +0,0 @@
|
||||
# Utils模块
|
||||
@@ -1,22 +0,0 @@
|
||||
"""工具函数"""
|
||||
from typing import Any, Dict
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
def format_datetime(dt: datetime, fmt: str = "%Y-%m-%d %H:%M:%S") -> str:
    """Render *dt* as text using the strftime pattern *fmt*.

    datetime.__format__ delegates to strftime, so format() is equivalent
    to calling dt.strftime(fmt) directly.
    """
    return format(dt, fmt)
|
||||
|
||||
|
||||
def safe_json_loads(json_str: str, default: Any = None) -> Any:
    """Parse a JSON document, falling back to *default* on bad input.

    Malformed JSON and non-string inputs both yield *default* instead of
    propagating the exception.
    """
    try:
        parsed = json.loads(json_str)
    except (json.JSONDecodeError, TypeError):
        return default
    return parsed
|
||||
|
||||
|
||||
def dict_filter_none(data: Dict[str, Any]) -> Dict[str, Any]:
    """Return a shallow copy of *data* without the keys mapped to None.

    Falsy-but-present values (0, "", [], False) are kept; only None is
    dropped.
    """
    kept: Dict[str, Any] = {}
    for key, value in data.items():
        if value is not None:
            kept[key] = value
    return kept
|
||||
@@ -1,78 +0,0 @@
|
||||
{
|
||||
"openapi": "3.1.0",
|
||||
"info": {
|
||||
"title": "DifyPlugin",
|
||||
"description": "Dify插件服务API",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"servers": [
|
||||
{
|
||||
"url": "http://192.168.0.253:8380/api/v1",
|
||||
"description": "开发服务器"
|
||||
}
|
||||
],
|
||||
"paths": {
|
||||
"/test/hello/world": {
|
||||
"get": {
|
||||
"operationId": "HelloWord",
|
||||
"summary": "Hello World",
|
||||
"description": "测试接口连通性",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "成功响应",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/ResultDomain"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/test/hello/ping": {
|
||||
"get": {
|
||||
"operationId": "Ping",
|
||||
"summary": "Ping测试",
|
||||
"description": "测试服务是否正常运行",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "成功响应",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/ResultDomain"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"components": {
|
||||
"schemas": {
|
||||
"ResultDomain": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"description": "状态码"
|
||||
},
|
||||
"success": {
|
||||
"type": "boolean",
|
||||
"description": "是否成功"
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"description": "返回消息"
|
||||
},
|
||||
"data": {
|
||||
"description": "返回数据"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,372 +0,0 @@
|
||||
# 二维码服务 README
|
||||
|
||||
## 功能概述
|
||||
|
||||
基于 **OpenCV QRCodeDetector** 的高性能二维码生成和解析服务,配合多种图像预处理策略,确保高识别率。
|
||||
|
||||
### 核心特性
|
||||
|
||||
✅ **纯 OpenCV 引擎** - 无需额外依赖,跨平台稳定
|
||||
✅ **8种预处理策略** - CLAHE、二值化、去噪、锐化等
|
||||
✅ **多种输入方式** - URL、base64、文件上传
|
||||
✅ **智能容错** - 自动尝试多种预处理策略直到成功
|
||||
✅ **企业级稳定性** - Windows/Linux 完美支持,无 DLL 问题
|
||||
|
||||
---
|
||||
|
||||
## 快速开始
|
||||
|
||||
### 1. 安装依赖
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
所有依赖都是标准库,无需额外配置!
|
||||
|
||||
### 2. 测试服务
|
||||
|
||||
```bash
|
||||
# 运行测试脚本
|
||||
python app/services/workcase/qrcode/test_qrcode.py
|
||||
```
|
||||
|
||||
### 3. 启动服务
|
||||
|
||||
```bash
|
||||
uvicorn app.main:app --reload
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## API 使用
|
||||
|
||||
### 生成二维码
|
||||
|
||||
**请求:**
|
||||
```http
|
||||
POST /api/workcase/qrcode/generate
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"content": "https://github.com",
|
||||
"size": 300,
|
||||
"error_correction": "H"
|
||||
}
|
||||
```
|
||||
|
||||
**响应:**
|
||||
```json
|
||||
{
|
||||
"code": 200,
|
||||
"message": "生成成功",
|
||||
"data": {
|
||||
"success": true,
|
||||
"image": "data:image/png;base64,iVBORw0KG...",
|
||||
"content": "https://github.com",
|
||||
"size": 300,
|
||||
"error_correction": "H"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**参数说明:**
|
||||
- `content`: 二维码内容(必填)
|
||||
- `size`: 图片大小,100-2000像素(默认 300)
|
||||
- `error_correction`: 纠错级别
|
||||
- `L`: 7% 容错
|
||||
- `M`: 15% 容错
|
||||
- `Q`: 25% 容错
|
||||
- `H`: 30% 容错(默认,推荐)
|
||||
|
||||
---
|
||||
|
||||
### 解析二维码(URL/base64)
|
||||
|
||||
**请求:**
|
||||
```http
|
||||
POST /api/workcase/qrcode/parse
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"image_source": "https://example.com/qrcode.png",
|
||||
"strategy": "auto"
|
||||
}
|
||||
```
|
||||
|
||||
**响应:**
|
||||
```json
|
||||
{
|
||||
"code": 200,
|
||||
"message": "解析成功",
|
||||
"data": {
|
||||
"success": true,
|
||||
"content": "https://github.com",
|
||||
"strategy_used": "opencv_灰度图",
|
||||
"preprocessing_index": 1,
|
||||
"total_attempts": 2
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**参数说明:**
|
||||
- `image_source`: 图片来源(必填)
|
||||
- URL: `https://...`
|
||||
- base64: `data:image/png;base64,...`
|
||||
- 本地路径: `/path/to/image.png`
|
||||
- `strategy`: 预处理策略
|
||||
- `basic`: 基础模式(2种)- 快速
|
||||
- `auto`: 自动模式(8种)- **推荐**
|
||||
- `enhanced`: 增强模式(8种)
|
||||
- `all`: 全部模式(11种)- 包括多尺度
|
||||
|
||||
---
|
||||
|
||||
### 解析二维码(文件上传)
|
||||
|
||||
**请求:**
|
||||
```http
|
||||
POST /api/workcase/qrcode/parse-file
|
||||
Content-Type: multipart/form-data
|
||||
|
||||
file: [二维码图片文件]
|
||||
strategy: auto
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 验证二维码内容
|
||||
|
||||
**请求:**
|
||||
```http
|
||||
POST /api/workcase/qrcode/validate
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"content": "要验证的内容",
|
||||
"max_length": 2953
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 预处理策略详解
|
||||
|
||||
### basic 模式(2种)
|
||||
1. **原图**
|
||||
2. **灰度图**
|
||||
|
||||
**适用场景:** 清晰二维码,追求速度
|
||||
|
||||
**性能:** 最快,< 100ms
|
||||
|
||||
### auto 模式(8种)⭐ 推荐
|
||||
1. **原图**
|
||||
2. **灰度图**
|
||||
3. **CLAHE 对比度增强** - 光照不均
|
||||
4. **自适应二值化** - 复杂背景
|
||||
5. **Otsu 二值化** - 自动阈值
|
||||
6. **去噪 + 二值化** - 模糊图片
|
||||
7. **锐化处理** - 增强边缘
|
||||
8. **形态学处理** - 修复断裂
|
||||
|
||||
**适用场景:**
|
||||
- 光照不均
|
||||
- 模糊/噪声
|
||||
- 低对比度
|
||||
- 轻微损坏
|
||||
|
||||
**性能:** 平衡,200-500ms
|
||||
|
||||
### all 模式(11种)
|
||||
在 auto 基础上增加多尺度:
|
||||
9. **0.5x 缩放**
|
||||
10. **1.5x 缩放**
|
||||
11. **2.0x 缩放**
|
||||
|
||||
**适用场景:**
|
||||
- 分辨率问题
|
||||
- 尺寸过小/过大
|
||||
|
||||
**性能:** 较慢,500-1000ms
|
||||
|
||||
---
|
||||
|
||||
## 服务层使用(Python)
|
||||
|
||||
```python
|
||||
from app.services.workcase.qrcode import QrCodeService
|
||||
|
||||
# 初始化服务
|
||||
service = QrCodeService()
|
||||
|
||||
# 生成二维码
|
||||
result = await service.generate_qrcode(
|
||||
content="https://github.com",
|
||||
size=300,
|
||||
error_correction="H"
|
||||
)
|
||||
print(result["image"]) # base64 图片
|
||||
|
||||
# 解析二维码
|
||||
result = await service.parse_qrcode(
|
||||
image_source="https://example.com/qr.png",
|
||||
strategy="auto"
|
||||
)
|
||||
print(result["content"]) # 解析结果
|
||||
|
||||
# 验证内容
|
||||
result = service.validate_qrcode_content("测试内容")
|
||||
print(result["valid"]) # True/False
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 性能优化建议
|
||||
|
||||
### 提高识别速度
|
||||
1. 使用 `basic` 策略(清晰图片场景)
|
||||
2. 调整图片大小到 300-500px
|
||||
3. 预先转换为灰度图
|
||||
|
||||
### 提高识别率
|
||||
1. 使用 `auto` 或 `all` 策略
|
||||
2. 确保图片分辨率足够(二维码 ≥ 100x100px)
|
||||
3. 提高二维码纠错级别(使用 H 级)
|
||||
|
||||
### 批量处理
|
||||
```python
|
||||
import asyncio
|
||||
|
||||
async def batch_parse(image_sources):
|
||||
service = QrCodeService()
|
||||
tasks = [
|
||||
service.parse_qrcode(src, strategy="basic")
|
||||
for src in image_sources
|
||||
]
|
||||
return await asyncio.gather(*tasks)
|
||||
|
||||
# 使用
|
||||
results = await batch_parse([
|
||||
"https://example.com/qr1.png",
|
||||
"https://example.com/qr2.png",
|
||||
"https://example.com/qr3.png"
|
||||
])
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 为什么选择纯 OpenCV 方案?
|
||||
|
||||
### 技术优势
|
||||
|
||||
| 特性 | OpenCV | pyzbar | 说明 |
|
||||
|------|--------|---------|------|
|
||||
| **跨平台** | ⭐⭐⭐⭐⭐ | ⭐⭐⭐ | OpenCV 无需额外配置 |
|
||||
| **Windows 友好** | ⭐⭐⭐⭐⭐ | ⭐⭐ | pyzbar 需要手动安装 DLL |
|
||||
| **安装难度** | ⭐⭐⭐⭐⭐ | ⭐⭐ | pip install 即可 vs 需要 libzbar |
|
||||
| **识别率(清晰)** | ⭐⭐⭐⭐⭐ | ⭐⭐⭐⭐⭐ | 相当 |
|
||||
| **识别率(模糊)** | ⭐⭐⭐⭐ | ⭐⭐⭐⭐⭐ | 配合预处理差距不大 |
|
||||
| **稳定性** | ⭐⭐⭐⭐⭐ | ⭐⭐⭐ | OpenCV 更成熟 |
|
||||
| **维护性** | ⭐⭐⭐⭐⭐ | ⭐⭐⭐ | 依赖少,问题少 |
|
||||
|
||||
### 工程实践建议
|
||||
|
||||
✅ **推荐使用 OpenCV**,因为:
|
||||
1. **无依赖地狱** - 不用担心 Windows DLL、Linux .so 问题
|
||||
2. **企业级稳定** - OpenCV 由 Intel 支持,久经考验
|
||||
3. **预处理补偿** - 8种预处理策略让识别率不输 pyzbar
|
||||
4. **运维友好** - CI/CD、Docker 部署零配置
|
||||
5. **团队协作** - 新成员 5 分钟即可搭建环境
|
||||
|
||||
❌ **不推荐 pyzbar**,除非:
|
||||
1. 你只在 Linux 服务器部署
|
||||
2. 需要识别多种条码格式(EAN、Code128 等)
|
||||
3. 有专人负责处理依赖问题
|
||||
|
||||
---
|
||||
|
||||
## 常见问题
|
||||
|
||||
### Q1: 识别率怎么样?
|
||||
|
||||
**答:** 配合预处理策略,识别率可达 95%+
|
||||
- 清晰二维码:99%+
|
||||
- 轻度模糊:95%+
|
||||
- 中度模糊:85%+
|
||||
- 重度损坏:60%+
|
||||
|
||||
### Q2: 比 pyzbar 差多少?
|
||||
|
||||
**答:** 清晰图片无差异,模糊图片差距 < 5%
|
||||
- 对于大部分应用场景,差异可忽略
|
||||
- 配合 `all` 策略可进一步缩小差距
|
||||
|
||||
### Q3: 解析速度如何?
|
||||
|
||||
**答:**
|
||||
- basic: 50-100ms
|
||||
- auto: 200-500ms
|
||||
- all: 500-1000ms
|
||||
|
||||
根据场景选择合适策略即可。
|
||||
|
||||
### Q4: 支持哪些图片格式?
|
||||
|
||||
**答:** 支持所有 OpenCV 支持的格式
|
||||
- PNG、JPG、JPEG、BMP、WebP、TIFF 等
|
||||
|
||||
### Q5: 如何提高识别成功率?
|
||||
|
||||
**答:**
|
||||
1. 生成时使用 H 级纠错(30% 容错)
|
||||
2. 解析时使用 `auto` 或 `all` 策略
|
||||
3. 确保二维码尺寸 ≥ 100x100px
|
||||
4. 避免过度压缩图片
|
||||
|
||||
---
|
||||
|
||||
## 项目结构
|
||||
|
||||
```
|
||||
app/services/workcase/qrcode/
|
||||
├── __init__.py # 模块导出
|
||||
├── QrCode.py # 核心处理器(OpenCV QRCodeDetector)
|
||||
├── QrCodeService.py # 业务逻辑层
|
||||
└── test_qrcode.py # 测试脚本
|
||||
|
||||
docs/
|
||||
└── qrcode_service_readme.md # 本文档
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 技术栈
|
||||
|
||||
- **qrcode** - 二维码生成
|
||||
- **Pillow (PIL)** - 图像处理
|
||||
- **OpenCV** - 图像预处理和解码
|
||||
- **httpx** - 异步HTTP客户端
|
||||
- **numpy** - 数组处理
|
||||
|
||||
---
|
||||
|
||||
## 许可证
|
||||
|
||||
MIT License
|
||||
|
||||
---
|
||||
|
||||
## 更新日志
|
||||
|
||||
### v1.1.0 (2025-12-30)
|
||||
- 🔥 **完全移除 pyzbar 依赖**
|
||||
- ✨ 采用纯 OpenCV QRCodeDetector 方案
|
||||
- ⚡ 优化预处理策略命名
|
||||
- 📝 简化文档和安装流程
|
||||
- 🎯 企业级稳定性提升
|
||||
|
||||
### v1.0.0 (2025-12-30)
|
||||
- ✨ 初始版本发布
|
||||
- ✨ 双引擎解码支持(已废弃)
|
||||
- ✨ 8种预处理策略
|
||||
@@ -1,14 +0,0 @@
|
||||
fastapi
|
||||
pydantic
|
||||
pydantic-settings
|
||||
python-dotenv
|
||||
redis
|
||||
anyio>=4.5
|
||||
uvicorn[standard]>=0.31.1
|
||||
|
||||
# 二维码处理
|
||||
qrcode>=7.4.2
|
||||
pillow>=10.0.0
|
||||
opencv-python-headless>=4.8.0
|
||||
numpy>=1.24.0
|
||||
httpx>=0.27.0
|
||||
@@ -1,12 +0,0 @@
|
||||
import uvicorn
from app.main import app
from app.config import settings

# Direct entry point: serve the FastAPI app with a single worker and no
# auto-reload; the listening port comes from application settings.
if __name__ == "__main__":
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=settings.PORT,
        reload=False,
        workers=1
    )
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/usr/bin/env python3
"""Smoke-check the QR code service (pure OpenCV pipeline).

Imports the service, generates a QR code, then parses it back and prints
each step's outcome. Exits non-zero on any failure.
"""
import sys
import asyncio

try:
    from app.services.workcase.qrcode import QrCodeService
    print("✓ 二维码服务导入成功")

    async def test():
        """Generate a QR code, then decode it and report each step."""
        service = QrCodeService()

        # Generation step
        result = await service.generate_qrcode("https://github.com", size=300)
        if result["success"]:
            print("✓ 二维码生成成功")

            # Round-trip: decode the image we just produced
            parse_result = await service.parse_qrcode(result["image"], strategy="auto")
            if parse_result["success"]:
                print(f"✓ 二维码解析成功: {parse_result['content']}")
                print(f" 使用策略: {parse_result['strategy_used']}")
                print(f" 尝试次数: {parse_result['total_attempts']}")
            else:
                print(f"✗ 解析失败: {parse_result.get('error', '未知错误')}")
        else:
            print(f"✗ 生成失败: {result.get('error', '未知错误')}")

    asyncio.run(test())
    print("\n✅ 所有测试通过 - 纯 OpenCV 方案运行正常")

except Exception as e:
    # Any import or runtime failure ends the smoke check with exit code 1.
    print(f"✗ 错误: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)
|
||||
42
docker/.env.example
Normal file
42
docker/.env.example
Normal file
@@ -0,0 +1,42 @@
|
||||
# ================================================
|
||||
# Urban Lifeline - Docker 环境变量配置
|
||||
# 复制此文件为 .env 并修改配置
|
||||
# ================================================
|
||||
|
||||
# 镜像版本
|
||||
IMAGE_VERSION=latest
|
||||
|
||||
# 数据存储根目录
|
||||
DATA_ROOT=./volumes
|
||||
LOG_ROOT=./volumes/logs
|
||||
|
||||
# ====================== MySQL 配置 ======================
|
||||
MYSQL_HOST=host.docker.internal
|
||||
MYSQL_PORT=3306
|
||||
MYSQL_USER=root
|
||||
MYSQL_PASSWORD=123456
|
||||
|
||||
# ====================== Nacos 配置 ======================
|
||||
NACOS_SERVER_ADDR=nacos:8848
|
||||
NACOS_NAMESPACE=
|
||||
NACOS_AUTH_TOKEN=ZlRkR2ZxR3BvZ1F0a3JxY2V6RUx2cUh1Rkx6V1ZQbE9kUVd1R1VOcWFFS2t3dG5hS0E9PQ==
|
||||
|
||||
# ====================== MinIO 配置 ======================
|
||||
MINIO_ROOT_USER=minioadmin
|
||||
MINIO_ROOT_PASSWORD=minioadmin123
|
||||
MINIO_ENDPOINT=http://minio:9000
|
||||
|
||||
# ====================== Jitsi 配置 ======================
|
||||
JITSI_PUBLIC_URL=https://org.xyzh.yslg.jitsi
|
||||
JVB_HOST_ADDRESS=192.168.0.253
|
||||
JWT_APP_ID=urbanLifeline
|
||||
JWT_APP_SECRET=urbanLifeline-jitsi-secret-key-2025-production-safe-hs256
|
||||
|
||||
# ====================== Dify 配置 ======================
|
||||
DIFY_API_URL=http://dify-api:5001
|
||||
|
||||
# ====================== Spring 配置 ======================
|
||||
SPRING_PROFILES_ACTIVE=prod
|
||||
|
||||
# ====================== API 配置 ======================
|
||||
API_BASE_URL=http://gateway:8080
|
||||
318
docker/Makefile
Normal file
318
docker/Makefile
Normal file
@@ -0,0 +1,318 @@
|
||||
# ================================================
|
||||
# Urban Lifeline - Docker 构建管理 Makefile
|
||||
# ================================================
|
||||
# 使用方法:
|
||||
# make help - 显示帮助信息
|
||||
# make build-all - 构建所有镜像
|
||||
# make build-serv - 构建所有后端服务镜像
|
||||
# make build-gateway - 构建单个服务镜像
|
||||
# make save-all - 导出所有镜像
|
||||
# make up - 启动所有服务
|
||||
# make down - 停止所有服务
|
||||
# ================================================
|
||||
|
||||
# 项目根目录(docker 目录的上级)
|
||||
PROJECT_ROOT := $(shell cd .. && pwd)
|
||||
DOCKER_DIR := $(shell pwd)
|
||||
|
||||
# 镜像版本
|
||||
IMAGE_VERSION ?= $(shell date +%Y%m%d_%H%M%S)
|
||||
IMAGE_REGISTRY ?=
|
||||
|
||||
# 服务列表
|
||||
SERV_SERVICES := gateway system auth file ai workcase
|
||||
WEB_SERVICES := platform workcase-web
|
||||
|
||||
# 服务端口映射
|
||||
PORT_gateway := 8080
|
||||
PORT_system := 8082
|
||||
PORT_auth := 8081
|
||||
PORT_file := 8084
|
||||
PORT_ai := 8090
|
||||
PORT_workcase := 8088
|
||||
|
||||
# 输出目录
|
||||
BUILD_OUTPUT := $(DOCKER_DIR)/output
|
||||
|
||||
# 颜色定义
|
||||
COLOR_RESET := \033[0m
|
||||
COLOR_GREEN := \033[0;32m
|
||||
COLOR_YELLOW := \033[0;33m
|
||||
COLOR_BLUE := \033[0;34m
|
||||
COLOR_RED := \033[0;31m
|
||||
|
||||
# ================================================
|
||||
# 帮助信息
|
||||
# ================================================
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "$(COLOR_BLUE)=============================================$(COLOR_RESET)"
|
||||
@echo "$(COLOR_BLUE) Urban Lifeline Docker 构建管理$(COLOR_RESET)"
|
||||
@echo "$(COLOR_BLUE)=============================================$(COLOR_RESET)"
|
||||
@echo ""
|
||||
@echo "$(COLOR_GREEN)构建命令:$(COLOR_RESET)"
|
||||
@echo " make build-base - 构建基础镜像"
|
||||
@echo " make build-all - 构建所有镜像"
|
||||
@echo " make build-serv - 构建所有后端服务镜像"
|
||||
@echo " make build-web - 构建前端镜像"
|
||||
@echo " make build-<service> - 构建单个服务 (gateway/system/auth/file/ai/workcase)"
|
||||
@echo ""
|
||||
@echo "$(COLOR_GREEN)编译命令:$(COLOR_RESET)"
|
||||
@echo " make compile-serv - 编译所有后端服务"
|
||||
@echo " make compile-<service> - 编译单个后端服务"
|
||||
@echo " make compile-web - 编译所有前端项目"
|
||||
@echo ""
|
||||
@echo "$(COLOR_GREEN)导出命令:$(COLOR_RESET)"
|
||||
@echo " make save-all - 导出所有镜像到 tar 文件"
|
||||
@echo " make save-serv - 导出所有后端镜像"
|
||||
@echo " make save-<service> - 导出单个服务镜像"
|
||||
@echo ""
|
||||
@echo "$(COLOR_GREEN)运行命令:$(COLOR_RESET)"
|
||||
@echo " make up - 启动所有服务"
|
||||
@echo " make up-infra - 启动基础设施 (nacos/minio/jitsi)"
|
||||
@echo " make up-serv - 启动后端服务"
|
||||
@echo " make up-web - 启动前端服务"
|
||||
@echo " make down - 停止所有服务"
|
||||
@echo " make logs - 查看所有日志"
|
||||
@echo " make status - 查看服务状态"
|
||||
@echo ""
|
||||
@echo "$(COLOR_GREEN)清理命令:$(COLOR_RESET)"
|
||||
@echo " make clean - 清理构建产物"
|
||||
@echo " make clean-images - 清理 Docker 镜像"
|
||||
@echo ""
|
||||
@echo "$(COLOR_YELLOW)当前版本: $(IMAGE_VERSION)$(COLOR_RESET)"
|
||||
@echo ""
|
||||
|
||||
# ================================================
|
||||
# 初始化
|
||||
# ================================================
|
||||
.PHONY: init
|
||||
init:
|
||||
@mkdir -p $(BUILD_OUTPUT)
|
||||
@mkdir -p $(DOCKER_DIR)/volumes/logs
|
||||
|
||||
# ================================================
|
||||
# 编译后端服务
|
||||
# ================================================
|
||||
.PHONY: compile-serv
|
||||
compile-serv:
|
||||
@echo "$(COLOR_YELLOW)编译所有后端服务...$(COLOR_RESET)"
|
||||
@cd $(PROJECT_ROOT)/urbanLifelineServ && mvn clean package -DskipTests
|
||||
@echo "$(COLOR_GREEN)✓ 后端服务编译完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: $(addprefix compile-,$(SERV_SERVICES))
|
||||
$(addprefix compile-,$(SERV_SERVICES)): compile-%:
|
||||
@echo "$(COLOR_YELLOW)编译 $* 服务...$(COLOR_RESET)"
|
||||
@cd $(PROJECT_ROOT)/urbanLifelineServ && mvn clean package -DskipTests -pl $* -am
|
||||
@echo "$(COLOR_GREEN)✓ $* 服务编译完成$(COLOR_RESET)"
|
||||
|
||||
# ================================================
|
||||
# 编译前端项目
|
||||
# ================================================
|
||||
.PHONY: compile-web
|
||||
compile-web:
|
||||
@echo "$(COLOR_YELLOW)编译前端项目...$(COLOR_RESET)"
|
||||
@cd $(PROJECT_ROOT)/urbanLifelineWeb && pnpm install && pnpm run build:all
|
||||
@echo "$(COLOR_GREEN)✓ 前端项目编译完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: compile-platform
|
||||
compile-platform:
|
||||
@echo "$(COLOR_YELLOW)编译 platform 项目...$(COLOR_RESET)"
|
||||
@cd $(PROJECT_ROOT)/urbanLifelineWeb/packages/platform && pnpm run build
|
||||
@echo "$(COLOR_GREEN)✓ platform 编译完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: compile-workcase
|
||||
compile-workcase-web:
|
||||
@echo "$(COLOR_YELLOW)编译 workcase 项目...$(COLOR_RESET)"
|
||||
@cd $(PROJECT_ROOT)/urbanLifelineWeb/packages/workcase && pnpm run build
|
||||
@echo "$(COLOR_GREEN)✓ workcase 编译完成$(COLOR_RESET)"
|
||||
|
||||
# ================================================
|
||||
# 构建 Docker 镜像
|
||||
# ================================================
|
||||
.PHONY: build-base
|
||||
build-base:
|
||||
@echo "$(COLOR_YELLOW)构建基础镜像...$(COLOR_RESET)"
|
||||
@cd $(PROJECT_ROOT) && docker build \
|
||||
-t urban-lifeline-base-serv:$(IMAGE_VERSION) \
|
||||
-t urban-lifeline-base-serv:latest \
|
||||
-f docker/urbanLifeline/serv/Dockerfile.base .
|
||||
@echo "$(COLOR_GREEN)✓ 基础镜像构建完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: build-all
|
||||
build-all: build-base build-serv build-web
|
||||
@echo "$(COLOR_GREEN)✓ 所有镜像构建完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: build-serv
|
||||
build-serv: $(addprefix build-,$(SERV_SERVICES))
|
||||
@echo "$(COLOR_GREEN)✓ 所有后端服务镜像构建完成$(COLOR_RESET)"
|
||||
|
||||
# Build the Docker image for a single backend service (build-gateway, build-system, ...).
# Fix: the old check used `[ ! -f ".../*.jar" ]` — globs are not expanded inside
# double quotes (and `test -f` cannot take multiple matches anyway), so the check
# always claimed the JAR was missing. `ls` expands the glob and its exit status
# tells us whether any JAR exists.
.PHONY: $(addprefix build-,$(SERV_SERVICES))
$(addprefix build-,$(SERV_SERVICES)): build-%:
	@echo "$(COLOR_YELLOW)构建 $* 服务镜像...$(COLOR_RESET)"
	@if ! ls $(PROJECT_ROOT)/urbanLifelineServ/$*/target/*.jar >/dev/null 2>&1; then \
		echo "$(COLOR_RED)✗ JAR 包不存在,请先执行 make compile-$*$(COLOR_RESET)"; \
		exit 1; \
	fi
	@cd $(PROJECT_ROOT) && docker build \
		--build-arg SERVICE_NAME=$* \
		--build-arg SERVICE_PORT=$(PORT_$*) \
		-t urban-lifeline-$*:$(IMAGE_VERSION) \
		-t urban-lifeline-$*:latest \
		-f docker/urbanLifeline/serv/Dockerfile.template .
	@echo "$(COLOR_GREEN)✓ $* 镜像构建完成: urban-lifeline-$*:$(IMAGE_VERSION)$(COLOR_RESET)"
|
||||
|
||||
.PHONY: build-web
|
||||
build-web: build-platform build-workcase-web
|
||||
@echo "$(COLOR_GREEN)✓ 所有前端镜像构建完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: build-platform
|
||||
build-platform:
|
||||
@echo "$(COLOR_YELLOW)构建 platform 镜像...$(COLOR_RESET)"
|
||||
@if [ ! -d "$(PROJECT_ROOT)/urbanLifelineWeb/packages/platform/dist" ]; then \
|
||||
echo "$(COLOR_RED)✗ platform dist 不存在,请先执行 make compile-web$(COLOR_RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@cd $(PROJECT_ROOT) && docker build \
|
||||
--build-arg WEB_NAME=platform \
|
||||
-t urban-lifeline-platform:$(IMAGE_VERSION) \
|
||||
-t urban-lifeline-platform:latest \
|
||||
-f docker/urbanLifeline/web/Dockerfile .
|
||||
@echo "$(COLOR_GREEN)✓ platform 镜像构建完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: build-workcase-web
|
||||
build-workcase-web:
|
||||
@echo "$(COLOR_YELLOW)构建 workcase-web 镜像...$(COLOR_RESET)"
|
||||
@if [ ! -d "$(PROJECT_ROOT)/urbanLifelineWeb/packages/workcase/dist" ]; then \
|
||||
echo "$(COLOR_RED)✗ workcase dist 不存在,请先执行 make compile-web$(COLOR_RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@cd $(PROJECT_ROOT) && docker build \
|
||||
--build-arg WEB_NAME=workcase \
|
||||
-t urban-lifeline-workcase-web:$(IMAGE_VERSION) \
|
||||
-t urban-lifeline-workcase-web:latest \
|
||||
-f docker/urbanLifeline/web/Dockerfile .
|
||||
@echo "$(COLOR_GREEN)✓ workcase-web 镜像构建完成$(COLOR_RESET)"
|
||||
|
||||
# ================================================
|
||||
# 导出镜像
|
||||
# ================================================
|
||||
.PHONY: save-all
|
||||
save-all: init save-serv save-web
|
||||
@echo "$(COLOR_GREEN)✓ 所有镜像导出完成$(COLOR_RESET)"
|
||||
@echo "输出目录: $(BUILD_OUTPUT)"
|
||||
@ls -lh $(BUILD_OUTPUT)/*.tar 2>/dev/null || true
|
||||
|
||||
.PHONY: save-serv
|
||||
save-serv: $(addprefix save-,$(SERV_SERVICES))
|
||||
@echo "$(COLOR_GREEN)✓ 所有后端镜像导出完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: $(addprefix save-,$(SERV_SERVICES))
|
||||
$(addprefix save-,$(SERV_SERVICES)): save-%: init
|
||||
@echo "$(COLOR_YELLOW)导出 $* 镜像...$(COLOR_RESET)"
|
||||
@docker save -o $(BUILD_OUTPUT)/urban-lifeline-$*_$(IMAGE_VERSION).tar \
|
||||
urban-lifeline-$*:$(IMAGE_VERSION)
|
||||
@echo "$(COLOR_GREEN)✓ $* 镜像已导出: $(BUILD_OUTPUT)/urban-lifeline-$*_$(IMAGE_VERSION).tar$(COLOR_RESET)"
|
||||
|
||||
.PHONY: save-web
|
||||
save-web: save-platform save-workcase-web
|
||||
@echo "$(COLOR_GREEN)✓ 所有前端镜像导出完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: save-platform
|
||||
save-platform: init
|
||||
@echo "$(COLOR_YELLOW)导出 platform 镜像...$(COLOR_RESET)"
|
||||
@docker save -o $(BUILD_OUTPUT)/urban-lifeline-platform_$(IMAGE_VERSION).tar \
|
||||
urban-lifeline-platform:$(IMAGE_VERSION)
|
||||
@echo "$(COLOR_GREEN)✓ platform 镜像已导出$(COLOR_RESET)"
|
||||
|
||||
.PHONY: save-workcase-web
|
||||
save-workcase-web: init
|
||||
@echo "$(COLOR_YELLOW)导出 workcase-web 镜像...$(COLOR_RESET)"
|
||||
@docker save -o $(BUILD_OUTPUT)/urban-lifeline-workcase-web_$(IMAGE_VERSION).tar \
|
||||
urban-lifeline-workcase-web:$(IMAGE_VERSION)
|
||||
@echo "$(COLOR_GREEN)✓ workcase-web 镜像已导出$(COLOR_RESET)"
|
||||
|
||||
.PHONY: save-base
|
||||
save-base: init
|
||||
@echo "$(COLOR_YELLOW)导出基础镜像...$(COLOR_RESET)"
|
||||
@docker save -o $(BUILD_OUTPUT)/urban-lifeline-base-serv_$(IMAGE_VERSION).tar \
|
||||
urban-lifeline-base-serv:$(IMAGE_VERSION)
|
||||
@echo "$(COLOR_GREEN)✓ 基础镜像已导出$(COLOR_RESET)"
|
||||
|
||||
# ================================================
|
||||
# Docker Compose 操作
|
||||
# ================================================
|
||||
.PHONY: up
|
||||
up:
|
||||
@echo "$(COLOR_YELLOW)启动所有服务...$(COLOR_RESET)"
|
||||
@docker compose --profile all up -d
|
||||
@echo "$(COLOR_GREEN)✓ 所有服务已启动$(COLOR_RESET)"
|
||||
|
||||
.PHONY: up-infra
|
||||
up-infra:
|
||||
@echo "$(COLOR_YELLOW)启动基础设施...$(COLOR_RESET)"
|
||||
@docker compose --profile infra up -d
|
||||
@echo "$(COLOR_GREEN)✓ 基础设施已启动$(COLOR_RESET)"
|
||||
|
||||
.PHONY: up-serv
|
||||
up-serv:
|
||||
@echo "$(COLOR_YELLOW)启动后端服务...$(COLOR_RESET)"
|
||||
@docker compose --profile serv up -d
|
||||
@echo "$(COLOR_GREEN)✓ 后端服务已启动$(COLOR_RESET)"
|
||||
|
||||
.PHONY: up-web
|
||||
up-web:
|
||||
@echo "$(COLOR_YELLOW)前端已集成到 Nginx (infra),请使用 make up-infra$(COLOR_RESET)"
|
||||
|
||||
.PHONY: down
|
||||
down:
|
||||
@echo "$(COLOR_YELLOW)停止所有服务...$(COLOR_RESET)"
|
||||
@docker compose --profile all down
|
||||
@echo "$(COLOR_GREEN)✓ 所有服务已停止$(COLOR_RESET)"
|
||||
|
||||
.PHONY: restart
|
||||
restart: down up
|
||||
@echo "$(COLOR_GREEN)✓ 所有服务已重启$(COLOR_RESET)"
|
||||
|
||||
.PHONY: logs
|
||||
logs:
|
||||
@docker compose logs -f
|
||||
|
||||
.PHONY: status
|
||||
status:
|
||||
@echo "$(COLOR_BLUE)=============================================$(COLOR_RESET)"
|
||||
@echo "$(COLOR_BLUE) 服务运行状态$(COLOR_RESET)"
|
||||
@echo "$(COLOR_BLUE)=============================================$(COLOR_RESET)"
|
||||
@docker compose ps
|
||||
|
||||
# ================================================
|
||||
# 清理
|
||||
# ================================================
|
||||
.PHONY: clean
|
||||
clean:
|
||||
@echo "$(COLOR_YELLOW)清理构建产物...$(COLOR_RESET)"
|
||||
@rm -rf $(BUILD_OUTPUT)
|
||||
@echo "$(COLOR_GREEN)✓ 清理完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: clean-images
|
||||
clean-images:
|
||||
@echo "$(COLOR_YELLOW)清理 Docker 镜像...$(COLOR_RESET)"
|
||||
@docker images | grep "urban-lifeline" | awk '{print $$3}' | xargs -r docker rmi -f
|
||||
@echo "$(COLOR_GREEN)✓ 镜像清理完成$(COLOR_RESET)"
|
||||
|
||||
# ================================================
|
||||
# 快捷命令
|
||||
# ================================================
|
||||
.PHONY: full-build
|
||||
full-build: compile-serv compile-web build-all
|
||||
@echo "$(COLOR_GREEN)✓ 完整构建完成$(COLOR_RESET)"
|
||||
|
||||
.PHONY: full-deploy
|
||||
full-deploy: full-build save-all
|
||||
@echo "$(COLOR_GREEN)✓ 完整部署包准备完成$(COLOR_RESET)"
|
||||
@echo "输出目录: $(BUILD_OUTPUT)"
|
||||
|
||||
# 默认目标
|
||||
.DEFAULT_GOAL := help
|
||||
188
docker/README.md
Normal file
188
docker/README.md
Normal file
@@ -0,0 +1,188 @@
|
||||
# Urban Lifeline Docker 部署指南
|
||||
|
||||
## 目录结构
|
||||
|
||||
```
|
||||
docker/
|
||||
├── docker-compose.yml # 总控 compose 文件
|
||||
├── Makefile # 构建管理脚本
|
||||
├── .env.example # 环境变量模板
|
||||
├── README.md # 本文档
|
||||
│
|
||||
├── infra/ # Level 1: 基础设施
|
||||
│ └── docker-compose.yml # Nacos, MinIO, Jitsi Meet
|
||||
│
|
||||
├── urbanLifeline/
|
||||
│ ├── serv/ # Level 2: 后端服务
|
||||
│ │ ├── docker-compose.yml
|
||||
│ │ ├── Dockerfile.base # 基础镜像
|
||||
│ │ ├── Dockerfile.template # 服务镜像模板
|
||||
│ │ └── start.sh # 启动脚本
|
||||
│ │
|
||||
│ └── web/ # Level 3: 前端服务
|
||||
│ ├── docker-compose.yml
|
||||
│ ├── Dockerfile
|
||||
│ └── nginx/ # Nginx 配置
|
||||
│ ├── nginx.conf
|
||||
│ └── conf.d/
|
||||
│
|
||||
├── dify/ # Dify AI 平台(独立管理)
|
||||
│ └── docker-compose.yaml
|
||||
│
|
||||
├── volumes/ # 数据卷目录
|
||||
│ ├── nacos/
|
||||
│ ├── minio/
|
||||
│ ├── jitsi/
|
||||
│ └── logs/
|
||||
│
|
||||
└── output/ # 镜像导出目录
|
||||
```
|
||||
|
||||
## 快速开始
|
||||
|
||||
### 1. 准备环境
|
||||
|
||||
```bash
|
||||
# 复制环境变量配置
|
||||
cp .env.example .env
|
||||
|
||||
# 编辑配置(根据实际环境修改)
|
||||
vim .env
|
||||
```
|
||||
|
||||
### 2. 编译项目
|
||||
|
||||
```bash
|
||||
# 编译所有后端服务
|
||||
make compile-serv
|
||||
|
||||
# 编译所有前端项目
|
||||
make compile-web
|
||||
|
||||
# 或者一次性编译所有
|
||||
make full-build
|
||||
```
|
||||
|
||||
### 3. 构建镜像
|
||||
|
||||
```bash
|
||||
# 构建基础镜像(首次需要)
|
||||
make build-base
|
||||
|
||||
# 构建所有镜像
|
||||
make build-all
|
||||
|
||||
# 或者单独构建
|
||||
make build-gateway
|
||||
make build-system
|
||||
make build-web
|
||||
```
|
||||
|
||||
### 4. 启动服务
|
||||
|
||||
```bash
|
||||
# 启动所有服务
|
||||
make up
|
||||
|
||||
# 或者分层启动
|
||||
make up-infra # 先启动基础设施
|
||||
make up-serv # 再启动后端服务
|
||||
make up-web # 最后启动前端
|
||||
```
|
||||
|
||||
### 5. 查看状态
|
||||
|
||||
```bash
|
||||
# 查看服务状态
|
||||
make status
|
||||
|
||||
# 查看日志
|
||||
make logs
|
||||
|
||||
# 查看特定服务日志
|
||||
docker compose logs -f gateway
|
||||
```
|
||||
|
||||
## 服务层级
|
||||
|
||||
### Level 1: 基础设施 (infra)
|
||||
- **Nginx**: 反向代理 + 前端静态资源 (80/443)
|
||||
- **Nacos**: 服务注册与配置中心 (8848)
|
||||
- **MinIO**: 对象存储服务 (9000/9001)
|
||||
- **Jitsi Meet**: 视频会议服务 (8280)
|
||||
|
||||
### Level 2: 后端服务 (serv)
|
||||
启动顺序: gateway → system → file → auth → ai → workcase
|
||||
|
||||
| 服务 | 端口 | 说明 |
|
||||
|------|------|------|
|
||||
| gateway | 8080 | API 网关 |
|
||||
| system | 8082 | 系统服务 |
|
||||
| file | 8084 | 文件服务 |
|
||||
| auth | 8081 | 认证服务 |
|
||||
| ai | 8090 | AI 服务 |
|
||||
| workcase | 8088 | 工单服务 |
|
||||
|
||||
### Level 3: 前端应用 (web)
|
||||
前端静态资源已打包到 Nginx 镜像中,由 infra 层的 Nginx 服务提供:
|
||||
- /platform - 管理平台
|
||||
- /workcase - 工单系统
|
||||
|
||||
## 常用命令
|
||||
|
||||
```bash
|
||||
# 构建
|
||||
make build-all # 构建所有镜像
|
||||
make build-gateway # 构建单个服务
|
||||
|
||||
# 运行
|
||||
make up # 启动所有
|
||||
make up-infra # 只启动基础设施
|
||||
make down # 停止所有
|
||||
|
||||
# 导出
|
||||
make save-all # 导出所有镜像
|
||||
make save-gateway # 导出单个镜像
|
||||
|
||||
# 清理
|
||||
make clean # 清理构建产物
|
||||
make clean-images # 清理 Docker 镜像
|
||||
```
|
||||
|
||||
## 单独管理子服务
|
||||
|
||||
每个子目录都有独立的 docker-compose.yml,可以单独管理:
|
||||
|
||||
```bash
|
||||
# 只管理基础设施
|
||||
cd infra
|
||||
docker compose up -d nacos
|
||||
docker compose logs -f nacos
|
||||
|
||||
# 只管理后端服务
|
||||
cd urbanLifeline/serv
|
||||
docker compose up -d gateway
|
||||
docker compose restart system
|
||||
```
|
||||
|
||||
## 镜像导出与部署
|
||||
|
||||
```bash
|
||||
# 导出所有镜像
|
||||
make save-all
|
||||
|
||||
# 镜像文件在 output/ 目录
|
||||
ls -la output/
|
||||
|
||||
# 在目标服务器加载镜像
|
||||
docker load -i urban-lifeline-gateway_20251228_120000.tar
|
||||
docker load -i urban-lifeline-web_20251228_120000.tar
|
||||
```
|
||||
|
||||
## 注意事项
|
||||
|
||||
1. **首次部署**需要先构建基础镜像: `make build-base`
|
||||
2. **MySQL** 需要在宿主机或单独容器中运行,配置 `MYSQL_HOST`
|
||||
3. **Dify** 有独立的 docker-compose,在 `dify/` 目录单独管理
|
||||
4. 修改 `.env` 后需要重启服务才能生效
|
||||
5. 生产环境建议配置 SSL 证书
|
||||
381
docker/build.sh
Normal file
381
docker/build.sh
Normal file
@@ -0,0 +1,381 @@
|
||||
#!/bin/bash
|
||||
|
||||
##############################################
|
||||
# Urban Lifeline - 构建和打包脚本
|
||||
#
|
||||
# 功能:
|
||||
# 1. 编译后端/前端项目
|
||||
# 2. 构建 Docker 镜像
|
||||
# 3. 导出镜像到 tar 文件
|
||||
#
|
||||
# 使用方法:
|
||||
# ./build.sh [target] [options]
|
||||
#
|
||||
# 目标(target):
|
||||
# base - 构建基础镜像
|
||||
# serv - 构建所有后端服务镜像
|
||||
# web - 构建前端镜像
|
||||
# all - 构建所有镜像(默认)
|
||||
# gateway - 构建单个后端服务
|
||||
# system/auth/file/ai/workcase - 同上
|
||||
#
|
||||
# 选项(options):
|
||||
# compile - 先编译代码再构建镜像
|
||||
# save - 构建后导出镜像到 tar 文件
|
||||
# save=VERSION - 导出指定版本的镜像
|
||||
#
|
||||
# 示例:
|
||||
# ./build.sh all compile save # 编译+构建+导出所有
|
||||
# ./build.sh gateway compile # 编译+构建 gateway
|
||||
# ./build.sh serv save # 构建+导出所有后端
|
||||
# ./build.sh web compile save # 编译+构建+导出前端
|
||||
##############################################
|
||||
|
||||
set -e
|
||||
|
||||
# 颜色定义
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m'
|
||||
|
||||
log_info() { echo -e "${GREEN}[INFO]${NC} $1"; }
|
||||
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
|
||||
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
|
||||
log_step() { echo -e "${BLUE}[STEP]${NC} $1"; }
|
||||
|
||||
# 项目路径
|
||||
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
|
||||
PROJECT_ROOT=$(cd "${SCRIPT_DIR}/.." && pwd)
|
||||
DOCKER_DIR="${SCRIPT_DIR}"
|
||||
BUILD_OUTPUT="${DOCKER_DIR}/output"
|
||||
IMAGE_VERSION=$(date +%Y%m%d_%H%M%S)
|
||||
|
||||
# 服务列表
|
||||
SERV_SERVICES="gateway system auth file ai workcase"
|
||||
|
||||
# 服务端口映射
|
||||
declare -A SERVICE_PORTS=(
|
||||
["gateway"]=8080
|
||||
["system"]=8082
|
||||
["auth"]=8081
|
||||
["file"]=8084
|
||||
["ai"]=8090
|
||||
["workcase"]=8088
|
||||
)
|
||||
|
||||
# 解析参数
|
||||
BUILD_TARGET="${1:-all}"
|
||||
shift || true
|
||||
|
||||
DO_COMPILE=false
|
||||
DO_SAVE=false
|
||||
SAVE_VERSION=""
|
||||
|
||||
for arg in "$@"; do
|
||||
case $arg in
|
||||
compile)
|
||||
DO_COMPILE=true
|
||||
;;
|
||||
save)
|
||||
DO_SAVE=true
|
||||
;;
|
||||
save=*)
|
||||
DO_SAVE=true
|
||||
SAVE_VERSION="${arg#save=}"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# 确定保存版本
|
||||
if [ "${DO_SAVE}" = true ] && [ -z "${SAVE_VERSION}" ]; then
|
||||
SAVE_VERSION="${IMAGE_VERSION}"
|
||||
fi
|
||||
|
||||
echo "=========================================="
|
||||
echo "Urban Lifeline - 构建脚本"
|
||||
echo "=========================================="
|
||||
log_info "构建目标: ${BUILD_TARGET}"
|
||||
log_info "构建版本: ${IMAGE_VERSION}"
|
||||
log_info "编译代码: ${DO_COMPILE}"
|
||||
log_info "保存镜像: ${DO_SAVE}"
|
||||
[ "${DO_SAVE}" = true ] && log_info "保存版本: ${SAVE_VERSION}"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
mkdir -p "${BUILD_OUTPUT}"
|
||||
|
||||
# ================================================
|
||||
# 编译函数
|
||||
# ================================================
|
||||
|
||||
# Compile every backend service module in one Maven reactor build (tests skipped).
# Leaves the working directory at urbanLifelineServ, like the original callers expect.
compile_serv_all() {
    log_step "编译所有后端服务"
    cd "${PROJECT_ROOT}/urbanLifelineServ"
    mvn clean package -DskipTests
    log_info "✅ 后端服务编译完成"
}
|
||||
|
||||
# Compile a single backend service module plus its in-repo dependencies (-am).
# $1: service module name (gateway/system/auth/file/ai/workcase).
compile_serv_single() {
    local service=$1
    log_step "编译 ${service} 服务"
    cd "${PROJECT_ROOT}/urbanLifelineServ"
    mvn clean package -DskipTests -pl ${service} -am
    log_info "✅ ${service} 服务编译完成"
}
|
||||
|
||||
# Compile all front-end packages.
# Prefers pnpm, falls back to npm. If the workspace has no build:all script,
# builds shared/platform/workcase individually.
# Fix: the fallback used `cd pkg && build && cd ../..` chains — under `set -e`
# a failed build aborted mid-chain with the cwd left inside the package, and a
# soft failure would desynchronize subsequent relative `cd`s. Subshells keep
# the caller's cwd intact regardless of outcome.
compile_web() {
    log_step "编译前端项目"
    cd "${PROJECT_ROOT}/urbanLifelineWeb"

    # Check for pnpm; fall back to npm when absent
    if command -v pnpm &> /dev/null; then
        pnpm install
        pnpm run build:all 2>/dev/null || {
            # No build:all script — build each package on its own
            log_info "分别构建各前端项目..."
            (cd packages/shared && pnpm run build)
            (cd packages/platform && pnpm run build)
            (cd packages/workcase && pnpm run build)
        }
    else
        npm install
        npm run build:all 2>/dev/null || {
            (cd packages/shared && npm run build)
            (cd packages/platform && npm run build)
            (cd packages/workcase && npm run build)
        }
    fi
    log_info "✅ 前端项目编译完成"
}
|
||||
|
||||
# ================================================
|
||||
# 构建函数
|
||||
# ================================================
|
||||
|
||||
# Build the shared base image for all backend services, tagged with both the
# timestamped IMAGE_VERSION and "latest".
build_base() {
    log_step "构建基础镜像"
    cd "${PROJECT_ROOT}"
    docker build \
        -t urban-lifeline-base-serv:${IMAGE_VERSION} \
        -t urban-lifeline-base-serv:latest \
        -f docker/urbanLifeline/serv/Dockerfile.base .
    log_info "✅ 基础镜像构建完成: urban-lifeline-base-serv:${IMAGE_VERSION}"
}
|
||||
|
||||
# Build the Docker image for one backend service.
# $1: service name; its exposed port comes from the SERVICE_PORTS map.
# Exits the script if the service has not been compiled yet (no JAR in target/).
build_serv_single() {
    local service=$1
    local port=${SERVICE_PORTS[$service]}

    log_step "构建 ${service} 服务镜像"

    # Locate the built JAR; first match wins
    local jar_file=$(find "${PROJECT_ROOT}/urbanLifelineServ/${service}/target" -name "*.jar" -type f 2>/dev/null | head -1)
    if [ -z "$jar_file" ]; then
        log_error "JAR 包不存在,请先编译: ./build.sh ${service} compile"
        exit 1
    fi

    cd "${PROJECT_ROOT}"
    docker build \
        --build-arg SERVICE_NAME=${service} \
        --build-arg SERVICE_PORT=${port} \
        -t urban-lifeline-${service}:${IMAGE_VERSION} \
        -t urban-lifeline-${service}:latest \
        -f docker/urbanLifeline/serv/Dockerfile.template .

    log_info "✅ ${service} 镜像构建完成: urban-lifeline-${service}:${IMAGE_VERSION}"
}
|
||||
|
||||
# Build images for every backend service listed in SERV_SERVICES.
build_serv_all() {
    for service in ${SERV_SERVICES}; do
        build_serv_single ${service}
    done
    log_info "✅ 所有后端服务镜像构建完成"
}
|
||||
|
||||
# Build both front-end images (platform + workcase-web).
build_web() {
    build_platform
    build_workcase_web
    log_info "✅ 所有前端镜像构建完成"
}
|
||||
|
||||
# Build the platform front-end image.
# Requires packages/platform/dist to exist (produced by compile_web).
build_platform() {
    log_step "构建 platform 镜像"

    if [ ! -d "${PROJECT_ROOT}/urbanLifelineWeb/packages/platform/dist" ]; then
        log_error "platform dist 不存在,请先编译: ./build.sh platform compile"
        exit 1
    fi

    cd "${PROJECT_ROOT}"
    docker build \
        --build-arg WEB_NAME=platform \
        -t urban-lifeline-platform:${IMAGE_VERSION} \
        -t urban-lifeline-platform:latest \
        -f docker/urbanLifeline/web/Dockerfile .

    log_info "✅ platform 镜像构建完成: urban-lifeline-platform:${IMAGE_VERSION}"
}
|
||||
|
||||
# Build the workcase front-end image.
# Requires packages/workcase/dist to exist (produced by compile_web).
build_workcase_web() {
    log_step "构建 workcase-web 镜像"

    if [ ! -d "${PROJECT_ROOT}/urbanLifelineWeb/packages/workcase/dist" ]; then
        log_error "workcase dist 不存在,请先编译: ./build.sh workcase-web compile"
        exit 1
    fi

    cd "${PROJECT_ROOT}"
    docker build \
        --build-arg WEB_NAME=workcase \
        -t urban-lifeline-workcase-web:${IMAGE_VERSION} \
        -t urban-lifeline-workcase-web:latest \
        -f docker/urbanLifeline/web/Dockerfile .

    log_info "✅ workcase-web 镜像构建完成: urban-lifeline-workcase-web:${IMAGE_VERSION}"
}
|
||||
|
||||
# ================================================
|
||||
# 导出函数
|
||||
# ================================================
|
||||
|
||||
# Export one image tag to BUILD_OUTPUT as <name>_<version>.tar.
# $1: image repository name (e.g. urban-lifeline-gateway).
# Returns 1 (without exiting) when the tag does not exist locally.
# Fix: the old existence check grepped `docker images` output with a substring
# pattern, which false-positives across names (e.g. "urban-lifeline-workcase"
# also matches the "urban-lifeline-workcase-web" line). `docker image inspect`
# checks the exact tag.
save_image() {
    local image_name=$1
    local version=${SAVE_VERSION}
    local output_file="${BUILD_OUTPUT}/${image_name}_${version}.tar"

    log_info "导出镜像: ${image_name}:${version}"

    if ! docker image inspect "${image_name}:${version}" >/dev/null 2>&1; then
        log_error "镜像不存在: ${image_name}:${version}"
        return 1
    fi

    docker save -o "${output_file}" "${image_name}:${version}"
    local size=$(du -h "${output_file}" | cut -f1)
    log_info "✅ 镜像已导出: ${output_file} (${size})"
}
|
||||
|
||||
# Export every backend service image to a tar file under BUILD_OUTPUT.
save_serv_all() {
    for service in ${SERV_SERVICES}; do
        save_image "urban-lifeline-${service}"
    done
}
|
||||
|
||||
# ================================================
|
||||
# 主流程
|
||||
# ================================================
|
||||
|
||||
# Entry point: optional source compile, then image build, then optional tar
# export — all driven by BUILD_TARGET / DO_COMPILE / DO_SAVE parsed earlier
# from the command line.
main() {
    # Phase 1: compile sources when "compile" was requested
    if [ "${DO_COMPILE}" = true ]; then
        case ${BUILD_TARGET} in
            base)
                # The base image needs no source compilation
                ;;
            serv)
                compile_serv_all
                ;;
            web)
                compile_web
                ;;
            all)
                compile_serv_all
                compile_web
                ;;
            gateway|system|auth|file|ai|workcase)
                compile_serv_single ${BUILD_TARGET}
                ;;
            *)
                log_error "未知目标: ${BUILD_TARGET}"
                exit 1
                ;;
        esac
    fi

    # Phase 2: build Docker images for the selected target
    case ${BUILD_TARGET} in
        base)
            build_base
            ;;
        serv)
            build_serv_all
            ;;
        web)
            build_web
            ;;
        platform)
            build_platform
            ;;
        workcase-web)
            build_workcase_web
            ;;
        all)
            # Ensure the shared base image exists before building the services
            if ! docker images | grep -q "urban-lifeline-base-serv.*latest"; then
                log_warn "基础镜像不存在,先构建基础镜像"
                build_base
            fi
            build_serv_all
            build_web
            ;;
        gateway|system|auth|file|ai|workcase)
            build_serv_single ${BUILD_TARGET}
            ;;
        *)
            log_error "未知目标: ${BUILD_TARGET}"
            echo ""
            echo "可用目标: base, serv, web, all, gateway, system, auth, file, ai, workcase, platform, workcase-web"
            exit 1
            ;;
    esac

    # Phase 3: export images to tar files when "save" was requested
    if [ "${DO_SAVE}" = true ]; then
        case ${BUILD_TARGET} in
            base)
                save_image "urban-lifeline-base-serv"
                ;;
            serv)
                save_serv_all
                ;;
            web)
                save_image "urban-lifeline-platform"
                save_image "urban-lifeline-workcase-web"
                ;;
            platform)
                save_image "urban-lifeline-platform"
                ;;
            workcase-web)
                save_image "urban-lifeline-workcase-web"
                ;;
            all)
                save_image "urban-lifeline-base-serv"
                save_serv_all
                save_image "urban-lifeline-platform"
                save_image "urban-lifeline-workcase-web"
                ;;
            gateway|system|auth|file|ai|workcase)
                save_image "urban-lifeline-${BUILD_TARGET}"
                ;;
        esac

        echo ""
        log_info "导出文件列表:"
        ls -lh "${BUILD_OUTPUT}"/*.tar 2>/dev/null || true
    fi

    # Summary: show the resulting image list
    echo ""
    log_info "Docker 镜像列表:"
    docker images | grep "urban-lifeline" | head -20

    echo ""
    echo "=========================================="
    log_info "✅ 构建完成!"
    echo "=========================================="
}
|
||||
|
||||
main
|
||||
1437
docker/dify/.env.example
Normal file
1437
docker/dify/.env.example
Normal file
File diff suppressed because it is too large
Load Diff
119
docker/dify/README.md
Normal file
119
docker/dify/README.md
Normal file
@@ -0,0 +1,119 @@
|
||||
## README for Docker Deployment
|
||||
|
||||
Welcome to the new `docker` directory for deploying Dify using Docker Compose. This README outlines the updates, deployment instructions, and migration details for existing users.
|
||||
|
||||
### What's Updated
|
||||
|
||||
- **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.\
|
||||
For more information, refer `docker/certbot/README.md`.
|
||||
|
||||
- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
|
||||
|
||||
> What is `.env`? </br> </br>
|
||||
> The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments.
|
||||
|
||||
- **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
|
||||
|
||||
- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
|
||||
|
||||
### How to Deploy Dify with `docker-compose.yaml`
|
||||
|
||||
1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system.
|
||||
1. **Environment Setup**:
|
||||
- Navigate to the `docker` directory.
|
||||
- Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
|
||||
- Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
|
||||
1. **Running the Services**:
|
||||
- Execute `docker compose up` from the `docker` directory to start the services.
|
||||
- To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
|
||||
1. **SSL Certificate Setup**:
|
||||
- Refer `docker/certbot/README.md` to set up SSL certificates using Certbot.
|
||||
1. **OpenTelemetry Collector Setup**:
|
||||
- Change `ENABLE_OTEL` to `true` in `.env`.
|
||||
- Configure `OTLP_BASE_ENDPOINT` properly.
|
||||
|
||||
### How to Deploy Middleware for Developing Dify
|
||||
|
||||
1. **Middleware Setup**:
|
||||
- Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches.
|
||||
- Navigate to the `docker` directory.
|
||||
- Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file).
|
||||
1. **Running Middleware Services**:
|
||||
- Navigate to the `docker` directory.
|
||||
- Execute `docker compose --env-file middleware.env -f docker-compose.middleware.yaml -p dify up -d` to start PostgreSQL/MySQL (per `DB_TYPE`) plus the bundled Weaviate instance.
|
||||
|
||||
> Compose automatically loads `COMPOSE_PROFILES=${DB_TYPE:-postgresql},weaviate` from `middleware.env`, so no extra `--profile` flags are needed. Adjust variables in `middleware.env` if you want a different combination of services.
|
||||
|
||||
### Migration for Existing Users
|
||||
|
||||
For users migrating from the `docker-legacy` setup:
|
||||
|
||||
1. **Review Changes**: Familiarize yourself with the new `.env` configuration and Docker Compose setup.
|
||||
1. **Transfer Customizations**:
|
||||
- If you have customized configurations such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, you will need to reflect these changes in the `.env` file you create.
|
||||
1. **Data Migration**:
|
||||
- Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.
|
||||
|
||||
### Overview of `.env`
|
||||
|
||||
#### Key Modules and Customization
|
||||
|
||||
- **Vector Database Services**: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
|
||||
- **Storage Services**: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
|
||||
- **API and Web Services**: Users can define URLs and other settings that affect how the API and web frontend operate.
|
||||
|
||||
#### Other notable variables
|
||||
|
||||
The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables:
|
||||
|
||||
1. **Common Variables**:
|
||||
|
||||
- `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services.
|
||||
- `APP_WEB_URL`: Frontend application URL.
|
||||
- `FILES_URL`: Base URL for file downloads and previews.
|
||||
|
||||
1. **Server Configuration**:
|
||||
|
||||
- `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings.
|
||||
- `SECRET_KEY`: A key for encrypting session cookies and other sensitive data.
|
||||
|
||||
1. **Database Configuration**:
|
||||
|
||||
- `DB_USERNAME`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_DATABASE`: PostgreSQL database credentials and connection details.
|
||||
|
||||
1. **Redis Configuration**:
|
||||
|
||||
- `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`: Redis server connection settings.
|
||||
|
||||
1. **Celery Configuration**:
|
||||
|
||||
- `CELERY_BROKER_URL`: Configuration for Celery message broker.
|
||||
|
||||
1. **Storage Configuration**:
|
||||
|
||||
- `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc.
|
||||
|
||||
1. **Vector Database Configuration**:
|
||||
|
||||
- `VECTOR_STORE`: Type of vector database (e.g., `weaviate`, `milvus`).
|
||||
- Specific settings for each vector store like `WEAVIATE_ENDPOINT`, `MILVUS_URI`.
|
||||
|
||||
1. **CORS Configuration**:
|
||||
|
||||
- `WEB_API_CORS_ALLOW_ORIGINS`, `CONSOLE_CORS_ALLOW_ORIGINS`: Settings for cross-origin resource sharing.
|
||||
|
||||
1. **OpenTelemetry Configuration**:
|
||||
|
||||
- `ENABLE_OTEL`: Enable OpenTelemetry collector in api.
|
||||
- `OTLP_BASE_ENDPOINT`: Endpoint for your OTLP exporter.
|
||||
|
||||
1. **Other Service-Specific Environment Variables**:
|
||||
|
||||
- Each service like `nginx`, `redis`, `db`, and vector databases have specific environment variables that are directly referenced in the `docker-compose.yaml`.
|
||||
|
||||
### Additional Information
|
||||
|
||||
- **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.
|
||||
- **Support**: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory.
|
||||
|
||||
This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.
|
||||
76
docker/dify/certbot/README.md
Normal file
76
docker/dify/certbot/README.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# Launching new servers with SSL certificates
|
||||
|
||||
## Short description
|
||||
|
||||
docker compose certbot configurations with Backward compatibility (without certbot container).\
|
||||
Use `docker compose --profile certbot up` to use this features.
|
||||
|
||||
## The simplest way for launching new servers with SSL certificates
|
||||
|
||||
1. Get letsencrypt certs\
|
||||
set `.env` values
|
||||
```properties
|
||||
NGINX_SSL_CERT_FILENAME=fullchain.pem
|
||||
NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
|
||||
NGINX_ENABLE_CERTBOT_CHALLENGE=true
|
||||
CERTBOT_DOMAIN=your_domain.com
|
||||
CERTBOT_EMAIL=example@your_domain.com
|
||||
```
|
||||
execute command:
|
||||
```shell
|
||||
docker network prune
|
||||
docker compose --profile certbot up --force-recreate -d
|
||||
```
|
||||
then after the containers launched:
|
||||
```shell
|
||||
docker compose exec -it certbot /bin/sh /update-cert.sh
|
||||
```
|
||||
1. Edit `.env` file and `docker compose --profile certbot up` again.\
|
||||
set `.env` value additionally
|
||||
```properties
|
||||
NGINX_HTTPS_ENABLED=true
|
||||
```
|
||||
execute command:
|
||||
```shell
|
||||
docker compose --profile certbot up -d --no-deps --force-recreate nginx
|
||||
```
|
||||
Then you can access your server with HTTPS.\
|
||||
[https://your_domain.com](https://your_domain.com)
|
||||
|
||||
## SSL certificates renewal
|
||||
|
||||
For SSL certificates renewal, execute commands below:
|
||||
|
||||
```shell
|
||||
docker compose exec -it certbot /bin/sh /update-cert.sh
|
||||
docker compose exec nginx nginx -s reload
|
||||
```
|
||||
|
||||
## Options for certbot
|
||||
|
||||
The `CERTBOT_OPTIONS` key might be helpful for testing, e.g.,
|
||||
|
||||
```properties
|
||||
CERTBOT_OPTIONS=--dry-run
|
||||
```
|
||||
|
||||
To apply changes to `CERTBOT_OPTIONS`, regenerate the certbot container before updating the certificates.
|
||||
|
||||
```shell
|
||||
docker compose --profile certbot up -d --no-deps --force-recreate certbot
|
||||
docker compose exec -it certbot /bin/sh /update-cert.sh
|
||||
```
|
||||
|
||||
Then, reload the nginx container if necessary.
|
||||
|
||||
```shell
|
||||
docker compose exec nginx nginx -s reload
|
||||
```
|
||||
|
||||
## For legacy servers
|
||||
|
||||
To use cert files dir `nginx/ssl` as before, simply launch containers WITHOUT `--profile certbot` option.
|
||||
|
||||
```shell
|
||||
docker compose up -d
|
||||
```
|
||||
30
docker/dify/certbot/docker-entrypoint.sh
Normal file
30
docker/dify/certbot/docker-entrypoint.sh
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/bin/sh
# Certbot container entrypoint.
# Reports the relevant environment, checks the expected bind mounts,
# renders /update-cert.sh from its template, then execs the container
# command (CMD) so signals are delivered to it directly.
set -e

printf '%s\n' "Docker entrypoint script is running"

# BUGFIX: printf's %s conversion does not interpret backslash escapes,
# so the original `printf '%s\n' "\n..."` printed a literal `\n`.
# The blank line now lives in the format string, where escapes ARE
# interpreted.
printf '\n%s\n' "Checking specific environment variables:"
printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"

printf '\n%s\n' "Checking mounted directories:"
for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
    if [ -d "$dir" ]; then
        printf '%s\n' "$dir exists. Contents:"
        ls -la "$dir"
    else
        printf '%s\n' "$dir does not exist."
    fi
done

printf '\n%s\n' "Generating update-cert.sh from template"
# Substitute the literal ${CERTBOT_*} placeholders in the template with
# the current environment values. `|` is used as the sed delimiter so
# values containing `/` (e.g. URLs in CERTBOT_OPTIONS) do not break it.
sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
    -e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
    -e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
    /update-cert.template.txt > /update-cert.sh

chmod +x /update-cert.sh

printf '\n'
printf '%s\n' "Executing command:" "$@"
exec "$@"
|
||||
19
docker/dify/certbot/update-cert.template.txt
Normal file
19
docker/dify/certbot/update-cert.template.txt
Normal file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
# Template for /update-cert.sh. The literal ${CERTBOT_EMAIL},
# ${CERTBOT_DOMAIN} and ${CERTBOT_OPTIONS} tokens below are replaced by
# the entrypoint's sed pass, so they must be kept exactly as written.
# NOTE(review): the compose setup runs this via `/bin/sh /update-cert.sh`,
# so keep the body POSIX-compatible despite the bash shebang.
set -e

DOMAIN="${CERTBOT_DOMAIN}"
EMAIL="${CERTBOT_EMAIL}"
OPTIONS="${CERTBOT_OPTIONS}"
CERT_NAME="${DOMAIN}"  # keep the certificate name identical to the domain

# Renew when a renewal config for this cert already exists; otherwise
# obtain a new certificate via the webroot challenge.
# DOMAIN/EMAIL/CERT_NAME are quoted so unusual values survive word
# splitting; OPTIONS is deliberately left unquoted so that multiple
# flags (e.g. "--dry-run --staging") split into separate arguments.
if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
  echo "Certificate exists. Attempting to renew..."
  certbot renew --noninteractive --cert-name "${CERT_NAME}" --webroot --webroot-path=/var/www/html --email "${EMAIL}" --agree-tos --no-eff-email ${OPTIONS}
else
  echo "Certificate does not exist. Obtaining a new certificate..."
  certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email "${EMAIL}" --agree-tos --no-eff-email -d "${DOMAIN}" ${OPTIONS}
fi
echo "Certificate operation successful"
# Note: Nginx reload should be handled outside this container
echo "Please ensure to reload Nginx to apply any certificate changes."
||||
4
docker/dify/couchbase-server/Dockerfile
Normal file
4
docker/dify/couchbase-server/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM couchbase/server:latest AS stage_base
|
||||
# FROM couchbase:latest AS stage_base
|
||||
COPY init-cbserver.sh /opt/couchbase/init/
|
||||
RUN chmod +x /opt/couchbase/init/init-cbserver.sh
|
||||
44
docker/dify/couchbase-server/init-cbserver.sh
Normal file
44
docker/dify/couchbase-server/init-cbserver.sh
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/bin/bash
# One-shot Couchbase Server bootstrap.
# docker compose only allows a single command, so we start couchbase-server
# in the background exactly like the upstream Dockerfile would, then (on
# first run only) initialize the cluster and create the bucket from
# environment variables.
# https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88

/entrypoint.sh couchbase-server &

# Flag file recording that setup already ran, so restarts skip it.
FILE=/opt/couchbase/init/setupComplete.txt

if ! [ -f "$FILE" ]; then
  # Automatically create the cluster based on environment variables.
  # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html

  # SECURITY: do not echo the administrator password into container logs.
  echo "$COUCHBASE_ADMINISTRATOR_USERNAME : ********"

  # Give the server time to come up before initializing.
  # NOTE(review): a fixed sleep is fragile; polling the REST API would be
  # more robust, but the 20s wait is kept to preserve existing behavior.
  sleep 20s
  /opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \
    --cluster-username "$COUCHBASE_ADMINISTRATOR_USERNAME" \
    --cluster-password "$COUCHBASE_ADMINISTRATOR_PASSWORD" \
    --services data,index,query,fts \
    --cluster-ramsize "$COUCHBASE_RAM_SIZE" \
    --cluster-index-ramsize "$COUCHBASE_INDEX_RAM_SIZE" \
    --cluster-eventing-ramsize "$COUCHBASE_EVENTING_RAM_SIZE" \
    --cluster-fts-ramsize "$COUCHBASE_FTS_RAM_SIZE" \
    --index-storage-setting default

  sleep 2s

  # Automatically create the bucket based on environment variables.
  # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html

  /opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \
    --username "$COUCHBASE_ADMINISTRATOR_USERNAME" \
    --password "$COUCHBASE_ADMINISTRATOR_PASSWORD" \
    --bucket "$COUCHBASE_BUCKET" \
    --bucket-ramsize "$COUCHBASE_BUCKET_RAMSIZE" \
    --bucket-type couchbase

  # Record that the cluster is set up so the next start skips all of this.
  touch "$FILE"
fi
# docker compose will stop the container from running unless we keep the
# foreground process alive — known issue and workaround.
tail -f /dev/null
|
||||
907
docker/dify/docker-compose-template.yaml
Normal file
907
docker/dify/docker-compose-template.yaml
Normal file
@@ -0,0 +1,907 @@
|
||||
x-shared-env: &shared-api-worker-env
|
||||
services:
|
||||
# Init container to fix permissions
|
||||
init_permissions:
|
||||
image: busybox:latest
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
FLAG_FILE="/app/api/storage/.init_permissions"
|
||||
if [ -f "$${FLAG_FILE}" ]; then
|
||||
echo "Permissions already initialized. Exiting."
|
||||
exit 0
|
||||
fi
|
||||
echo "Initializing permissions for /app/api/storage"
|
||||
chown -R 1001:1001 /app/api/storage && touch "$${FLAG_FILE}"
|
||||
echo "Permissions initialized. Exiting."
|
||||
volumes:
|
||||
- ./volumes/app/storage:/app/api/storage
|
||||
restart: "no"
|
||||
|
||||
# API service
|
||||
api:
|
||||
image: langgenius/dify-api:1.11.1
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
<<: *shared-api-worker-env
|
||||
# Startup mode, 'api' starts the API server.
|
||||
MODE: api
|
||||
SENTRY_DSN: ${API_SENTRY_DSN:-}
|
||||
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
|
||||
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
|
||||
PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
|
||||
PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
|
||||
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
depends_on:
|
||||
init_permissions:
|
||||
condition: service_completed_successfully
|
||||
db_postgres:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
db_mysql:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
oceanbase:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
seekdb:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
redis:
|
||||
condition: service_started
|
||||
volumes:
|
||||
# Mount the storage directory to the container, for storing user files.
|
||||
- ./volumes/app/storage:/app/api/storage
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
# worker service
|
||||
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
|
||||
worker:
|
||||
image: langgenius/dify-api:1.11.1
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
<<: *shared-api-worker-env
|
||||
# Startup mode, 'worker' starts the Celery worker for processing all queues.
|
||||
MODE: worker
|
||||
SENTRY_DSN: ${API_SENTRY_DSN:-}
|
||||
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
|
||||
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
|
||||
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
depends_on:
|
||||
init_permissions:
|
||||
condition: service_completed_successfully
|
||||
db_postgres:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
db_mysql:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
oceanbase:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
seekdb:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
redis:
|
||||
condition: service_started
|
||||
volumes:
|
||||
# Mount the storage directory to the container, for storing user files.
|
||||
- ./volumes/app/storage:/app/api/storage
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
# worker_beat service
|
||||
# Celery beat for scheduling periodic tasks.
|
||||
worker_beat:
|
||||
image: langgenius/dify-api:1.11.1
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
<<: *shared-api-worker-env
|
||||
# Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks.
|
||||
MODE: beat
|
||||
depends_on:
|
||||
init_permissions:
|
||||
condition: service_completed_successfully
|
||||
db_postgres:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
db_mysql:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
oceanbase:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
seekdb:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
redis:
|
||||
condition: service_started
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
# Frontend web application.
|
||||
web:
|
||||
image: langgenius/dify-web:1.11.1
|
||||
restart: always
|
||||
environment:
|
||||
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
|
||||
APP_API_URL: ${APP_API_URL:-}
|
||||
AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-}
|
||||
NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-}
|
||||
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
|
||||
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
|
||||
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
|
||||
CSP_WHITELIST: ${CSP_WHITELIST:-}
|
||||
ALLOW_EMBED: ${ALLOW_EMBED:-false}
|
||||
ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
|
||||
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
|
||||
MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
|
||||
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
|
||||
PM2_INSTANCES: ${PM2_INSTANCES:-2}
|
||||
LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
|
||||
MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
|
||||
MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
|
||||
MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
|
||||
MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
|
||||
ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
|
||||
ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
|
||||
ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
|
||||
|
||||
# The PostgreSQL database.
|
||||
db_postgres:
|
||||
image: postgres:15-alpine
|
||||
profiles:
|
||||
- postgresql
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_USER: ${DB_USERNAME:-postgres}
|
||||
POSTGRES_PASSWORD: ${DB_PASSWORD:-difyai123456}
|
||||
POSTGRES_DB: ${DB_DATABASE:-dify}
|
||||
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
command: >
|
||||
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
|
||||
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
|
||||
-c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
|
||||
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
|
||||
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
|
||||
-c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
|
||||
-c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
|
||||
volumes:
|
||||
- ./volumes/db/data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"pg_isready",
|
||||
"-h",
|
||||
"db_postgres",
|
||||
"-U",
|
||||
"${DB_USERNAME:-postgres}",
|
||||
"-d",
|
||||
"${DB_DATABASE:-dify}",
|
||||
]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 60
|
||||
|
||||
# The mysql database.
|
||||
db_mysql:
|
||||
image: mysql:8.0
|
||||
profiles:
|
||||
- mysql
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456}
|
||||
MYSQL_DATABASE: ${DB_DATABASE:-dify}
|
||||
command: >
|
||||
--max_connections=1000
|
||||
--innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
|
||||
--innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
|
||||
--innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
|
||||
volumes:
|
||||
- ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"mysqladmin",
|
||||
"ping",
|
||||
"-u",
|
||||
"root",
|
||||
"-p${DB_PASSWORD:-difyai123456}",
|
||||
]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
# The redis cache.
|
||||
redis:
|
||||
image: redis:6-alpine
|
||||
restart: always
|
||||
environment:
|
||||
REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
|
||||
volumes:
|
||||
# Mount the redis data directory to the container.
|
||||
- ./volumes/redis/data:/data
|
||||
# Set the redis password when startup redis server.
|
||||
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
"redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
|
||||
]
|
||||
|
||||
# The DifySandbox
|
||||
sandbox:
|
||||
image: langgenius/dify-sandbox:0.2.12
|
||||
restart: always
|
||||
environment:
|
||||
# The DifySandbox configurations
|
||||
# Make sure you are changing this key for your deployment with a strong key.
|
||||
# You can generate a strong key using `openssl rand -base64 42`.
|
||||
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
|
||||
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
|
||||
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
|
||||
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
|
||||
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
|
||||
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
|
||||
volumes:
|
||||
- ./volumes/sandbox/dependencies:/dependencies
|
||||
- ./volumes/sandbox/conf:/conf
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
|
||||
# plugin daemon
|
||||
plugin_daemon:
|
||||
image: langgenius/dify-plugin-daemon:0.5.1-local
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
<<: *shared-api-worker-env
|
||||
DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
|
||||
SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
|
||||
SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
|
||||
MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
|
||||
DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
|
||||
DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
|
||||
PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
|
||||
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
|
||||
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
|
||||
PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
|
||||
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
|
||||
PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024}
|
||||
PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
|
||||
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
|
||||
PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
|
||||
PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
|
||||
PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
|
||||
PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
|
||||
PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
|
||||
PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
|
||||
S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
|
||||
S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
|
||||
S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
|
||||
S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
|
||||
AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
|
||||
AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
|
||||
AWS_REGION: ${PLUGIN_AWS_REGION:-}
|
||||
AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
|
||||
AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
|
||||
TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
|
||||
TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
|
||||
TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
|
||||
ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
|
||||
ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
|
||||
ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
|
||||
ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
|
||||
ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
|
||||
ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
|
||||
VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
|
||||
VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
|
||||
VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
|
||||
VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
|
||||
SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false}
|
||||
SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-}
|
||||
ports:
|
||||
- "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
|
||||
volumes:
|
||||
- ./volumes/plugin_daemon:/app/storage
|
||||
depends_on:
|
||||
db_postgres:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
db_mysql:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
oceanbase:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
seekdb:
|
||||
condition: service_healthy
|
||||
required: false
|
||||
|
||||
# ssrf_proxy server
|
||||
# for more information, please refer to
|
||||
# https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
|
||||
ssrf_proxy:
|
||||
image: ubuntu/squid:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
|
||||
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
entrypoint:
|
||||
[
|
||||
"sh",
|
||||
"-c",
|
||||
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
|
||||
]
|
||||
environment:
|
||||
# pls clearly modify the squid env vars to fit your network environment.
|
||||
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
|
||||
COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
|
||||
REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
|
||||
SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
# Certbot service
|
||||
# use `docker-compose --profile certbot up` to start the certbot service.
|
||||
certbot:
|
||||
image: certbot/certbot
|
||||
profiles:
|
||||
- certbot
|
||||
volumes:
|
||||
- ./volumes/certbot/conf:/etc/letsencrypt
|
||||
- ./volumes/certbot/www:/var/www/html
|
||||
- ./volumes/certbot/logs:/var/log/letsencrypt
|
||||
- ./volumes/certbot/conf/live:/etc/letsencrypt/live
|
||||
- ./certbot/update-cert.template.txt:/update-cert.template.txt
|
||||
- ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
|
||||
environment:
|
||||
- CERTBOT_EMAIL=${CERTBOT_EMAIL}
|
||||
- CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
|
||||
- CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
|
||||
entrypoint: ["/docker-entrypoint.sh"]
|
||||
command: ["tail", "-f", "/dev/null"]
|
||||
|
||||
# The nginx reverse proxy.
|
||||
# used for reverse proxying the API service and Web service.
|
||||
nginx:
|
||||
image: nginx:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
|
||||
- ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
|
||||
- ./nginx/https.conf.template:/etc/nginx/https.conf.template
|
||||
- ./nginx/conf.d:/etc/nginx/conf.d
|
||||
- ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
- ./nginx/ssl:/etc/ssl # cert dir (legacy)
|
||||
- ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
|
||||
- ./volumes/certbot/conf:/etc/letsencrypt
|
||||
- ./volumes/certbot/www:/var/www/html
|
||||
entrypoint:
|
||||
[
|
||||
"sh",
|
||||
"-c",
|
||||
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
|
||||
]
|
||||
environment:
|
||||
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
|
||||
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
|
||||
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
|
||||
NGINX_PORT: ${NGINX_PORT:-80}
|
||||
# You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
|
||||
# and modify the env vars below in .env if HTTPS_ENABLED is true.
|
||||
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
|
||||
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
|
||||
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
|
||||
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
|
||||
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
|
||||
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
|
||||
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
|
||||
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
|
||||
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
|
||||
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
|
||||
depends_on:
|
||||
- api
|
||||
- web
|
||||
ports:
|
||||
- "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}"
|
||||
- "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}"
|
||||
|
||||
# The Weaviate vector store.
|
||||
weaviate:
|
||||
image: semitechnologies/weaviate:1.27.0
|
||||
profiles:
|
||||
- weaviate
|
||||
restart: always
|
||||
volumes:
|
||||
# Mount the Weaviate data directory to the container.
|
||||
- ./volumes/weaviate:/var/lib/weaviate
|
||||
environment:
|
||||
# The Weaviate configurations
|
||||
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
|
||||
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
|
||||
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
|
||||
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
|
||||
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
|
||||
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
|
||||
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
|
||||
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
|
||||
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
|
||||
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
|
||||
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
|
||||
DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
|
||||
ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false}
|
||||
ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false}
|
||||
ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false}
|
||||
|
||||
# OceanBase vector database
|
||||
oceanbase:
|
||||
image: oceanbase/oceanbase-ce:4.3.5-lts
|
||||
container_name: oceanbase
|
||||
profiles:
|
||||
- oceanbase
|
||||
restart: always
|
||||
volumes:
|
||||
- ./volumes/oceanbase/data:/root/ob
|
||||
- ./volumes/oceanbase/conf:/root/.obd/cluster
|
||||
- ./volumes/oceanbase/init.d:/root/boot/init.d
|
||||
environment:
|
||||
OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
|
||||
OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
|
||||
OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
|
||||
OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
|
||||
OB_SERVER_IP: 127.0.0.1
|
||||
MODE: mini
|
||||
LANG: en_US.UTF-8
|
||||
ports:
|
||||
- "${OCEANBASE_VECTOR_PORT:-2881}:2881"
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
'obclient -h127.0.0.1 -P2881 -uroot@test -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
|
||||
]
|
||||
interval: 10s
|
||||
retries: 30
|
||||
start_period: 30s
|
||||
timeout: 10s
|
||||
|
||||
# seekdb vector database
|
||||
seekdb:
|
||||
image: oceanbase/seekdb:latest
|
||||
container_name: seekdb
|
||||
profiles:
|
||||
- seekdb
|
||||
restart: always
|
||||
volumes:
|
||||
- ./volumes/seekdb:/var/lib/oceanbase
|
||||
environment:
|
||||
ROOT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
|
||||
MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G}
|
||||
REPORTER: dify-ai-seekdb
|
||||
ports:
|
||||
- "${OCEANBASE_VECTOR_PORT:-2881}:2881"
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
'mysql -h127.0.0.1 -P2881 -uroot -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
|
||||
]
|
||||
interval: 5s
|
||||
retries: 60
|
||||
timeout: 5s
|
||||
|
||||
# Qdrant vector store.
|
||||
# (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
|
||||
qdrant:
|
||||
image: langgenius/qdrant:v1.8.3
|
||||
profiles:
|
||||
- qdrant
|
||||
restart: always
|
||||
volumes:
|
||||
- ./volumes/qdrant:/qdrant/storage
|
||||
environment:
|
||||
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
|
||||
|
||||
# The Couchbase vector store.
|
||||
couchbase-server:
|
||||
build: ./couchbase-server
|
||||
profiles:
|
||||
- couchbase
|
||||
restart: always
|
||||
environment:
|
||||
- CLUSTER_NAME=dify_search
|
||||
- COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
|
||||
- COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
|
||||
- COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
|
||||
- COUCHBASE_BUCKET_RAMSIZE=512
|
||||
- COUCHBASE_RAM_SIZE=2048
|
||||
- COUCHBASE_EVENTING_RAM_SIZE=512
|
||||
- COUCHBASE_INDEX_RAM_SIZE=512
|
||||
- COUCHBASE_FTS_RAM_SIZE=1024
|
||||
hostname: couchbase-server
|
||||
container_name: couchbase-server
|
||||
working_dir: /opt/couchbase
|
||||
stdin_open: true
|
||||
tty: true
|
||||
entrypoint: [""]
|
||||
command: sh -c "/opt/couchbase/init/init-cbserver.sh"
|
||||
volumes:
|
||||
- ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
|
||||
healthcheck:
|
||||
# ensure bucket was created before proceeding
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
"curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1",
|
||||
]
|
||||
interval: 10s
|
||||
retries: 10
|
||||
start_period: 30s
|
||||
timeout: 10s
|
||||
|
||||
# The pgvector vector database.
|
||||
pgvector:
|
||||
image: pgvector/pgvector:pg16
|
||||
profiles:
|
||||
- pgvector
|
||||
restart: always
|
||||
environment:
|
||||
PGUSER: ${PGVECTOR_PGUSER:-postgres}
|
||||
# The password for the default postgres user.
|
||||
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
|
||||
# The name of the default postgres database.
|
||||
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
|
||||
# postgres data directory
|
||||
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
# pg_bigm module for full text search
|
||||
PG_BIGM: ${PGVECTOR_PG_BIGM:-false}
|
||||
PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606}
|
||||
volumes:
|
||||
- ./volumes/pgvector/data:/var/lib/postgresql/data
|
||||
- ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
|
||||
entrypoint: ["/docker-entrypoint.sh"]
|
||||
healthcheck:
|
||||
test: ["CMD", "pg_isready"]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
# get image from https://www.vastdata.com.cn/
|
||||
vastbase:
|
||||
image: vastdata/vastbase-vector
|
||||
profiles:
|
||||
- vastbase
|
||||
restart: always
|
||||
environment:
|
||||
- VB_DBCOMPATIBILITY=PG
|
||||
- VB_DB=dify
|
||||
- VB_USERNAME=dify
|
||||
- VB_PASSWORD=Difyai123456
|
||||
ports:
|
||||
- "5434:5432"
|
||||
volumes:
|
||||
- ./vastbase/lic:/home/vastbase/vastbase/lic
|
||||
- ./vastbase/data:/home/vastbase/data
|
||||
- ./vastbase/backup:/home/vastbase/backup
|
||||
- ./vastbase/backup_log:/home/vastbase/backup_log
|
||||
healthcheck:
|
||||
test: ["CMD", "pg_isready"]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
# pgvecto-rs vector store
|
||||
pgvecto-rs:
|
||||
image: tensorchord/pgvecto-rs:pg16-v0.3.0
|
||||
profiles:
|
||||
- pgvecto-rs
|
||||
restart: always
|
||||
environment:
|
||||
PGUSER: ${PGVECTOR_PGUSER:-postgres}
|
||||
# The password for the default postgres user.
|
||||
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
|
||||
# The name of the default postgres database.
|
||||
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
|
||||
# postgres data directory
|
||||
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
volumes:
|
||||
- ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD", "pg_isready"]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
# Chroma vector database
|
||||
chroma:
|
||||
image: ghcr.io/chroma-core/chroma:0.5.20
|
||||
profiles:
|
||||
- chroma
|
||||
restart: always
|
||||
volumes:
|
||||
- ./volumes/chroma:/chroma/chroma
|
||||
environment:
|
||||
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
|
||||
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
|
||||
IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
|
||||
|
||||
# Oracle vector database
|
||||
oracle:
|
||||
image: container-registry.oracle.com/database/free:latest
|
||||
profiles:
|
||||
- oracle
|
||||
restart: always
|
||||
volumes:
|
||||
- source: oradata
|
||||
type: volume
|
||||
target: /opt/oracle/oradata
|
||||
- ./startupscripts:/opt/oracle/scripts/startup
|
||||
environment:
|
||||
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
|
||||
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
|
||||
|
||||
# Milvus vector database services
|
||||
etcd:
|
||||
container_name: milvus-etcd
|
||||
image: quay.io/coreos/etcd:v3.5.5
|
||||
profiles:
|
||||
- milvus
|
||||
environment:
|
||||
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
|
||||
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
|
||||
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
|
||||
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
|
||||
volumes:
|
||||
- ./volumes/milvus/etcd:/etcd
|
||||
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
|
||||
healthcheck:
|
||||
test: ["CMD", "etcdctl", "endpoint", "health"]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
networks:
|
||||
- milvus
|
||||
|
||||
minio:
|
||||
container_name: milvus-minio
|
||||
image: minio/minio:RELEASE.2023-03-20T20-16-18Z
|
||||
profiles:
|
||||
- milvus
|
||||
environment:
|
||||
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
|
||||
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
|
||||
volumes:
|
||||
- ./volumes/milvus/minio:/minio_data
|
||||
command: minio server /minio_data --console-address ":9001"
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
networks:
|
||||
- milvus
|
||||
|
||||
milvus-standalone:
|
||||
container_name: milvus-standalone
|
||||
image: milvusdb/milvus:v2.6.3
|
||||
profiles:
|
||||
- milvus
|
||||
command: ["milvus", "run", "standalone"]
|
||||
environment:
|
||||
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
|
||||
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
|
||||
common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
|
||||
volumes:
|
||||
- ./volumes/milvus/milvus:/var/lib/milvus
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
|
||||
interval: 30s
|
||||
start_period: 90s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
depends_on:
|
||||
- etcd
|
||||
- minio
|
||||
ports:
|
||||
- 19530:19530
|
||||
- 9091:9091
|
||||
networks:
|
||||
- milvus
|
||||
|
||||
# Opensearch vector database
|
||||
opensearch:
|
||||
container_name: opensearch
|
||||
image: opensearchproject/opensearch:latest
|
||||
profiles:
|
||||
- opensearch
|
||||
environment:
|
||||
discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
|
||||
bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
|
||||
OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
|
||||
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
|
||||
hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
|
||||
nofile:
|
||||
soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
|
||||
hard: ${OPENSEARCH_NOFILE_HARD:-65536}
|
||||
volumes:
|
||||
- ./volumes/opensearch/data:/usr/share/opensearch/data
|
||||
networks:
|
||||
- opensearch-net
|
||||
|
||||
opensearch-dashboards:
|
||||
container_name: opensearch-dashboards
|
||||
image: opensearchproject/opensearch-dashboards:latest
|
||||
profiles:
|
||||
- opensearch
|
||||
environment:
|
||||
OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
|
||||
volumes:
|
||||
- ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
|
||||
networks:
|
||||
- opensearch-net
|
||||
depends_on:
|
||||
- opensearch
|
||||
|
||||
# opengauss vector database.
|
||||
opengauss:
|
||||
image: opengauss/opengauss:7.0.0-RC1
|
||||
profiles:
|
||||
- opengauss
|
||||
privileged: true
|
||||
restart: always
|
||||
environment:
|
||||
GS_USERNAME: ${OPENGAUSS_USER:-postgres}
|
||||
GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
|
||||
GS_PORT: ${OPENGAUSS_PORT:-6600}
|
||||
GS_DB: ${OPENGAUSS_DATABASE:-dify}
|
||||
volumes:
|
||||
- ./volumes/opengauss/data:/var/lib/opengauss/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"]
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
ports:
|
||||
- ${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600}
|
||||
|
||||
# MyScale vector database
|
||||
myscale:
|
||||
container_name: myscale
|
||||
image: myscale/myscaledb:1.6.4
|
||||
profiles:
|
||||
- myscale
|
||||
restart: always
|
||||
tty: true
|
||||
volumes:
|
||||
- ./volumes/myscale/data:/var/lib/clickhouse
|
||||
- ./volumes/myscale/log:/var/log/clickhouse-server
|
||||
- ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
|
||||
ports:
|
||||
- ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
|
||||
|
||||
# Matrixone vector store.
|
||||
matrixone:
|
||||
hostname: matrixone
|
||||
image: matrixorigin/matrixone:2.1.1
|
||||
profiles:
|
||||
- matrixone
|
||||
restart: always
|
||||
volumes:
|
||||
- ./volumes/matrixone/data:/mo-data
|
||||
ports:
|
||||
- ${MATRIXONE_PORT:-6001}:${MATRIXONE_PORT:-6001}
|
||||
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
|
||||
elasticsearch:
|
||||
image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
|
||||
container_name: elasticsearch
|
||||
profiles:
|
||||
- elasticsearch
|
||||
- elasticsearch-ja
|
||||
restart: always
|
||||
volumes:
|
||||
- ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
- dify_es01_data:/usr/share/elasticsearch/data
|
||||
environment:
|
||||
ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
|
||||
VECTOR_STORE: ${VECTOR_STORE:-}
|
||||
cluster.name: dify-es-cluster
|
||||
node.name: dify-es0
|
||||
discovery.type: single-node
|
||||
xpack.license.self_generated.type: basic
|
||||
xpack.security.enabled: "true"
|
||||
xpack.security.enrollment.enabled: "false"
|
||||
xpack.security.http.ssl.enabled: "false"
|
||||
ports:
|
||||
- ${ELASTICSEARCH_PORT:-9200}:9200
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 2g
|
||||
entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"]
|
||||
healthcheck:
|
||||
test:
|
||||
["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 50
|
||||
|
||||
# https://www.elastic.co/guide/en/kibana/current/docker.html
|
||||
# https://www.elastic.co/guide/en/kibana/current/settings.html
|
||||
kibana:
|
||||
image: docker.elastic.co/kibana/kibana:8.14.3
|
||||
container_name: kibana
|
||||
profiles:
|
||||
- elasticsearch
|
||||
depends_on:
|
||||
- elasticsearch
|
||||
restart: always
|
||||
environment:
|
||||
XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
|
||||
NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
|
||||
XPACK_SECURITY_ENABLED: "true"
|
||||
XPACK_SECURITY_ENROLLMENT_ENABLED: "false"
|
||||
XPACK_SECURITY_HTTP_SSL_ENABLED: "false"
|
||||
XPACK_FLEET_ISAIRGAPPED: "true"
|
||||
I18N_LOCALE: zh-CN
|
||||
SERVER_PORT: "5601"
|
||||
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
|
||||
ports:
|
||||
- ${KIBANA_PORT:-5601}:5601
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
# unstructured .
|
||||
# (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
|
||||
unstructured:
|
||||
image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
|
||||
profiles:
|
||||
- unstructured
|
||||
restart: always
|
||||
volumes:
|
||||
- ./volumes/unstructured:/app/data
|
||||
|
||||
networks:
|
||||
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
|
||||
ssrf_proxy_network:
|
||||
driver: bridge
|
||||
internal: true
|
||||
milvus:
|
||||
driver: bridge
|
||||
opensearch-net:
|
||||
driver: bridge
|
||||
internal: true
|
||||
|
||||
volumes:
|
||||
oradata:
|
||||
dify_es01_data:
|
||||
250
docker/dify/docker-compose.middleware.yaml
Normal file
250
docker/dify/docker-compose.middleware.yaml
Normal file
@@ -0,0 +1,250 @@
|
||||
services:
|
||||
# The postgres database.
|
||||
db_postgres:
|
||||
image: postgres:15-alpine
|
||||
profiles:
|
||||
- ""
|
||||
- postgresql
|
||||
restart: always
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
POSTGRES_PASSWORD: ${DB_PASSWORD:-difyai123456}
|
||||
POSTGRES_DB: ${DB_DATABASE:-dify}
|
||||
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
command: >
|
||||
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
|
||||
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
|
||||
-c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
|
||||
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
|
||||
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
|
||||
-c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
|
||||
-c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
|
||||
volumes:
|
||||
- ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data
|
||||
ports:
|
||||
- "${EXPOSE_POSTGRES_PORT:-5432}:5432"
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"pg_isready",
|
||||
"-h",
|
||||
"db_postgres",
|
||||
"-U",
|
||||
"${DB_USERNAME:-postgres}",
|
||||
"-d",
|
||||
"${DB_DATABASE:-dify}",
|
||||
]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
db_mysql:
|
||||
image: mysql:8.0
|
||||
profiles:
|
||||
- mysql
|
||||
restart: always
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456}
|
||||
MYSQL_DATABASE: ${DB_DATABASE:-dify}
|
||||
command: >
|
||||
--max_connections=1000
|
||||
--innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
|
||||
--innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
|
||||
--innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
|
||||
volumes:
|
||||
- ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
|
||||
ports:
|
||||
- "${EXPOSE_MYSQL_PORT:-3306}:3306"
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"mysqladmin",
|
||||
"ping",
|
||||
"-u",
|
||||
"root",
|
||||
"-p${DB_PASSWORD:-difyai123456}",
|
||||
]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
# The redis cache.
|
||||
redis:
|
||||
image: redis:6-alpine
|
||||
restart: always
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
|
||||
volumes:
|
||||
# Mount the redis data directory to the container.
|
||||
- ${REDIS_HOST_VOLUME:-./volumes/redis/data}:/data
|
||||
# Set the redis password when startup redis server.
|
||||
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
|
||||
ports:
|
||||
- "${EXPOSE_REDIS_PORT:-6379}:6379"
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
"redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
|
||||
]
|
||||
|
||||
# The DifySandbox
|
||||
sandbox:
|
||||
image: langgenius/dify-sandbox:0.2.12
|
||||
restart: always
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
# The DifySandbox configurations
|
||||
# Make sure you are changing this key for your deployment with a strong key.
|
||||
# You can generate a strong key using `openssl rand -base64 42`.
|
||||
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
|
||||
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
|
||||
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
|
||||
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
|
||||
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
|
||||
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
|
||||
volumes:
|
||||
- ./volumes/sandbox/dependencies:/dependencies
|
||||
- ./volumes/sandbox/conf:/conf
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
|
||||
# plugin daemon
|
||||
plugin_daemon:
|
||||
image: langgenius/dify-plugin-daemon:0.5.1-local
|
||||
restart: always
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
|
||||
REDIS_HOST: ${REDIS_HOST:-redis}
|
||||
REDIS_PORT: ${REDIS_PORT:-6379}
|
||||
REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
|
||||
SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
|
||||
SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
|
||||
MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
|
||||
DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://host.docker.internal:5001}
|
||||
DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
|
||||
PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
|
||||
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
|
||||
PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
|
||||
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
|
||||
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
|
||||
PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
|
||||
PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
|
||||
PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
|
||||
PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
|
||||
PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
|
||||
PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
|
||||
S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
|
||||
S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
|
||||
S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
|
||||
S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
|
||||
AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
|
||||
AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
|
||||
AWS_REGION: ${PLUGIN_AWS_REGION:-}
|
||||
AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
|
||||
AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
|
||||
TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
|
||||
TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
|
||||
TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
|
||||
ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
|
||||
ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
|
||||
ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
|
||||
ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
|
||||
ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
|
||||
ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
|
||||
VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
|
||||
VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
|
||||
VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
|
||||
VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
|
||||
THIRD_PARTY_SIGNATURE_VERIFICATION_ENABLED: true
|
||||
THIRD_PARTY_SIGNATURE_VERIFICATION_PUBLIC_KEYS: /app/keys/publickey.pem
|
||||
FORCE_VERIFYING_SIGNATURE: false
|
||||
ports:
|
||||
- "${EXPOSE_PLUGIN_DAEMON_PORT:-5002}:${PLUGIN_DAEMON_PORT:-5002}"
|
||||
- "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
|
||||
volumes:
|
||||
- ./volumes/plugin_daemon:/app/storage
|
||||
|
||||
# ssrf_proxy server
|
||||
# for more information, please refer to
|
||||
# https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
|
||||
ssrf_proxy:
|
||||
image: ubuntu/squid:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
|
||||
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
entrypoint:
|
||||
[
|
||||
"sh",
|
||||
"-c",
|
||||
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
|
||||
]
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
# pls clearly modify the squid env vars to fit your network environment.
|
||||
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
|
||||
COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
|
||||
REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
|
||||
SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
ports:
|
||||
- "${EXPOSE_SSRF_PROXY_PORT:-3128}:${SSRF_HTTP_PORT:-3128}"
|
||||
- "${EXPOSE_SANDBOX_PORT:-8194}:${SANDBOX_PORT:-8194}"
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
# The Weaviate vector store.
|
||||
weaviate:
|
||||
image: semitechnologies/weaviate:1.27.0
|
||||
profiles:
|
||||
- ""
|
||||
- weaviate
|
||||
restart: always
|
||||
volumes:
|
||||
# Mount the Weaviate data directory to the container.
|
||||
- ${WEAVIATE_HOST_VOLUME:-./volumes/weaviate}:/var/lib/weaviate
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
# The Weaviate configurations
|
||||
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
|
||||
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
|
||||
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
|
||||
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
|
||||
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
|
||||
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
|
||||
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
|
||||
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
|
||||
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
|
||||
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
|
||||
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
|
||||
DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
|
||||
ports:
|
||||
- "${EXPOSE_WEAVIATE_PORT:-8080}:8080"
|
||||
- "${EXPOSE_WEAVIATE_GRPC_PORT:-50051}:50051"
|
||||
|
||||
networks:
|
||||
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
|
||||
ssrf_proxy_network:
|
||||
driver: bridge
|
||||
internal: true
|
||||
BIN
docker/dify/docker-compose.png
Normal file
BIN
docker/dify/docker-compose.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 170 KiB |
1551
docker/dify/docker-compose.yaml
Normal file
1551
docker/dify/docker-compose.yaml
Normal file
File diff suppressed because it is too large
Load Diff
25
docker/dify/elasticsearch/docker-entrypoint.sh
Normal file
25
docker/dify/elasticsearch/docker-entrypoint.sh
Normal file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash

set -e

# Install an Elasticsearch plugin if it is not already present.
#   $1 - plugin id passed to elasticsearch-plugin
#   $2 - human-readable plugin description used in log messages
install_plugin() {
    if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q "$1"; then
        printf '%s\n' "Installing the $2"
        if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install "$1"; then
            printf '%s\n' "Failed to install the $2"
            exit 1
        fi
    fi
}

# Japanese full-text search needs the ICU tokenizer and kuromoji analyzers.
if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then
    install_plugin analysis-icu "ICU tokenizer plugin"
    install_plugin analysis-kuromoji "Japanese language analyzer plugin"
fi

# Hand off to the stock Elasticsearch entrypoint under tini as PID 1.
exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh
|
||||
112
docker/dify/generate_docker_compose
Normal file
112
docker/dify/generate_docker_compose
Normal file
@@ -0,0 +1,112 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
def parse_env_example(file_path):
    """Parse a ``.env``-style file into a dict of variable name -> default value.

    Blank lines and comment lines (starting with ``#``) are skipped.  Values
    wrapped in a matching pair of single or double quotes have the quotes
    stripped.  Lines that do not look like ``KEY=VALUE`` produce a warning
    and are ignored.
    """
    env_vars = {}
    # Explicit encoding so parsing does not depend on the host locale.
    with open(file_path, "r", encoding="utf-8") as f:
        for line_number, line in enumerate(f, 1):
            line = line.strip()
            # Ignore empty lines and comments
            if not line or line.startswith("#"):
                continue
            # KEY=VALUE: the key is everything before the first '='.
            match = re.match(r"^([^=]+)=(.*)$", line)
            if match:
                key = match.group(1).strip()
                value = match.group(2).strip()
                # Strip one matching pair of surrounding quotes.  The length
                # guard prevents a lone quote character (e.g. `KEY="`) from
                # being reduced to an empty string.
                if len(value) >= 2 and (
                    (value.startswith('"') and value.endswith('"'))
                    or (value.startswith("'") and value.endswith("'"))
                ):
                    value = value[1:-1]
                env_vars[key] = value
            else:
                print(f"Warning: Unable to parse line {line_number}: {line}")
    return env_vars
|
||||
|
||||
|
||||
def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"):
    """Render env vars as a YAML anchor block of compose interpolations.

    Each variable becomes a line of the form ``KEY: ${KEY:-default}`` so a
    value from the caller's environment overrides the ``.env.example``
    default.  ``COMPOSE_PROFILES`` is skipped because it configures docker
    compose itself and must not be forwarded into the containers.
    """
    lines = [f"x-shared-env: &{anchor_name}"]
    for key, default in env_vars.items():
        if key == "COMPOSE_PROFILES":
            continue
        # Defaults are emitted verbatim (an empty default yields `${KEY:-}`).
        # No quoting is added: quotes inside `${KEY:-...}` would become part
        # of the literal value during compose interpolation.  (A previous
        # revision had a "wrap in quotes" branch that was a no-op.)
        lines.append(f"  {key}: ${{{key}:-{default}}}")
    return "\n".join(lines)
|
||||
|
||||
|
||||
def insert_shared_env(template_path, output_path, shared_env_block, header_comments):
    """Write ``output_path`` = header comments + shared env block + template.

    Any pre-existing ``x-shared-env: &shared-api-worker-env`` anchor line in
    the template is stripped first so that regenerating the file stays
    idempotent.
    """
    # Explicit encoding on both handles so the generated file is stable
    # across host locales.
    with open(template_path, "r", encoding="utf-8") as f:
        template_content = f.read()

    # Remove a stale anchor line left over from a previous generation run.
    template_content = re.sub(
        r"^x-shared-env: &shared-api-worker-env\s*\n?",
        "",
        template_content,
        flags=re.MULTILINE,
    )

    # Prepare the final content with header comments and shared env block
    final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}"

    with open(output_path, "w", encoding="utf-8") as f:
        f.write(final_content)
    print(f"Generated {output_path}")
|
||||
|
||||
|
||||
def main():
    """Regenerate docker-compose.yaml from .env.example and the template."""
    # Input/output locations, relative to the current working directory.
    env_example_path = ".env.example"
    template_path = "docker-compose-template.yaml"
    output_path = "docker-compose.yaml"
    anchor_name = "shared-api-worker-env"  # Can be modified as needed

    # Banner prepended to the generated file to discourage manual edits.
    header_comments = (
        "# ==================================================================\n"
        "# WARNING: This file is auto-generated by generate_docker_compose\n"
        "# Do not modify this file directly. Instead, update the .env.example\n"
        "# or docker-compose-template.yaml and regenerate this file.\n"
        "# ==================================================================\n"
    )

    # Bail out before doing any work if an input file is absent.
    for required in (env_example_path, template_path):
        if not os.path.isfile(required):
            print(f"Error: File {required} does not exist.")
            sys.exit(1)

    # Collect KEY=default pairs from .env.example.
    env_vars = parse_env_example(env_example_path)
    if not env_vars:
        print("Warning: No environment variables found in .env.example.")

    # Build the shared anchor block and splice it into the template.
    shared_env_block = generate_shared_env_block(env_vars, anchor_name)
    insert_shared_env(template_path, output_path, shared_env_block, header_comments)
|
||||
|
||||
|
||||
# Entry point when executed as a script; a plain import has no side effects.
if __name__ == "__main__":
    main()
|
||||
215
docker/dify/middleware.env.example
Normal file
215
docker/dify/middleware.env.example
Normal file
@@ -0,0 +1,215 @@
|
||||
# ------------------------------
|
||||
# Environment Variables for db Service
|
||||
# ------------------------------
|
||||
# Database Configuration
|
||||
# Database type, supported values are `postgresql` and `mysql`
|
||||
DB_TYPE=postgresql
|
||||
# For MySQL, only `root` user is supported for now
|
||||
DB_USERNAME=postgres
|
||||
DB_PASSWORD=difyai123456
|
||||
DB_HOST=db_postgres
|
||||
DB_PORT=5432
|
||||
DB_DATABASE=dify
|
||||
|
||||
# PostgreSQL Configuration
|
||||
# postgres data directory
|
||||
PGDATA=/var/lib/postgresql/data/pgdata
|
||||
PGDATA_HOST_VOLUME=./volumes/db/data
|
||||
|
||||
# Maximum number of connections to the database
|
||||
# Default is 100
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
|
||||
POSTGRES_MAX_CONNECTIONS=100
|
||||
|
||||
# Sets the amount of shared memory used for postgres's shared buffers.
|
||||
# Default is 128MB
|
||||
# Recommended value: 25% of available memory
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
|
||||
POSTGRES_SHARED_BUFFERS=128MB
|
||||
|
||||
# Sets the amount of memory used by each database worker for working space.
|
||||
# Default is 4MB
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
|
||||
POSTGRES_WORK_MEM=4MB
|
||||
|
||||
# Sets the amount of memory reserved for maintenance activities.
|
||||
# Default is 64MB
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
|
||||
POSTGRES_MAINTENANCE_WORK_MEM=64MB
|
||||
|
||||
# Sets the planner's assumption about the effective cache size.
|
||||
# Default is 4096MB
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
|
||||
POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
|
||||
|
||||
# Sets the maximum allowed duration of any statement before termination.
|
||||
# Default is 0 (no timeout).
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT
|
||||
# A value of 0 prevents the server from timing out statements.
|
||||
POSTGRES_STATEMENT_TIMEOUT=0
|
||||
|
||||
# Sets the maximum allowed duration of any idle in-transaction session before termination.
|
||||
# Default is 0 (no timeout).
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT
|
||||
# A value of 0 prevents the server from terminating idle sessions.
|
||||
POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0
|
||||
|
||||
# MySQL Configuration
|
||||
# MySQL data directory host volume
|
||||
MYSQL_HOST_VOLUME=./volumes/mysql/data
|
||||
|
||||
# MySQL Performance Configuration
|
||||
# Maximum number of connections to MySQL
|
||||
# Default is 1000
|
||||
MYSQL_MAX_CONNECTIONS=1000
|
||||
|
||||
# InnoDB buffer pool size
|
||||
# Default is 512M
|
||||
# Recommended value: 70-80% of available memory for dedicated MySQL server
|
||||
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size
|
||||
MYSQL_INNODB_BUFFER_POOL_SIZE=512M
|
||||
|
||||
# InnoDB log file size
|
||||
# Default is 128M
|
||||
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size
|
||||
MYSQL_INNODB_LOG_FILE_SIZE=128M
|
||||
|
||||
# InnoDB flush log at transaction commit
|
||||
# Default is 2 (flush to OS cache, sync every second)
|
||||
# Options: 0 (no flush), 1 (flush and sync), 2 (flush to OS cache)
|
||||
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit
|
||||
MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2
|
||||
|
||||
# -----------------------------
|
||||
# Environment Variables for redis Service
|
||||
# -----------------------------
|
||||
REDIS_HOST_VOLUME=./volumes/redis/data
|
||||
REDIS_PASSWORD=difyai123456
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for sandbox Service
|
||||
# ------------------------------
|
||||
SANDBOX_API_KEY=dify-sandbox
|
||||
SANDBOX_GIN_MODE=release
|
||||
SANDBOX_WORKER_TIMEOUT=15
|
||||
SANDBOX_ENABLE_NETWORK=true
|
||||
SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
|
||||
SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
|
||||
SANDBOX_PORT=8194
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for ssrf_proxy Service
|
||||
# ------------------------------
|
||||
SSRF_HTTP_PORT=3128
|
||||
SSRF_COREDUMP_DIR=/var/spool/squid
|
||||
SSRF_REVERSE_PROXY_PORT=8194
|
||||
SSRF_SANDBOX_HOST=sandbox
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for weaviate Service
|
||||
# ------------------------------
|
||||
WEAVIATE_QUERY_DEFAULTS_LIMIT=25
|
||||
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
|
||||
WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
|
||||
WEAVIATE_CLUSTER_HOSTNAME=node1
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
|
||||
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
|
||||
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
|
||||
WEAVIATE_DISABLE_TELEMETRY=false
|
||||
WEAVIATE_HOST_VOLUME=./volumes/weaviate
|
||||
|
||||
# ------------------------------
|
||||
# Docker Compose profile configuration
|
||||
# ------------------------------
|
||||
# Loaded automatically when running `docker compose --env-file middleware.env ...`.
|
||||
# Controls which DB/vector services start, so no extra `--profile` flag is needed.
|
||||
COMPOSE_PROFILES=${DB_TYPE:-postgresql},weaviate
|
||||
|
||||
# ------------------------------
|
||||
# Docker Compose Service Expose Host Port Configurations
|
||||
# ------------------------------
|
||||
EXPOSE_POSTGRES_PORT=5432
|
||||
EXPOSE_MYSQL_PORT=3306
|
||||
EXPOSE_REDIS_PORT=6379
|
||||
EXPOSE_SANDBOX_PORT=8194
|
||||
EXPOSE_SSRF_PROXY_PORT=3128
|
||||
EXPOSE_WEAVIATE_PORT=8080
|
||||
|
||||
# ------------------------------
|
||||
# Plugin Daemon Configuration
|
||||
# ------------------------------
|
||||
|
||||
DB_PLUGIN_DATABASE=dify_plugin
|
||||
EXPOSE_PLUGIN_DAEMON_PORT=5002
|
||||
PLUGIN_DAEMON_PORT=5002
|
||||
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
|
||||
PLUGIN_DAEMON_URL=http://host.docker.internal:5002
|
||||
PLUGIN_MAX_PACKAGE_SIZE=52428800
|
||||
PLUGIN_PPROF_ENABLED=false
|
||||
PLUGIN_WORKING_PATH=/app/storage/cwd
|
||||
|
||||
ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}
|
||||
|
||||
PLUGIN_DEBUGGING_PORT=5003
|
||||
PLUGIN_DEBUGGING_HOST=0.0.0.0
|
||||
EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
|
||||
EXPOSE_PLUGIN_DEBUGGING_PORT=5003
|
||||
|
||||
PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
|
||||
PLUGIN_DIFY_INNER_API_URL=http://host.docker.internal:5001
|
||||
|
||||
MARKETPLACE_ENABLED=true
|
||||
MARKETPLACE_API_URL=https://marketplace.dify.ai
|
||||
|
||||
FORCE_VERIFYING_SIGNATURE=true
|
||||
|
||||
PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120
|
||||
PLUGIN_MAX_EXECUTION_TIMEOUT=600
|
||||
# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
PIP_MIRROR_URL=
|
||||
|
||||
# https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example
|
||||
# Plugin storage type, local aws_s3 tencent_cos azure_blob
|
||||
PLUGIN_STORAGE_TYPE=local
|
||||
PLUGIN_STORAGE_LOCAL_ROOT=/app/storage
|
||||
PLUGIN_WORKING_PATH=/app/storage/cwd
|
||||
PLUGIN_INSTALLED_PATH=plugin
|
||||
PLUGIN_PACKAGE_CACHE_PATH=plugin_packages
|
||||
PLUGIN_MEDIA_CACHE_PATH=assets
|
||||
# Plugin oss bucket
|
||||
PLUGIN_STORAGE_OSS_BUCKET=
|
||||
# Plugin oss s3 credentials
|
||||
PLUGIN_S3_USE_AWS_MANAGED_IAM=false
|
||||
PLUGIN_S3_USE_AWS=false
|
||||
PLUGIN_S3_ENDPOINT=
|
||||
PLUGIN_S3_USE_PATH_STYLE=false
|
||||
PLUGIN_AWS_ACCESS_KEY=
|
||||
PLUGIN_AWS_SECRET_KEY=
|
||||
PLUGIN_AWS_REGION=
|
||||
# Plugin oss azure blob
|
||||
PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME=
|
||||
PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING=
|
||||
# Plugin oss tencent cos
|
||||
PLUGIN_TENCENT_COS_SECRET_KEY=
|
||||
PLUGIN_TENCENT_COS_SECRET_ID=
|
||||
PLUGIN_TENCENT_COS_REGION=
|
||||
# Plugin oss aliyun oss
|
||||
PLUGIN_ALIYUN_OSS_REGION=
|
||||
PLUGIN_ALIYUN_OSS_ENDPOINT=
|
||||
PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID=
|
||||
PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET=
|
||||
PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4
|
||||
PLUGIN_ALIYUN_OSS_PATH=
|
||||
# Plugin oss volcengine tos
|
||||
PLUGIN_VOLCENGINE_TOS_ENDPOINT=
|
||||
PLUGIN_VOLCENGINE_TOS_ACCESS_KEY=
|
||||
PLUGIN_VOLCENGINE_TOS_SECRET_KEY=
|
||||
PLUGIN_VOLCENGINE_TOS_REGION=
|
||||
58
docker/dify/nginx/conf.d/default.conf
Normal file
58
docker/dify/nginx/conf.d/default.conf
Normal file
@@ -0,0 +1,58 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
|
||||
location /console/api {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /api {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /v1 {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /files {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /explore {
|
||||
proxy_pass http://web:3000;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /e/ {
|
||||
proxy_pass http://plugin_daemon:5002;
|
||||
proxy_set_header Dify-Hook-Url $scheme://$host$request_uri;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://web:3000;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /mcp {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /triggers {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
# placeholder for acme challenge location
|
||||
|
||||
|
||||
# placeholder for https config defined in https.conf.template
|
||||
|
||||
}
|
||||
58
docker/dify/nginx/conf.d/default.conf.template
Normal file
58
docker/dify/nginx/conf.d/default.conf.template
Normal file
@@ -0,0 +1,58 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
server {
|
||||
listen ${NGINX_PORT};
|
||||
server_name ${NGINX_SERVER_NAME};
|
||||
|
||||
location /console/api {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /api {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /v1 {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /files {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /explore {
|
||||
proxy_pass http://web:3000;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /e/ {
|
||||
proxy_pass http://plugin_daemon:5002;
|
||||
proxy_set_header Dify-Hook-Url $scheme://$host$request_uri;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://web:3000;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /mcp {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /triggers {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
# placeholder for acme challenge location
|
||||
${ACME_CHALLENGE_LOCATION}
|
||||
|
||||
# placeholder for https config defined in https.conf.template
|
||||
${HTTPS_CONFIG}
|
||||
}
|
||||
42
docker/dify/nginx/docker-entrypoint.sh
Normal file
42
docker/dify/nginx/docker-entrypoint.sh
Normal file
@@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
|
||||
HTTPS_CONFIG=''
|
||||
|
||||
if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then
|
||||
# Check if the certificate and key files for the specified domain exist
|
||||
if [ -n "${CERTBOT_DOMAIN}" ] && \
|
||||
[ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \
|
||||
[ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then
|
||||
SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}"
|
||||
SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}"
|
||||
else
|
||||
SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}"
|
||||
SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}"
|
||||
fi
|
||||
export SSL_CERTIFICATE_PATH
|
||||
export SSL_CERTIFICATE_KEY_PATH
|
||||
|
||||
# set the HTTPS_CONFIG environment variable to the content of the https.conf.template
|
||||
HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
|
||||
export HTTPS_CONFIG
|
||||
# Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template
|
||||
envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
|
||||
fi
|
||||
export HTTPS_CONFIG
|
||||
|
||||
if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then
|
||||
ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }'
|
||||
else
|
||||
ACME_CHALLENGE_LOCATION=''
|
||||
fi
|
||||
export ACME_CHALLENGE_LOCATION
|
||||
|
||||
env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)
|
||||
|
||||
envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
|
||||
envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf
|
||||
|
||||
envsubst "$env_vars" < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
|
||||
|
||||
# Start Nginx using the default entrypoint
|
||||
exec nginx -g 'daemon off;'
|
||||
9
docker/dify/nginx/https.conf.template
Normal file
9
docker/dify/nginx/https.conf.template
Normal file
@@ -0,0 +1,9 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
listen ${NGINX_SSL_PORT} ssl;
|
||||
ssl_certificate ${SSL_CERTIFICATE_PATH};
|
||||
ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH};
|
||||
ssl_protocols ${NGINX_SSL_PROTOCOLS};
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_session_cache shared:SSL:10m;
|
||||
ssl_session_timeout 10m;
|
||||
34
docker/dify/nginx/nginx.conf.template
Normal file
34
docker/dify/nginx/nginx.conf.template
Normal file
@@ -0,0 +1,34 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
user nginx;
|
||||
worker_processes ${NGINX_WORKER_PROCESSES};
|
||||
|
||||
error_log /var/log/nginx/error.log notice;
|
||||
pid /var/run/nginx.pid;
|
||||
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
|
||||
access_log /var/log/nginx/access.log main;
|
||||
|
||||
sendfile on;
|
||||
#tcp_nopush on;
|
||||
|
||||
keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};
|
||||
|
||||
#gzip on;
|
||||
client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};
|
||||
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
}
|
||||
11
docker/dify/nginx/proxy.conf.template
Normal file
11
docker/dify/nginx/proxy.conf.template
Normal file
@@ -0,0 +1,11 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
proxy_buffering off;
|
||||
proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
|
||||
proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};
|
||||
0
docker/dify/nginx/ssl/.gitkeep
Normal file
0
docker/dify/nginx/ssl/.gitkeep
Normal file
24
docker/dify/pgvector/docker-entrypoint.sh
Normal file
24
docker/dify/pgvector/docker-entrypoint.sh
Normal file
@@ -0,0 +1,24 @@
|
||||
#!/bin/bash
|
||||
|
||||
PG_MAJOR=16
|
||||
|
||||
if [ "${PG_BIGM}" = "true" ]; then
|
||||
# install pg_bigm
|
||||
apt-get update
|
||||
apt-get install -y curl make gcc postgresql-server-dev-${PG_MAJOR}
|
||||
|
||||
curl -LO https://github.com/pgbigm/pg_bigm/archive/refs/tags/v${PG_BIGM_VERSION}.tar.gz
|
||||
tar xf v${PG_BIGM_VERSION}.tar.gz
|
||||
cd pg_bigm-${PG_BIGM_VERSION} || exit 1
|
||||
make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config
|
||||
make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config install
|
||||
|
||||
cd - || exit 1
|
||||
rm -rf v${PG_BIGM_VERSION}.tar.gz pg_bigm-${PG_BIGM_VERSION}
|
||||
|
||||
# enable pg_bigm
|
||||
sed -i -e 's/^#\s*shared_preload_libraries.*/shared_preload_libraries = '\''pg_bigm'\''/' /var/lib/postgresql/data/pgdata/postgresql.conf
|
||||
fi
|
||||
|
||||
# Run the original entrypoint script
|
||||
exec /usr/local/bin/docker-entrypoint.sh postgres
|
||||
42
docker/dify/ssrf_proxy/docker-entrypoint.sh
Normal file
42
docker/dify/ssrf_proxy/docker-entrypoint.sh
Normal file
@@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Modified based on Squid OCI image entrypoint
|
||||
|
||||
# This entrypoint aims to forward the squid logs to stdout to assist users of
|
||||
# common container related tooling (e.g., kubernetes, docker-compose, etc) to
|
||||
# access the service logs.
|
||||
|
||||
# Moreover, it invokes the squid binary, leaving all the desired parameters to
|
||||
# be provided by the "command" passed to the spawned container. If no command
|
||||
# is provided by the user, the default behavior (as per the CMD statement in
|
||||
# the Dockerfile) will be to use Ubuntu's default configuration [1] and run
|
||||
# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
|
||||
# systemd unit.
|
||||
|
||||
# [1] The default configuration is changed in the Dockerfile to allow local
|
||||
# network connections. See the Dockerfile for further information.
|
||||
|
||||
echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
|
||||
if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
|
||||
/usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
|
||||
fi
|
||||
|
||||
tail -F /var/log/squid/access.log 2>/dev/null &
|
||||
tail -F /var/log/squid/error.log 2>/dev/null &
|
||||
tail -F /var/log/squid/store.log 2>/dev/null &
|
||||
tail -F /var/log/squid/cache.log 2>/dev/null &
|
||||
|
||||
# Replace environment variables in the template and output to the squid.conf
|
||||
echo "[ENTRYPOINT] replacing environment variables in the template"
|
||||
awk '{
|
||||
while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
|
||||
var = substr($0, RSTART+2, RLENGTH-3)
|
||||
val = ENVIRON[var]
|
||||
$0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
|
||||
}
|
||||
print
|
||||
}' /etc/squid/squid.conf.template > /etc/squid/squid.conf
|
||||
|
||||
/usr/sbin/squid -Nz
|
||||
echo "[ENTRYPOINT] starting squid"
|
||||
/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
|
||||
56
docker/dify/ssrf_proxy/squid.conf.template
Normal file
56
docker/dify/ssrf_proxy/squid.conf.template
Normal file
@@ -0,0 +1,56 @@
|
||||
acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
|
||||
acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
|
||||
acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
|
||||
acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
|
||||
acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
|
||||
acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
|
||||
acl localnet src fc00::/7 # RFC 4193 local private network range
|
||||
acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
|
||||
acl SSL_ports port 443
|
||||
# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792
|
||||
acl Safe_ports port 80 # http
|
||||
acl Safe_ports port 21 # ftp
|
||||
acl Safe_ports port 443 # https
|
||||
acl Safe_ports port 70 # gopher
|
||||
acl Safe_ports port 210 # wais
|
||||
acl Safe_ports port 1025-65535 # unregistered ports
|
||||
acl Safe_ports port 280 # http-mgmt
|
||||
acl Safe_ports port 488 # gss-http
|
||||
acl Safe_ports port 591 # filemaker
|
||||
acl Safe_ports port 777 # multiling http
|
||||
acl CONNECT method CONNECT
|
||||
acl allowed_domains dstdomain .marketplace.dify.ai
|
||||
http_access allow allowed_domains
|
||||
http_access deny !Safe_ports
|
||||
http_access deny CONNECT !SSL_ports
|
||||
http_access allow localhost manager
|
||||
http_access deny manager
|
||||
http_access allow localhost
|
||||
include /etc/squid/conf.d/*.conf
|
||||
http_access deny all
|
||||
|
||||
################################## Proxy Server ################################
|
||||
http_port ${HTTP_PORT}
|
||||
coredump_dir ${COREDUMP_DIR}
|
||||
refresh_pattern ^ftp: 1440 20% 10080
|
||||
refresh_pattern ^gopher: 1440 0% 1440
|
||||
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
|
||||
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
|
||||
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
|
||||
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
|
||||
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
|
||||
refresh_pattern . 0 20% 4320
|
||||
|
||||
|
||||
# cache_dir ufs /var/spool/squid 100 16 256
|
||||
# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
|
||||
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
|
||||
|
||||
################################## Reverse Proxy To Sandbox ################################
|
||||
http_port ${REVERSE_PROXY_PORT} accel vhost
|
||||
cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
|
||||
acl src_all src all
|
||||
http_access allow src_all
|
||||
|
||||
# Unless the option's size is increased, an error will occur when uploading more than two files.
|
||||
client_request_buffer_max_size 100 MB
|
||||
13
docker/dify/startupscripts/init.sh
Normal file
13
docker/dify/startupscripts/init.sh
Normal file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
DB_INITIALIZED="/opt/oracle/oradata/dbinit"
|
||||
#[ -f ${DB_INITIALIZED} ] && exit
|
||||
#touch ${DB_INITIALIZED}
|
||||
if [ -f ${DB_INITIALIZED} ]; then
|
||||
echo 'File exists. Standards for have been Init'
|
||||
exit
|
||||
else
|
||||
echo 'File does not exist. Standards for first time Start up this DB'
|
||||
"$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script";
|
||||
touch ${DB_INITIALIZED}
|
||||
fi
|
||||
10
docker/dify/startupscripts/init_user.script
Normal file
10
docker/dify/startupscripts/init_user.script
Normal file
@@ -0,0 +1,10 @@
|
||||
show pdbs;
|
||||
ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
|
||||
alter session set container= freepdb1;
|
||||
create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
|
||||
grant DB_DEVELOPER_ROLE to dify;
|
||||
|
||||
BEGIN
|
||||
CTX_DDL.CREATE_PREFERENCE('dify.world_lexer','WORLD_LEXER');
|
||||
END;
|
||||
/
|
||||
4
docker/dify/tidb/config/pd.toml
Normal file
4
docker/dify/tidb/config/pd.toml
Normal file
@@ -0,0 +1,4 @@
|
||||
# PD Configuration File reference:
|
||||
# https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file
|
||||
[replication]
|
||||
max-replicas = 1
|
||||
13
docker/dify/tidb/config/tiflash-learner.toml
Normal file
13
docker/dify/tidb/config/tiflash-learner.toml
Normal file
@@ -0,0 +1,13 @@
|
||||
# TiFlash tiflash-learner.toml Configuration File reference:
|
||||
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file
|
||||
|
||||
log-file = "/logs/tiflash_tikv.log"
|
||||
|
||||
[server]
|
||||
engine-addr = "tiflash:4030"
|
||||
addr = "0.0.0.0:20280"
|
||||
advertise-addr = "tiflash:20280"
|
||||
status-addr = "tiflash:20292"
|
||||
|
||||
[storage]
|
||||
data-dir = "/data/flash"
|
||||
19
docker/dify/tidb/config/tiflash.toml
Normal file
19
docker/dify/tidb/config/tiflash.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
# TiFlash tiflash.toml Configuration File reference:
|
||||
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file
|
||||
|
||||
listen_host = "0.0.0.0"
|
||||
path = "/data"
|
||||
|
||||
[flash]
|
||||
tidb_status_addr = "tidb:10080"
|
||||
service_addr = "tiflash:4030"
|
||||
|
||||
[flash.proxy]
|
||||
config = "/tiflash-learner.toml"
|
||||
|
||||
[logger]
|
||||
errorlog = "/logs/tiflash_error.log"
|
||||
log = "/logs/tiflash.log"
|
||||
|
||||
[raft]
|
||||
pd_addr = "pd0:2379"
|
||||
63
docker/dify/tidb/docker-compose.yaml
Normal file
63
docker/dify/tidb/docker-compose.yaml
Normal file
@@ -0,0 +1,63 @@
|
||||
services:
|
||||
pd0:
|
||||
image: pingcap/pd:v8.5.1
|
||||
# ports:
|
||||
# - "2379"
|
||||
volumes:
|
||||
- ./config/pd.toml:/pd.toml:ro
|
||||
- ./volumes/data:/data
|
||||
- ./volumes/logs:/logs
|
||||
command:
|
||||
- --name=pd0
|
||||
- --client-urls=http://0.0.0.0:2379
|
||||
- --peer-urls=http://0.0.0.0:2380
|
||||
- --advertise-client-urls=http://pd0:2379
|
||||
- --advertise-peer-urls=http://pd0:2380
|
||||
- --initial-cluster=pd0=http://pd0:2380
|
||||
- --data-dir=/data/pd
|
||||
- --config=/pd.toml
|
||||
- --log-file=/logs/pd.log
|
||||
restart: on-failure
|
||||
tikv:
|
||||
image: pingcap/tikv:v8.5.1
|
||||
volumes:
|
||||
- ./volumes/data:/data
|
||||
- ./volumes/logs:/logs
|
||||
command:
|
||||
- --addr=0.0.0.0:20160
|
||||
- --advertise-addr=tikv:20160
|
||||
- --status-addr=tikv:20180
|
||||
- --data-dir=/data/tikv
|
||||
- --pd=pd0:2379
|
||||
- --log-file=/logs/tikv.log
|
||||
depends_on:
|
||||
- "pd0"
|
||||
restart: on-failure
|
||||
tidb:
|
||||
image: pingcap/tidb:v8.5.1
|
||||
# ports:
|
||||
# - "4000:4000"
|
||||
volumes:
|
||||
- ./volumes/logs:/logs
|
||||
command:
|
||||
- --advertise-address=tidb
|
||||
- --store=tikv
|
||||
- --path=pd0:2379
|
||||
- --log-file=/logs/tidb.log
|
||||
depends_on:
|
||||
- "tikv"
|
||||
restart: on-failure
|
||||
tiflash:
|
||||
image: pingcap/tiflash:v8.5.1
|
||||
volumes:
|
||||
- ./config/tiflash.toml:/tiflash.toml:ro
|
||||
- ./config/tiflash-learner.toml:/tiflash-learner.toml:ro
|
||||
- ./volumes/data:/data
|
||||
- ./volumes/logs:/logs
|
||||
command:
|
||||
- server
|
||||
- --config-file=/tiflash.toml
|
||||
depends_on:
|
||||
- "tikv"
|
||||
- "tidb"
|
||||
restart: on-failure
|
||||
0
docker/dify/volumes/app/storage/.init_permissions
Normal file
0
docker/dify/volumes/app/storage/.init_permissions
Normal file
@@ -0,0 +1,27 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAzviset7ubYZgOjpGZTLgI01lkJ1b54EZCctU+KKd3VJ9B/su
|
||||
cCTZ1ql7Fs5B4shZUu2MQOlO2twDC3gxW8ENj+yyU+r8toukxar2Iie3Qsp5tcJH
|
||||
mWsq7Imo5RmUGLLwDV8AU0xRc4bRD5YGwiwZcRPODpgQBpnsKKoPNyvCeudEPr2S
|
||||
gs63aQCDyv1Ny9jLTjvvHr2k531M5wChzT5miu0r0R+Hjrah0XnbRlQUubqHJOLH
|
||||
FFp+XJowd0KAN26fjpEbFY2FwRa5POlbc1e2PEu+s3WJa0Iz5GQbhcOJ7ppA9Rft
|
||||
k2CEAoNHfkbZcAxsySa++8+1Nq4hLFJubDYnWwIDAQABAoIBABEJJ5acKj86SUoM
|
||||
U+ZZMAPCh3Kbe94WootLgOdPk0jiqkdG5dqgiIxXujmf/oz+dVkTE8agAuKW1hVS
|
||||
mQAhaCQJlTVnvnypX9r5UyBv6Rtb88qsfseM/6UlQOaimq2xxM/aRaTq6jq6b74l
|
||||
ria7Xy4PNZhpQcsZTkvxO3wMuQmelkkuhqbQF2fpNTTIZfbEehJatOTWIcWWkqRo
|
||||
mZkKd4pHluaXDvczFFo31jSJ4jzrJS11Tx1s21rM4Iq2W+3B6nEVfvHaFqWMHve7
|
||||
+22ZWUUiMJbIpr7hy0bSBjY78HdR/EcQqPqT7uEawFaft8ybMP5AkuLEG9I6VtWq
|
||||
gfPX/ckCgYEA3XbHuDP6Yzlc9tXapQEulM82B3mDtKAX72AqYGEpvZInrge6Fjwq
|
||||
SdMGQHP/VsLq7t78g9negYQjT6J0qJ2V6s/1iVyr6uU3IQ06Q7+8xp6AQPSFShJk
|
||||
o2nnODuvVMwwwH94ga1ntfPdzhYKfJ2E9Cbbn+NUZB/uxnsAxTTGGNcCgYEA7z9S
|
||||
/M8eUfCOEBEC9EGuto8/hZ7Zmm0fuAdKx0zZzSVsKxeHUQoQJ1YQfnjKdlxNv1FI
|
||||
K9g7wcMEvkUv/CMx7pQ/R4eyvbGlxGRAy1jUhgx1hxifBeYKd5j5BMAQ8n3/uRLR
|
||||
3czW7i8Xzy0Z97kfy9ZHllbnbZQvYceDQY2ngR0CgYEA272Wz4PrhnaNcnGg/6Yb
|
||||
lRXA80uzzg4nkaGminj+FJYnfvs1vgg2oYecJWmzSmAfAaMJhL4xk+sLqmF0RTYP
|
||||
Yay0YXYtMuX6fHYGgOCHq7/rXAgDFQsJBkf3X4mBH693FBmm9WycgiKVy319LEaT
|
||||
fDnuI0hKHQq+PfwgOIxznoMCgYBGj9WpDGkz34/2ux207p5Z55vji/s/6YleXIA4
|
||||
kbj3IV9qwjC9vQsQnviUZInKKOUhVRbFH6xYNG10tLE3WyYB6KVVMneMLRXn+22E
|
||||
y/k6Y/hR1Pgp5DQYG/zt4rvc2T6P1jYhhaWqcCdEilfLUUVIeDV66Lyu+/KRkydq
|
||||
MR5DHQKBgHn3HNAiHfLVef7+unoYu8ak6uqj5V74IJZdvcNZ4jUz7onk2o3h7pj0
|
||||
9xzWoakzv1ypK6rVBYr+2U4Mn728kz90rR9VV05YwbMQBcBH0gmBfGdbUjI9PH/+
|
||||
iUoxHK0G55ArzmZFtRGUpmEFak4xTpM48Wn5VApe9kMt3yX+FBbD
|
||||
-----END RSA PRIVATE KEY-----
|
||||
@@ -0,0 +1,2 @@
|
||||
|
||||
开发人员:AAAA
|
||||
@@ -0,0 +1,4 @@
|
||||
|
||||
http://localhost/platform/admin/platform
|
||||
|
||||
http://localhost:5173/datacenter
|
||||
1
docker/dify/volumes/db/data/pgdata/PG_VERSION
Normal file
1
docker/dify/volumes/db/data/pgdata/PG_VERSION
Normal file
@@ -0,0 +1 @@
|
||||
15
|
||||
BIN
docker/dify/volumes/db/data/pgdata/base/1/112
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/112
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/113
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/113
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1247
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1247
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1247_fsm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1247_fsm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1247_vm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1247_vm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1249
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1249
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1249_fsm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1249_fsm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1249_vm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1249_vm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1255
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1255
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1255_fsm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1255_fsm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1255_vm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1255_vm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1259
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1259
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1259_fsm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1259_fsm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/1259_vm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/1259_vm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13454
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13454
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13454_fsm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13454_fsm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13454_vm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13454_vm
Normal file
Binary file not shown.
0
docker/dify/volumes/db/data/pgdata/base/1/13457
Normal file
0
docker/dify/volumes/db/data/pgdata/base/1/13457
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13458
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13458
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13459
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13459
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13459_fsm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13459_fsm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13459_vm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13459_vm
Normal file
Binary file not shown.
0
docker/dify/volumes/db/data/pgdata/base/1/13462
Normal file
0
docker/dify/volumes/db/data/pgdata/base/1/13462
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13463
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13463
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13464
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13464
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13464_fsm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13464_fsm
Normal file
Binary file not shown.
BIN
docker/dify/volumes/db/data/pgdata/base/1/13464_vm
Normal file
BIN
docker/dify/volumes/db/data/pgdata/base/1/13464_vm
Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user