Port
@@ -63,9 +63,20 @@ source reInit.sh
execute_init_script # Run initAll.sql

import_sensitive_words # Import the sensitive-word list

# Docker-specific setup: update the crawler paths
# Docker-specific setup: update the crawler paths and record the initialization status
echo "Updating the Docker environment configuration..."
mysql -uroot "${MYSQL_DATABASE}" <<EOSQL
-- Make sure the initialization-marker table exists
CREATE TABLE IF NOT EXISTS _db_init_status (
    id INT PRIMARY KEY AUTO_INCREMENT,
    script_name VARCHAR(255) NOT NULL UNIQUE,
    executed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    status VARCHAR(50) DEFAULT 'init'
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

-- Make sure there is a record for the current script
INSERT IGNORE INTO _db_init_status (script_name) VALUES ('01-init-database.sql');

-- Point the crawler configuration at the paths inside the Docker container
UPDATE tb_sys_config
SET config_value = '/usr/bin/python3'
@@ -75,12 +86,13 @@ UPDATE tb_sys_config
SET config_value = '/app/crawler'
WHERE config_key = 'crawler.basePath';

-- Mark the initialization status as 'success' for the healthcheck to read
UPDATE _db_init_status
SET status = 'success'
WHERE script_name = '01-init-database.sql';

SELECT '✅ Database initialization complete!' AS message;
SELECT 'Default user: admin, password: admin123' AS tip;
SELECT 'Default user: admin, password: 123456' AS tip;
SELECT 'Crawler configuration updated to the Docker container paths' AS docker_config;
EOSQL
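The `success` marker written above is what the healthcheck polls; a minimal sketch of such a probe follows (hypothetical — the healthcheck definition itself is not part of this hunk):

```shell
# Exit 0 only once 01-init-database.sql has recorded a successful run.
mysql -uroot "${MYSQL_DATABASE}" -N -e \
  "SELECT status FROM _db_init_status WHERE script_name = '01-init-database.sql';" \
  | grep -qx 'success'
```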
1428 docker/dify/.env.example Normal file
File diff suppressed because it is too large
119 docker/dify/README.md Normal file
@@ -0,0 +1,119 @@
## README for Docker Deployment

Welcome to the new `docker` directory for deploying Dify using Docker Compose. This README outlines the updates, deployment instructions, and migration details for existing users.

### What's Updated

- **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.\
  For more information, refer to `docker/certbot/README.md`.

- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.

  > What is `.env`? <br><br>
  > The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments.

- **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file, `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file; a sketch follows this list.

- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
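For example, pointing the stack at Milvus is, in sketch form, two `.env` entries (`MILVUS_URI` is one of the store-specific settings listed later in this README; the value below is a placeholder):

```properties
VECTOR_STORE=milvus
# Placeholder endpoint — point MILVUS_URI at your Milvus service.
MILVUS_URI=http://milvus-standalone:19530
```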
### How to Deploy Dify with `docker-compose.yaml`

1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system.
1. **Environment Setup**:
   - Navigate to the `docker` directory.
   - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
   - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
1. **Running the Services**:
   - Execute `docker compose up` from the `docker` directory to start the services (the full sequence is sketched after this list).
   - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
1. **SSL Certificate Setup**:
   - Refer to `docker/certbot/README.md` to set up SSL certificates using Certbot.
1. **OpenTelemetry Collector Setup**:
   - Change `ENABLE_OTEL` to `true` in `.env`.
   - Configure `OTLP_BASE_ENDPOINT` properly.
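Taken together, a minimal first deployment is the following sketch (run from the repository root; detached mode via `-d` is optional):

```shell
cd docker
cp .env.example .env        # create the mandatory .env file
# Edit .env as needed, e.g. set VECTOR_STORE=weaviate
docker compose up -d        # start the services in the background
```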
### How to Deploy Middleware for Developing Dify

1. **Middleware Setup**:
   - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches.
   - Navigate to the `docker` directory.
   - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file).
1. **Running Middleware Services**:
   - Navigate to the `docker` directory.
   - Execute `docker compose --env-file middleware.env -f docker-compose.middleware.yaml -p dify up -d` to start PostgreSQL/MySQL (per `DB_TYPE`) plus the bundled Weaviate instance.

> Compose automatically loads `COMPOSE_PROFILES=${DB_TYPE:-postgresql},weaviate` from `middleware.env`, so no extra `--profile` flags are needed. Adjust variables in `middleware.env` if you want a different combination of services.
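As a sketch, selecting the MySQL flavour therefore takes a single line in `middleware.env` (assuming the `DB_TYPE`-based `COMPOSE_PROFILES` default quoted above):

```properties
# Enables the db_mysql service instead of db_postgres via COMPOSE_PROFILES.
DB_TYPE=mysql
```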
### Migration for Existing Users

For users migrating from the `docker-legacy` setup:

1. **Review Changes**: Familiarize yourself with the new `.env` configuration and Docker Compose setup.
1. **Transfer Customizations**:
   - If you have customized configurations such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, you will need to reflect these changes in the `.env` file you create.
1. **Data Migration**:
   - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.

### Overview of `.env`

#### Key Modules and Customization

- **Vector Database Services**: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
- **Storage Services**: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
- **API and Web Services**: Users can define URLs and other settings that affect how the API and web frontend operate.

#### Other notable variables

The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables (a combined sketch follows the list):

1. **Common Variables**:

   - `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services.
   - `APP_WEB_URL`: Frontend application URL.
   - `FILES_URL`: Base URL for file downloads and previews.

1. **Server Configuration**:

   - `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings.
   - `SECRET_KEY`: A key for encrypting session cookies and other sensitive data.

1. **Database Configuration**:

   - `DB_USERNAME`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_DATABASE`: PostgreSQL database credentials and connection details.

1. **Redis Configuration**:

   - `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`: Redis server connection settings.

1. **Celery Configuration**:

   - `CELERY_BROKER_URL`: Configuration for the Celery message broker.

1. **Storage Configuration**:

   - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc.

1. **Vector Database Configuration**:

   - `VECTOR_STORE`: Type of vector database (e.g., `weaviate`, `milvus`).
   - Specific settings for each vector store, like `WEAVIATE_ENDPOINT` and `MILVUS_URI`.

1. **CORS Configuration**:

   - `WEB_API_CORS_ALLOW_ORIGINS`, `CONSOLE_CORS_ALLOW_ORIGINS`: Settings for cross-origin resource sharing.

1. **OpenTelemetry Configuration**:

   - `ENABLE_OTEL`: Enable the OpenTelemetry collector in the API service.
   - `OTLP_BASE_ENDPOINT`: Endpoint for your OTLP exporter.

1. **Other Service-Specific Environment Variables**:

   - Each service, like `nginx`, `redis`, `db`, and the vector databases, has specific environment variables that are directly referenced in `docker-compose.yaml`.
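Putting a few of these together, a trimmed `.env` for a small self-hosted instance might look like the sketch below (all values are illustrative placeholders; the authoritative defaults live in `.env.example`):

```properties
# Common variables (empty values fall back to same-origin defaults)
CONSOLE_API_URL=
APP_WEB_URL=
# Server configuration
LOG_LEVEL=INFO
SECRET_KEY=replace-with-output-of-openssl-rand-base64-42
# Database and Redis
DB_HOST=db_postgres
DB_PORT=5432
REDIS_HOST=redis
REDIS_PORT=6379
# Vector database
VECTOR_STORE=weaviate
WEAVIATE_ENDPOINT=http://weaviate:8080
```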
### Additional Information

- **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.
- **Support**: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory.

This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.
76 docker/dify/certbot/README.md Normal file
@@ -0,0 +1,76 @@
# Launching new servers with SSL certificates

## Short description

Docker Compose certbot configuration with backward compatibility (without the certbot container).\
Use `docker compose --profile certbot up` to use this feature.

## The simplest way for launching new servers with SSL certificates

1. Get letsencrypt certs\
   set `.env` values

   ```properties
   NGINX_SSL_CERT_FILENAME=fullchain.pem
   NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
   NGINX_ENABLE_CERTBOT_CHALLENGE=true
   CERTBOT_DOMAIN=your_domain.com
   CERTBOT_EMAIL=example@your_domain.com
   ```

   execute command:

   ```shell
   docker network prune
   docker compose --profile certbot up --force-recreate -d
   ```

   then, after the containers have launched:

   ```shell
   docker compose exec -it certbot /bin/sh /update-cert.sh
   ```

1. Edit the `.env` file and run `docker compose --profile certbot up` again.\
   set the `.env` value additionally

   ```properties
   NGINX_HTTPS_ENABLED=true
   ```

   execute command:

   ```shell
   docker compose --profile certbot up -d --no-deps --force-recreate nginx
   ```

   Then you can access your server with HTTPS.\
   [https://your_domain.com](https://your_domain.com)

## SSL certificates renewal

For SSL certificates renewal, execute the commands below:

```shell
docker compose exec -it certbot /bin/sh /update-cert.sh
docker compose exec nginx nginx -s reload
```

## Options for certbot

The `CERTBOT_OPTIONS` key might be helpful for testing, e.g.,

```properties
CERTBOT_OPTIONS=--dry-run
```

To apply changes to `CERTBOT_OPTIONS`, regenerate the certbot container before updating the certificates.

```shell
docker compose --profile certbot up -d --no-deps --force-recreate certbot
docker compose exec -it certbot /bin/sh /update-cert.sh
```

Then, reload the nginx container if necessary.

```shell
docker compose exec nginx nginx -s reload
```

## For legacy servers

To keep using the cert files in the `nginx/ssl` directory as before, simply launch the containers WITHOUT the `--profile certbot` option.

```shell
docker compose up -d
```
30 docker/dify/certbot/docker-entrypoint.sh Executable file
@@ -0,0 +1,30 @@
#!/bin/sh
set -e

printf '%s\n' "Docker entrypoint script is running"

printf '%s\n' "" "Checking specific environment variables:"
printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"

printf '%s\n' "" "Checking mounted directories:"
for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
  if [ -d "$dir" ]; then
    printf '%s\n' "$dir exists. Contents:"
    ls -la "$dir"
  else
    printf '%s\n' "$dir does not exist."
  fi
done

# Substitute the CERTBOT_* variables into the template to produce the
# runnable renewal script. (printf '%s\n' does not expand "\n" escapes in
# its arguments, so blank lines are emitted as empty "" arguments above.)
printf '%s\n' "" "Generating update-cert.sh from template"
sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
  -e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
  -e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
  /update-cert.template.txt > /update-cert.sh

chmod +x /update-cert.sh

printf '%s\n' "" "Executing command:" "$@"
exec "$@"
19 docker/dify/certbot/update-cert.template.txt Executable file
@@ -0,0 +1,19 @@
#!/bin/bash
set -e

DOMAIN="${CERTBOT_DOMAIN}"
EMAIL="${CERTBOT_EMAIL}"
OPTIONS="${CERTBOT_OPTIONS}"
CERT_NAME="${DOMAIN}" # use the domain name as the certificate name

# Check if the certificate already exists
if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
  echo "Certificate exists. Attempting to renew..."
  certbot renew --noninteractive --cert-name ${CERT_NAME} --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email ${OPTIONS}
else
  echo "Certificate does not exist. Obtaining a new certificate..."
  certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email -d ${DOMAIN} ${OPTIONS}
fi
echo "Certificate operation successful"
# Note: Nginx reload should be handled outside this container
echo "Please ensure to reload Nginx to apply any certificate changes."
4 docker/dify/couchbase-server/Dockerfile Normal file
@@ -0,0 +1,4 @@
FROM couchbase/server:latest AS stage_base
# FROM couchbase:latest AS stage_base
COPY init-cbserver.sh /opt/couchbase/init/
RUN chmod +x /opt/couchbase/init/init-cbserver.sh
44 docker/dify/couchbase-server/init-cbserver.sh Executable file
@@ -0,0 +1,44 @@
#!/bin/bash
# used to start couchbase server - can't get around this as docker compose only allows you to start one command - so we have to start couchbase like the standard couchbase Dockerfile would
# https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88

/entrypoint.sh couchbase-server &

# track if setup is complete so we don't try to setup again
FILE=/opt/couchbase/init/setupComplete.txt

if ! [ -f "$FILE" ]; then
  # used to automatically create the cluster based on environment variables
  # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html

  echo $COUCHBASE_ADMINISTRATOR_USERNAME ":" $COUCHBASE_ADMINISTRATOR_PASSWORD

  sleep 20s
  /opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \
    --cluster-username $COUCHBASE_ADMINISTRATOR_USERNAME \
    --cluster-password $COUCHBASE_ADMINISTRATOR_PASSWORD \
    --services data,index,query,fts \
    --cluster-ramsize $COUCHBASE_RAM_SIZE \
    --cluster-index-ramsize $COUCHBASE_INDEX_RAM_SIZE \
    --cluster-eventing-ramsize $COUCHBASE_EVENTING_RAM_SIZE \
    --cluster-fts-ramsize $COUCHBASE_FTS_RAM_SIZE \
    --index-storage-setting default

  sleep 2s

  # used to auto create the bucket based on environment variables
  # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html

  /opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \
    --username $COUCHBASE_ADMINISTRATOR_USERNAME \
    --password $COUCHBASE_ADMINISTRATOR_PASSWORD \
    --bucket $COUCHBASE_BUCKET \
    --bucket-ramsize $COUCHBASE_BUCKET_RAMSIZE \
    --bucket-type couchbase

  # create file so we know that the cluster is setup and don't run the setup again
  touch $FILE
fi
# docker compose will stop the container from running unless we do this
# known issue and workaround
tail -f /dev/null
877 docker/dify/docker-compose-template.yaml Normal file
@@ -0,0 +1,877 @@
x-shared-env: &shared-api-worker-env
services:
  # API service
  api:
    image: langgenius/dify-api:1.10.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default

  # worker service
  # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
  worker:
    image: langgenius/dify-api:1.10.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing all queues.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default

  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.10.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks.
      MODE: beat
    depends_on:
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    networks:
      - ssrf_proxy_network
      - default

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.10.0
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      ALLOW_EMBED: ${ALLOW_EMBED:-false}
      ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
      INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
      PM2_INSTANCES: ${PM2_INSTANCES:-2}
      LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
      MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
      MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
      MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
      MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
      ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
      ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
      ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
  # The PostgreSQL database.
  db_postgres:
    image: postgres:15-alpine
    profiles:
      - postgresql
    restart: always
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
      -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
      -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
      -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test:
        [
          "CMD",
          "pg_isready",
          "-h",
          "db_postgres",
          "-U",
          "${PGUSER:-postgres}",
          "-d",
          "${DB_DATABASE:-dify}",
        ]
      interval: 1s
      timeout: 3s
      retries: 60

  # The mysql database.
  db_mysql:
    image: mysql:8.0
    profiles:
      - mysql
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_PASSWORD:-difyai123456}
      MYSQL_DATABASE: ${MYSQL_DATABASE:-dify}
    command: >
      --max_connections=1000
      --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
      --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
      --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
    volumes:
      - ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-u",
          "root",
          "-p${MYSQL_PASSWORD:-difyai123456}",
        ]
      interval: 1s
      timeout: 3s
      retries: 30

  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
        ]

  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.12
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure you are changing this key for your deployment with a strong key.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
      - ./volumes/sandbox/conf:/conf
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
    networks:
      - ssrf_proxy_network
  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.4.1-local
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
      PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
      PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
      PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024}
      PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
      PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
      PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
      PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
      PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
      PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
      PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
      S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
      S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
      S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
      S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
      AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
      AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
      AWS_REGION: ${PLUGIN_AWS_REGION:-}
      AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
      AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
      TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
      TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
      TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
      ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
      ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
      ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
      ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
      ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
      ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
      VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
      VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
      VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
      VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
      SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false}
      SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-}
    ports:
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
    volumes:
      - ./volumes/plugin_daemon:/app/storage
    depends_on:
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false

  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint:
      [
        "sh",
        "-c",
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      # Please modify the squid env vars to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default

  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: ["/docker-entrypoint.sh"]
    command: ["tail", "-f", "/dev/null"]

  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint:
      [
        "sh",
        "-c",
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}"
      - "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}"
  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.27.0
    profiles:
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}

  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5-lts
    container_name: oceanbase
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: 127.0.0.1
      MODE: mini
      LANG: en_US.UTF-8
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'obclient -h127.0.0.1 -P2881 -uroot@test -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
        ]
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s

  # seekdb vector database
  seekdb:
    image: oceanbase/seekdb:latest
    container_name: seekdb
    profiles:
      - seekdb
    restart: always
    volumes:
      - ./volumes/seekdb:/var/lib/oceanbase
    environment:
      ROOT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G}
      REPORTER: dify-ai-seekdb
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'mysql -h127.0.0.1 -P2881 -uroot -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
        ]
      interval: 5s
      retries: 60
      timeout: 5s

  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}

  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [""]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure bucket was created before proceeding
      test:
        [
          "CMD-SHELL",
          "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1",
        ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s

  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
      # pg_bigm module for full text search
      PG_BIGM: ${PGVECTOR_PG_BIGM:-false}
      PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
      - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
    entrypoint: ["/docker-entrypoint.sh"]
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # get image from https://www.vastdata.com.cn/
  vastbase:
    image: vastdata/vastbase-vector
    profiles:
      - vastbase
    restart: always
    environment:
      - VB_DBCOMPATIBILITY=PG
      - VB_DB=dify
      - VB_USERNAME=dify
      - VB_PASSWORD=Difyai123456
    ports:
      - "5434:5432"
    volumes:
      - ./vastbase/lic:/home/vastbase/vastbase/lic
      - ./vastbase/data:/home/vastbase/data
      - ./vastbase/backup:/home/vastbase/backup
      - ./vastbase/backup_log:/home/vastbase/backup_log
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: ["CMD", "etcdctl", "endpoint", "health"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.5.15
    profiles:
      - milvus
    command: ["milvus", "run", "standalone"]
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus

  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net

  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch

  # opengauss vector database.
  opengauss:
    image: opengauss/opengauss:7.0.0-RC1
    profiles:
      - opengauss
    privileged: true
    restart: always
    environment:
      GS_USERNAME: ${OPENGAUSS_USER:-postgres}
      GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
      GS_PORT: ${OPENGAUSS_PORT:-6600}
      GS_DB: ${OPENGAUSS_DATABASE:-dify}
    volumes:
      - ./volumes/opengauss/data:/var/lib/opengauss/data
    healthcheck:
      test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"]
      interval: 10s
      timeout: 10s
      retries: 10
    ports:
      - ${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600}

  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}

  # Matrixone vector store.
  matrixone:
    hostname: matrixone
    image: matrixorigin/matrixone:2.1.1
    profiles:
      - matrixone
    restart: always
    volumes:
      - ./volumes/matrixone/data:/mo-data
    ports:
      - ${MATRIXONE_PORT:-6001}:${MATRIXONE_PORT:-6001}

  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
      - elasticsearch-ja
    restart: always
    volumes:
      - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      VECTOR_STORE: ${VECTOR_STORE:-}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: basic
      xpack.security.enabled: "true"
      xpack.security.enrollment.enabled: "false"
      xpack.security.http.ssl.enabled: "false"
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    deploy:
      resources:
        limits:
          memory: 2g
    entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"]
    healthcheck:
      test:
        ["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"]
      interval: 30s
      timeout: 10s
      retries: 50

  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: "true"
      XPACK_SECURITY_ENROLLMENT_ENABLED: "false"
      XPACK_SECURITY_HTTP_SSL_ENABLED: "false"
      XPACK_FLEET_ISAIRGAPPED: "true"
      I18N_LOCALE: zh-CN
      SERVER_PORT: "5601"
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

  # unstructured
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data

networks:
  # Create a network shared by sandbox, api and ssrf_proxy; it cannot access the outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true

volumes:
  oradata:
  dify_es01_data:
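Note how nearly every optional service above is gated behind a Compose `profiles:` entry; as a sketch (profile names exactly as declared in this file), a specific vector store can therefore be brought up explicitly:

```shell
# Bring up the stack plus the Milvus profile (etcd, minio, milvus-standalone).
docker compose --profile milvus up -d
```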
249 docker/dify/docker-compose.middleware.yaml Normal file
@@ -0,0 +1,249 @@
services:
  # The postgres database.
  db_postgres:
    image: postgres:15-alpine
    profiles:
      - ""
      - postgresql
    restart: always
    env_file:
      - ./middleware.env
    environment:
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
      -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
      -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
      -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
    volumes:
      - ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data
    ports:
      - "${EXPOSE_POSTGRES_PORT:-5432}:5432"
    healthcheck:
      test:
        [
          "CMD",
          "pg_isready",
          "-h",
          "db_postgres",
          "-U",
          "${PGUSER:-postgres}",
          "-d",
          "${POSTGRES_DB:-dify}",
        ]
      interval: 1s
      timeout: 3s
      retries: 30

  db_mysql:
    image: mysql:8.0
    profiles:
      - mysql
    restart: always
    env_file:
      - ./middleware.env
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_PASSWORD:-difyai123456}
      MYSQL_DATABASE: ${MYSQL_DATABASE:-dify}
    command: >
      --max_connections=1000
      --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
      --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
      --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
    volumes:
      - ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
    ports:
      - "${EXPOSE_MYSQL_PORT:-3306}:3306"
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-u",
          "root",
          "-p${MYSQL_PASSWORD:-difyai123456}",
        ]
      interval: 1s
      timeout: 3s
      retries: 30

  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    env_file:
      - ./middleware.env
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ${REDIS_HOST_VOLUME:-./volumes/redis/data}:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    ports:
      - "${EXPOSE_REDIS_PORT:-6379}:6379"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
        ]

  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.12
    restart: always
    env_file:
      - ./middleware.env
    environment:
      # The DifySandbox configurations
      # Make sure you are changing this key for your deployment with a strong key.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
      - ./volumes/sandbox/conf:/conf
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
    networks:
      - ssrf_proxy_network
# plugin daemon
|
||||
plugin_daemon:
|
||||
image: langgenius/dify-plugin-daemon:0.4.0-local
|
||||
restart: always
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
|
||||
REDIS_HOST: ${REDIS_HOST:-redis}
|
||||
REDIS_PORT: ${REDIS_PORT:-6379}
|
||||
REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
|
||||
SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
|
||||
SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
|
||||
MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
|
||||
DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://host.docker.internal:5001}
|
||||
DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
|
||||
PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
|
||||
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
|
||||
PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
|
||||
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
|
||||
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
|
||||
PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
|
||||
PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
|
||||
PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
|
||||
PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
|
||||
PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
|
||||
PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
|
||||
S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
|
||||
S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
|
||||
S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
|
||||
S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
|
||||
AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
|
||||
AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
|
||||
AWS_REGION: ${PLUGIN_AWS_REGION:-}
|
||||
AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
|
||||
AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
|
||||
TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
|
||||
TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
|
||||
TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
|
||||
ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
|
||||
ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
|
||||
ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
|
||||
ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
|
||||
ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
|
||||
ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
|
||||
VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
|
||||
VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
|
||||
VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
|
||||
VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
|
||||
THIRD_PARTY_SIGNATURE_VERIFICATION_ENABLED: true
|
||||
THIRD_PARTY_SIGNATURE_VERIFICATION_PUBLIC_KEYS: /app/keys/publickey.pem
|
||||
FORCE_VERIFYING_SIGNATURE: false
|
||||
ports:
|
||||
- "${EXPOSE_PLUGIN_DAEMON_PORT:-5002}:${PLUGIN_DAEMON_PORT:-5002}"
|
||||
- "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
|
||||
volumes:
|
||||
- ./volumes/plugin_daemon:/app/storage
|
||||
|
||||
# ssrf_proxy server
|
||||
# for more information, please refer to
|
||||
# https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
|
||||
ssrf_proxy:
|
||||
image: ubuntu/squid:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
|
||||
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
entrypoint:
|
||||
[
|
||||
"sh",
|
||||
"-c",
|
||||
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
|
||||
]
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
    environment:
      # Please adjust the squid environment variables below to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    ports:
      - "${EXPOSE_SSRF_PROXY_PORT:-3128}:${SSRF_HTTP_PORT:-3128}"
      - "${EXPOSE_SANDBOX_PORT:-8194}:${SANDBOX_PORT:-8194}"
    networks:
      - ssrf_proxy_network
      - default
  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.27.0
    profiles:
      - ""
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ${WEAVIATE_HOST_VOLUME:-./volumes/weaviate}:/var/lib/weaviate
    env_file:
      - ./middleware.env
    environment:
      # The Weaviate configuration.
      # See the Weaviate documentation for details: https://weaviate.io/developers/weaviate/config-refs/env-vars
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
    ports:
      - "${EXPOSE_WEAVIATE_PORT:-8080}:8080"
      - "${EXPOSE_WEAVIATE_GRPC_PORT:-50051}:50051"

networks:
  # An internal bridge network shared by sandbox, api, and ssrf_proxy; it cannot be reached from outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
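Two quick smoke tests for the services above (a sketch, assuming the default host-port mappings and the default Weaviate API key shown in this file):

    # Squid allow-list: marketplace.dify.ai should be reachable through the SSRF proxy
    curl -x http://localhost:3128 -sSv -o /dev/null https://marketplace.dify.ai
    # Weaviate: anonymous access is disabled above, so requests need the API key
    curl -H "Authorization: Bearer WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih" http://localhost:8080/v1/.well-known/ready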
BIN  docker/dify/docker-compose.png  Normal file
Binary file not shown. After Width: | Height: | Size: 170 KiB

1510  docker/dify/docker-compose.yaml  Normal file
File diff suppressed because it is too large. Load Diff

25  docker/dify/elasticsearch/docker-entrypoint.sh  Executable file
@@ -0,0 +1,25 @@
#!/bin/bash

set -e

if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then
    # Check if the ICU tokenizer plugin is installed
    if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then
        printf '%s\n' "Installing the ICU tokenizer plugin"
        if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then
            printf '%s\n' "Failed to install the ICU tokenizer plugin"
            exit 1
        fi
    fi
    # Check if the Japanese language analyzer plugin is installed
    if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then
        printf '%s\n' "Installing the Japanese language analyzer plugin"
        if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then
            printf '%s\n' "Failed to install the Japanese language analyzer plugin"
            exit 1
        fi
    fi
fi

# Run the original entrypoint script
exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh
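The plugin installs above only run when VECTOR_STORE selects the Japanese variant, and they need outbound network access on first start. A minimal sketch, assuming VECTOR_STORE is forwarded into the elasticsearch container's environment:

    VECTOR_STORE=elasticsearch-ja docker compose --env-file middleware.env up -d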
112  docker/dify/generate_docker_compose  Executable file
@@ -0,0 +1,112 @@
#!/usr/bin/env python3
import os
import re
import sys


def parse_env_example(file_path):
    """
    Parses the .env.example file and returns a dictionary with variable names as keys and default values as values.
    """
    env_vars = {}
    with open(file_path, "r") as f:
        for line_number, line in enumerate(f, 1):
            line = line.strip()
            # Ignore empty lines and comments
            if not line or line.startswith("#"):
                continue
            # Use regex to parse KEY=VALUE
            match = re.match(r"^([^=]+)=(.*)$", line)
            if match:
                key = match.group(1).strip()
                value = match.group(2).strip()
                # Remove possible quotes around the value
                if (value.startswith('"') and value.endswith('"')) or (
                    value.startswith("'") and value.endswith("'")
                ):
                    value = value[1:-1]
                env_vars[key] = value
            else:
                print(f"Warning: Unable to parse line {line_number}: {line}")
    return env_vars


def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"):
    """
    Generates a shared environment variables block as a YAML string.
    """
    lines = [f"x-shared-env: &{anchor_name}"]
    for key, default in env_vars.items():
        if key == "COMPOSE_PROFILES":
            continue
        # If the default value is empty, use ${KEY:-}
        if default == "":
            lines.append(f"  {key}: ${{{key}:-}}")
        else:
            # If the default value contains characters that are unsafe in a plain
            # YAML scalar (':' or whitespace), quote the whole value.
            if re.search(r"[:\s]", default):
                lines.append(f'  {key}: "${{{key}:-{default}}}"')
            else:
                lines.append(f"  {key}: ${{{key}:-{default}}}")
    return "\n".join(lines)


def insert_shared_env(template_path, output_path, shared_env_block, header_comments):
    """
    Inserts the shared environment variables block and header comments into the template file,
    removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file.
    """
    with open(template_path, "r") as f:
        template_content = f.read()

    # Remove existing x-shared-env: &shared-api-worker-env lines
    template_content = re.sub(
        r"^x-shared-env: &shared-api-worker-env\s*\n?",
        "",
        template_content,
        flags=re.MULTILINE,
    )

    # Prepend the header comments and shared env block to the template content
    final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}"

    with open(output_path, "w") as f:
        f.write(final_content)
    print(f"Generated {output_path}")


def main():
    env_example_path = ".env.example"
    template_path = "docker-compose-template.yaml"
    output_path = "docker-compose.yaml"
    anchor_name = "shared-api-worker-env"  # Can be modified as needed

    # Define header comments to be added at the top of docker-compose.yaml
    header_comments = (
        "# ==================================================================\n"
        "# WARNING: This file is auto-generated by generate_docker_compose\n"
        "# Do not modify this file directly. Instead, update the .env.example\n"
        "# or docker-compose-template.yaml and regenerate this file.\n"
        "# ==================================================================\n"
    )

    # Check that the required input files exist
    for path in [env_example_path, template_path]:
        if not os.path.isfile(path):
            print(f"Error: File {path} does not exist.")
            sys.exit(1)

    # Parse the .env.example file
    env_vars = parse_env_example(env_example_path)

    if not env_vars:
        print("Warning: No environment variables found in .env.example.")

    # Generate the shared environment variables block
    shared_env_block = generate_shared_env_block(env_vars, anchor_name)

    # Insert the shared environment variables block and header comments into the template
    insert_shared_env(template_path, output_path, shared_env_block, header_comments)


if __name__ == "__main__":
    main()
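Usage is intentionally argument-free: the script reads .env.example and docker-compose-template.yaml from the current directory and overwrites docker-compose.yaml. A typical run:

    cd docker/dify
    ./generate_docker_compose
    # -> Generated docker-compose.yaml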
223  docker/dify/middleware.env.example  Normal file
@@ -0,0 +1,223 @@
# ------------------------------
# Environment Variables for db Service
# ------------------------------
# Database Configuration
# Database type, supported values are `postgresql` and `mysql`
DB_TYPE=postgresql
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=db_postgres
DB_PORT=5432
DB_DATABASE=dify

# PostgreSQL Configuration
POSTGRES_USER=${DB_USERNAME}
# The password for the default postgres user.
POSTGRES_PASSWORD=${DB_PASSWORD}
# The name of the default postgres database.
POSTGRES_DB=${DB_DATABASE}
# postgres data directory
PGDATA=/var/lib/postgresql/data/pgdata
PGDATA_HOST_VOLUME=./volumes/db/data

# Maximum number of connections to the database
# Default is 100
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
POSTGRES_MAX_CONNECTIONS=100

# Sets the amount of shared memory used for postgres's shared buffers.
# Default is 128MB
# Recommended value: 25% of available memory
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
POSTGRES_SHARED_BUFFERS=128MB

# Sets the amount of memory used by each database worker for working space.
# Default is 4MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
POSTGRES_WORK_MEM=4MB

# Sets the amount of memory reserved for maintenance activities.
# Default is 64MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
POSTGRES_MAINTENANCE_WORK_MEM=64MB

# Sets the planner's assumption about the effective cache size.
# Default is 4096MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB

# Sets the maximum allowed duration of any statement before termination.
# Default is 0 (no timeout).
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT
# A value of 0 prevents the server from timing out statements.
POSTGRES_STATEMENT_TIMEOUT=0

# Sets the maximum allowed duration of any idle in-transaction session before termination.
# Default is 0 (no timeout).
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT
# A value of 0 prevents the server from terminating idle sessions.
POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0

# MySQL Configuration
MYSQL_USERNAME=${DB_USERNAME}
# MySQL password
MYSQL_PASSWORD=${DB_PASSWORD}
# MySQL database name
MYSQL_DATABASE=${DB_DATABASE}
# MySQL data directory host volume
MYSQL_HOST_VOLUME=./volumes/mysql/data

# MySQL Performance Configuration
# Maximum number of connections to MySQL
# Default is 1000
MYSQL_MAX_CONNECTIONS=1000

# InnoDB buffer pool size
# Default is 512M
# Recommended value: 70-80% of available memory for a dedicated MySQL server
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size
MYSQL_INNODB_BUFFER_POOL_SIZE=512M

# InnoDB log file size
# Default is 128M
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size
MYSQL_INNODB_LOG_FILE_SIZE=128M

# InnoDB flush log at transaction commit
# Default is 2 (flush to OS cache, sync every second)
# Options: 0 (no flush), 1 (flush and sync), 2 (flush to OS cache)
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit
MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2

# -----------------------------
# Environment Variables for redis Service
# -----------------------------
REDIS_HOST_VOLUME=./volumes/redis/data
REDIS_PASSWORD=difyai123456

# ------------------------------
# Environment Variables for sandbox Service
# ------------------------------
SANDBOX_API_KEY=dify-sandbox
SANDBOX_GIN_MODE=release
SANDBOX_WORKER_TIMEOUT=15
SANDBOX_ENABLE_NETWORK=true
SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
SANDBOX_PORT=8194

# ------------------------------
# Environment Variables for ssrf_proxy Service
# ------------------------------
SSRF_HTTP_PORT=3128
SSRF_COREDUMP_DIR=/var/spool/squid
SSRF_REVERSE_PROXY_PORT=8194
SSRF_SANDBOX_HOST=sandbox

# ------------------------------
# Environment Variables for weaviate Service
# ------------------------------
WEAVIATE_QUERY_DEFAULTS_LIMIT=25
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
WEAVIATE_CLUSTER_HOSTNAME=node1
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
WEAVIATE_HOST_VOLUME=./volumes/weaviate

# ------------------------------
# Docker Compose profile configuration
# ------------------------------
# Loaded automatically when running `docker compose --env-file middleware.env ...`.
# Controls which DB/vector services start, so no extra `--profile` flag is needed.
COMPOSE_PROFILES=${DB_TYPE:-postgresql},weaviate

# ------------------------------
# Docker Compose Service Expose Host Port Configurations
# ------------------------------
EXPOSE_POSTGRES_PORT=5432
EXPOSE_MYSQL_PORT=3306
EXPOSE_REDIS_PORT=6379
EXPOSE_SANDBOX_PORT=8194
EXPOSE_SSRF_PROXY_PORT=3128
EXPOSE_WEAVIATE_PORT=8080

# ------------------------------
# Plugin Daemon Configuration
# ------------------------------

DB_PLUGIN_DATABASE=dify_plugin
EXPOSE_PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DAEMON_URL=http://host.docker.internal:5002
PLUGIN_MAX_PACKAGE_SIZE=52428800
PLUGIN_PPROF_ENABLED=false
PLUGIN_WORKING_PATH=/app/storage/cwd

ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}

PLUGIN_DEBUGGING_PORT=5003
PLUGIN_DEBUGGING_HOST=0.0.0.0
EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
EXPOSE_PLUGIN_DEBUGGING_PORT=5003

PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
PLUGIN_DIFY_INNER_API_URL=http://host.docker.internal:5001

MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai

FORCE_VERIFYING_SIGNATURE=true

PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120
PLUGIN_MAX_EXECUTION_TIMEOUT=600
# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple
PIP_MIRROR_URL=

# https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example
# Plugin storage type: local, aws_s3, tencent_cos, azure_blob
PLUGIN_STORAGE_TYPE=local
PLUGIN_STORAGE_LOCAL_ROOT=/app/storage
PLUGIN_WORKING_PATH=/app/storage/cwd
PLUGIN_INSTALLED_PATH=plugin
PLUGIN_PACKAGE_CACHE_PATH=plugin_packages
PLUGIN_MEDIA_CACHE_PATH=assets
# Plugin OSS bucket
PLUGIN_STORAGE_OSS_BUCKET=
# Plugin OSS S3 credentials
PLUGIN_S3_USE_AWS_MANAGED_IAM=false
PLUGIN_S3_USE_AWS=false
PLUGIN_S3_ENDPOINT=
PLUGIN_S3_USE_PATH_STYLE=false
PLUGIN_AWS_ACCESS_KEY=
PLUGIN_AWS_SECRET_KEY=
PLUGIN_AWS_REGION=
# Plugin OSS Azure Blob
PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME=
PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING=
# Plugin OSS Tencent COS
PLUGIN_TENCENT_COS_SECRET_KEY=
PLUGIN_TENCENT_COS_SECRET_ID=
PLUGIN_TENCENT_COS_REGION=
# Plugin OSS Aliyun OSS
PLUGIN_ALIYUN_OSS_REGION=
PLUGIN_ALIYUN_OSS_ENDPOINT=
PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID=
PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET=
PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4
PLUGIN_ALIYUN_OSS_PATH=
# Plugin OSS Volcengine TOS
PLUGIN_VOLCENGINE_TOS_ENDPOINT=
PLUGIN_VOLCENGINE_TOS_ACCESS_KEY=
PLUGIN_VOLCENGINE_TOS_SECRET_KEY=
PLUGIN_VOLCENGINE_TOS_REGION=
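Because COMPOSE_PROFILES is defined in this file, pointing Compose at it selects the matching database and vector-store services in one step. A minimal sketch:

    cd docker/dify
    docker compose --env-file middleware.env up -d
    # DB_TYPE=postgresql (default) starts the postgresql profile; set DB_TYPE=mysql to start MySQL instead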
58  docker/dify/nginx/conf.d/default.conf.template  Normal file
@@ -0,0 +1,58 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.

server {
    listen ${NGINX_PORT};
    server_name ${NGINX_SERVER_NAME};

    location /console/api {
      proxy_pass http://api:5001;
      include proxy.conf;
    }

    location /api {
      proxy_pass http://api:5001;
      include proxy.conf;
    }

    location /v1 {
      proxy_pass http://api:5001;
      include proxy.conf;
    }

    location /files {
      proxy_pass http://api:5001;
      include proxy.conf;
    }

    location /explore {
      proxy_pass http://web:3000;
      include proxy.conf;
    }

    location /e/ {
      proxy_pass http://plugin_daemon:5002;
      proxy_set_header Dify-Hook-Url $scheme://$host$request_uri;
      include proxy.conf;
    }

    location / {
      proxy_pass http://web:3000;
      include proxy.conf;
    }

    location /mcp {
      proxy_pass http://api:5001;
      include proxy.conf;
    }

    location /triggers {
      proxy_pass http://api:5001;
      include proxy.conf;
    }

    # placeholder for acme challenge location
    ${ACME_CHALLENGE_LOCATION}

    # placeholder for https config defined in https.conf.template
    ${HTTPS_CONFIG}
}
42  docker/dify/nginx/docker-entrypoint.sh  Executable file
@@ -0,0 +1,42 @@
#!/bin/bash

HTTPS_CONFIG=''

if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then
    # Check if the certificate and key files for the specified domain exist
    if [ -n "${CERTBOT_DOMAIN}" ] && \
       [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \
       [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then
        SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}"
        SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}"
    else
        SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}"
        SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}"
    fi
    export SSL_CERTIFICATE_PATH
    export SSL_CERTIFICATE_KEY_PATH

    # Set the HTTPS_CONFIG environment variable to the content of https.conf.template
    HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
    export HTTPS_CONFIG
    # Substitute ${HTTPS_CONFIG} in default.conf.template with the rendered https config
    envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
fi
# Export even when HTTPS is disabled so the placeholder is replaced with an empty string below
export HTTPS_CONFIG

if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then
    ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }'
else
    ACME_CHALLENGE_LOCATION=''
fi
export ACME_CHALLENGE_LOCATION

# Build a "$VAR1,$VAR2,..." list of all exported variables so that envsubst only
# substitutes environment variables and leaves nginx's own $variables untouched.
env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)

envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf

envsubst "$env_vars" < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf

# Start Nginx using the default entrypoint
exec nginx -g 'daemon off;'
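The explicit variable list passed to envsubst matters because nginx templates are full of $variables that must survive substitution. A quick illustration of the same mechanism with a hypothetical value:

    printf 'listen ${NGINX_PORT};  log_format x $remote_addr;\n' \
      | NGINX_PORT=80 envsubst '$NGINX_PORT'
    # -> listen 80;  log_format x $remote_addr;   ($remote_addr is left for nginx)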
9  docker/dify/nginx/https.conf.template  Normal file
@@ -0,0 +1,9 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.

listen ${NGINX_SSL_PORT} ssl;
ssl_certificate ${SSL_CERTIFICATE_PATH};
ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH};
ssl_protocols ${NGINX_SSL_PROTOCOLS};
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
34  docker/dify/nginx/nginx.conf.template  Normal file
@@ -0,0 +1,34 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.

user nginx;
worker_processes ${NGINX_WORKER_PROCESSES};

error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;


events {
    worker_connections 1024;
}


http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;

    keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};

    #gzip on;
    client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};

    include /etc/nginx/conf.d/*.conf;
}
11  docker/dify/nginx/proxy.conf.template  Normal file
@@ -0,0 +1,11 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.

proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};
0  docker/dify/nginx/ssl/.gitkeep  Normal file

24  docker/dify/pgvector/docker-entrypoint.sh  Executable file
@@ -0,0 +1,24 @@
#!/bin/bash

PG_MAJOR=16

if [ "${PG_BIGM}" = "true" ]; then
    # Install pg_bigm from source
    apt-get update
    apt-get install -y curl make gcc postgresql-server-dev-${PG_MAJOR}

    curl -LO https://github.com/pgbigm/pg_bigm/archive/refs/tags/v${PG_BIGM_VERSION}.tar.gz
    tar xf v${PG_BIGM_VERSION}.tar.gz
    cd pg_bigm-${PG_BIGM_VERSION} || exit 1
    make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config
    make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config install

    cd - || exit 1
    rm -rf v${PG_BIGM_VERSION}.tar.gz pg_bigm-${PG_BIGM_VERSION}

    # Enable pg_bigm via shared_preload_libraries
    sed -i -e 's/^#\s*shared_preload_libraries.*/shared_preload_libraries = '\''pg_bigm'\''/' /var/lib/postgresql/data/pgdata/postgresql.conf
fi

# Run the original entrypoint script
exec /usr/local/bin/docker-entrypoint.sh postgres
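After restarting with PG_BIGM=true, the extension still has to be created per database. A sketch, assuming the service is named pgvector and using the default postgres user and database from middleware.env:

    docker compose --env-file middleware.env exec pgvector \
      psql -U postgres -d dify -c "CREATE EXTENSION IF NOT EXISTS pg_bigm;"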
42  docker/dify/ssrf_proxy/docker-entrypoint.sh  Executable file
@@ -0,0 +1,42 @@
#!/bin/bash

# Modified based on the Squid OCI image entrypoint

# This entrypoint aims to forward the squid logs to stdout to assist users of
# common container related tooling (e.g., kubernetes, docker-compose, etc) to
# access the service logs.

# Moreover, it invokes the squid binary, leaving all the desired parameters to
# be provided by the "command" passed to the spawned container. If no command
# is provided by the user, the default behavior (as per the CMD statement in
# the Dockerfile) will be to use Ubuntu's default configuration [1] and run
# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
# systemd unit.

# [1] The default configuration is changed in the Dockerfile to allow local
# network connections. See the Dockerfile for further information.

echo "[ENTRYPOINT] re-creating the snakeoil self-signed certificate removed in the build process"
if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
    /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
fi

tail -F /var/log/squid/access.log 2>/dev/null &
tail -F /var/log/squid/error.log 2>/dev/null &
tail -F /var/log/squid/store.log 2>/dev/null &
tail -F /var/log/squid/cache.log 2>/dev/null &

# Replace ${VAR} placeholders in the template with values from the environment
# and write the result to squid.conf.
echo "[ENTRYPOINT] replacing environment variables in the template"
awk '{
    while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
        var = substr($0, RSTART+2, RLENGTH-3)
        val = ENVIRON[var]
        $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
    }
    print
}' /etc/squid/squid.conf.template > /etc/squid/squid.conf

# Initialize the cache directories, then start squid in the foreground
/usr/sbin/squid -Nz
echo "[ENTRYPOINT] starting squid"
/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
56  docker/dify/ssrf_proxy/squid.conf.template  Normal file
@@ -0,0 +1,56 @@
acl localnet src 0.0.0.1-0.255.255.255  # RFC 1122 "this" network (LAN)
acl localnet src 10.0.0.0/8             # RFC 1918 local private network (LAN)
acl localnet src 100.64.0.0/10          # RFC 6598 shared address space (CGN)
acl localnet src 169.254.0.0/16         # RFC 3927 link-local (directly plugged) machines
acl localnet src 172.16.0.0/12          # RFC 1918 local private network (LAN)
acl localnet src 192.168.0.0/16         # RFC 1918 local private network (LAN)
acl localnet src fc00::/7               # RFC 4193 local private network range
acl localnet src fe80::/10              # RFC 4291 link-local (directly plugged) machines
acl SSL_ports port 443
# acl SSL_ports port 1025-65535  # Enable this line to work around https://github.com/langgenius/dify/issues/12792
acl Safe_ports port 80          # http
acl Safe_ports port 21          # ftp
acl Safe_ports port 443         # https
acl Safe_ports port 70          # gopher
acl Safe_ports port 210         # wais
acl Safe_ports port 1025-65535  # unregistered ports
acl Safe_ports port 280         # http-mgmt
acl Safe_ports port 488         # gss-http
acl Safe_ports port 591         # filemaker
acl Safe_ports port 777         # multiling http
acl CONNECT method CONNECT
acl allowed_domains dstdomain .marketplace.dify.ai
http_access allow allowed_domains
http_access deny !Safe_ports
http_access deny CONNECT !SSL_ports
http_access allow localhost manager
http_access deny manager
http_access allow localhost
include /etc/squid/conf.d/*.conf
http_access deny all

################################## Proxy Server ################################
http_port ${HTTP_PORT}
coredump_dir ${COREDUMP_DIR}
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern . 0 20% 4320


# cache_dir ufs /var/spool/squid 100 16 256
# Upstream proxy: set this to your own upstream proxy IP to avoid SSRF attacks
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default

################################## Reverse Proxy To Sandbox ################################
http_port ${REVERSE_PROXY_PORT} accel vhost
cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
acl src_all src all
http_access allow src_all

# Unless this buffer size is increased, uploading more than two files at once will fail.
client_request_buffer_max_size 100 MB
13  docker/dify/startupscripts/init.sh  Executable file
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

DB_INITIALIZED="/opt/oracle/oradata/dbinit"
#[ -f ${DB_INITIALIZED} ] && exit
#touch ${DB_INITIALIZED}
if [ -f ${DB_INITIALIZED} ]; then
    echo 'Marker file exists. The database has already been initialized.'
    exit
else
    echo 'Marker file does not exist. First-time startup: initializing the database.'
    "$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script";
    touch ${DB_INITIALIZED}
fi
10  docker/dify/startupscripts/init_user.script  Executable file
@@ -0,0 +1,10 @@
show pdbs;
ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
alter session set container= freepdb1;
create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
grant DB_DEVELOPER_ROLE to dify;

BEGIN
CTX_DDL.CREATE_PREFERENCE('dify.world_lexer','WORLD_LEXER');
END;
/
4  docker/dify/tidb/config/pd.toml  Normal file
@@ -0,0 +1,4 @@
# PD Configuration File reference:
# https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file
[replication]
max-replicas = 1
13  docker/dify/tidb/config/tiflash-learner.toml  Normal file
@@ -0,0 +1,13 @@
# TiFlash tiflash-learner.toml Configuration File reference:
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file

log-file = "/logs/tiflash_tikv.log"

[server]
engine-addr = "tiflash:4030"
addr = "0.0.0.0:20280"
advertise-addr = "tiflash:20280"
status-addr = "tiflash:20292"

[storage]
data-dir = "/data/flash"
19  docker/dify/tidb/config/tiflash.toml  Normal file
@@ -0,0 +1,19 @@
# TiFlash tiflash.toml Configuration File reference:
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file

listen_host = "0.0.0.0"
path = "/data"

[flash]
tidb_status_addr = "tidb:10080"
service_addr = "tiflash:4030"

[flash.proxy]
config = "/tiflash-learner.toml"

[logger]
errorlog = "/logs/tiflash_error.log"
log = "/logs/tiflash.log"

[raft]
pd_addr = "pd0:2379"
63  docker/dify/tidb/docker-compose.yaml  Normal file
@@ -0,0 +1,63 @@
services:
  pd0:
    image: pingcap/pd:v8.5.1
    # ports:
    #   - "2379"
    volumes:
      - ./config/pd.toml:/pd.toml:ro
      - ./volumes/data:/data
      - ./volumes/logs:/logs
    command:
      - --name=pd0
      - --client-urls=http://0.0.0.0:2379
      - --peer-urls=http://0.0.0.0:2380
      - --advertise-client-urls=http://pd0:2379
      - --advertise-peer-urls=http://pd0:2380
      - --initial-cluster=pd0=http://pd0:2380
      - --data-dir=/data/pd
      - --config=/pd.toml
      - --log-file=/logs/pd.log
    restart: on-failure
  tikv:
    image: pingcap/tikv:v8.5.1
    volumes:
      - ./volumes/data:/data
      - ./volumes/logs:/logs
    command:
      - --addr=0.0.0.0:20160
      - --advertise-addr=tikv:20160
      - --status-addr=tikv:20180
      - --data-dir=/data/tikv
      - --pd=pd0:2379
      - --log-file=/logs/tikv.log
    depends_on:
      - "pd0"
    restart: on-failure
  tidb:
    image: pingcap/tidb:v8.5.1
    # ports:
    #   - "4000:4000"
    volumes:
      - ./volumes/logs:/logs
    command:
      - --advertise-address=tidb
      - --store=tikv
      - --path=pd0:2379
      - --log-file=/logs/tidb.log
    depends_on:
      - "tikv"
    restart: on-failure
  tiflash:
    image: pingcap/tiflash:v8.5.1
    volumes:
      - ./config/tiflash.toml:/tiflash.toml:ro
      - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro
      - ./volumes/data:/data
      - ./volumes/logs:/logs
    command:
      - server
      - --config-file=/tiflash.toml
    depends_on:
      - "tikv"
      - "tidb"
    restart: on-failure
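TiDB speaks the MySQL wire protocol on port 4000. After uncommenting the 4000:4000 port mapping above, a quick connectivity check (a sketch, assuming a mysql client on the host; the default root account has no password):

    mysql -h 127.0.0.1 -P 4000 -u root -e "SELECT tidb_version();"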
@@ -0,0 +1,17 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <networks>
                <ip>::1</ip> <!-- change to ::/0 to allow access from all addresses -->
                <ip>127.0.0.1</ip>
                <ip>10.0.0.0/8</ip>
                <ip>172.16.0.0/12</ip>
                <ip>192.168.0.0/16</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
    </users>
</clickhouse>
1  docker/dify/volumes/oceanbase/init.d/vec_memory.sql  Normal file
@@ -0,0 +1 @@
ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30;
222  docker/dify/volumes/opensearch/opensearch_dashboards.yml  Normal file
@@ -0,0 +1,222 @@
---
# Copyright OpenSearch Contributors
# SPDX-License-Identifier: Apache-2.0

# Description:
# Default configuration for OpenSearch Dashboards

# OpenSearch Dashboards is served by a back end server. This setting specifies the port to use.
# server.port: 5601

# Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
# server.host: "localhost"

# Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
# server.basePath: ""

# Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# server.rewriteBasePath: false

# The maximum payload size in bytes for incoming server requests.
# server.maxPayloadBytes: 1048576

# The OpenSearch Dashboards server's name. This is used for display purposes.
# server.name: "your-hostname"

# The URLs of the OpenSearch instances to use for all your queries.
# opensearch.hosts: ["http://localhost:9200"]

# OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and
# dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist.
# opensearchDashboards.index: ".opensearch_dashboards"

# The default application to load.
# opensearchDashboards.defaultAppId: "home"

# Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck.
# This settings should be used for large clusters or for clusters with ingest heavy nodes.
# It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes.
#
# It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting
# This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up
# e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id:
# Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here
# opensearch.optimizedHealthcheckId: "cluster_id"

# If your OpenSearch is protected with basic authentication, these settings provide
# the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards
# index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which
# is proxied through the OpenSearch Dashboards server.
# opensearch.username: "opensearch_dashboards_system"
# opensearch.password: "pass"

# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser.
# server.ssl.enabled: false
# server.ssl.certificate: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key

# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when
# xpack.security.http.ssl.client_authentication in OpenSearch is set to required.
# opensearch.ssl.certificate: /path/to/your/client.crt
# opensearch.ssl.key: /path/to/your/client.key

# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your OpenSearch instance.
# opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]

# To disregard the validity of SSL certificates, change this setting's value to 'none'.
# opensearch.ssl.verificationMode: full

# Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of
# the opensearch.requestTimeout setting.
# opensearch.pingTimeout: 1500

# Time in milliseconds to wait for responses from the back end or OpenSearch. This value
# must be a positive integer.
# opensearch.requestTimeout: 30000

# List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side
# headers, set this value to [] (an empty list).
# opensearch.requestHeadersWhitelist: [ authorization ]

# Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration.
# opensearch.customHeaders: {}

# Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable.
# opensearch.shardTimeout: 30000

# Logs queries sent to OpenSearch. Requires logging.verbose set to true.
# opensearch.logQueries: false

# Specifies the path where OpenSearch Dashboards creates the process ID file.
# pid.file: /var/run/opensearchDashboards.pid

# Enables you to specify a file where OpenSearch Dashboards stores log output.
# logging.dest: stdout

# Set the value of this setting to true to suppress all logging output.
# logging.silent: false

# Set the value of this setting to true to suppress all logging output other than error messages.
# logging.quiet: false

# Set the value of this setting to true to log all events, including system usage information
# and all requests.
# logging.verbose: false

# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
# ops.interval: 5000

# Specifies locale to be used for all localizable strings, dates and number formats.
# Supported languages are the following: English - en , by default , Chinese - zh-CN .
# i18n.locale: "en"

# Set the allowlist to check input graphite Url. Allowlist is the default check list.
# vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite']

# Set the blocklist to check input graphite Url. Blocklist is an IP list.
# Below is an example for reference
# vis_type_timeline.graphiteBlockedIPs: [
#   //Loopback
#   '127.0.0.0/8',
#   '::1/128',
#   //Link-local Address for IPv6
#   'fe80::/10',
#   //Private IP address for IPv4
#   '10.0.0.0/8',
#   '172.16.0.0/12',
#   '192.168.0.0/16',
#   //Unique local address (ULA)
#   'fc00::/7',
#   //Reserved IP address
#   '0.0.0.0/8',
#   '100.64.0.0/10',
#   '192.0.0.0/24',
#   '192.0.2.0/24',
#   '198.18.0.0/15',
#   '192.88.99.0/24',
#   '198.51.100.0/24',
#   '203.0.113.0/24',
#   '224.0.0.0/4',
#   '240.0.0.0/4',
#   '255.255.255.255/32',
#   '::/128',
#   '2001:db8::/32',
#   'ff00::/8',
# ]
# vis_type_timeline.graphiteBlockedIPs: []

# opensearchDashboards.branding:
#   logo:
#     defaultUrl: ""
#     darkModeUrl: ""
#   mark:
#     defaultUrl: ""
#     darkModeUrl: ""
#   loadingLogo:
#     defaultUrl: ""
#     darkModeUrl: ""
#   faviconUrl: ""
#   applicationTitle: ""

# Set the value of this setting to true to capture region blocked warnings and errors
# for your map rendering services.
# map.showRegionBlockedWarning: false

# Set the value of this setting to false to suppress search usage telemetry
# for reducing the load of OpenSearch cluster.
# data.search.usageTelemetry.enabled: false

# 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false'
# Set the value of this setting to false to disable VisBuilder
# functionality in Visualization.
# vis_builder.enabled: false

# 2.4 New Experimental Feature
# Set the value of this setting to true to enable the experimental multiple data source
# support feature. Use with caution.
# data_source.enabled: false
# Set the value of these settings to customize crypto materials to encryption saved credentials
# in data sources.
# data_source.encryption.wrappingKeyName: 'changeme'
# data_source.encryption.wrappingKeyNamespace: 'changeme'
# data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]

# 2.6 New ML Commons Dashboards Feature
# Set the value of this setting to true to enable the ml commons dashboards
# ml_commons_dashboards.enabled: false

# 2.12 New experimental Assistant Dashboards Feature
# Set the value of this setting to true to enable the assistant dashboards
# assistant.chat.enabled: false

# 2.13 New Query Assistant Feature
# Set the value of this setting to false to disable the query assistant
# observability.query_assist.enabled: false

# 2.14 Enable Ui Metric Collectors in Usage Collector
# Set the value of this setting to true to enable UI Metric collections
# usageCollection.uiMetric.enabled: false

opensearch.hosts: [https://localhost:9200]
opensearch.ssl.verificationMode: none
opensearch.username: admin
opensearch.password: 'Qazwsxedc!@#123'
opensearch.requestHeadersWhitelist: [authorization, securitytenant]

opensearch_security.multitenancy.enabled: true
opensearch_security.multitenancy.tenants.preferred: [Private, Global]
opensearch_security.readonly_mode.roles: [kibana_read_only]
# Use this setting if you are running opensearch-dashboards without https
opensearch_security.cookie.secure: false
server.host: '0.0.0.0'
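With the credentials pinned at the bottom of this file, a quick check against the backing OpenSearch node (a sketch, assuming the opensearch service publishes port 9200 with self-signed TLS, hence -k):

    curl -k -u admin:'Qazwsxedc!@#123' 'https://localhost:9200/_cluster/health?pretty'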
14  docker/dify/volumes/sandbox/conf/config.yaml  Normal file
@@ -0,0 +1,14 @@
app:
  port: 8194
  debug: True
  key: dify-sandbox
max_workers: 4
max_requests: 50
worker_timeout: 5
python_path: /usr/local/bin/python3
enable_network: True  # please make sure there is no network risk in your environment
allowed_syscalls:  # please leave it empty if you have no idea how seccomp works
proxy:
  socks5: ''
  http: ''
  https: ''
35  docker/dify/volumes/sandbox/conf/config.yaml.example  Normal file
@@ -0,0 +1,35 @@
app:
  port: 8194
  debug: True
  key: dify-sandbox
max_workers: 4
max_requests: 50
worker_timeout: 5
python_path: /usr/local/bin/python3
python_lib_path:
  - /usr/local/lib/python3.10
  - /usr/lib/python3.10
  - /usr/lib/python3
  - /usr/lib/x86_64-linux-gnu
  - /etc/ssl/certs/ca-certificates.crt
  - /etc/nsswitch.conf
  - /etc/hosts
  - /etc/resolv.conf
  - /run/systemd/resolve/stub-resolv.conf
  - /run/resolvconf/resolv.conf
  - /etc/localtime
  - /usr/share/zoneinfo
  - /etc/timezone
  # add more paths if needed
python_pip_mirror_url: https://pypi.tuna.tsinghua.edu.cn/simple
nodejs_path: /usr/local/bin/node
enable_network: True
allowed_syscalls:
  - 1
  - 2
  - 3
  # add all the syscalls which you require
proxy:
  socks5: ''
  http: ''
  https: ''
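To exercise the sandbox end to end with the key configured above, a sketch only; the /v1/sandbox/run path and X-Api-Key header are assumptions drawn from the dify-sandbox project, not confirmed by this commit:

    # hypothetical endpoint; check the dify-sandbox docs for the exact route
    curl -X POST http://localhost:8194/v1/sandbox/run \
      -H 'X-Api-Key: dify-sandbox' -H 'Content-Type: application/json' \
      -d '{"language": "python3", "code": "print(1 + 1)"}'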
@@ -31,7 +31,7 @@ services:
       - school-news-network
     healthcheck:
       # Only report healthy when MySQL is reachable and the sensitive-word table contains at least one 'deny' record
-      test: ["CMD-SHELL", "mysql -uroot -p${MYSQL_ROOT_PASSWORD:-123456} -D ${MYSQL_DATABASE:-school_news} -e \"SELECT 'ok' FROM tb_sensitive_word WHERE type='deny' LIMIT 1;\" 2>/dev/null | grep -q ok"]
+      test: ["CMD-SHELL", "mysql -uroot -p${MYSQL_ROOT_PASSWORD:-123456} -D ${MYSQL_DATABASE:-school_news} -e \"SELECT 'ok' FROM _db_init_status WHERE script_name='01-init-database.sql' AND status='success' LIMIT 1;\" 2>/dev/null | grep -q ok"]
       interval: 10s
       timeout: 10s
       retries: 10
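To inspect the marker table the new healthcheck relies on, a sketch using the defaults from the test command above:

    mysql -uroot -p123456 -D school_news -e "SELECT * FROM _db_init_status;"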
@@ -7,11 +7,11 @@
 -- ========================================
 
--- Create the database (if it does not exist)
-CREATE DATABASE IF NOT EXISTS school_news
-    DEFAULT CHARACTER SET utf8mb4
-    COLLATE utf8mb4_unicode_ci;
+-- Create the database with the utf8mb4 character set
+CREATE DATABASE IF NOT EXISTS `school_news` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
 
-USE school_news;
+-- Use the database
+USE `school_news`;
 
 -- Check whether the tables already exist to avoid re-initialization
 -- Create a marker table to record the initialization status