# Your app secret key, used for securely signing the session cookie.
# Make sure to change this key to a strong value for your deployment.
# You can generate a strong key using `openssl rand -base64 42`.
# Alternatively, you can set it with the `SECRET_KEY` environment variable.
SECRET_KEY=
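# For example (illustrative only; generate your own value):
#   $ openssl rand -base64 42
#   SECRET_KEY=<paste the generated value here>
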
# Ensure UTF-8 encoding
LANG=en_US.UTF-8
LC_ALL=en_US.UTF-8
PYTHONIOENCODING=utf-8

# Console API base URL
CONSOLE_API_URL=http://localhost:5001
CONSOLE_WEB_URL=http://localhost:3000

# Service API base URL
SERVICE_API_URL=http://localhost:5001

# Web app base URL
APP_WEB_URL=http://localhost:3000

# Files URL
FILES_URL=http://localhost:5001

# INTERNAL_FILES_URL is used for plugin daemon communication within the Docker network.
# Set this to the internal Docker service URL for proper plugin file access.
# Example: INTERNAL_FILES_URL=http://api:5001
INTERNAL_FILES_URL=http://127.0.0.1:5001

# TRIGGER URL
TRIGGER_URL=http://localhost:5001

# Time in seconds after which a signed file URL expires and its signature is rejected
FILES_ACCESS_TIMEOUT=300

# Access token expiration time in minutes
ACCESS_TOKEN_EXPIRE_MINUTES=60

# Refresh token expiration time in days
REFRESH_TOKEN_EXPIRE_DAYS=30

# Redis configuration
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_USERNAME=
REDIS_PASSWORD=difyai123456
REDIS_USE_SSL=false
# SSL configuration for Redis (when REDIS_USE_SSL=true)
# Options: CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
REDIS_SSL_CERT_REQS=CERT_NONE
# Path to CA certificate file for SSL verification
REDIS_SSL_CA_CERTS=
# Path to client certificate file for SSL authentication
REDIS_SSL_CERTFILE=
# Path to client private key file for SSL authentication
REDIS_SSL_KEYFILE=
REDIS_DB=0

# Redis Sentinel configuration
REDIS_USE_SENTINEL=false
REDIS_SENTINELS=
REDIS_SENTINEL_SERVICE_NAME=
REDIS_SENTINEL_USERNAME=
REDIS_SENTINEL_PASSWORD=
REDIS_SENTINEL_SOCKET_TIMEOUT=0.1
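# Example (illustrative values): a three-node Sentinel deployment, with
# sentinels given as comma-separated host:port pairs:
# REDIS_USE_SENTINEL=true
# REDIS_SENTINELS=192.168.1.10:26379,192.168.1.11:26379,192.168.1.12:26379
# REDIS_SENTINEL_SERVICE_NAME=mymaster
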
# Redis Cluster configuration
REDIS_USE_CLUSTERS=false
REDIS_CLUSTERS=
REDIS_CLUSTERS_PASSWORD=
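# Example (illustrative values, comma-separated host:port pairs):
# REDIS_USE_CLUSTERS=true
# REDIS_CLUSTERS=192.168.1.20:6379,192.168.1.21:6379,192.168.1.22:6379
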
# Celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1
CELERY_BACKEND=redis
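# The broker URL has the form redis://[user]:[password]@[host]:[port]/[db];
# the default above reuses the Redis password and selects database 1.
# Illustrative example: CELERY_BROKER_URL=redis://:mypassword@redis.internal:6379/1
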
# Database configuration
DB_TYPE=postgresql
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=localhost
DB_PORT=5432
DB_DATABASE=dify

SQLALCHEMY_POOL_PRE_PING=true
SQLALCHEMY_POOL_TIMEOUT=30

# Storage configuration
# Used to store uploaded files, private keys, etc.
# Storage type: opendal, s3, aliyun-oss, azure-blob, baidu-obs, google-storage, huawei-obs, oci-storage, tencent-cos, volcengine-tos, supabase
STORAGE_TYPE=opendal
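# Example (illustrative): to store files in S3 instead of the local filesystem,
# set STORAGE_TYPE=s3 and fill in the S3_* variables below.
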
# Apache OpenDAL storage configuration, refer to https://github.com/apache/opendal
OPENDAL_SCHEME=fs
OPENDAL_FS_ROOT=storage

# S3 Storage configuration
S3_USE_AWS_MANAGED_IAM=false
S3_ENDPOINT=https://your-bucket-name.storage.s3.cloudflare.com
S3_BUCKET_NAME=your-bucket-name
S3_ACCESS_KEY=your-access-key
S3_SECRET_KEY=your-secret-key
S3_REGION=your-region

# Azure Blob Storage configuration
AZURE_BLOB_ACCOUNT_NAME=your-account-name
AZURE_BLOB_ACCOUNT_KEY=your-account-key
AZURE_BLOB_CONTAINER_NAME=your-container-name
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net

# Aliyun OSS Storage configuration
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
ALIYUN_OSS_ACCESS_KEY=your-access-key
ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=your-endpoint
ALIYUN_OSS_AUTH_VERSION=v1
ALIYUN_OSS_REGION=your-region
# Don't start with '/'. OSS doesn't support a leading slash in object names.
ALIYUN_OSS_PATH=your-path
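# Example (illustrative): ALIYUN_OSS_PATH=dify/uploads (note: no leading slash)
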
# Google Storage configuration
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string
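# Example (illustrative): produce the base64 string from a downloaded
# service-account key file, e.g. `base64 -w 0 service-account.json` (Linux)
# or `base64 -i service-account.json` (macOS).
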
# Tencent COS Storage configuration
TENCENT_COS_BUCKET_NAME=your-bucket-name
TENCENT_COS_SECRET_KEY=your-secret-key
TENCENT_COS_SECRET_ID=your-secret-id
TENCENT_COS_REGION=your-region
TENCENT_COS_SCHEME=your-scheme

# Huawei OBS Storage Configuration
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
HUAWEI_OBS_SECRET_KEY=your-secret-key
HUAWEI_OBS_ACCESS_KEY=your-access-key
HUAWEI_OBS_SERVER=your-server-url

# Baidu OBS Storage Configuration
BAIDU_OBS_BUCKET_NAME=your-bucket-name
BAIDU_OBS_SECRET_KEY=your-secret-key
BAIDU_OBS_ACCESS_KEY=your-access-key
BAIDU_OBS_ENDPOINT=your-server-url

# OCI Storage configuration
OCI_ENDPOINT=your-endpoint
OCI_BUCKET_NAME=your-bucket-name
OCI_ACCESS_KEY=your-access-key
OCI_SECRET_KEY=your-secret-key
OCI_REGION=your-region

# Volcengine TOS Storage configuration
VOLCENGINE_TOS_ENDPOINT=your-endpoint
VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
VOLCENGINE_TOS_ACCESS_KEY=your-access-key
VOLCENGINE_TOS_SECRET_KEY=your-secret-key
VOLCENGINE_TOS_REGION=your-region

# Supabase Storage Configuration
SUPABASE_BUCKET_NAME=your-bucket-name
SUPABASE_API_KEY=your-access-key
SUPABASE_URL=your-server-url

# CORS configuration
WEB_API_CORS_ALLOW_ORIGINS=http://localhost:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://localhost:3000,*
# When the frontend and backend run on different subdomains, set COOKIE_DOMAIN to the site's top-level domain (e.g., `example.com`). Leading dots are optional.
COOKIE_DOMAIN=
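# Example (illustrative): in production, list explicit origins instead of `*`:
# WEB_API_CORS_ALLOW_ORIGINS=https://app.example.com
# CONSOLE_CORS_ALLOW_ORIGINS=https://console.example.com
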
# Vector database configuration
# Supported values are `weaviate`, `oceanbase`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`, `vastbase`, `tidb`, `tidb_on_qdrant`, `baidu`, `lindorm`, `huawei_cloud`, `upstash`, `matrixone`.
VECTOR_STORE=weaviate
# Prefix used to create collection names in the vector database
VECTOR_INDEX_NAME_PREFIX=Vector_index
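# For example (illustrative), with the default prefix a dataset's collection
# may end up named something like Vector_index_<dataset_id>_Node.
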
# Weaviate configuration
WEAVIATE_ENDPOINT=http://localhost:8080
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_GRPC_ENABLED=false
WEAVIATE_BATCH_SIZE=100
WEAVIATE_TOKENIZATION=word

# OceanBase Vector configuration
OCEANBASE_VECTOR_HOST=127.0.0.1
OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_MEMORY_LIMIT=6G
OCEANBASE_ENABLE_HYBRID_SEARCH=false
OCEANBASE_FULLTEXT_PARSER=ik
SEEKDB_MEMORY_LIMIT=2G

# Qdrant configuration, use `http://localhost:6333` for local mode or `https://your-qdrant-cluster-url.qdrant.io` for remote mode
QDRANT_URL=http://localhost:6333
QDRANT_API_KEY=difyai123456
QDRANT_CLIENT_TIMEOUT=20
QDRANT_GRPC_ENABLED=false
QDRANT_GRPC_PORT=6334
QDRANT_REPLICATION_FACTOR=1

# Couchbase configuration
COUCHBASE_CONNECTION_STRING=127.0.0.1
COUCHBASE_USER=Administrator
COUCHBASE_PASSWORD=password
COUCHBASE_BUCKET_NAME=Embeddings
COUCHBASE_SCOPE_NAME=_default

# Milvus configuration
MILVUS_URI=http://127.0.0.1:19530
MILVUS_TOKEN=
MILVUS_USER=root
MILVUS_PASSWORD=Milvus
MILVUS_ANALYZER_PARAMS=

# MyScale configuration
MYSCALE_HOST=127.0.0.1
MYSCALE_PORT=8123
MYSCALE_USER=default
MYSCALE_PASSWORD=
MYSCALE_DATABASE=default
MYSCALE_FTS_PARAMS=

# Relyt configuration
RELYT_HOST=127.0.0.1
RELYT_PORT=5432
RELYT_USER=postgres
RELYT_PASSWORD=postgres
RELYT_DATABASE=postgres

# Tencent Vector DB configuration
TENCENT_VECTOR_DB_URL=http://127.0.0.1
TENCENT_VECTOR_DB_API_KEY=dify
TENCENT_VECTOR_DB_TIMEOUT=30
TENCENT_VECTOR_DB_USERNAME=dify
TENCENT_VECTOR_DB_DATABASE=dify
TENCENT_VECTOR_DB_SHARD=1
TENCENT_VECTOR_DB_REPLICAS=2
TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false

# Elasticsearch configuration
ELASTICSEARCH_HOST=127.0.0.1
ELASTICSEARCH_PORT=9200
ELASTICSEARCH_USERNAME=elastic
ELASTICSEARCH_PASSWORD=elastic

# PGVECTO_RS configuration
PGVECTO_RS_HOST=localhost
PGVECTO_RS_PORT=5431
PGVECTO_RS_USER=postgres
PGVECTO_RS_PASSWORD=difyai123456
PGVECTO_RS_DATABASE=postgres

# PGVector configuration
PGVECTOR_HOST=127.0.0.1
PGVECTOR_PORT=5433
PGVECTOR_USER=postgres
PGVECTOR_PASSWORD=postgres
PGVECTOR_DATABASE=postgres
PGVECTOR_MIN_CONNECTION=1
PGVECTOR_MAX_CONNECTION=5

# TableStore Vector configuration
TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com
TABLESTORE_INSTANCE_NAME=instance-name
TABLESTORE_ACCESS_KEY_ID=xxx
TABLESTORE_ACCESS_KEY_SECRET=xxx
TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false

# TiDB Vector configuration
TIDB_VECTOR_HOST=xxx.eu-central-1.xxx.aws.tidbcloud.com
TIDB_VECTOR_PORT=4000
TIDB_VECTOR_USER=xxx.root
TIDB_VECTOR_PASSWORD=xxxxxx
TIDB_VECTOR_DATABASE=dify

# TiDB on Qdrant configuration
TIDB_ON_QDRANT_URL=http://127.0.0.1
TIDB_ON_QDRANT_API_KEY=dify
TIDB_ON_QDRANT_CLIENT_TIMEOUT=20
TIDB_ON_QDRANT_GRPC_ENABLED=false
TIDB_ON_QDRANT_GRPC_PORT=6334
TIDB_PUBLIC_KEY=dify
TIDB_PRIVATE_KEY=dify
TIDB_API_URL=http://127.0.0.1
TIDB_IAM_API_URL=http://127.0.0.1
TIDB_REGION=regions/aws-us-east-1
TIDB_PROJECT_ID=dify
TIDB_SPEND_LIMIT=100

# Chroma configuration
CHROMA_HOST=127.0.0.1
CHROMA_PORT=8000
CHROMA_TENANT=default_tenant
CHROMA_DATABASE=default_database
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider
CHROMA_AUTH_CREDENTIALS=difyai123456

# AnalyticDB configuration
ANALYTICDB_KEY_ID=your-ak
ANALYTICDB_KEY_SECRET=your-sk
ANALYTICDB_REGION_ID=cn-hangzhou
ANALYTICDB_INSTANCE_ID=gp-ab123456
ANALYTICDB_ACCOUNT=testaccount
ANALYTICDB_PASSWORD=testpassword
ANALYTICDB_NAMESPACE=dify
ANALYTICDB_NAMESPACE_PASSWORD=difypassword
ANALYTICDB_HOST=gp-test.aliyuncs.com
ANALYTICDB_PORT=5432
ANALYTICDB_MIN_CONNECTION=1
ANALYTICDB_MAX_CONNECTION=5

# OpenSearch configuration
OPENSEARCH_HOST=127.0.0.1
OPENSEARCH_PORT=9200
OPENSEARCH_USER=admin
OPENSEARCH_PASSWORD=admin
OPENSEARCH_SECURE=true
OPENSEARCH_VERIFY_CERTS=true

# Baidu Vector DB configuration
BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000
BAIDU_VECTOR_DB_ACCOUNT=root
BAIDU_VECTOR_DB_API_KEY=dify
BAIDU_VECTOR_DB_DATABASE=dify
BAIDU_VECTOR_DB_SHARD=1
BAIDU_VECTOR_DB_REPLICAS=3
BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER
BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE

# Upstash configuration
UPSTASH_VECTOR_URL=your-server-url
UPSTASH_VECTOR_TOKEN=your-access-token

# ViKingDB configuration
VIKINGDB_ACCESS_KEY=your-ak
VIKINGDB_SECRET_KEY=your-sk
VIKINGDB_REGION=cn-shanghai
VIKINGDB_HOST=api-vikingdb.xxx.volces.com
VIKINGDB_SCHEMA=http
VIKINGDB_CONNECTION_TIMEOUT=30
VIKINGDB_SOCKET_TIMEOUT=30

# Matrixone configuration
MATRIXONE_HOST=127.0.0.1
MATRIXONE_PORT=6001
MATRIXONE_USER=dump
MATRIXONE_PASSWORD=111
MATRIXONE_DATABASE=dify

# Lindorm configuration
LINDORM_URL=http://ld-*******************-proxy-search-pub.lindorm.aliyuncs.com:30070
LINDORM_USERNAME=admin
LINDORM_PASSWORD=admin
LINDORM_USING_UGC=True
LINDORM_QUERY_TIMEOUT=1

# AlibabaCloud MySQL Vector configuration
ALIBABACLOUD_MYSQL_HOST=127.0.0.1
ALIBABACLOUD_MYSQL_PORT=3306
ALIBABACLOUD_MYSQL_USER=root
ALIBABACLOUD_MYSQL_PASSWORD=root
ALIBABACLOUD_MYSQL_DATABASE=dify
ALIBABACLOUD_MYSQL_MAX_CONNECTION=5
ALIBABACLOUD_MYSQL_HNSW_M=6

# openGauss configuration
OPENGAUSS_HOST=127.0.0.1
OPENGAUSS_PORT=6600
OPENGAUSS_USER=postgres
OPENGAUSS_PASSWORD=Dify@123
OPENGAUSS_DATABASE=dify
OPENGAUSS_MIN_CONNECTION=1
OPENGAUSS_MAX_CONNECTION=5

# Upload configuration (file size limits are in MB)
UPLOAD_FILE_SIZE_LIMIT=15
UPLOAD_FILE_BATCH_LIMIT=5
UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50

# Comma-separated list of file extensions blocked from upload for security reasons.
# Extensions should be lowercase without dots (e.g., exe,bat,sh,dll).
# Empty by default to allow all file types.
# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll
UPLOAD_FILE_EXTENSION_BLACKLIST=

# Model configuration
MULTIMODAL_SEND_FORMAT=base64
PROMPT_GENERATION_MAX_TOKENS=512
CODE_GENERATION_MAX_TOKENS=1024
PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false

# Mail configuration; supported types: resend, smtp, sendgrid
MAIL_TYPE=
# If using SendGrid, use the 'from' field for authentication if necessary.
MAIL_DEFAULT_SEND_FROM=no-reply <no-reply@dify.ai>

# Resend configuration
RESEND_API_KEY=
RESEND_API_URL=https://api.resend.com

# SMTP configuration
SMTP_SERVER=smtp.gmail.com
SMTP_PORT=465
SMTP_USERNAME=123
SMTP_PASSWORD=abc
SMTP_USE_TLS=true
SMTP_OPPORTUNISTIC_TLS=false

# SendGrid configuration
SENDGRID_API_KEY=

# Sentry configuration
SENTRY_DSN=

# Debug configuration
DEBUG=false
ENABLE_REQUEST_LOGGING=false
SQLALCHEMY_ECHO=false

# Notion import configuration, supports public and internal integration types
NOTION_INTEGRATION_TYPE=public
NOTION_CLIENT_SECRET=your-client-secret
NOTION_CLIENT_ID=your-client-id
NOTION_INTERNAL_SECRET=your-internal-secret

ETL_TYPE=dify
UNSTRUCTURED_API_URL=
UNSTRUCTURED_API_KEY=
SCARF_NO_ANALYTICS=true

# SSRF proxy configuration
SSRF_PROXY_HTTP_URL=
SSRF_PROXY_HTTPS_URL=
SSRF_DEFAULT_MAX_RETRIES=3
SSRF_DEFAULT_TIME_OUT=5
SSRF_DEFAULT_CONNECT_TIME_OUT=5
SSRF_DEFAULT_READ_TIME_OUT=5
SSRF_DEFAULT_WRITE_TIME_OUT=5
SSRF_POOL_MAX_CONNECTIONS=100
SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20
SSRF_POOL_KEEPALIVE_EXPIRY=5.0
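# Example (illustrative): route outbound requests through a local forward proxy
# such as Squid to mitigate SSRF:
# SSRF_PROXY_HTTP_URL=http://127.0.0.1:3128
# SSRF_PROXY_HTTPS_URL=http://127.0.0.1:3128
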
BATCH_UPLOAD_LIMIT=10
KEYWORD_DATA_SOURCE_TYPE=database

# Workflow file upload limit
WORKFLOW_FILE_UPLOAD_LIMIT=10

# Code execution configuration
CODE_EXECUTION_ENDPOINT=http://127.0.0.1:8194
CODE_EXECUTION_API_KEY=dify-sandbox
CODE_EXECUTION_SSL_VERIFY=True
CODE_EXECUTION_POOL_MAX_CONNECTIONS=100
CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20
CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0
CODE_EXECUTION_CONNECT_TIMEOUT=10
CODE_EXECUTION_READ_TIMEOUT=60
CODE_EXECUTION_WRITE_TIMEOUT=10
CODE_MAX_NUMBER=9223372036854775807
CODE_MIN_NUMBER=-9223372036854775808
CODE_MAX_STRING_LENGTH=400000
TEMPLATE_TRANSFORM_MAX_LENGTH=400000
CODE_MAX_STRING_ARRAY_LENGTH=30
CODE_MAX_OBJECT_ARRAY_LENGTH=30
CODE_MAX_NUMBER_ARRAY_LENGTH=1000

# API Tool configuration
API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
API_TOOL_DEFAULT_READ_TIMEOUT=60

# HTTP Node configuration (timeouts in seconds, sizes in bytes)
HTTP_REQUEST_MAX_CONNECT_TIMEOUT=300
HTTP_REQUEST_MAX_READ_TIMEOUT=600
HTTP_REQUEST_MAX_WRITE_TIMEOUT=600
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
HTTP_REQUEST_NODE_SSL_VERIFY=True

# Webhook request configuration (size in bytes)
WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760

# Respect X-Forwarded-* headers to redirect clients
RESPECT_XFORWARD_HEADERS_ENABLED=false

# Log file path
LOG_FILE=
# Log file max size, in MB
LOG_FILE_MAX_SIZE=20
# Log file max backup count
LOG_FILE_BACKUP_COUNT=5
# Log date format
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
# Log timezone
LOG_TZ=UTC
# Log format
LOG_FORMAT=%(asctime)s,%(msecs)d %(levelname)-2s [%(filename)s:%(lineno)d] %(req_id)s %(message)s
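# With the format and date format above, an emitted line looks roughly like
# (illustrative): 2024-01-01 12:00:00,123 INFO [app.py:42] req-abc123 Request handled
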
# Indexing configuration
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000

# Workflow runtime configuration
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
MAX_VARIABLE_SIZE=204800

# GraphEngine Worker Pool Configuration
# Minimum number of workers per GraphEngine instance (default: 1)
GRAPH_ENGINE_MIN_WORKERS=1
# Maximum number of workers per GraphEngine instance (default: 10)
GRAPH_ENGINE_MAX_WORKERS=10
# Queue depth threshold that triggers worker scale up (default: 3)
GRAPH_ENGINE_SCALE_UP_THRESHOLD=3
# Seconds of idle time before scaling down workers (default: 5.0)
GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0

# Workflow storage configuration
# Options: rdbms, hybrid
# rdbms: Use only the relational database (default)
# hybrid: Save new data to object storage, read from both object storage and RDBMS
WORKFLOW_NODE_EXECUTION_STORAGE=rdbms

# Repository configuration
# Core workflow execution repository implementation
CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository

# Core workflow node execution repository implementation
CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository

# API workflow node execution repository implementation
API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository

# API workflow run repository implementation
API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository

# Workflow log cleanup configuration
# Enable automatic cleanup of workflow run logs to manage database size
WORKFLOW_LOG_CLEANUP_ENABLED=false
# Number of days to retain workflow run logs (default: 30 days)
WORKFLOW_LOG_RETENTION_DAYS=30
# Batch size for workflow log cleanup operations (default: 100)
WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100

# App configuration
APP_MAX_EXECUTION_TIME=1200
APP_MAX_ACTIVE_REQUESTS=0

# Celery beat configuration
CELERY_BEAT_SCHEDULER_TIME=1

# Celery scheduled tasks configuration
ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false
ENABLE_CLEAN_UNUSED_DATASETS_TASK=false
ENABLE_CREATE_TIDB_SERVERLESS_TASK=false
ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false
ENABLE_CLEAN_MESSAGES=false
ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false
ENABLE_DATASETS_QUEUE_MONITOR=false
ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true
ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true
# Interval in minutes for polling scheduled workflows (default: 1)
WORKFLOW_SCHEDULE_POLLER_INTERVAL=1
WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100
# Maximum number of scheduled workflows to dispatch per tick (0 for unlimited)
WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0

# Position configuration
POSITION_TOOL_PINS=
POSITION_TOOL_INCLUDES=
POSITION_TOOL_EXCLUDES=

POSITION_PROVIDER_PINS=
POSITION_PROVIDER_INCLUDES=
POSITION_PROVIDER_EXCLUDES=
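# Example (illustrative): pin or filter by comma-separated names:
# POSITION_TOOL_PINS=bing,google
# POSITION_PROVIDER_PINS=openai,anthropic
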
# Plugin configuration
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DAEMON_URL=http://127.0.0.1:5002
PLUGIN_REMOTE_INSTALL_PORT=5003
PLUGIN_REMOTE_INSTALL_HOST=localhost
PLUGIN_MAX_PACKAGE_SIZE=15728640
INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1

# Marketplace configuration
MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai

# Endpoint configuration
ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}
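# The {hook_id} placeholder is substituted per endpoint; e.g. (illustrative)
# a hook id of abc123 resolves to http://localhost:5002/e/abc123
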
# Token expiry in minutes for reset-password, email-register, change-email, and owner-transfer flows
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5
CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5
OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5

CREATE_TIDB_SERVICE_JOB_ENABLED=false

# Maximum number of tasks that can be submitted to the thread pool for parallel node execution
MAX_SUBMIT_COUNT=100
# Login lockout duration in seconds
LOGIN_LOCKOUT_DURATION=86400

# Enable OpenTelemetry
ENABLE_OTEL=false
OTLP_TRACE_ENDPOINT=
OTLP_METRIC_ENDPOINT=
OTLP_BASE_ENDPOINT=http://localhost:4318
OTLP_API_KEY=
OTEL_EXPORTER_OTLP_PROTOCOL=
OTEL_EXPORTER_TYPE=otlp
OTEL_SAMPLING_RATE=0.1
OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000
OTEL_MAX_QUEUE_SIZE=2048
OTEL_MAX_EXPORT_BATCH_SIZE=512
OTEL_METRIC_EXPORT_INTERVAL=60000
OTEL_BATCH_EXPORT_TIMEOUT=10000
OTEL_METRIC_EXPORT_TIMEOUT=30000

# Prevent Clickjacking
ALLOW_EMBED=false

# Dataset queue monitor configuration
QUEUE_MONITOR_THRESHOLD=200
# You can configure multiple recipients, separated by commas, e.g.: test1@dify.ai,test2@dify.ai
QUEUE_MONITOR_ALERT_EMAILS=
# Monitor interval in minutes, default is 30 minutes
QUEUE_MONITOR_INTERVAL=30

# Swagger UI configuration
SWAGGER_UI_ENABLED=true
SWAGGER_UI_PATH=/swagger-ui.html

# Whether to encrypt dataset IDs when exporting DSL files (default: true)
# Set to false to export dataset IDs as plain text for easier cross-environment import
DSL_EXPORT_ENCRYPT_DATASET_ID=true

# Tenant isolated task queue configuration
TENANT_ISOLATED_TASK_CONCURRENCY=1

# Maximum number of segments for the dataset segments API (0 for unlimited)
DATASET_MAX_SEGMENTS_PER_REQUEST=0