From 555ae8f2d78d1ea17bc501b43fa3e70e9cb5efbe Mon Sep 17 00:00:00 2001 From: deos-coworking Date: Fri, 24 Jan 2025 17:18:05 +0800 Subject: [PATCH 1/6] Update Dify --- .gitignore | 1 + dify/code/.env.example | 145 ++--- dify/code/docker-compose-template.yaml | 576 +++++++++++++++++++ dify/code/docker-compose.yaml | 369 ++++++++---- dify/code/elasticsearch/docker-entrypoint.sh | 25 + dify/code/generate_docker_compose | 112 ++++ package-lock.json | 2 +- 7 files changed, 1036 insertions(+), 194 deletions(-) create mode 100644 dify/code/docker-compose-template.yaml create mode 100755 dify/code/elasticsearch/docker-entrypoint.sh create mode 100755 dify/code/generate_docker_compose diff --git a/.gitignore b/.gitignore index d131151e2..2c150e996 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ repo node_modules +.DS_Store diff --git a/dify/code/.env.example b/dify/code/.env.example index 719a02587..b21bdc708 100644 --- a/dify/code/.env.example +++ b/dify/code/.env.example @@ -105,8 +105,12 @@ FILES_ACCESS_TIMEOUT=300 # Access token expiration time in minutes ACCESS_TOKEN_EXPIRE_MINUTES=60 +# Refresh token expiration time in days +REFRESH_TOKEN_EXPIRE_DAYS=30 + # The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer. APP_MAX_ACTIVE_REQUESTS=0 +APP_MAX_EXECUTION_TIME=1200 # ------------------------------ # Container Startup Related Configuration @@ -119,15 +123,18 @@ DIFY_BIND_ADDRESS=0.0.0.0 # API service binding port number, default 5001. DIFY_PORT=5001 -# The number of API server workers, i.e., the number of gevent workers. -# Formula: number of cpu cores x 2 + 1 +# The number of API server workers, i.e., the number of workers. +# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent # Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers -SERVER_WORKER_AMOUNT= +SERVER_WORKER_AMOUNT=1 # Defaults to gevent. If using windows, it can be switched to sync or solo. 
-SERVER_WORKER_CLASS= +SERVER_WORKER_CLASS=gevent + +# Default number of worker connections, the default is 10. +SERVER_WORKER_CONNECTIONS=10 -# Similar to SERVER_WORKER_CLASS. Default is gevent. +# Similar to SERVER_WORKER_CLASS. # If using windows, it can be switched to sync or solo. CELERY_WORKER_CLASS= @@ -227,6 +234,7 @@ REDIS_PORT=6379 REDIS_USERNAME= REDIS_PASSWORD=difyai123456 REDIS_USE_SSL=false +REDIS_DB=0 # Whether to use Redis Sentinel mode. # If set to true, the application will automatically discover and connect to the master node through Sentinel. @@ -281,44 +289,42 @@ CONSOLE_CORS_ALLOW_ORIGINS=* # ------------------------------ # The type of storage to use for storing user files. -# Supported values are `local` , `s3` , `azure-blob` , `google-storage`, `tencent-cos`, `huawei-obs`, `volcengine-tos`, `baidu-obs`, `supabase` -# Default: `local` -STORAGE_TYPE=local -STORAGE_LOCAL_PATH=storage +STORAGE_TYPE=opendal + +# Apache OpenDAL Configuration +# The configuration for OpenDAL consists of the following format: OPENDAL__. +# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services. +# Dify will scan configurations starting with OPENDAL_ and automatically apply them. +# The scheme name for the OpenDAL storage. +OPENDAL_SCHEME=fs +# Configurations for OpenDAL Local File System. +OPENDAL_FS_ROOT=storage # S3 Configuration -# Whether to use AWS managed IAM roles for authenticating with the S3 service. -# If set to false, the access key and secret key must be provided. -S3_USE_AWS_MANAGED_IAM=false -# The endpoint of the S3 service. +# S3_ENDPOINT= -# The region of the S3 service. S3_REGION=us-east-1 -# The name of the S3 bucket to use for storing files. S3_BUCKET_NAME=difyai -# The access key to use for authenticating with the S3 service. S3_ACCESS_KEY= -# The secret key to use for authenticating with the S3 service. 
S3_SECRET_KEY= +# Whether to use AWS managed IAM roles for authenticating with the S3 service. +# If set to false, the access key and secret key must be provided. +S3_USE_AWS_MANAGED_IAM=false # Azure Blob Configuration -# The name of the Azure Blob Storage account to use for storing files. +# AZURE_BLOB_ACCOUNT_NAME=difyai -# The access key to use for authenticating with the Azure Blob Storage account. AZURE_BLOB_ACCOUNT_KEY=difyai -# The name of the Azure Blob Storage container to use for storing files. AZURE_BLOB_CONTAINER_NAME=difyai-container -# The URL of the Azure Blob Storage account. AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net # Google Storage Configuration -# The name of the Google Storage bucket to use for storing files. +# GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name -# The service account JSON key to use for authenticating with the Google Storage service. -GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string +GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= # The Alibaba Cloud OSS configurations, -# only available when STORAGE_TYPE is `aliyun-oss` +# ALIYUN_OSS_BUCKET_NAME=your-bucket-name ALIYUN_OSS_ACCESS_KEY=your-access-key ALIYUN_OSS_SECRET_KEY=your-secret-key @@ -329,55 +335,47 @@ ALIYUN_OSS_AUTH_VERSION=v4 ALIYUN_OSS_PATH=your-path # Tencent COS Configuration -# The name of the Tencent COS bucket to use for storing files. +# TENCENT_COS_BUCKET_NAME=your-bucket-name -# The secret key to use for authenticating with the Tencent COS service. TENCENT_COS_SECRET_KEY=your-secret-key -# The secret id to use for authenticating with the Tencent COS service. TENCENT_COS_SECRET_ID=your-secret-id -# The region of the Tencent COS service. TENCENT_COS_REGION=your-region -# The scheme of the Tencent COS service. 
TENCENT_COS_SCHEME=your-scheme +# Oracle Storage Configuration +# +OCI_ENDPOINT=https://objectstorage.us-ashburn-1.oraclecloud.com +OCI_BUCKET_NAME=your-bucket-name +OCI_ACCESS_KEY=your-access-key +OCI_SECRET_KEY=your-secret-key +OCI_REGION=us-ashburn-1 + # Huawei OBS Configuration -# The name of the Huawei OBS bucket to use for storing files. +# HUAWEI_OBS_BUCKET_NAME=your-bucket-name -# The secret key to use for authenticating with the Huawei OBS service. HUAWEI_OBS_SECRET_KEY=your-secret-key -# The access key to use for authenticating with the Huawei OBS service. HUAWEI_OBS_ACCESS_KEY=your-access-key -# The server url of the HUAWEI OBS service. HUAWEI_OBS_SERVER=your-server-url # Volcengine TOS Configuration -# The name of the Volcengine TOS bucket to use for storing files. +# VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name -# The secret key to use for authenticating with the Volcengine TOS service. VOLCENGINE_TOS_SECRET_KEY=your-secret-key -# The access key to use for authenticating with the Volcengine TOS service. VOLCENGINE_TOS_ACCESS_KEY=your-access-key -# The endpoint of the Volcengine TOS service. VOLCENGINE_TOS_ENDPOINT=your-server-url -# The region of the Volcengine TOS service. VOLCENGINE_TOS_REGION=your-region # Baidu OBS Storage Configuration -# The name of the Baidu OBS bucket to use for storing files. +# BAIDU_OBS_BUCKET_NAME=your-bucket-name -# The secret key to use for authenticating with the Baidu OBS service. BAIDU_OBS_SECRET_KEY=your-secret-key -# The access key to use for authenticating with the Baidu OBS service. BAIDU_OBS_ACCESS_KEY=your-access-key -# The endpoint of the Baidu OBS service. BAIDU_OBS_ENDPOINT=your-server-url # Supabase Storage Configuration -# The name of the Supabase bucket to use for storing files. +# SUPABASE_BUCKET_NAME=your-bucket-name -# The api key to use for authenticating with the Supabase service. SUPABASE_API_KEY=your-access-key -# The project endpoint url of the Supabase service. 
SUPABASE_URL=your-server-url # ------------------------------ @@ -385,34 +383,27 @@ SUPABASE_URL=your-server-url # ------------------------------ # The type of vector store to use. -# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`. +# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`. VECTOR_STORE=weaviate # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. WEAVIATE_ENDPOINT=http://weaviate:8080 -# The Weaviate API key. WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih # The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`. QDRANT_URL=http://qdrant:6333 -# The Qdrant API key. QDRANT_API_KEY=difyai123456 -# The Qdrant client timeout setting. QDRANT_CLIENT_TIMEOUT=20 -# The Qdrant client enable gRPC mode. QDRANT_GRPC_ENABLED=false -# The Qdrant server gRPC mode PORT. QDRANT_GRPC_PORT=6334 # Milvus configuration Only available when VECTOR_STORE is `milvus`. # The milvus uri. MILVUS_URI=http://127.0.0.1:19530 -# The milvus token. MILVUS_TOKEN= -# The milvus username. MILVUS_USER=root -# The milvus password. 
MILVUS_PASSWORD=Milvus +MILVUS_ENABLE_HYBRID_SEARCH=False # MyScale configuration, only available when VECTOR_STORE is `myscale` # For multi-language support, please set MYSCALE_FTS_PARAMS with referring to: @@ -465,8 +456,8 @@ ANALYTICDB_MAX_CONNECTION=5 # TiDB vector configurations, only available when VECTOR_STORE is `tidb` TIDB_VECTOR_HOST=tidb TIDB_VECTOR_PORT=4000 -TIDB_VECTOR_USER=xxx.root -TIDB_VECTOR_PASSWORD=xxxxxx +TIDB_VECTOR_USER= +TIDB_VECTOR_PASSWORD= TIDB_VECTOR_DATABASE=dify # Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant` @@ -489,7 +480,7 @@ CHROMA_PORT=8000 CHROMA_TENANT=default_tenant CHROMA_DATABASE=default_database CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider -CHROMA_AUTH_CREDENTIALS=xxxxxx +CHROMA_AUTH_CREDENTIALS= # Oracle configuration, only available when VECTOR_STORE is `oracle` ORACLE_HOST=oracle @@ -526,6 +517,7 @@ ELASTICSEARCH_HOST=0.0.0.0 ELASTICSEARCH_PORT=9200 ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=elastic +KIBANA_PORT=5601 # baidu vector configurations, only available when VECTOR_STORE is `baidu` BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 @@ -545,11 +537,10 @@ VIKINGDB_SCHEMA=http VIKINGDB_CONNECTION_TIMEOUT=30 VIKINGDB_SOCKET_TIMEOUT=30 - # Lindorm configuration, only available when VECTOR_STORE is `lindorm` -LINDORM_URL=http://ld-***************-proxy-search-pub.lindorm.aliyuncs.com:30070 -LINDORM_USERNAME=username -LINDORM_PASSWORD=password +LINDORM_URL=http://lindorm:30070 +LINDORM_USERNAME=lindorm +LINDORM_PASSWORD=lindorm # OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase` OCEANBASE_VECTOR_HOST=oceanbase @@ -557,8 +548,13 @@ OCEANBASE_VECTOR_PORT=2881 OCEANBASE_VECTOR_USER=root@test OCEANBASE_VECTOR_PASSWORD=difyai123456 OCEANBASE_VECTOR_DATABASE=test +OCEANBASE_CLUSTER_NAME=difyai OCEANBASE_MEMORY_LIMIT=6G +# Upstash Vector configuration, only available when VECTOR_STORE is `upstash` 
+UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io +UPSTASH_VECTOR_TOKEN=dify + # ------------------------------ # Knowledge Configuration # ------------------------------ @@ -601,20 +597,16 @@ CODE_GENERATION_MAX_TOKENS=1024 # Multi-modal Configuration # ------------------------------ -# The format of the image/video sent when the multi-modal model is input, +# The format of the image/video/audio/document sent when the multi-modal model is input, # the default is base64, optional url. # The delay of the call in url mode will be lower than that in base64 mode. # It is generally recommended to use the more compatible base64 mode. -# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video. -MULTIMODAL_SEND_IMAGE_FORMAT=base64 -MULTIMODAL_SEND_VIDEO_FORMAT=base64 - +# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document. +MULTIMODAL_SEND_FORMAT=base64 # Upload image file size limit, default 10M. UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 - # Upload video file size limit, default 100M. UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 - # Upload audio file size limit, default 50M. UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 @@ -622,15 +614,14 @@ UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 # Sentry Configuration # Used for application monitoring and error log tracking. # ------------------------------ +SENTRY_DSN= # API Service Sentry DSN address, default is empty, when empty, # all monitoring information is not reported to Sentry. # If not set, Sentry error reporting will be disabled. API_SENTRY_DSN= - # API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%. API_SENTRY_TRACES_SAMPLE_RATE=1.0 - # API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. 
API_SENTRY_PROFILES_SAMPLE_RATE=1.0 @@ -668,8 +659,10 @@ MAIL_TYPE=resend MAIL_DEFAULT_SEND_FROM= # API-Key for the Resend email provider, used when MAIL_TYPE is `resend`. +RESEND_API_URL=https://api.resend.com RESEND_API_KEY=your-resend-api-key + # SMTP server configuration, used when MAIL_TYPE is `smtp` SMTP_SERVER= SMTP_PORT=465 @@ -694,24 +687,26 @@ RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 # The sandbox service endpoint. CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox CODE_MAX_NUMBER=9223372036854775807 CODE_MIN_NUMBER=-9223372036854775808 CODE_MAX_DEPTH=5 CODE_MAX_PRECISION=20 CODE_MAX_STRING_LENGTH=80000 -TEMPLATE_TRANSFORM_MAX_LENGTH=80000 CODE_MAX_STRING_ARRAY_LENGTH=30 CODE_MAX_OBJECT_ARRAY_LENGTH=30 CODE_MAX_NUMBER_ARRAY_LENGTH=1000 CODE_EXECUTION_CONNECT_TIMEOUT=10 CODE_EXECUTION_READ_TIMEOUT=60 CODE_EXECUTION_WRITE_TIMEOUT=10 +TEMPLATE_TRANSFORM_MAX_LENGTH=80000 # Workflow runtime configuration WORKFLOW_MAX_EXECUTION_STEPS=500 WORKFLOW_MAX_EXECUTION_TIME=1200 WORKFLOW_CALL_MAX_DEPTH=5 MAX_VARIABLE_SIZE=204800 +WORKFLOW_PARALLEL_DEPTH_LIMIT=3 WORKFLOW_FILE_UPLOAD_LIMIT=10 # HTTP request node in workflow configuration @@ -931,3 +926,9 @@ CSP_WHITELIST= # Enable or disable create tidb service job CREATE_TIDB_SERVICE_JOB_ENABLED=false + +# Maximum number of submitted thread count in a ThreadPool for parallel node execution +MAX_SUBMIT_COUNT=100 + +# The maximum number of top-k value for RAG. +TOP_K_MAX_VALUE=10 diff --git a/dify/code/docker-compose-template.yaml b/dify/code/docker-compose-template.yaml new file mode 100644 index 000000000..8aafc6188 --- /dev/null +++ b/dify/code/docker-compose-template.yaml @@ -0,0 +1,576 @@ +x-shared-env: &shared-api-worker-env +services: + # API service + api: + image: langgenius/dify-api:0.15.2 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'api' starts the API server. 
+ MODE: api + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # worker service + # The Celery worker for processing the queue. + worker: + image: langgenius/dify-api:0.15.2 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'worker' starts the Celery worker for processing the queue. + MODE: worker + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # Frontend web application. + web: + image: langgenius/dify-web:0.15.2 + restart: always + environment: + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + CSP_WHITELIST: ${CSP_WHITELIST:-} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + + # The postgres database. 
+ db: + image: postgres:15-alpine + restart: always + environment: + PGUSER: ${PGUSER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456} + POSTGRES_DB: ${POSTGRES_DB:-dify} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + command: > + postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}' + -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}' + -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' + -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' + -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + volumes: + - ./volumes/db/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # The redis cache. + redis: + image: redis:6-alpine + restart: always + environment: + REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456} + volumes: + # Mount the redis data directory to the container. + - ./volumes/redis/data:/data + # Set the redis password when startup redis server. + command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456} + healthcheck: + test: [ 'CMD', 'redis-cli', 'ping' ] + + # The DifySandbox + sandbox: + image: langgenius/dify-sandbox:0.2.10 + restart: always + environment: + # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. 
+ API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + GIN_MODE: ${SANDBOX_GIN_MODE:-release} + WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + volumes: + - ./volumes/sandbox/dependencies:/dependencies + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ] + networks: + - ssrf_proxy_network + + # ssrf_proxy server + # for more information, please refer to + # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed + ssrf_proxy: + image: ubuntu/squid:latest + restart: always + volumes: + - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template + - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh + entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + environment: + # pls clearly modify the squid env vars to fit your network environment. + HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + networks: + - ssrf_proxy_network + - default + + # Certbot service + # use `docker-compose --profile certbot up` to start the certbot service. 
+ certbot: + image: certbot/certbot + profiles: + - certbot + volumes: + - ./volumes/certbot/conf:/etc/letsencrypt + - ./volumes/certbot/www:/var/www/html + - ./volumes/certbot/logs:/var/log/letsencrypt + - ./volumes/certbot/conf/live:/etc/letsencrypt/live + - ./certbot/update-cert.template.txt:/update-cert.template.txt + - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh + environment: + - CERTBOT_EMAIL=${CERTBOT_EMAIL} + - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} + - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} + entrypoint: [ '/docker-entrypoint.sh' ] + command: [ 'tail', '-f', '/dev/null' ] + + # The nginx reverse proxy. + # used for reverse proxying the API service and Web service. + nginx: + image: nginx:latest + restart: always + volumes: + - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template + - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template + - ./nginx/https.conf.template:/etc/nginx/https.conf.template + - ./nginx/conf.d:/etc/nginx/conf.d + - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - ./nginx/ssl:/etc/ssl # cert dir (legacy) + - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container) + - ./volumes/certbot/conf:/etc/letsencrypt + - ./volumes/certbot/www:/var/www/html + entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + environment: + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_PORT: ${NGINX_PORT:-80} + # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory + # and modify the env vars below in .env if HTTPS_ENABLED is true. 
+ NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} + NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} + NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} + NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} + NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} + NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} + NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-} + depends_on: + - api + - web + ports: + - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}' + - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}' + + # The TiDB vector store. + # For production use, please refer to https://github.com/pingcap/tidb-docker-compose + tidb: + image: pingcap/tidb:v8.4.0 + profiles: + - tidb + command: + - --store=unistore + restart: always + + # The Weaviate vector store. + weaviate: + image: semitechnologies/weaviate:1.19.0 + profiles: + - '' + - weaviate + restart: always + volumes: + # Mount the Weaviate data directory to the con tainer. + - ./volumes/weaviate:/var/lib/weaviate + environment: + # The Weaviate configurations + # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information. 
+ PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false} + DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + + # Qdrant vector store. + # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.) + qdrant: + image: langgenius/qdrant:v1.7.3 + profiles: + - qdrant + restart: always + volumes: + - ./volumes/qdrant:/qdrant/storage + environment: + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + + # The Couchbase vector store. 
+ couchbase-server: + build: ./couchbase-server + profiles: + - couchbase + restart: always + environment: + - CLUSTER_NAME=dify_search + - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator} + - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password} + - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings} + - COUCHBASE_BUCKET_RAMSIZE=512 + - COUCHBASE_RAM_SIZE=2048 + - COUCHBASE_EVENTING_RAM_SIZE=512 + - COUCHBASE_INDEX_RAM_SIZE=512 + - COUCHBASE_FTS_RAM_SIZE=1024 + hostname: couchbase-server + container_name: couchbase-server + working_dir: /opt/couchbase + stdin_open: true + tty: true + entrypoint: [ "" ] + command: sh -c "/opt/couchbase/init/init-cbserver.sh" + volumes: + - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data + healthcheck: + # ensure bucket was created before proceeding + test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ] + interval: 10s + retries: 10 + start_period: 30s + timeout: 10s + + # The pgvector vector database. + pgvector: + image: pgvector/pgvector:pg16 + profiles: + - pgvector + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvector/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # pgvecto-rs vector store + pgvecto-rs: + image: tensorchord/pgvecto-rs:pg16-v0.3.0 + profiles: + - pgvecto-rs + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. 
+ POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # Chroma vector database + chroma: + image: ghcr.io/chroma-core/chroma:0.5.20 + profiles: + - chroma + restart: always + volumes: + - ./volumes/chroma:/chroma/chroma + environment: + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + + # OceanBase vector database + oceanbase: + image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215 + profiles: + - oceanbase + restart: always + volumes: + - ./volumes/oceanbase/data:/root/ob + - ./volumes/oceanbase/conf:/root/.obd/cluster + - ./volumes/oceanbase/init.d:/root/boot/init.d + environment: + OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OB_SERVER_IP: '127.0.0.1' + + # Oracle vector database + oracle: + image: container-registry.oracle.com/database/free:latest + profiles: + - oracle + restart: always + volumes: + - source: oradata + type: volume + target: /opt/oracle/oradata + - ./startupscripts:/opt/oracle/scripts/startup + environment: + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + + # Milvus vector database services + etcd: + container_name: milvus-etcd + image: quay.io/coreos/etcd:v3.5.5 + profiles: + - milvus + environment: + ETCD_AUTO_COMPACTION_MODE: 
${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + volumes: + - ./volumes/milvus/etcd:/etcd + command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd + healthcheck: + test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + minio: + container_name: milvus-minio + image: minio/minio:RELEASE.2023-03-20T20-16-18Z + profiles: + - milvus + environment: + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + volumes: + - ./volumes/milvus/minio:/minio_data + command: minio server /minio_data --console-address ":9001" + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + milvus-standalone: + container_name: milvus-standalone + image: milvusdb/milvus:v2.5.0-beta + profiles: + - milvus + command: [ 'milvus', 'run', 'standalone' ] + environment: + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true} + volumes: + - ./volumes/milvus/milvus:/var/lib/milvus + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ] + interval: 30s + start_period: 90s + timeout: 20s + retries: 3 + depends_on: + - etcd + - minio + ports: + - 19530:19530 + - 9091:9091 + networks: + - milvus + + # Opensearch vector database + opensearch: + container_name: opensearch + image: opensearchproject/opensearch:latest + profiles: + - opensearch + environment: + discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS: 
-Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + ulimits: + memlock: + soft: ${OPENSEARCH_MEMLOCK_SOFT:--1} + hard: ${OPENSEARCH_MEMLOCK_HARD:--1} + nofile: + soft: ${OPENSEARCH_NOFILE_SOFT:-65536} + hard: ${OPENSEARCH_NOFILE_HARD:-65536} + volumes: + - ./volumes/opensearch/data:/usr/share/opensearch/data + networks: + - opensearch-net + + opensearch-dashboards: + container_name: opensearch-dashboards + image: opensearchproject/opensearch-dashboards:latest + profiles: + - opensearch + environment: + OPENSEARCH_HOSTS: '["https://opensearch:9200"]' + volumes: + - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml + networks: + - opensearch-net + depends_on: + - opensearch + + # MyScale vector database + myscale: + container_name: myscale + image: myscale/myscaledb:1.6.4 + profiles: + - myscale + restart: always + tty: true + volumes: + - ./volumes/myscale/data:/var/lib/clickhouse + - ./volumes/myscale/log:/var/log/clickhouse-server + - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml + ports: + - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123} + + # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html + # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 + container_name: elasticsearch + profiles: + - elasticsearch + - elasticsearch-ja + restart: always + volumes: + - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - dify_es01_data:/usr/share/elasticsearch/data + environment: + ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + VECTOR_STORE: ${VECTOR_STORE:-} + cluster.name: dify-es-cluster + node.name: dify-es0 + discovery.type: single-node + 
xpack.license.self_generated.type: basic + xpack.security.enabled: 'true' + xpack.security.enrollment.enabled: 'false' + xpack.security.http.ssl.enabled: 'false' + ports: + - ${ELASTICSEARCH_PORT:-9200}:9200 + deploy: + resources: + limits: + memory: 2g + entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ] + healthcheck: + test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ] + interval: 30s + timeout: 10s + retries: 50 + + # https://www.elastic.co/guide/en/kibana/current/docker.html + # https://www.elastic.co/guide/en/kibana/current/settings.html + kibana: + image: docker.elastic.co/kibana/kibana:8.14.3 + container_name: kibana + profiles: + - elasticsearch + depends_on: + - elasticsearch + restart: always + environment: + XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa + NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana + XPACK_SECURITY_ENABLED: 'true' + XPACK_SECURITY_ENROLLMENT_ENABLED: 'false' + XPACK_SECURITY_HTTP_SSL_ENABLED: 'false' + XPACK_FLEET_ISAIRGAPPED: 'true' + I18N_LOCALE: zh-CN + SERVER_PORT: '5601' + ELASTICSEARCH_HOSTS: http://elasticsearch:9200 + ports: + - ${KIBANA_PORT:-5601}:5601 + healthcheck: + test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ] + interval: 30s + timeout: 10s + retries: 3 + + # unstructured . + # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.) + unstructured: + image: downloads.unstructured.io/unstructured-io/unstructured-api:latest + profiles: + - unstructured + restart: always + volumes: + - ./volumes/unstructured:/app/data + +networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. 
+ ssrf_proxy_network: + driver: bridge + internal: true + milvus: + driver: bridge + opensearch-net: + driver: bridge + internal: true + +volumes: + oradata: + dify_es01_data: diff --git a/dify/code/docker-compose.yaml b/dify/code/docker-compose.yaml index 102ef58a6..3f7d374b9 100644 --- a/dify/code/docker-compose.yaml +++ b/dify/code/docker-compose.yaml @@ -1,32 +1,40 @@ +# ================================================================== +# WARNING: This file is auto-generated by generate_docker_compose +# Do not modify this file directly. Instead, update the .env.example +# or docker-compose-template.yaml and regenerate this file. +# ================================================================== + x-shared-env: &shared-api-worker-env - WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} + SERVICE_API_URL: ${SERVICE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + APP_WEB_URL: ${APP_WEB_URL:-} + FILES_URL: ${FILES_URL:-} LOG_LEVEL: ${LOG_LEVEL:-INFO} - LOG_FILE: ${LOG_FILE:-} + LOG_FILE: ${LOG_FILE:-/app/logs/server.log} LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} - # Log dateformat LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} - # Log Timezone LOG_TZ: ${LOG_TZ:-UTC} DEBUG: ${DEBUG:-false} FLASK_DEBUG: ${FLASK_DEBUG:-false} SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} INIT_PASSWORD: ${INIT_PASSWORD:-} - CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} - CONSOLE_API_URL: ${CONSOLE_API_URL:-} - SERVICE_API_URL: ${SERVICE_API_URL:-} - APP_WEB_URL: ${APP_WEB_URL:-} + DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai} OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1} - FILES_URL: ${FILES_URL:-} + MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300} + ACCESS_TOKEN_EXPIRE_MINUTES: 
${ACCESS_TOKEN_EXPIRE_MINUTES:-60} + REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30} APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0} - MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} - DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} + APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200} DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0} DIFY_PORT: ${DIFY_PORT:-5001} - SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-} - SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-} + SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1} + SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent} + SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10} CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-} GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360} CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-} @@ -43,6 +51,11 @@ x-shared-env: &shared-api-worker-env SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30} SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600} SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false} + POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100} + POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB} + POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} + POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} + POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} REDIS_HOST: ${REDIS_HOST:-redis} REDIS_PORT: ${REDIS_PORT:-6379} REDIS_USERNAME: ${REDIS_USERNAME:-} @@ -55,10 +68,9 @@ x-shared-env: &shared-api-worker-env REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-} REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-} REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1} - REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false} + REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-} - ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60} CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1} BROKER_USE_SSL: ${BROKER_USE_SSL:-false} 
CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false} @@ -66,50 +78,54 @@ x-shared-env: &shared-api-worker-env CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} - STORAGE_TYPE: ${STORAGE_TYPE:-local} - STORAGE_LOCAL_PATH: ${STORAGE_LOCAL_PATH:-storage} - S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} + STORAGE_TYPE: ${STORAGE_TYPE:-opendal} + OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} + OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} S3_ENDPOINT: ${S3_ENDPOINT:-} - S3_BUCKET_NAME: ${S3_BUCKET_NAME:-} + S3_REGION: ${S3_REGION:-us-east-1} + S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} S3_ACCESS_KEY: ${S3_ACCESS_KEY:-} S3_SECRET_KEY: ${S3_SECRET_KEY:-} - S3_REGION: ${S3_REGION:-us-east-1} - AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-} - AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-} - AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-} - AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-} - GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-} + S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} + AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai} + AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai} + AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container} + AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://.blob.core.windows.net} + GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name} GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-} - ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-} - ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-} - ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-} - ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-} - ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-} + ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name} + ALIYUN_OSS_ACCESS_KEY: 
${ALIYUN_OSS_ACCESS_KEY:-your-access-key} + ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key} + ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com} + ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1} ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4} - ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-} - TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-} - TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-} - TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-} - TENCENT_COS_REGION: ${TENCENT_COS_REGION:-} - TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-} - HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-} - HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-} - HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-} - HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-} - OCI_ENDPOINT: ${OCI_ENDPOINT:-} - OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-} - OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-} - OCI_SECRET_KEY: ${OCI_SECRET_KEY:-} - OCI_REGION: ${OCI_REGION:-} - VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-} - VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-} - VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-} - VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-} - VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-} - BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-} - BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-} - BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-} - BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-} + ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path} + TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name} + TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key} + TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} + TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} + TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} + OCI_ENDPOINT: ${OCI_ENDPOINT:-https://objectstorage.us-ashburn-1.oraclecloud.com} + OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} + 
OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} + OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key} + OCI_REGION: ${OCI_REGION:-us-ashburn-1} + HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name} + HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key} + HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key} + HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url} + VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name} + VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key} + VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key} + VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url} + VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region} + BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name} + BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key} + BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key} + BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url} + SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name} + SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key} + SUPABASE_URL: ${SUPABASE_URL:-your-server-url} VECTOR_STORE: ${VECTOR_STORE:-weaviate} WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080} WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} @@ -118,31 +134,46 @@ x-shared-env: &shared-api-worker-env QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20} QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false} QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334} - COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-'couchbase-server'} - COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} - COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} - COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} - COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530} MILVUS_TOKEN: ${MILVUS_TOKEN:-} MILVUS_USER: 
${MILVUS_USER:-root} MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus} + MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False} MYSCALE_HOST: ${MYSCALE_HOST:-myscale} MYSCALE_PORT: ${MYSCALE_PORT:-8123} MYSCALE_USER: ${MYSCALE_USER:-default} MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-} MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify} MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-} - RELYT_HOST: ${RELYT_HOST:-db} - RELYT_PORT: ${RELYT_PORT:-5432} - RELYT_USER: ${RELYT_USER:-postgres} - RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} - RELYT_DATABASE: ${RELYT_DATABASE:-postgres} + COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server} + COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} + COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} + COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} + COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector} PGVECTOR_PORT: ${PGVECTOR_PORT:-5432} PGVECTOR_USER: ${PGVECTOR_USER:-postgres} PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} + PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1} + PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5} + PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs} + PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432} + PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres} + PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456} + PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify} + ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak} + ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk} + ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou} + ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456} + ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount} + ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword} + ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} + ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword} + ANALYTICDB_HOST: 
${ANALYTICDB_HOST:-gp-test.aliyuncs.com} + ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} + ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} + ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-} @@ -160,38 +191,22 @@ x-shared-env: &shared-api-worker-env TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} - ORACLE_HOST: ${ORACLE_HOST:-oracle} - ORACLE_PORT: ${ORACLE_PORT:-1521} - ORACLE_USER: ${ORACLE_USER:-dify} - ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify} - ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1} CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} CHROMA_PORT: ${CHROMA_PORT:-8000} CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant} CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} - ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} - ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} - ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} - ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} - LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070} - LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm} - LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm } - KIBANA_PORT: ${KIBANA_PORT:-5601} - # AnalyticDB configuration - ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-} - ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-} - ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-} - ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-} - ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-} - ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-} - ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} - ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-} - ANALYTICDB_HOST: ${ANALYTICDB_HOST:-} - ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} 
- ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} - ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} + ORACLE_HOST: ${ORACLE_HOST:-oracle} + ORACLE_PORT: ${ORACLE_PORT:-1521} + ORACLE_USER: ${ORACLE_USER:-dify} + ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify} + ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1} + RELYT_HOST: ${RELYT_HOST:-db} + RELYT_PORT: ${RELYT_PORT:-5432} + RELYT_USER: ${RELYT_USER:-postgres} + RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} + RELYT_DATABASE: ${RELYT_DATABASE:-postgres} OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} @@ -204,6 +219,11 @@ x-shared-env: &shared-api-worker-env TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} + ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} + ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} + ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} + ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + KIBANA_PORT: ${KIBANA_PORT:-5601} BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} @@ -211,11 +231,23 @@ x-shared-env: &shared-api-worker-env BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} - VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-dify} - VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-dify} + VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak} + VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk} VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} + 
VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30} + VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30} + LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070} + LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm} + LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm} + OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase} + OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} + OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} + OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} + OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} @@ -223,82 +255,154 @@ x-shared-env: &shared-api-worker-env ETL_TYPE: ${ETL_TYPE:-dify} UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-} + SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true} PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024} - MULTIMODAL_SEND_IMAGE_FORMAT: ${MULTIMODAL_SEND_IMAGE_FORMAT:-base64} - MULTIMODAL_SEND_VIDEO_FORMAT: ${MULTIMODAL_SEND_VIDEO_FORMAT:-base64} + MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64} UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} - SENTRY_DSN: ${API_SENTRY_DSN:-} - SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} - SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + SENTRY_DSN: ${SENTRY_DSN:-} + API_SENTRY_DSN: ${API_SENTRY_DSN:-} + API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + API_SENTRY_PROFILES_SAMPLE_RATE: 
${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-} NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public} NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-} NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-} NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-} MAIL_TYPE: ${MAIL_TYPE:-resend} MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-} + RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com} + RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key} SMTP_SERVER: ${SMTP_SERVER:-} SMTP_PORT: ${SMTP_PORT:-465} SMTP_USERNAME: ${SMTP_USERNAME:-} SMTP_PASSWORD: ${SMTP_PASSWORD:-} SMTP_USE_TLS: ${SMTP_USE_TLS:-true} SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false} - RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key} - RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com} INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72} RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5} CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194} - CODE_EXECUTION_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} - CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10} - CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} - CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10} + CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox} CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807} CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808} CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5} CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20} CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000} - TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000} CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} + CODE_EXECUTION_CONNECT_TIMEOUT: 
${CODE_EXECUTION_CONNECT_TIMEOUT:-10} + CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} + CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10} + TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000} WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} - SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} - SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} + MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} + WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3} + WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} - APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-12000} + SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} + SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + PGUSER: ${PGUSER:-${DB_USERNAME}} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}} + POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release} + SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 
${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} + WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true} + PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres} + PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} 
+ OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m} + OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1} + OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1} + OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536} + OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536} + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_PORT: ${NGINX_PORT:-80} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} + NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} + NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} + NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} + NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} + NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} + NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com} + CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com} + CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-} + SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80} + EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443} POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-} POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-} POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-} POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-} POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-} - 
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} - OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-http://oceanbase-vector} - OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} - OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} - OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} - OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} - OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} - OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + CSP_WHITELIST: ${CSP_WHITELIST:-} CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} - RETRIEVAL_TOP_N: ${RETRIEVAL_TOP_N:-0} + MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} services: # API service api: - image: langgenius/dify-api:0.13.0 + image: langgenius/dify-api:0.15.2 restart: always environment: # Use the shared environment variables. <<: *shared-api-worker-env # Startup mode, 'api' starts the API server. MODE: api + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} depends_on: - db - redis @@ -312,13 +416,16 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.13.0 + image: langgenius/dify-api:0.15.2 restart: always environment: # Use the shared environment variables. <<: *shared-api-worker-env # Startup mode, 'worker' starts the Celery worker for processing the queue. MODE: worker + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} depends_on: - db - redis @@ -331,7 +438,7 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:0.13.0 + image: langgenius/dify-web:0.15.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -340,6 +447,8 @@ services: NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} CSP_WHITELIST: ${CSP_WHITELIST:-} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} # The postgres database. db: @@ -492,6 +601,16 @@ services: - api - web + # The TiDB vector store. + # For production use, please refer to https://github.com/pingcap/tidb-docker-compose + tidb: + image: pingcap/tidb:v8.4.0 + profiles: + - tidb + command: + - --store=unistore + restart: always + # The Weaviate vector store. weaviate: image: semitechnologies/weaviate:1.19.0 @@ -696,7 +815,7 @@ services: - milvus milvus-standalone: - image: milvusdb/milvus:v2.3.1 + image: milvusdb/milvus:v2.5.0-beta profiles: - milvus command: [ 'milvus', 'run', 'standalone' ] @@ -772,18 +891,26 @@ services: image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 profiles: - elasticsearch + - elasticsearch-ja restart: always volumes: + - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh - dify_es01_data:/usr/share/elasticsearch/data environment: ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + VECTOR_STORE: ${VECTOR_STORE:-} cluster.name: dify-es-cluster node.name: dify-es0 discovery.type: single-node - xpack.license.self_generated.type: trial + xpack.license.self_generated.type: basic xpack.security.enabled: 'true' xpack.security.enrollment.enabled: 'false' xpack.security.http.ssl.enabled: 'false' + deploy: + resources: + limits: + memory: 2g + entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ] healthcheck: test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ] interval: 30s diff --git a/dify/code/elasticsearch/docker-entrypoint.sh b/dify/code/elasticsearch/docker-entrypoint.sh 
new file mode 100755 index 000000000..6669aec5a --- /dev/null +++ b/dify/code/elasticsearch/docker-entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -e + +if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then + # Check if the ICU tokenizer plugin is installed + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then + printf '%s\n' "Installing the ICU tokenizer plugin" + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then + printf '%s\n' "Failed to install the ICU tokenizer plugin" + exit 1 + fi + fi + # Check if the Japanese language analyzer plugin is installed + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then + printf '%s\n' "Installing the Japanese language analyzer plugin" + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then + printf '%s\n' "Failed to install the Japanese language analyzer plugin" + exit 1 + fi + fi +fi + +# Run the original entrypoint script +exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh diff --git a/dify/code/generate_docker_compose b/dify/code/generate_docker_compose new file mode 100755 index 000000000..b5c0acefb --- /dev/null +++ b/dify/code/generate_docker_compose @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +import os +import re +import sys + + +def parse_env_example(file_path): + """ + Parses the .env.example file and returns a dictionary with variable names as keys and default values as values. 
+ """ + env_vars = {} + with open(file_path, "r") as f: + for line_number, line in enumerate(f, 1): + line = line.strip() + # Ignore empty lines and comments + if not line or line.startswith("#"): + continue + # Use regex to parse KEY=VALUE + match = re.match(r"^([^=]+)=(.*)$", line) + if match: + key = match.group(1).strip() + value = match.group(2).strip() + # Remove possible quotes around the value + if (value.startswith('"') and value.endswith('"')) or ( + value.startswith("'") and value.endswith("'") + ): + value = value[1:-1] + env_vars[key] = value + else: + print(f"Warning: Unable to parse line {line_number}: {line}") + return env_vars + + +def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"): + """ + Generates a shared environment variables block as a YAML string. + """ + lines = [f"x-shared-env: &{anchor_name}"] + for key, default in env_vars.items(): + if key == "COMPOSE_PROFILES": + continue + # If default value is empty, use ${KEY:-} + if default == "": + lines.append(f" {key}: ${{{key}:-}}") + else: + # If default value contains special characters, wrap it in quotes + if re.search(r"[:\s]", default): + default = f"{default}" + lines.append(f" {key}: ${{{key}:-{default}}}") + return "\n".join(lines) + + +def insert_shared_env(template_path, output_path, shared_env_block, header_comments): + """ + Inserts the shared environment variables block and header comments into the template file, + removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file. 
+ """ + with open(template_path, "r") as f: + template_content = f.read() + + # Remove existing x-shared-env: &shared-api-worker-env lines + template_content = re.sub( + r"^x-shared-env: &shared-api-worker-env\s*\n?", + "", + template_content, + flags=re.MULTILINE, + ) + + # Prepare the final content with header comments and shared env block + final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}" + + with open(output_path, "w") as f: + f.write(final_content) + print(f"Generated {output_path}") + + +def main(): + env_example_path = ".env.example" + template_path = "docker-compose-template.yaml" + output_path = "docker-compose.yaml" + anchor_name = "shared-api-worker-env" # Can be modified as needed + + # Define header comments to be added at the top of docker-compose.yaml + header_comments = ( + "# ==================================================================\n" + "# WARNING: This file is auto-generated by generate_docker_compose\n" + "# Do not modify this file directly. 
Instead, update the .env.example\n" + "# or docker-compose-template.yaml and regenerate this file.\n" + "# ==================================================================\n" + ) + + # Check if required files exist + for path in [env_example_path, template_path]: + if not os.path.isfile(path): + print(f"Error: File {path} does not exist.") + sys.exit(1) + + # Parse .env.example file + env_vars = parse_env_example(env_example_path) + + if not env_vars: + print("Warning: No environment variables found in .env.example.") + + # Generate shared environment variables block + shared_env_block = generate_shared_env_block(env_vars, anchor_name) + + # Insert shared environment variables block and header comments into the template + insert_shared_env(template_path, output_path, shared_env_block, header_comments) + + +if __name__ == "__main__": + main() diff --git a/package-lock.json b/package-lock.json index 76870d1b9..a803854f2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,5 +1,5 @@ { - "name": "compose", + "name": "easypanel-dify", "lockfileVersion": 3, "requires": true, "packages": { From 27fc009e7911b35ec9e05a44aa86fc2c44012b68 Mon Sep 17 00:00:00 2001 From: deos-coworking Date: Fri, 14 Feb 2025 14:00:33 +0800 Subject: [PATCH 2/6] Update dify --- code/.env.example | 938 +++++++++++++++++ code/README.md | 99 ++ code/certbot/README.md | 76 ++ code/certbot/docker-entrypoint.sh | 30 + code/certbot/update-cert.template.txt | 19 + code/couchbase-server/Dockerfile | 4 + code/couchbase-server/init-cbserver.sh | 44 + code/docker-compose-template.yaml | 566 +++++++++++ code/docker-compose.middleware.yaml | 123 +++ code/docker-compose.png | Bin 0 -> 63694 bytes code/docker-compose.yaml | 962 ++++++++++++++++++ code/elasticsearch/docker-entrypoint.sh | 25 + code/generate_docker_compose | 112 ++ code/middleware.env.example | 89 ++ code/nginx/conf.d/default.conf.template | 37 + code/nginx/docker-entrypoint.sh | 39 + code/nginx/https.conf.template | 9 + 
code/nginx/nginx.conf.template | 34 + code/nginx/proxy.conf.template | 11 + code/nginx/ssl/.gitkeep | 0 code/ssrf_proxy/docker-entrypoint.sh | 42 + code/ssrf_proxy/squid.conf.template | 51 + code/startupscripts/init.sh | 13 + code/startupscripts/init_user.script | 10 + code/tidb/config/pd.toml | 4 + code/tidb/config/tiflash-learner.toml | 13 + code/tidb/config/tiflash.toml | 19 + code/tidb/docker-compose.yaml | 62 ++ .../config/users.d/custom_users_config.xml | 17 + code/volumes/oceanbase/init.d/vec_memory.sql | 1 + .../opensearch/opensearch_dashboards.yml | 222 ++++ code/volumes/sandbox/conf/config.yaml | 14 + code/volumes/sandbox/conf/config.yaml.example | 35 + .../dependencies/python-requirements.txt | 0 34 files changed, 3720 insertions(+) create mode 100644 code/.env.example create mode 100644 code/README.md create mode 100644 code/certbot/README.md create mode 100755 code/certbot/docker-entrypoint.sh create mode 100755 code/certbot/update-cert.template.txt create mode 100644 code/couchbase-server/Dockerfile create mode 100755 code/couchbase-server/init-cbserver.sh create mode 100644 code/docker-compose-template.yaml create mode 100644 code/docker-compose.middleware.yaml create mode 100644 code/docker-compose.png create mode 100644 code/docker-compose.yaml create mode 100755 code/elasticsearch/docker-entrypoint.sh create mode 100755 code/generate_docker_compose create mode 100644 code/middleware.env.example create mode 100644 code/nginx/conf.d/default.conf.template create mode 100755 code/nginx/docker-entrypoint.sh create mode 100644 code/nginx/https.conf.template create mode 100644 code/nginx/nginx.conf.template create mode 100644 code/nginx/proxy.conf.template create mode 100644 code/nginx/ssl/.gitkeep create mode 100755 code/ssrf_proxy/docker-entrypoint.sh create mode 100644 code/ssrf_proxy/squid.conf.template create mode 100755 code/startupscripts/init.sh create mode 100755 code/startupscripts/init_user.script create mode 100644 code/tidb/config/pd.toml 
create mode 100644 code/tidb/config/tiflash-learner.toml create mode 100644 code/tidb/config/tiflash.toml create mode 100644 code/tidb/docker-compose.yaml create mode 100644 code/volumes/myscale/config/users.d/custom_users_config.xml create mode 100644 code/volumes/oceanbase/init.d/vec_memory.sql create mode 100644 code/volumes/opensearch/opensearch_dashboards.yml create mode 100644 code/volumes/sandbox/conf/config.yaml create mode 100644 code/volumes/sandbox/conf/config.yaml.example create mode 100644 code/volumes/sandbox/dependencies/python-requirements.txt diff --git a/code/.env.example b/code/.env.example new file mode 100644 index 000000000..3bc79059d --- /dev/null +++ b/code/.env.example @@ -0,0 +1,938 @@ +# ------------------------------ +# Environment Variables for API service & worker +# ------------------------------ + +# ------------------------------ +# Common Variables +# ------------------------------ + +# The backend URL of the console API, +# used to concatenate the authorization callback. +# If empty, it is the same domain. +# Example: https://api.console.dify.ai +CONSOLE_API_URL= + +# The front-end URL of the console web, +# used to concatenate some front-end addresses and for CORS configuration use. +# If empty, it is the same domain. +# Example: https://console.dify.ai +CONSOLE_WEB_URL= + +# Service API Url, +# used to display Service API Base Url to the front-end. +# If empty, it is the same domain. +# Example: https://api.dify.ai +SERVICE_API_URL= + +# WebApp API backend Url, +# used to declare the back-end URL for the front-end API. +# If empty, it is the same domain. +# Example: https://api.app.dify.ai +APP_API_URL= + +# WebApp Url, +# used to display WebAPP API Base Url to the front-end. +# If empty, it is the same domain. +# Example: https://app.dify.ai +APP_WEB_URL= + +# File preview or download Url prefix. +# used to display File preview or download Url to the front-end or as Multi-model inputs; +# Url is signed and has expiration time. 
+FILES_URL=
+
+# ------------------------------
+# Server Configuration
+# ------------------------------
+
+# The log level for the application.
+# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
+LOG_LEVEL=INFO
+# Log file path
+LOG_FILE=/app/logs/server.log
+# Log file max size, the unit is MB
+LOG_FILE_MAX_SIZE=20
+# Log file max backup count
+LOG_FILE_BACKUP_COUNT=5
+# Log dateformat
+LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
+# Log Timezone
+LOG_TZ=UTC
+
+# Debug mode, default is false.
+# It is recommended to turn on this configuration for local development
+# to prevent some problems caused by monkey patch.
+DEBUG=false
+
+# Flask debug mode, it can output trace information at the interface when turned on,
+# which is convenient for debugging.
+FLASK_DEBUG=false
+
+# A secret key that is used for securely signing the session cookie
+# and encrypting sensitive information on the database.
+# You can generate a strong key using `openssl rand -base64 42`.
+SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
+
+# Password for admin user initialization.
+# If left unset, admin user will not be prompted for a password
+# when creating the initial admin account.
+# The length of the password cannot exceed 30 characters.
+INIT_PASSWORD=
+
+# Deployment environment.
+# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
+# Testing environment. There will be a distinct color label on the front-end page,
+# indicating that this environment is a testing environment.
+DEPLOY_ENV=PRODUCTION
+
+# Whether to enable the version check policy.
+# If set to empty, https://updates.dify.ai will be called for version check.
+CHECK_UPDATE_URL=https://updates.dify.ai
+
+# Used to change the OpenAI base address, default is https://api.openai.com/v1.
+# When OpenAI cannot be accessed in China, replace it with a domestic mirror address,
+# or when a local model provides OpenAI compatible API, it can be replaced.
+OPENAI_API_BASE=https://api.openai.com/v1 + +# When enabled, migrations will be executed prior to application startup +# and the application will start after the migrations have completed. +MIGRATION_ENABLED=true + +# File Access Time specifies a time interval in seconds for the file to be accessed. +# The default value is 300 seconds. +FILES_ACCESS_TIMEOUT=300 + +# Access token expiration time in minutes +ACCESS_TOKEN_EXPIRE_MINUTES=60 + +# Refresh token expiration time in days +REFRESH_TOKEN_EXPIRE_DAYS=30 + +# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer. +APP_MAX_ACTIVE_REQUESTS=0 +APP_MAX_EXECUTION_TIME=1200 + +# ------------------------------ +# Container Startup Related Configuration +# Only effective when starting with docker image or docker-compose. +# ------------------------------ + +# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed. +DIFY_BIND_ADDRESS=0.0.0.0 + +# API service binding port number, default 5001. +DIFY_PORT=5001 + +# The number of API server workers, i.e., the number of workers. +# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent +# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers +SERVER_WORKER_AMOUNT=1 + +# Defaults to gevent. If using windows, it can be switched to sync or solo. +SERVER_WORKER_CLASS=gevent + +# Default number of worker connections, the default is 10. +SERVER_WORKER_CONNECTIONS=10 + +# Similar to SERVER_WORKER_CLASS. +# If using windows, it can be switched to sync or solo. +CELERY_WORKER_CLASS= + +# Request handling timeout. The default is 200, +# it is recommended to set it to 360 to support a longer sse connection time. +GUNICORN_TIMEOUT=360 + +# The number of Celery workers. The default is 1, and can be set as needed. +CELERY_WORKER_AMOUNT= + +# Flag indicating whether to enable autoscaling of Celery workers. 
+# +# Autoscaling is useful when tasks are CPU intensive and can be dynamically +# allocated and deallocated based on the workload. +# +# When autoscaling is enabled, the maximum and minimum number of workers can +# be specified. The autoscaling algorithm will dynamically adjust the number +# of workers within the specified range. +# +# Default is false (i.e., autoscaling is disabled). +# +# Example: +# CELERY_AUTO_SCALE=true +CELERY_AUTO_SCALE=false + +# The maximum number of Celery workers that can be autoscaled. +# This is optional and only used when autoscaling is enabled. +# Default is not set. +CELERY_MAX_WORKERS= + +# The minimum number of Celery workers that can be autoscaled. +# This is optional and only used when autoscaling is enabled. +# Default is not set. +CELERY_MIN_WORKERS= + +# API Tool configuration +API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 +API_TOOL_DEFAULT_READ_TIMEOUT=60 + + +# ------------------------------ +# Database Configuration +# The database uses PostgreSQL. Please use the public schema. +# It is consistent with the configuration in the 'db' service below. +# ------------------------------ + +DB_USERNAME=postgres +DB_PASSWORD=difyai123456 +DB_HOST=db +DB_PORT=5432 +DB_DATABASE=dify +# The size of the database connection pool. +# The default is 30 connections, which can be appropriately increased. +SQLALCHEMY_POOL_SIZE=30 +# Database connection pool recycling time, the default is 3600 seconds. +SQLALCHEMY_POOL_RECYCLE=3600 +# Whether to print SQL, default is false. +SQLALCHEMY_ECHO=false + +# Maximum number of connections to the database +# Default is 100 +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS +POSTGRES_MAX_CONNECTIONS=100 + +# Sets the amount of shared memory used for postgres's shared buffers. 
+# Default is 128MB +# Recommended value: 25% of available memory +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS +POSTGRES_SHARED_BUFFERS=128MB + +# Sets the amount of memory used by each database worker for working space. +# Default is 4MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM +POSTGRES_WORK_MEM=4MB + +# Sets the amount of memory reserved for maintenance activities. +# Default is 64MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM +POSTGRES_MAINTENANCE_WORK_MEM=64MB + +# Sets the planner's assumption about the effective cache size. +# Default is 4096MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE +POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB + +# ------------------------------ +# Redis Configuration +# This Redis configuration is used for caching and for pub/sub during conversation. +# ------------------------------ + +REDIS_HOST=redis +REDIS_PORT=6379 +REDIS_USERNAME= +REDIS_PASSWORD=difyai123456 +REDIS_USE_SSL=false +REDIS_DB=0 + +# Whether to use Redis Sentinel mode. +# If set to true, the application will automatically discover and connect to the master node through Sentinel. +REDIS_USE_SENTINEL=false + +# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port. +# Format: `:,:,:` +REDIS_SENTINELS= +REDIS_SENTINEL_SERVICE_NAME= +REDIS_SENTINEL_USERNAME= +REDIS_SENTINEL_PASSWORD= +REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 + +# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port. +# Format: `:,:,:` +REDIS_USE_CLUSTERS=false +REDIS_CLUSTERS= +REDIS_CLUSTERS_PASSWORD= + +# ------------------------------ +# Celery Configuration +# ------------------------------ + +# Use redis as the broker, and redis db 1 for celery broker. 
+# Format as follows: `redis://:@:/` +# Example: redis://:difyai123456@redis:6379/1 +# If use Redis Sentinel, format as follows: `sentinel://:@:/` +# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1 +CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 +BROKER_USE_SSL=false + +# If you are using Redis Sentinel for high availability, configure the following settings. +CELERY_USE_SENTINEL=false +CELERY_SENTINEL_MASTER_NAME= +CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 + +# ------------------------------ +# CORS Configuration +# Used to set the front-end cross-domain access policy. +# ------------------------------ + +# Specifies the allowed origins for cross-origin requests to the Web API, +# e.g. https://dify.app or * for all origins. +WEB_API_CORS_ALLOW_ORIGINS=* + +# Specifies the allowed origins for cross-origin requests to the console API, +# e.g. https://cloud.dify.ai or * for all origins. +CONSOLE_CORS_ALLOW_ORIGINS=* + +# ------------------------------ +# File Storage Configuration +# ------------------------------ + +# The type of storage to use for storing user files. +STORAGE_TYPE=opendal + +# Apache OpenDAL Configuration +# The configuration for OpenDAL consists of the following format: OPENDAL__. +# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services. +# Dify will scan configurations starting with OPENDAL_ and automatically apply them. +# The scheme name for the OpenDAL storage. +OPENDAL_SCHEME=fs +# Configurations for OpenDAL Local File System. +OPENDAL_FS_ROOT=storage + +# S3 Configuration +# +S3_ENDPOINT= +S3_REGION=us-east-1 +S3_BUCKET_NAME=difyai +S3_ACCESS_KEY= +S3_SECRET_KEY= +# Whether to use AWS managed IAM roles for authenticating with the S3 service. +# If set to false, the access key and secret key must be provided. 
+S3_USE_AWS_MANAGED_IAM=false + +# Azure Blob Configuration +# +AZURE_BLOB_ACCOUNT_NAME=difyai +AZURE_BLOB_ACCOUNT_KEY=difyai +AZURE_BLOB_CONTAINER_NAME=difyai-container +AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net + +# Google Storage Configuration +# +GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name +GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= + +# The Alibaba Cloud OSS configurations, +# +ALIYUN_OSS_BUCKET_NAME=your-bucket-name +ALIYUN_OSS_ACCESS_KEY=your-access-key +ALIYUN_OSS_SECRET_KEY=your-secret-key +ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com +ALIYUN_OSS_REGION=ap-southeast-1 +ALIYUN_OSS_AUTH_VERSION=v4 +# Don't start with '/'. OSS doesn't support leading slash in object names. +ALIYUN_OSS_PATH=your-path + +# Tencent COS Configuration +# +TENCENT_COS_BUCKET_NAME=your-bucket-name +TENCENT_COS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_ID=your-secret-id +TENCENT_COS_REGION=your-region +TENCENT_COS_SCHEME=your-scheme + +# Oracle Storage Configuration +# +OCI_ENDPOINT=https://objectstorage.us-ashburn-1.oraclecloud.com +OCI_BUCKET_NAME=your-bucket-name +OCI_ACCESS_KEY=your-access-key +OCI_SECRET_KEY=your-secret-key +OCI_REGION=us-ashburn-1 + +# Huawei OBS Configuration +# +HUAWEI_OBS_BUCKET_NAME=your-bucket-name +HUAWEI_OBS_SECRET_KEY=your-secret-key +HUAWEI_OBS_ACCESS_KEY=your-access-key +HUAWEI_OBS_SERVER=your-server-url + +# Volcengine TOS Configuration +# +VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name +VOLCENGINE_TOS_SECRET_KEY=your-secret-key +VOLCENGINE_TOS_ACCESS_KEY=your-access-key +VOLCENGINE_TOS_ENDPOINT=your-server-url +VOLCENGINE_TOS_REGION=your-region + +# Baidu OBS Storage Configuration +# +BAIDU_OBS_BUCKET_NAME=your-bucket-name +BAIDU_OBS_SECRET_KEY=your-secret-key +BAIDU_OBS_ACCESS_KEY=your-access-key +BAIDU_OBS_ENDPOINT=your-server-url + +# Supabase Storage Configuration +# +SUPABASE_BUCKET_NAME=your-bucket-name +SUPABASE_API_KEY=your-access-key +SUPABASE_URL=your-server-url + +# ------------------------------ +# 
Vector Database Configuration +# ------------------------------ + +# The type of vector store to use. +# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`. +VECTOR_STORE=weaviate + +# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. +WEAVIATE_ENDPOINT=http://weaviate:8080 +WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih + +# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`. +QDRANT_URL=http://qdrant:6333 +QDRANT_API_KEY=difyai123456 +QDRANT_CLIENT_TIMEOUT=20 +QDRANT_GRPC_ENABLED=false +QDRANT_GRPC_PORT=6334 + +# Milvus configuration Only available when VECTOR_STORE is `milvus`. +# The milvus uri. +MILVUS_URI=http://127.0.0.1:19530 +MILVUS_TOKEN= +MILVUS_USER=root +MILVUS_PASSWORD=Milvus +MILVUS_ENABLE_HYBRID_SEARCH=False + +# MyScale configuration, only available when VECTOR_STORE is `myscale` +# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to: +# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters +MYSCALE_HOST=myscale +MYSCALE_PORT=8123 +MYSCALE_USER=default +MYSCALE_PASSWORD= +MYSCALE_DATABASE=dify +MYSCALE_FTS_PARAMS= + +# Couchbase configurations, only available when VECTOR_STORE is `couchbase` +# The connection string must include hostname defined in the docker-compose file (couchbase-server in this case) +COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server +COUCHBASE_USER=Administrator +COUCHBASE_PASSWORD=password +COUCHBASE_BUCKET_NAME=Embeddings +COUCHBASE_SCOPE_NAME=_default + +# pgvector configurations, only available when VECTOR_STORE is `pgvector` +PGVECTOR_HOST=pgvector +PGVECTOR_PORT=5432 +PGVECTOR_USER=postgres +PGVECTOR_PASSWORD=difyai123456 +PGVECTOR_DATABASE=dify +PGVECTOR_MIN_CONNECTION=1 +PGVECTOR_MAX_CONNECTION=5 + +# pgvecto-rs 
configurations, only available when VECTOR_STORE is `pgvecto-rs` +PGVECTO_RS_HOST=pgvecto-rs +PGVECTO_RS_PORT=5432 +PGVECTO_RS_USER=postgres +PGVECTO_RS_PASSWORD=difyai123456 +PGVECTO_RS_DATABASE=dify + +# analyticdb configurations, only available when VECTOR_STORE is `analyticdb` +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_REGION_ID=cn-hangzhou +ANALYTICDB_INSTANCE_ID=gp-ab123456 +ANALYTICDB_ACCOUNT=testaccount +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE=dify +ANALYTICDB_NAMESPACE_PASSWORD=difypassword +ANALYTICDB_HOST=gp-test.aliyuncs.com +ANALYTICDB_PORT=5432 +ANALYTICDB_MIN_CONNECTION=1 +ANALYTICDB_MAX_CONNECTION=5 + +# TiDB vector configurations, only available when VECTOR_STORE is `tidb` +TIDB_VECTOR_HOST=tidb +TIDB_VECTOR_PORT=4000 +TIDB_VECTOR_USER= +TIDB_VECTOR_PASSWORD= +TIDB_VECTOR_DATABASE=dify + +# Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant` +TIDB_ON_QDRANT_URL=http://127.0.0.1 +TIDB_ON_QDRANT_API_KEY=dify +TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 +TIDB_ON_QDRANT_GRPC_ENABLED=false +TIDB_ON_QDRANT_GRPC_PORT=6334 +TIDB_PUBLIC_KEY=dify +TIDB_PRIVATE_KEY=dify +TIDB_API_URL=http://127.0.0.1 +TIDB_IAM_API_URL=http://127.0.0.1 +TIDB_REGION=regions/aws-us-east-1 +TIDB_PROJECT_ID=dify +TIDB_SPEND_LIMIT=100 + +# Chroma configuration, only available when VECTOR_STORE is `chroma` +CHROMA_HOST=127.0.0.1 +CHROMA_PORT=8000 +CHROMA_TENANT=default_tenant +CHROMA_DATABASE=default_database +CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider +CHROMA_AUTH_CREDENTIALS= + +# Oracle configuration, only available when VECTOR_STORE is `oracle` +ORACLE_HOST=oracle +ORACLE_PORT=1521 +ORACLE_USER=dify +ORACLE_PASSWORD=dify +ORACLE_DATABASE=FREEPDB1 + +# relyt configurations, only available when VECTOR_STORE is `relyt` +RELYT_HOST=db +RELYT_PORT=5432 +RELYT_USER=postgres +RELYT_PASSWORD=difyai123456 +RELYT_DATABASE=postgres + +# open search configuration, only available when VECTOR_STORE is 
`opensearch` +OPENSEARCH_HOST=opensearch +OPENSEARCH_PORT=9200 +OPENSEARCH_USER=admin +OPENSEARCH_PASSWORD=admin +OPENSEARCH_SECURE=true + +# tencent vector configurations, only available when VECTOR_STORE is `tencent` +TENCENT_VECTOR_DB_URL=http://127.0.0.1 +TENCENT_VECTOR_DB_API_KEY=dify +TENCENT_VECTOR_DB_TIMEOUT=30 +TENCENT_VECTOR_DB_USERNAME=dify +TENCENT_VECTOR_DB_DATABASE=dify +TENCENT_VECTOR_DB_SHARD=1 +TENCENT_VECTOR_DB_REPLICAS=2 + +# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch` +ELASTICSEARCH_HOST=0.0.0.0 +ELASTICSEARCH_PORT=9200 +ELASTICSEARCH_USERNAME=elastic +ELASTICSEARCH_PASSWORD=elastic +KIBANA_PORT=5601 + +# baidu vector configurations, only available when VECTOR_STORE is `baidu` +BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 +BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 +BAIDU_VECTOR_DB_ACCOUNT=root +BAIDU_VECTOR_DB_API_KEY=dify +BAIDU_VECTOR_DB_DATABASE=dify +BAIDU_VECTOR_DB_SHARD=1 +BAIDU_VECTOR_DB_REPLICAS=3 + +# VikingDB configurations, only available when VECTOR_STORE is `vikingdb` +VIKINGDB_ACCESS_KEY=your-ak +VIKINGDB_SECRET_KEY=your-sk +VIKINGDB_REGION=cn-shanghai +VIKINGDB_HOST=api-vikingdb.xxx.volces.com +VIKINGDB_SCHEMA=http +VIKINGDB_CONNECTION_TIMEOUT=30 +VIKINGDB_SOCKET_TIMEOUT=30 + +# Lindorm configuration, only available when VECTOR_STORE is `lindorm` +LINDORM_URL=http://lindorm:30070 +LINDORM_USERNAME=lindorm +LINDORM_PASSWORD=lindorm + +# OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase` +OCEANBASE_VECTOR_HOST=oceanbase +OCEANBASE_VECTOR_PORT=2881 +OCEANBASE_VECTOR_USER=root@test +OCEANBASE_VECTOR_PASSWORD=difyai123456 +OCEANBASE_VECTOR_DATABASE=test +OCEANBASE_CLUSTER_NAME=difyai +OCEANBASE_MEMORY_LIMIT=6G + +# Upstash Vector configuration, only available when VECTOR_STORE is `upstash` +UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io +UPSTASH_VECTOR_TOKEN=dify + +# ------------------------------ +# Knowledge Configuration +# ------------------------------ + +# 
Upload file size limit, default 15M. +UPLOAD_FILE_SIZE_LIMIT=15 + +# The maximum number of files that can be uploaded at a time, default 5. +UPLOAD_FILE_BATCH_LIMIT=5 + +# ETL type, support: `dify`, `Unstructured` +# `dify` Dify's proprietary file extraction scheme +# `Unstructured` Unstructured.io file extraction scheme +ETL_TYPE=dify + +# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured +# Or using Unstructured for document extractor node for pptx. +# For example: http://unstructured:8000/general/v0/general +UNSTRUCTURED_API_URL= +UNSTRUCTURED_API_KEY= +SCARF_NO_ANALYTICS=true + +# ------------------------------ +# Model Configuration +# ------------------------------ + +# The maximum number of tokens allowed for prompt generation. +# This setting controls the upper limit of tokens that can be used by the LLM +# when generating a prompt in the prompt generation tool. +# Default: 512 tokens. +PROMPT_GENERATION_MAX_TOKENS=512 + +# The maximum number of tokens allowed for code generation. +# This setting controls the upper limit of tokens that can be used by the LLM +# when generating code in the code generation tool. +# Default: 1024 tokens. +CODE_GENERATION_MAX_TOKENS=1024 + +# ------------------------------ +# Multi-modal Configuration +# ------------------------------ + +# The format of the image/video/audio/document sent when the multi-modal model is input, +# the default is base64, optional url. +# The delay of the call in url mode will be lower than that in base64 mode. +# It is generally recommended to use the more compatible base64 mode. +# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document. +MULTIMODAL_SEND_FORMAT=base64 +# Upload image file size limit, default 10M. +UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 +# Upload video file size limit, default 100M. 
+UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 +# Upload audio file size limit, default 50M. +UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 + +# ------------------------------ +# Sentry Configuration +# Used for application monitoring and error log tracking. +# ------------------------------ +SENTRY_DSN= + +# API Service Sentry DSN address, default is empty, when empty, +# all monitoring information is not reported to Sentry. +# If not set, Sentry error reporting will be disabled. +API_SENTRY_DSN= +# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%. +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 + +# Web Service Sentry DSN address, default is empty, when empty, +# all monitoring information is not reported to Sentry. +# If not set, Sentry error reporting will be disabled. +WEB_SENTRY_DSN= + +# ------------------------------ +# Notion Integration Configuration +# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations +# ------------------------------ + +# Configure as "public" or "internal". +# Since Notion's OAuth redirect URL only supports HTTPS, +# if deploying locally, please use Notion's internal integration. +NOTION_INTEGRATION_TYPE=public +# Notion OAuth client secret (used for public integration type) +NOTION_CLIENT_SECRET= +# Notion OAuth client id (used for public integration type) +NOTION_CLIENT_ID= +# Notion internal integration secret. +# If the value of NOTION_INTEGRATION_TYPE is "internal", +# you need to configure this variable. +NOTION_INTERNAL_SECRET= + +# ------------------------------ +# Mail related configuration +# ------------------------------ + +# Mail type, support: resend, smtp +MAIL_TYPE=resend + +# Default send from email address, if not specified +MAIL_DEFAULT_SEND_FROM= + +# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`. 
+RESEND_API_URL=https://api.resend.com +RESEND_API_KEY=your-resend-api-key + + +# SMTP server configuration, used when MAIL_TYPE is `smtp` +SMTP_SERVER= +SMTP_PORT=465 +SMTP_USERNAME= +SMTP_PASSWORD= +SMTP_USE_TLS=true +SMTP_OPPORTUNISTIC_TLS=false + +# ------------------------------ +# Others Configuration +# ------------------------------ + +# Maximum length of segmentation tokens for indexing +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 + +# Member invitation link valid time (hours), +# Default: 72. +INVITE_EXPIRY_HOURS=72 + +# Reset password token valid time (minutes), +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 + +# The sandbox service endpoint. +CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_MAX_NUMBER=9223372036854775807 +CODE_MIN_NUMBER=-9223372036854775808 +CODE_MAX_DEPTH=5 +CODE_MAX_PRECISION=20 +CODE_MAX_STRING_LENGTH=80000 +CODE_MAX_STRING_ARRAY_LENGTH=30 +CODE_MAX_OBJECT_ARRAY_LENGTH=30 +CODE_MAX_NUMBER_ARRAY_LENGTH=1000 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 +CODE_EXECUTION_WRITE_TIMEOUT=10 +TEMPLATE_TRANSFORM_MAX_LENGTH=80000 + +# Workflow runtime configuration +WORKFLOW_MAX_EXECUTION_STEPS=500 +WORKFLOW_MAX_EXECUTION_TIME=1200 +WORKFLOW_CALL_MAX_DEPTH=5 +MAX_VARIABLE_SIZE=204800 +WORKFLOW_PARALLEL_DEPTH_LIMIT=3 +WORKFLOW_FILE_UPLOAD_LIMIT=10 + +# HTTP request node in workflow configuration +HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 +HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 + +# SSRF Proxy server HTTP URL +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +# SSRF Proxy server HTTPS URL +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 + +# ------------------------------ +# Environment Variables for web Service +# ------------------------------ + +# The timeout for the text generation in millisecond +TEXT_GENERATION_TIMEOUT_MS=60000 + +# ------------------------------ +# Environment Variables for db Service +# ------------------------------ + +PGUSER=${DB_USERNAME} +# The password for the default 
postgres user. +POSTGRES_PASSWORD=${DB_PASSWORD} +# The name of the default postgres database. +POSTGRES_DB=${DB_DATABASE} +# postgres data directory +PGDATA=/var/lib/postgresql/data/pgdata + +# ------------------------------ +# Environment Variables for sandbox Service +# ------------------------------ + +# The API key for the sandbox service +SANDBOX_API_KEY=dify-sandbox +# The mode in which the Gin framework runs +SANDBOX_GIN_MODE=release +# The timeout for the worker in seconds +SANDBOX_WORKER_TIMEOUT=15 +# Enable network for the sandbox service +SANDBOX_ENABLE_NETWORK=true +# HTTP proxy URL for SSRF protection +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +# HTTPS proxy URL for SSRF protection +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +# The port on which the sandbox service runs +SANDBOX_PORT=8194 + +# ------------------------------ +# Environment Variables for weaviate Service +# (only used when VECTOR_STORE is weaviate) +# ------------------------------ +WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate +WEAVIATE_QUERY_DEFAULTS_LIMIT=25 +WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true +WEAVIATE_DEFAULT_VECTORIZER_MODULE=none +WEAVIATE_CLUSTER_HOSTNAME=node1 +WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true +WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai +WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true +WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai + +# ------------------------------ +# Environment Variables for Chroma +# (only used when VECTOR_STORE is chroma) +# ------------------------------ + +# Authentication credentials for Chroma server +CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 +# Authentication provider for Chroma server +CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider +# Persistence setting for Chroma server +CHROMA_IS_PERSISTENT=TRUE + +# ------------------------------ +# Environment Variables for Oracle Service +# 
(only used when VECTOR_STORE is Oracle) +# ------------------------------ +ORACLE_PWD=Dify123456 +ORACLE_CHARACTERSET=AL32UTF8 + +# ------------------------------ +# Environment Variables for milvus Service +# (only used when VECTOR_STORE is milvus) +# ------------------------------ +# ETCD configuration for auto compaction mode +ETCD_AUTO_COMPACTION_MODE=revision +# ETCD configuration for auto compaction retention in terms of number of revisions +ETCD_AUTO_COMPACTION_RETENTION=1000 +# ETCD configuration for backend quota in bytes +ETCD_QUOTA_BACKEND_BYTES=4294967296 +# ETCD configuration for the number of changes before triggering a snapshot +ETCD_SNAPSHOT_COUNT=50000 +# MinIO access key for authentication +MINIO_ACCESS_KEY=minioadmin +# MinIO secret key for authentication +MINIO_SECRET_KEY=minioadmin +# ETCD service endpoints +ETCD_ENDPOINTS=etcd:2379 +# MinIO service address +MINIO_ADDRESS=minio:9000 +# Enable or disable security authorization +MILVUS_AUTHORIZATION_ENABLED=true + +# ------------------------------ +# Environment Variables for pgvector / pgvector-rs Service +# (only used when VECTOR_STORE is pgvector / pgvector-rs) +# ------------------------------ +PGVECTOR_PGUSER=postgres +# The password for the default postgres user. +PGVECTOR_POSTGRES_PASSWORD=difyai123456 +# The name of the default postgres database. 
+PGVECTOR_POSTGRES_DB=dify +# postgres data directory +PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata + +# ------------------------------ +# Environment Variables for opensearch +# (only used when VECTOR_STORE is opensearch) +# ------------------------------ +OPENSEARCH_DISCOVERY_TYPE=single-node +OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true +OPENSEARCH_JAVA_OPTS_MIN=512m +OPENSEARCH_JAVA_OPTS_MAX=1024m +OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 +OPENSEARCH_MEMLOCK_SOFT=-1 +OPENSEARCH_MEMLOCK_HARD=-1 +OPENSEARCH_NOFILE_SOFT=65536 +OPENSEARCH_NOFILE_HARD=65536 + +# ------------------------------ +# Environment Variables for Nginx reverse proxy +# ------------------------------ +NGINX_SERVER_NAME=_ +NGINX_HTTPS_ENABLED=false +# HTTP port +NGINX_PORT=80 +# SSL settings are only applied when HTTPS_ENABLED is true +NGINX_SSL_PORT=443 +# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory +# and modify the env vars below accordingly. 
+NGINX_SSL_CERT_FILENAME=dify.crt +NGINX_SSL_CERT_KEY_FILENAME=dify.key +NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3 + +# Nginx performance tuning +NGINX_WORKER_PROCESSES=auto +NGINX_CLIENT_MAX_BODY_SIZE=15M +NGINX_KEEPALIVE_TIMEOUT=65 + +# Proxy settings +NGINX_PROXY_READ_TIMEOUT=3600s +NGINX_PROXY_SEND_TIMEOUT=3600s + +# Set true to accept requests for /.well-known/acme-challenge/ +NGINX_ENABLE_CERTBOT_CHALLENGE=false + +# ------------------------------ +# Certbot Configuration +# ------------------------------ + +# Email address (required to get certificates from Let's Encrypt) +CERTBOT_EMAIL=your_email@example.com + +# Domain name +CERTBOT_DOMAIN=your_domain.com + +# certbot command options +# i.e: --force-renewal --dry-run --test-cert --debug +CERTBOT_OPTIONS= + +# ------------------------------ +# Environment Variables for SSRF Proxy +# ------------------------------ +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 + +# ------------------------------ +# docker env var for specifying vector db type at startup +# (based on the vector db type, the corresponding docker +# compose profile will be used) +# if you want to use unstructured, add ',unstructured' to the end +# ------------------------------ +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate} + +# ------------------------------ +# Docker Compose Service Expose Host Port Configurations +# ------------------------------ +EXPOSE_NGINX_PORT=80 +EXPOSE_NGINX_SSL_PORT=443 + +# ---------------------------------------------------------------------------- +# ModelProvider & Tool Position Configuration +# Used to specify the model providers and tools that can be used in the app. 
+# ---------------------------------------------------------------------------- + +# Pin, include, and exclude tools +# Use comma-separated values with no spaces between items. +# Example: POSITION_TOOL_PINS=bing,google +POSITION_TOOL_PINS= +POSITION_TOOL_INCLUDES= +POSITION_TOOL_EXCLUDES= + +# Pin, include, and exclude model providers +# Use comma-separated values with no spaces between items. +# Example: POSITION_PROVIDER_PINS=openai,openllm +POSITION_PROVIDER_PINS= +POSITION_PROVIDER_INCLUDES= +POSITION_PROVIDER_EXCLUDES= + +# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP +CSP_WHITELIST= + +# Enable or disable create tidb service job +CREATE_TIDB_SERVICE_JOB_ENABLED=false + +# Maximum number of submitted thread count in a ThreadPool for parallel node execution +MAX_SUBMIT_COUNT=100 + +# The maximum number of top-k value for RAG. +TOP_K_MAX_VALUE=10 diff --git a/code/README.md b/code/README.md new file mode 100644 index 000000000..c3cd1f9e3 --- /dev/null +++ b/code/README.md @@ -0,0 +1,99 @@ +## README for docker Deployment + +Welcome to the new `docker` directory for deploying Dify using Docker Compose. This README outlines the updates, deployment instructions, and migration details for existing users. + +### What's Updated + +- **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections. + For more information, refer `docker/certbot/README.md`. + +- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments. + + > What is `.env`?

+  > The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments.
+
+- **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
+- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
+- **Legacy Support**: Previous deployment files are now located in the `docker-legacy` directory and will no longer be maintained.
+
+### How to Deploy Dify with `docker-compose.yaml`
+
+1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system.
+2. **Environment Setup**:
+   - Navigate to the `docker` directory.
+   - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
+   - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
+3. **Running the Services**:
+   - Execute `docker compose up` from the `docker` directory to start the services.
+   - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
+4. **SSL Certificate Setup**:
+   - Refer to `docker/certbot/README.md` to set up SSL certificates using Certbot.
+
+### How to Deploy Middleware for Developing Dify
+
+1. 
**Middleware Setup**:
+   - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches.
+   - Navigate to the `docker` directory.
+   - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file).
+2. **Running Middleware Services**:
+   - Execute `docker-compose -f docker-compose.middleware.yaml --env-file middleware.env up -d` to start the middleware services.
+
+### Migration for Existing Users
+
+For users migrating from the `docker-legacy` setup:
+
+1. **Review Changes**: Familiarize yourself with the new `.env` configuration and Docker Compose setup.
+2. **Transfer Customizations**:
+   - If you have customized configurations such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, you will need to reflect these changes in the `.env` file you create.
+3. **Data Migration**:
+   - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.
+
+### Overview of `.env`
+
+#### Key Modules and Customization
+
+- **Vector Database Services**: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
+- **Storage Services**: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
+- **API and Web Services**: Users can define URLs and other settings that affect how the API and web frontends operate.
+
+#### Other notable variables
+
+The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables:
+
+1. 
**Common Variables**: + - `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services. + - `APP_WEB_URL`: Frontend application URL. + - `FILES_URL`: Base URL for file downloads and previews. + +2. **Server Configuration**: + - `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings. + - `SECRET_KEY`: A key for encrypting session cookies and other sensitive data. + +3. **Database Configuration**: + - `DB_USERNAME`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_DATABASE`: PostgreSQL database credentials and connection details. + +4. **Redis Configuration**: + - `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`: Redis server connection settings. + +5. **Celery Configuration**: + - `CELERY_BROKER_URL`: Configuration for Celery message broker. + +6. **Storage Configuration**: + - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc. + +7. **Vector Database Configuration**: + - `VECTOR_STORE`: Type of vector database (e.g., `weaviate`, `milvus`). + - Specific settings for each vector store like `WEAVIATE_ENDPOINT`, `MILVUS_URI`. + +8. **CORS Configuration**: + - `WEB_API_CORS_ALLOW_ORIGINS`, `CONSOLE_CORS_ALLOW_ORIGINS`: Settings for cross-origin resource sharing. + +9. **Other Service-Specific Environment Variables**: + - Each service like `nginx`, `redis`, `db`, and vector databases have specific environment variables that are directly referenced in the `docker-compose.yaml`. + +### Additional Information + +- **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions. +- **Support**: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory. 
+
+This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.
diff --git a/code/certbot/README.md b/code/certbot/README.md
new file mode 100644
index 000000000..21be34b33
--- /dev/null
+++ b/code/certbot/README.md
@@ -0,0 +1,76 @@
+# Launching new servers with SSL certificates
+
+## Short description
+
+docker compose certbot configurations with backward compatibility (without certbot container).
+Use `docker compose --profile certbot up` to use this feature.
+
+## The simplest way for launching new servers with SSL certificates
+
+1. Get letsencrypt certs
+   set `.env` values
+   ```properties
+   NGINX_SSL_CERT_FILENAME=fullchain.pem
+   NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
+   NGINX_ENABLE_CERTBOT_CHALLENGE=true
+   CERTBOT_DOMAIN=your_domain.com
+   CERTBOT_EMAIL=example@your_domain.com
+   ```
+   execute command:
+   ```shell
+   docker network prune
+   docker compose --profile certbot up --force-recreate -d
+   ```
+   then, after the containers have launched:
+   ```shell
+   docker compose exec -it certbot /bin/sh /update-cert.sh
+   ```
+2. Edit `.env` file and `docker compose --profile certbot up` again.
+   set `.env` value additionally
+   ```properties
+   NGINX_HTTPS_ENABLED=true
+   ```
+   execute command:
+   ```shell
+   docker compose --profile certbot up -d --no-deps --force-recreate nginx
+   ```
+   Then you can access your server with HTTPS.
+   [https://your_domain.com](https://your_domain.com)
+
+## SSL certificates renewal
+
+For SSL certificates renewal, execute commands below:
+
+```shell
+docker compose exec -it certbot /bin/sh /update-cert.sh
+docker compose exec nginx nginx -s reload
+```
+
+## Options for certbot
+
+`CERTBOT_OPTIONS` key might be helpful for testing. i.e.,
+
+```properties
+CERTBOT_OPTIONS=--dry-run
+```
+
+To apply changes to `CERTBOT_OPTIONS`, regenerate the certbot container before updating the certificates.
+ +```shell +docker compose --profile certbot up -d --no-deps --force-recreate certbot +docker compose exec -it certbot /bin/sh /update-cert.sh +``` + +Then, reload the nginx container if necessary. + +```shell +docker compose exec nginx nginx -s reload +``` + +## For legacy servers + +To use cert files dir `nginx/ssl` as before, simply launch containers WITHOUT `--profile certbot` option. + +```shell +docker compose up -d +``` diff --git a/code/certbot/docker-entrypoint.sh b/code/certbot/docker-entrypoint.sh new file mode 100755 index 000000000..a70ecd825 --- /dev/null +++ b/code/certbot/docker-entrypoint.sh @@ -0,0 +1,30 @@ +#!/bin/sh +set -e + +printf '%s\n' "Docker entrypoint script is running" + +printf '%s\n' "\nChecking specific environment variables:" +printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}" +printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}" +printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}" + +printf '%s\n' "\nChecking mounted directories:" +for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do + if [ -d "$dir" ]; then + printf '%s\n' "$dir exists. Contents:" + ls -la "$dir" + else + printf '%s\n' "$dir does not exist." 
+  fi
+done
+
+printf '%s\n' "\nGenerating update-cert.sh from template"
+sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
+    -e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
+    -e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
+    /update-cert.template.txt > /update-cert.sh
+
+chmod +x /update-cert.sh
+
+printf '%s\n' "\nExecuting command:" "$@"
+exec "$@"
diff --git a/code/certbot/update-cert.template.txt b/code/certbot/update-cert.template.txt
new file mode 100755
index 000000000..16786a192
--- /dev/null
+++ b/code/certbot/update-cert.template.txt
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -e
+
+DOMAIN="${CERTBOT_DOMAIN}"
+EMAIL="${CERTBOT_EMAIL}"
+OPTIONS="${CERTBOT_OPTIONS}"
+CERT_NAME="${DOMAIN}" # use the domain name as the certificate name
+
+# Check if the certificate already exists
+if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
+  echo "Certificate exists. Attempting to renew..."
+  certbot renew --noninteractive --cert-name ${CERT_NAME} --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email ${OPTIONS}
+else
+  echo "Certificate does not exist. Obtaining a new certificate..."
+  certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email -d ${DOMAIN} ${OPTIONS}
+fi
+echo "Certificate operation successful"
+# Note: Nginx reload should be handled outside this container
+echo "Please ensure to reload Nginx to apply any certificate changes."
diff --git a/code/couchbase-server/Dockerfile b/code/couchbase-server/Dockerfile new file mode 100644 index 000000000..bd8af6415 --- /dev/null +++ b/code/couchbase-server/Dockerfile @@ -0,0 +1,4 @@ +FROM couchbase/server:latest AS stage_base +# FROM couchbase:latest AS stage_base +COPY init-cbserver.sh /opt/couchbase/init/ +RUN chmod +x /opt/couchbase/init/init-cbserver.sh \ No newline at end of file diff --git a/code/couchbase-server/init-cbserver.sh b/code/couchbase-server/init-cbserver.sh new file mode 100755 index 000000000..e66bc1853 --- /dev/null +++ b/code/couchbase-server/init-cbserver.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# used to start couchbase server - can't get around this as docker compose only allows you to start one command - so we have to start couchbase like the standard couchbase Dockerfile would +# https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88 + +/entrypoint.sh couchbase-server & + +# track if setup is complete so we don't try to setup again +FILE=/opt/couchbase/init/setupComplete.txt + +if ! 
[ -f "$FILE" ]; then + # used to automatically create the cluster based on environment variables + # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html + + echo $COUCHBASE_ADMINISTRATOR_USERNAME ":" $COUCHBASE_ADMINISTRATOR_PASSWORD + + sleep 20s + /opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \ + --cluster-username $COUCHBASE_ADMINISTRATOR_USERNAME \ + --cluster-password $COUCHBASE_ADMINISTRATOR_PASSWORD \ + --services data,index,query,fts \ + --cluster-ramsize $COUCHBASE_RAM_SIZE \ + --cluster-index-ramsize $COUCHBASE_INDEX_RAM_SIZE \ + --cluster-eventing-ramsize $COUCHBASE_EVENTING_RAM_SIZE \ + --cluster-fts-ramsize $COUCHBASE_FTS_RAM_SIZE \ + --index-storage-setting default + + sleep 2s + + # used to auto create the bucket based on environment variables + # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html + + /opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \ + --username $COUCHBASE_ADMINISTRATOR_USERNAME \ + --password $COUCHBASE_ADMINISTRATOR_PASSWORD \ + --bucket $COUCHBASE_BUCKET \ + --bucket-ramsize $COUCHBASE_BUCKET_RAMSIZE \ + --bucket-type couchbase + + # create file so we know that the cluster is setup and don't run the setup again + touch $FILE +fi + # docker compose will stop the container from running unless we do this + # known issue and workaround + tail -f /dev/null diff --git a/code/docker-compose-template.yaml b/code/docker-compose-template.yaml new file mode 100644 index 000000000..fc4e7d9c8 --- /dev/null +++ b/code/docker-compose-template.yaml @@ -0,0 +1,566 @@ +x-shared-env: &shared-api-worker-env +services: + # API service + api: + image: langgenius/dify-api:0.15.3 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'api' starts the API server. 
+ MODE: api + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # worker service + # The Celery worker for processing the queue. + worker: + image: langgenius/dify-api:0.15.3 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'worker' starts the Celery worker for processing the queue. + MODE: worker + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # Frontend web application. + web: + image: langgenius/dify-web:0.15.3 + restart: always + environment: + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + CSP_WHITELIST: ${CSP_WHITELIST:-} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + + # The postgres database. 
+ db: + image: postgres:15-alpine + restart: always + environment: + PGUSER: ${PGUSER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456} + POSTGRES_DB: ${POSTGRES_DB:-dify} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + command: > + postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}' + -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}' + -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' + -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' + -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + volumes: + - ./volumes/db/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # The redis cache. + redis: + image: redis:6-alpine + restart: always + environment: + REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456} + volumes: + # Mount the redis data directory to the container. + - ./volumes/redis/data:/data + # Set the redis password when startup redis server. + command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456} + healthcheck: + test: [ 'CMD', 'redis-cli', 'ping' ] + + # The DifySandbox + sandbox: + image: langgenius/dify-sandbox:0.2.10 + restart: always + environment: + # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. 
+ API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + GIN_MODE: ${SANDBOX_GIN_MODE:-release} + WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + volumes: + - ./volumes/sandbox/dependencies:/dependencies + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ] + networks: + - ssrf_proxy_network + + # ssrf_proxy server + # for more information, please refer to + # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed + ssrf_proxy: + image: ubuntu/squid:latest + restart: always + volumes: + - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template + - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh + entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + environment: + # pls clearly modify the squid env vars to fit your network environment. + HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + networks: + - ssrf_proxy_network + - default + + # Certbot service + # use `docker-compose --profile certbot up` to start the certbot service. 
+ certbot: + image: certbot/certbot + profiles: + - certbot + volumes: + - ./volumes/certbot/conf:/etc/letsencrypt + - ./volumes/certbot/www:/var/www/html + - ./volumes/certbot/logs:/var/log/letsencrypt + - ./volumes/certbot/conf/live:/etc/letsencrypt/live + - ./certbot/update-cert.template.txt:/update-cert.template.txt + - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh + environment: + - CERTBOT_EMAIL=${CERTBOT_EMAIL} + - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} + - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} + entrypoint: [ '/docker-entrypoint.sh' ] + command: [ 'tail', '-f', '/dev/null' ] + + # The nginx reverse proxy. + # used for reverse proxying the API service and Web service. + nginx: + image: nginx:latest + restart: always + volumes: + - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template + - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template + - ./nginx/https.conf.template:/etc/nginx/https.conf.template + - ./nginx/conf.d:/etc/nginx/conf.d + - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - ./nginx/ssl:/etc/ssl # cert dir (legacy) + - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container) + - ./volumes/certbot/conf:/etc/letsencrypt + - ./volumes/certbot/www:/var/www/html + entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + environment: + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_PORT: ${NGINX_PORT:-80} + # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory + # and modify the env vars below in .env if HTTPS_ENABLED is true. 
+      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
+      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
+      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
+      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
+      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
+      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
+      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
+      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
+      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
+      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
+    depends_on:
+      - api
+      - web
+    ports:
+      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
+      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
+
+  # The Weaviate vector store.
+  weaviate:
+    image: semitechnologies/weaviate:1.19.0
+    profiles:
+      - ''
+      - weaviate
+    restart: always
+    volumes:
+      # Mount the Weaviate data directory to the container.
+      - ./volumes/weaviate:/var/lib/weaviate
+    environment:
+      # The Weaviate configurations
+      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
+ PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false} + DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + + # Qdrant vector store. + # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.) + qdrant: + image: langgenius/qdrant:v1.7.3 + profiles: + - qdrant + restart: always + volumes: + - ./volumes/qdrant:/qdrant/storage + environment: + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + + # The Couchbase vector store. 
+ couchbase-server: + build: ./couchbase-server + profiles: + - couchbase + restart: always + environment: + - CLUSTER_NAME=dify_search + - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator} + - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password} + - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings} + - COUCHBASE_BUCKET_RAMSIZE=512 + - COUCHBASE_RAM_SIZE=2048 + - COUCHBASE_EVENTING_RAM_SIZE=512 + - COUCHBASE_INDEX_RAM_SIZE=512 + - COUCHBASE_FTS_RAM_SIZE=1024 + hostname: couchbase-server + container_name: couchbase-server + working_dir: /opt/couchbase + stdin_open: true + tty: true + entrypoint: [ "" ] + command: sh -c "/opt/couchbase/init/init-cbserver.sh" + volumes: + - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data + healthcheck: + # ensure bucket was created before proceeding + test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ] + interval: 10s + retries: 10 + start_period: 30s + timeout: 10s + + # The pgvector vector database. + pgvector: + image: pgvector/pgvector:pg16 + profiles: + - pgvector + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvector/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # pgvecto-rs vector store + pgvecto-rs: + image: tensorchord/pgvecto-rs:pg16-v0.3.0 + profiles: + - pgvecto-rs + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. 
+ POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # Chroma vector database + chroma: + image: ghcr.io/chroma-core/chroma:0.5.20 + profiles: + - chroma + restart: always + volumes: + - ./volumes/chroma:/chroma/chroma + environment: + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + + # OceanBase vector database + oceanbase: + image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215 + profiles: + - oceanbase + restart: always + volumes: + - ./volumes/oceanbase/data:/root/ob + - ./volumes/oceanbase/conf:/root/.obd/cluster + - ./volumes/oceanbase/init.d:/root/boot/init.d + environment: + OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OB_SERVER_IP: '127.0.0.1' + + # Oracle vector database + oracle: + image: container-registry.oracle.com/database/free:latest + profiles: + - oracle + restart: always + volumes: + - source: oradata + type: volume + target: /opt/oracle/oradata + - ./startupscripts:/opt/oracle/scripts/startup + environment: + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + + # Milvus vector database services + etcd: + container_name: milvus-etcd + image: quay.io/coreos/etcd:v3.5.5 + profiles: + - milvus + environment: + ETCD_AUTO_COMPACTION_MODE: 
${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + volumes: + - ./volumes/milvus/etcd:/etcd + command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd + healthcheck: + test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + minio: + container_name: milvus-minio + image: minio/minio:RELEASE.2023-03-20T20-16-18Z + profiles: + - milvus + environment: + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + volumes: + - ./volumes/milvus/minio:/minio_data + command: minio server /minio_data --console-address ":9001" + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + milvus-standalone: + container_name: milvus-standalone + image: milvusdb/milvus:v2.5.0-beta + profiles: + - milvus + command: [ 'milvus', 'run', 'standalone' ] + environment: + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true} + volumes: + - ./volumes/milvus/milvus:/var/lib/milvus + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ] + interval: 30s + start_period: 90s + timeout: 20s + retries: 3 + depends_on: + - etcd + - minio + ports: + - 19530:19530 + - 9091:9091 + networks: + - milvus + + # Opensearch vector database + opensearch: + container_name: opensearch + image: opensearchproject/opensearch:latest + profiles: + - opensearch + environment: + discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS: 
-Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + ulimits: + memlock: + soft: ${OPENSEARCH_MEMLOCK_SOFT:--1} + hard: ${OPENSEARCH_MEMLOCK_HARD:--1} + nofile: + soft: ${OPENSEARCH_NOFILE_SOFT:-65536} + hard: ${OPENSEARCH_NOFILE_HARD:-65536} + volumes: + - ./volumes/opensearch/data:/usr/share/opensearch/data + networks: + - opensearch-net + + opensearch-dashboards: + container_name: opensearch-dashboards + image: opensearchproject/opensearch-dashboards:latest + profiles: + - opensearch + environment: + OPENSEARCH_HOSTS: '["https://opensearch:9200"]' + volumes: + - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml + networks: + - opensearch-net + depends_on: + - opensearch + + # MyScale vector database + myscale: + container_name: myscale + image: myscale/myscaledb:1.6.4 + profiles: + - myscale + restart: always + tty: true + volumes: + - ./volumes/myscale/data:/var/lib/clickhouse + - ./volumes/myscale/log:/var/log/clickhouse-server + - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml + ports: + - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123} + + # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html + # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 + container_name: elasticsearch + profiles: + - elasticsearch + - elasticsearch-ja + restart: always + volumes: + - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - dify_es01_data:/usr/share/elasticsearch/data + environment: + ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + VECTOR_STORE: ${VECTOR_STORE:-} + cluster.name: dify-es-cluster + node.name: dify-es0 + discovery.type: single-node + 
xpack.license.self_generated.type: basic + xpack.security.enabled: 'true' + xpack.security.enrollment.enabled: 'false' + xpack.security.http.ssl.enabled: 'false' + ports: + - ${ELASTICSEARCH_PORT:-9200}:9200 + deploy: + resources: + limits: + memory: 2g + entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ] + healthcheck: + test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ] + interval: 30s + timeout: 10s + retries: 50 + + # https://www.elastic.co/guide/en/kibana/current/docker.html + # https://www.elastic.co/guide/en/kibana/current/settings.html + kibana: + image: docker.elastic.co/kibana/kibana:8.14.3 + container_name: kibana + profiles: + - elasticsearch + depends_on: + - elasticsearch + restart: always + environment: + XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa + NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana + XPACK_SECURITY_ENABLED: 'true' + XPACK_SECURITY_ENROLLMENT_ENABLED: 'false' + XPACK_SECURITY_HTTP_SSL_ENABLED: 'false' + XPACK_FLEET_ISAIRGAPPED: 'true' + I18N_LOCALE: zh-CN + SERVER_PORT: '5601' + ELASTICSEARCH_HOSTS: http://elasticsearch:9200 + ports: + - ${KIBANA_PORT:-5601}:5601 + healthcheck: + test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ] + interval: 30s + timeout: 10s + retries: 3 + + # unstructured . + # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.) + unstructured: + image: downloads.unstructured.io/unstructured-io/unstructured-api:latest + profiles: + - unstructured + restart: always + volumes: + - ./volumes/unstructured:/app/data + +networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. 
+ ssrf_proxy_network: + driver: bridge + internal: true + milvus: + driver: bridge + opensearch-net: + driver: bridge + internal: true + +volumes: + oradata: + dify_es01_data: diff --git a/code/docker-compose.middleware.yaml b/code/docker-compose.middleware.yaml new file mode 100644 index 000000000..11f530219 --- /dev/null +++ b/code/docker-compose.middleware.yaml @@ -0,0 +1,123 @@ +services: + # The postgres database. + db: + image: postgres:15-alpine + restart: always + env_file: + - ./middleware.env + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456} + POSTGRES_DB: ${POSTGRES_DB:-dify} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + command: > + postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}' + -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}' + -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' + -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' + -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + volumes: + - ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data + ports: + - "${EXPOSE_POSTGRES_PORT:-5432}:5432" + healthcheck: + test: [ "CMD", "pg_isready" ] + interval: 1s + timeout: 3s + retries: 30 + + # The redis cache. + redis: + image: redis:6-alpine + restart: always + environment: + REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456} + volumes: + # Mount the redis data directory to the container. + - ${REDIS_HOST_VOLUME:-./volumes/redis/data}:/data + # Set the redis password when startup redis server. + command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456} + ports: + - "${EXPOSE_REDIS_PORT:-6379}:6379" + healthcheck: + test: [ "CMD", "redis-cli", "ping" ] + + # The DifySandbox + sandbox: + image: langgenius/dify-sandbox:0.2.10 + restart: always + environment: + # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. 
+ API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + GIN_MODE: ${SANDBOX_GIN_MODE:-release} + WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + volumes: + - ./volumes/sandbox/dependencies:/dependencies + - ./volumes/sandbox/conf:/conf + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:8194/health" ] + networks: + - ssrf_proxy_network + + # ssrf_proxy server + # for more information, please refer to + # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed + ssrf_proxy: + image: ubuntu/squid:latest + restart: always + volumes: + - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template + - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh + entrypoint: [ "sh", "-c", "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + environment: + # pls clearly modify the squid env vars to fit your network environment. + HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + ports: + - "${EXPOSE_SSRF_PROXY_PORT:-3128}:${SSRF_HTTP_PORT:-3128}" + - "${EXPOSE_SANDBOX_PORT:-8194}:${SANDBOX_PORT:-8194}" + networks: + - ssrf_proxy_network + - default + + # The Weaviate vector store. + weaviate: + image: semitechnologies/weaviate:1.19.0 + profiles: + - "" + - weaviate + restart: always + volumes: + # Mount the Weaviate data directory to the container. 
+ - ${WEAVIATE_HOST_VOLUME:-./volumes/weaviate}:/var/lib/weaviate + env_file: + - ./middleware.env + environment: + # The Weaviate configurations + # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information. + PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false} + DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + ports: + - "${EXPOSE_WEAVIATE_PORT:-8080}:8080" + +networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. 
+ ssrf_proxy_network: + driver: bridge + internal: true diff --git a/code/docker-compose.png b/code/docker-compose.png new file mode 100644 index 0000000000000000000000000000000000000000..bdac113086d870f2117baee3f9d8d64600e28065 GIT binary patch literal 63694 zcmeFZc{rA9+cthztJ#pqR46JHDT*RPgCSB$goq3cri#csl!Q!4q>wUCWhf+bGK7ds znTO1oGH3esQ|npld%yShZQHy3{{B2`ZO`+R`?}BT9FAi@_I*E2H-$4Y)azK+Q79B@ z*^`or6w1mE6w2})YgXY;=qJxm;lEbtoRX2GERz4c%!=}*P_|NJC66gt2K{Wd(omXM zkQk{;EZNz%aaW9+O45z{SM^z(AFhjZHa$Ham%hFGi`-{Lo^Onq<^u-yuX%6J#HNQ| zP@#KTM!l&rwCvJRjbfz@=Qim1bj{D^bNF^r?LR;jBwRKyV>#^-B79qXxZ+vGY|w#W z;h-=&x@C0#{P@Uy-_1t(=l{~3Gfw<^TJoQt*uvw7{{71s*7eQ*{FP+50k0YTKY!^H zoAm!a`2Qy7pBec7zjL}@no<7q7`YGT1B~_yGuO6rb8`s_ZqnD+e}Aie`LAEU6jW7R zKYl!JX>I-S%NJ_T&4;9CCkN>^Y;g2qJHmPRF!Qoy%j9V9x)yiqOG-YWx##NYTHW`_ zjoxx`erh14BJy-#$J+{Vr?4>HtjSa1jAf@d$M_wLN>dE$60d$?{Zvq}jh~;++1Yty zVxnMvs@0o@P2}b2K%s_?)VMev)7!UA#=1}c=og_a*R9dryzc*RKka zQc~M^d8sBRC*^3pqmQ*OJoCuo`QYa2TFiUsX3M8X+fFEGYI?b_TSvG2zLcc1QdCrQ z%ZhBn@(xzCNmbwl6AVzhN!!&&D*`yu3VD zFRwz0g;Cn~85!#F-fyo}=oCEMr26hU-5brUhy1#QZLiZ>jt|tn-dnrc=QVlXf~BzW z%&4y_g89gucpYv#-9nFJ8X8+LkN0PXG7?&5xs#>;%sOkThXk6NXCX4csINX zymxe3O}mF})%QnJEjF~8nwlq1o>Umm`E3vS!V1{@%($n)_E8mhi_p9__fudV0 zS8>C?Ut0TG-6cADS495S%~kL8&(AH)4qvZ{^{aYyxjaZXK;X){J$v>%p()tvDINA- zzw!l^iW6HA8(B5WTJZSs<42`5>c_AiawTg<0RA)yJS1ij^MgQff zf6sV|`Hy0{@!rZ64vVv`Nj6hA;`W%f#%Sf(*Qej+wx8)@5}hCawu_JNg2;Bx&{cPx zZ*czkQB1+xh2r;mt0LA5-E5(vrKOEH5qklPkZ4%Pw?)jFA^!pO2`j7gs%><1G3@{3 zuJDWL>FF4@OsW4~viT1mIl{yC_}uapE0Rn)rHvX>sd0wnTuw-aTmO^cd<>*yWIAyg zW8XiXy@4a4nrg(-T@m6voIQJ{@0P2pk|%B0_P2G5CnZ#X+%jLjk?w z4dg`Q{GE8pcd2c7IKk|sq-5-;rLch5EwrJrqkVJ&m=R);mp>#)IX+Dqrri(k3QJRqY$-Vrc%Io;^uPNo0D4TeAFU3PP`b zdF~t(w1rnaRlZLFFY5B|3V5zqO>?<5+s<-!Fb%)jVQXi{f3=L+cDz#Nz4?Iu(u3Fi z`(W1F-DSsa-P-@c_LnTiOn*M=OhLq{ryH1wnuRDI>$fCKs&# zEt2~9i4#dW1?wVCY~xhi#7N>P`8m0%HePLojg5_@wDcbQruD*1Pm=k-1-&a*cC)Rc z^D$V$Gply&+<82#a_hEj$NCidgZ6sXTzq{ggE3gd{GPk}`h%?0_{WPw;*FW=h0p%o zarpa&{oH=qr*5}gEV>w&o`ak)Xu~RKpj713@aFyd!&J=sDeUa*jA~kHYApV|YWLn; 
z$_T|EMly657u+InipbAEP8eftYJr&ZMVoSasq z-e@+coTzpAP(J+q?KQ*OgDp10uYJYiJyV7bxW!YGnbZ4P=()KvT$Y}ML@TY8D_34` z%@$il#jMa{$D(seH${4OYPcv-JO69iO)b4^*SN&Qny+TR3&{8HHnj8dTlg5HL^GzY zmJ&RB!iC1!b?d={?`Xo_8RL+AT$rD&j5^EV{`z)V06#*!G#1>Xzj}9gR20{qJ?j#( zfA3is^>*mpyEs-M5oR+rG%_-h8*@JD)=-o1^z3Yyj5qr<@&qsAQ{==C7Q>3z)CnZftnr@q_PYnJm0w0|nW$g>m1LiKP;0;@ew?X0Tp*yeq_Zi_{`2S0Bcr3oGVToWCTEvakl0>l z^IEsai{dD;Fnyvi)nZsP=&;FhB%NBu^kum%E$oPD`Ir)NJlsF6q97Q(H|yKTcj<#( zl6kG5;g_3&hOUiITHT`GEbV>kj*gDDpFcek6ZhlL-A+At!BAOK^Nrb>SnX3|HU5PL z#+cB=2;rNrb-OU2n3$MV?p59W^+{Y?*w}m?KMtL`C?~fL2Lu5!#TWx!P7(c8%w=7w zk~-Lw{!M3e!I7UY306s`wkj6FsxWbmHX_#weebf)xEnbR)75VjsDG>Hzb=Q_T6vEvN5dN z#K!hIZK0z>hq6DtwXN-PaF>h+^D>Iay#7H}psU0@^%lFJ+mu@3-#rkln>G9D zJRY@%n%WsBR^!s8%D30f2D>;mv*{Wb?0G=Nau3+apb@xdUGDpI%)iVuz>R*QC41#7YZrnU{=#c2l58t`584>^I&mRH?sT$dd zi;D+{*$8=U5x$q!YQOpW_wVoT46=&P{Gk6?pR@wIsJbs9dFzedmO*=adkU}mlgWt* z=ciAf<{i&Wi7gw>@U|&rr}z9k8e8^U@Ki?mk>M-zo!$}#je&xOk;XYTT0TBLcYzx$ zEiGRnHd!Jvn#}(;50Y?TuMi!h0_^-6Dz)BY^C6mbnp*rv0%FyCO_Y-h-%TO>>E3NR9jV10o%{YXVg$;Kl`j;JMGzI(<-Qu03K zk*n((Svk4K_wFtKo^6-${{8!6obuSWKXHx=N=r}8&CUIY3bB7YH)ktsKlgros9F8_ zp__c=<>l7%7vAJVM;HIJr{xgmy>a8lMh1rMh_0NRiW(Zc%^%EtZ|2O`MivmG98_rJNE56t9btW4gmqhgRGvs z7vB~CG*LC1IF1C;r--IvxHQniLI4ghVj6X_&lR-g2+qZ4Y zyX>)T+qPtYpmeLT$StCl3O@%2cN{s=7@lri!u;{$$4}+uXR5w^n<$*br%Pj%o@Jl~RpiLfL$A@yVHq zI<1J8FOO!=bZ=g^b^iB+FP3ZuMaT{b2`xrm)vc5tDRr9P)QS*|!|^%HBp3ET?U2)n zFJ~sbHd5_s}CHF;X^e z+7$6E8@O&QHMJm5?ew(evaKBU)2}o#?wn43`?d>pA3ZjaRj216v)}w(Z?kV_w5C@G zMo9R?N*JCm(x(~HdLKM3crwpv^?J(Z^77hL zF#z1R_zt?`e%<&eoVDG)agrDk4IA;y?nXV ziT}p;$p^MTy3ZLd&+&@097b0^hyexeQu zgY(?EbBY%)#$L+)aNOeJ#pjeq$SnkVCSB+nVzI^^n$IZ>XmX@|IbrU1w!?d)txxt$b^h``l=_Z{ICR2_%t@`|{ ztMto5LBtsA@!nTV11MoWRaTw@C-BZV(zcdUndyV);Mwrs$J2Iq9$bZDBUk+?R?`cx zjpQwW=7_vHo)8hO80EMNl$T?Vf;b%J=qTeuTy>dNw+Ef<A^tB_D_n~1S~{DXL{9Cpgr1TBfIIzYIG0RhaxUB9P>6W)5feR}{n^_-H@ zeT0l>Vl)C|Ir|IyySpEp2^Q&lw{q30&?}|>hfRO1;nT{FpLzk{$ojLWnxvB7lY{OE zi~!nH!otE0lZ$gb5|)_i_TFAqW6?6%M%{s#*M;d?HIvu}5mGl_AN#Ow-8!=yR_E1u 
zF2v`CO1Y0zI4sa7WKFW#+S-1NS38h7-QjLI_T^9#3OL)*0;YzptjtUb1uImou5v}a zOTfRQy|(uw`<5ILB*?c=nnQ*)_ldlw0 zU2|EsVpZ?E+26lAFgHi+X9o|>&Gdv&97jfOZQrq@9oY91PDn}`kL>BwQr_%hW-T|b z2MXy|{dHDi{$T3OR*oO9SLD=9di)N9wSb@lK#$Wx22V9qp_ zS5Vj>cj}Z$$EUT%fxAP@mF4>qc1C*mveFRze=;sHaRUmT=t;Mh7cX9jELxbFE~gBN zeUp=udlVAlE$zw5GRN65IB0yqNN~a?3L)EUpq5%zR@QPTz31|^D2MlN$*ELSyqB3N zdeESH4Ius1eLbC>D<}=VMz@QO)x3?Ex%f>rZqHWi1~G~w&}DB{cXxMW-PNHcA)y`} z9Ua0$0Vi(E9kfXu0voe0dpb0)>zQwAY3-Q0=k1)x!At29oTWUwci(;c_H99}%8U~M zCKHSewY9&JC#E^bi zL#?(>gaH?CQ2Kj zhrlbno85C>_e*JMKFAR|X67|FZ{8d8iUHm1@}Jn^IO?A=|`tzVWgj3d$1Pur>m=`&6T-A#(=Wm#=`C<5Le(EoG{(X zK@$ZWxJHBJR=$$wBdnicB$iWic*?M$rS8q8xG`oUzreu2yttSzSIRyN&3V{>L!(R& zrnQdy#~H=(K2m=ek)V^nQ!`i^8XEdFR+%SQx8uD@r`fs&|1uLybkhg(Qwhxht5X8= zzkg=|;dATOt)u$-yMIg?-ri5Ao59jnxT3=Q2FDO!0O zV1$~Ic)RCJR^6S!hLse{pGigUjDdqJ+wQI&0aO^!xN-@Msi0x)ON4CewC3p70CZVA zp0CfD&kC$wy&B&lYa_4m_A2$xGeOa5K@p~zQtpfmJU@A^fH(A(SlEB%EN4j!P@!s6qpatCwHoRXDYMma1Z5_&d7ygpHT1rS_$J(ljwnKP~rAAW4l zcj34DZH59bcd1~U8>#M+N?UC_xbfoZVy#E?*qPXI`L)r>%&d?8+%TMqFI&*_LTYi+pQ#!ic z`}REr0zV}$?~ym9G~pNt0N*FJg-=ZE(({A*oTvwWeLE{5Yj2;u<;WfRkNNo#z)n}i zHk1a~kByCWy>elHjC^OTe*E}7yjIEivtki;6!0F(pGJE2{;KmGRDXAU8)(kQn-AS6 zMltyV9MeV)4vx>o#b5t}XuE*#A3uM-2W3QP<#oOGkIq-U!#;F*Y7f#n-*uyn)4g#) zq87MhbbR~?Xz#yqxa@xgw9(8w&jheH(&ez}2(V!Flm<=qZjkyPpkf!e<5%Nz0F@bo z>oy^WO^yTS{&-lcIf5{5Imnj++G>& zDCp?#--8)3?W@`mJhkQUt)pOSwuA2QD}B1Jl>E-z^k_$0+g23RVFfyI3>!9V9nM+I zE~w*CbZF(0l)TaOej5@9dC4EeKC7Pc3l_y$Gk5VMWO7mnSbB=xCVTem(`_dnxQ2vi zrO<|D+Gx=+GVVmIv@UhIf8P}W=8XD<3;yp6>(1aXBE#=}92m%hG@g7pZ@G!Nd5y3p zqzbu{CtbG)n@T=(ci+Loa|%VO*s`r4oHx>Srs^vyKBjD-h|nhS^71l(%Ure{WYg3#oQsboR~vJRn=W6!XIPzuBVWqr{%ZjOuQ7;O^frh>uG73&YnFxgDPEQdh1gy zd$p)Ruquy(gI}i^85zZ&_DqSb$f&~HgLso}008vz@~T3$$s%GF9Uc*(oDmci6?Na) z`7`$Ymlpd6sjDf%x7v=He^YTu3Ay!LeU0voey0 zH>EK$)=He*;ezS)?r7PTZq9-C^f6m^()>@INxYD{KX=(VV z= zMK!IuHlgG=IQ;^fN;b!SJ{-ZhV`zvE)JBysjhdXke#~KE;m;_?c(ZT#d{|^RN!xus zat%co5#6tp*|&3Xroth%%!H1VyKY`y{)~GslD7e;cgoZ>*{rWhDr4@Q{+|PX2yV% 
z&mw01E_0?ku)+9i#3`n%36&`|6>}qw4;@cgg}M*<=)bV+Slg6oMGtnWw@el0`_BIzhlcW!H|R^NubfL?O_QMEH7 zHj_rz53(XO{l&CT^FBDbBg2_tW~#~!$c33%S)+s@{qkHelhwW7`6A|nflb6L7X*Sk zh5&v(t3Jh$33U&q%k`w7Wi1>8=D?fPSI?B8Tne{02uiq_Il8LFvXeR@f4`iW*}>4T zF#NL76IMJ->D8-O^~qP(N1hFNW46nD4;R-8?z1mGVO^epvBvYH{^LQ2`S|tK37;oV z?!S9?5M1VY!uBG3GUVi-Fn#{7b0PLCcfRnE98xGwwYuE9-`7bwiTj+OcCLfcTQ?qOAR|>cX{i=c>8{t)k>BrB7%g&dd#H zEgn}@WoEaZ*8V?r2|*%;SdU+~of|FsBZjC}qa04WR`FuhV&Z9}{oH@$juQ2oSRwFxIl}xNPI>I_|A6Jd)yw_WF>BVWQ4rbkUq4U; zw$!jTJ`Z#(plF=g;j27+e1({MBCz9gyU2hH{xy8WGB7i{B0X>k2{Edt8l4l_;_J3X z)4Fd*&=Ct7h_KEGq)Q?k;tCS0HCcxUZrM{83GRBk>ar3!LkjJ_V)E{=cI55Vv(O7-n(X!-N0; zbkHt-v`x+>BJBNp=6zY?8_DKHKvk9U32fzxKLwac^k8gf=v&Qq5#heNy6$58i;nKu zP1MwYtBN92oKk87eN^i&>y^d}5-#YpdCaSxvzK`G?JGi58$~40{&;`w^!T^4P&?wy zR3TsF!~sj^X_35Pw1T|l{qG) zy(;3AYT6ApAU0vlS+c&C!@S@E5+3K)t&V97oN)xgbBTzs8aKSlQ_-?>24}~!Q3g<) z=%?6CFENd(8F%&qCd8fO)ti4n%i#s&z{ST$Qz1SZ4;3+H-EbvhiqiVkG+QVXUbW=E zAe*m~IiaUcCI_|7;lqbjC$d!CjPjCni}sszNK%fTJLmJUaznF_<;Z0ss=&x01?f@K zvVA2Aml#c5$o9Eh-lF#atheD*yScgXnx6Mtw|+g5gekemYU#1Z@CdIOu*UNj^wSrWonAdB-<=>qba)C zrLdr2v^`sL=hAsDmWQTwTw8maV`G|v>Ibvk4_V=ZVVXV4kW zVVudQ{iO1flLaVg92-yR8Ly&c_ZS~HBLrW=JqyeQBcI0gB%O{BhXpsQvF=yMB)yGk z`$|enF^4pB;Fk$4C-wW~1LK-l<;1kK)wgD63s4i_MNeL`^d>s|fxRHBuzWR)4G@d? 
zbv~_`Yh9cV`RGF@XzMNdYYh#DxW}(wrpAq^!CWCgt;DFr0FLwFfG}$SO#jsQ^pP^p z-n~+&VoBP2t)BJ~5*L2`Fj3&RUsx_DsTZ>ocZqBn%(Dx=50kd+h{e#S&%WH1Angcm z0YobeSgq<|+0BURl8^EeDEX7y48x@rwAJPF(`DefDug44p= zkH)fvioCKRJ${bv`wLLwT)TFiOo)D6dHtQla(cClIF$q)gjrC9yAR4uB|-C+ znVFKMQn<7`)4oE9#hJ6g9HWDSgFermxj}p1&CKFsy%hWP0IfvY5i&)Lfu@;-jF|oR zX}wWUlBCu;d2-F;$B*Bo$wpTLsQ+xu(Rvl_YIMrPgrD8&2VHn%WPcaut=Y*PnN0jK zUqDuPrL|bEmzWz_fqm!=`wasF!^@X12{Cy8=~E^sLv4^TiRTF*2MNM4BO{~g)%g`0 zHg3H4>62tI$2yFUWE>9;3C+A>*XlnB^#EpbJzl#(VsEBza4_4M7ou(`cUN+98Pq@a z^{q@XRB=i`O|(ryBBVet)OtUVJJ`;$%RZYa@Km+;^{G{yqpLZSpqWM7Ob}o$^eGe< zYGm88z-yDxqLmNa)eeB$NjoDc-I2cw9)Pl2aE~}i-2hv}1Sk;O2naB^KK#%5&j-b~97Katf}g*?4p0RgUokz=uB@p*$3_kBVoErkLBUD9*_v(b z^{_Y<*ku(QPj{umV(tiwdpxe*iC~6m5w;2y5r*Vrt+r#H1AvVEG=VJ`bnUG8mA3jw0#acmgl9;qq^{x5QA`UQs%6=-hSEMawT40i_r_#1_mI9pb0VD1N+tjk7DO349 zo0`LSf?~@D=SRoF5@WVY&*?&f=A}W}8P0n+r0qyG6U>`@M3#VpUMNI59Dxh74wy(H z0L-zVEL*>P`EpD1-9FC5A44lTLIJ%IYEk+ZQ~<&smX{N8a*p$)k_@!G{lKYudU|cW zm0`p}1qzMQB+l4k<)GFQ7FewZgo-d#M4W0sW1GkEzDlzv>?1%h)tope*?(c;tTge$ zeZ|gN1=bF{S11Z3VyF-}a3JYg1-lUf^1{Nx^_I-^D(#JeySSu1h((IhwBSzC3>CX5 zGdN<)$?w-ppa_wTiJ!xEzYnt8jvRC5+G$V^k-U9)*c$(bicZ?vZm6eC2I&a{m!g&YH@Trw^%cA=a{i zNCoxyb%s7aJbb(8=qGvvd>MR(!NaTg&Uk;#dWbC3{c#CE&BZ(SYGrQ$Im*eWndyos zvWx=y#0$~-B^W`%t1);zioP0kHbna9QHuTCsLc3aqY8XXiGM1&nrYs@IID`5>`C$v zu(+~PT{!HSL5EF)>R0RC8Osd^N$ezho}^M5qL3H(`s?mZl(xY^PmpCiSap5!$Eb7R zI8h>?JKFzG%2(P?(6$%UvIXDF%+h_WEQ|>k+cn*p}*gn^(jKoq(U|%DEKXsFSWzm z=xT?=ZJY*MmvVXXBd9qfEEHE%;4E?mw)OV9R#hpI2MOars+pGjgIk8)nAw;sH ztg(`?g7fU&-(L(B8#&VF(W93zEeYj;5QKL{&gGciZFnG1RX*(Py^ifCpnmZpE6EEe zx%a?{M5nf9u@>TI@$u(^h7X`)wF43W8|u*$O}xGRl^L_7P|w}f?70Q*OjZM16;nu3 z2CHZJyD!QQg^(++Bo}9>*#_+3rJsp*oO1X8H?=q{+I{-;3837^3nS*_;aQJ8j7Nuy zWSX!2+oAp40~qpUXy|f;rE_>~F(}Zyru5JLOeewD$!iIbe#C;0U?h-Lr0Sqk+wc!! 
z*P}=L^nf`={+WOjS<&K;e^!)y*kA;r%Sc+zJT>-q0O&d5{qk`hefYOqFZKeQ09D&I z)HHl_UJvzHSw2`qd1+hF{8QH`@7cFc2InEscE)sR0+GIm_cL?6>eR^C*m(l&V^=I$ z@@nov5g@+6!%+3BUTqM(?yxY!ot&KfgvKfQm^#4ZnV`dWi`Z?5Wvy%yEJ7!W>CESW zuF=c>6bCd1AxK>-4RPP( zE!eB%L&PsFjjC4tCMr@o1fx{ELQOOhFp5KEp!Yw;TeMf^$j9}>uvFs5W0>*+nHNR+ z_U+qMcP6gdc70dS$`^kd+^FE0%o3%Zqa&AN3}>V;qJ$6!#4HnlqcJ0;Ti9K31jtgK7vxBshQ zLe>K$bz2z4uO^Th1XZPrlatfuVRH+Mv|icE#yb3dt~g1d7bG@TKlpEkwd-|n*oaFQ z<)6Z}bi^9W!q(lpKSxgpgs|F+S892D;X3haY;1HWV^{}87_v`RxnUVs_qm9MvZUXM zx7v?y5iwh%WjneODg<`XvoEY;tpNf3Iyw5DjiExkDW1G`w#qzz4;Ln0SCD9z3q4qN z?b~-~X8vC@8k^*`<|Oc@ZBXvOubj#nx8PQ1jZ66e2R(&2f@=7B1_s;#^9T?UeB(Q# zxB1UBQ<=bZ2d~9L6y*Kyut`3PDry>ct6l<%_y}}NM^Enr36GqOK*8>uBz`sPrNO06 zi{&OSMcG(g>=1SlvtGx6x`e>F7Kjl@=UlwJ@id(0Z}OUhY%_Q*Kcn3n6ZPUF@px&J zKNnXqd>HWtveau&VTr^GRo4$rhb+!;;GjxIH9r?WOa4}EK-KU-QGWw0+-tyeXQ>Zw zi1EY_6#3OUu?LqNr03YyuUT`{)>ahz`RXs5LpPdSq0r4-WQ3JnqPz9dh#Z zr3aJh1&_)W(5M;mT_-eev}C$rxeYm{O{YSpyMPc0bAkm83Jx~G3x!$?w-DnmP*<$S z75MNP%nc5*5~faSc##?44)`KX#xN&`f^ zO1e5bIw(hHZ@oo$i!ZTUO~vepC9FQo*X{dj#%yXuG@v#eE9*K)2;de%5mM@N9C9YJ zEQ$i)Ji}fgOW%-+>ZRX!F}6D_N%yeBLT34*AS*S1!b-|69-eRd9HN$qc{7ozq5_O# zCuc+j%+=MtZOOQ8laZd@j$Q>x!od&o7r|JP1WKLv(g&q$fpjtFt}L|L&GC2FE1^As zL?#qoD7>8s%v-U7|7MG5u=`k^R5oV&}V?A-;djac~&4pB}os>_p?o=4Li{ zimTNYgVP4<;r|9r3`bCkF|j0+LKR-Ne~9_&&(yLnUj+LO#NOD=>< z?|xpJ-{blSeH&qvn1LzwT@s8f|9H*#|0iCv=)o3+A_Lc?u~1WC@bPZm-cM09!b9PQ zEea1(GJN`Pi_SGq?q?eN{NdMb6r)W|O+SFv>n2fr@Y0HnKY|J&3&}5!8RjWHjKCCa zD#Df{FHr=ag1JX_tnvvzR2R44;9x}20nb5v`YANkz7+TU-T^Y^Au6^sgY@Uu2IOKdY2RTZRqNZkfhOs;(o@)oLkh7+Yw}Bjm=(BQH`jVmv=Gx@ z!ysXN{cU}4#5eMu^-RR_`f6n~iO(NJLetyjsf>A&%+ctf2 zF>*`3{CEFITWMuuuJaNZ{ay)?>~S?7OW>GiqU=CL?I5|=NBvKx& zXX3lw7<;50S}_AfgxewV6X&lw+3&$xkEJAu>tNYd;sr^yvb0=7`CL(Ph&e-;%=+tt zklmMUh05?0c;O^^Ilg$4&?seIDz-Q`w)|+1`~8^CM6(4b05DAY+Qy$w*2T=Pb_ObSkZ5IBVQ-m0g*`-F5H>nPKzBBm4J% z<#Zl`y5|O)?WBP!XG)xbR$cn-YfeRxa4%IyPb~CEEYeY|Resf)WxnRyd3#;6C#&G& zhY#0fO)-lwMaQ;+Gk=(sm4(_+O;R#@eo}pq)}Eq=jx0{oj9o*6gRkGx3QRCxJ*nJS 
z^`xTsiJiXke2tmy>%qe{_ofKdoqQbKI$*b5%IOPE!N>%+44w`=-ru}92w+8V1QS*F zexbx%@Q-6@XOZb4q{86EBf`SV948k{0OL{H5F-&)OKC-8V@6@Xm%gbpC`WryG$D8q zZ(CmZnZ<*}#Cn5(6WK*dX$k^f3ad-R{t%DztusHmx~?S7gVbF{(MUIy8Wx1ZGfU`p zPgs%_&Z`qP3F#-HI6{gc9+p48r<&(7g3UQ0R(e6di@5~Llb)wqTxqE*B&;v08eN>k zk*TSKiQleIE;MHk0hZSt5gJ6{k9;Un+SS#ijh2@+R8;rj`@6F!3MJN<;{mfsOD%)1 z@Sfpi_GuXvD`N*NAkqWt6!q7{Eu#?U8&MGe9}(nq!BkQlfv6c_EB`v=Bro>h*@wNJ z^B%D}L)R=0ox)Ca{IgS`jNXAWyf3Is7&Jp^>3SL(8d8ZiEtK+RIf&UzR%aJ|b<^$o z@W+-lFStl6D=Q596V5N8qq45R_WywS$UcLM zd?>~!Z36>_Efijl89gii;;8@4%amOf>|NOX`Tyc&+NR`b1@L&Ekf0!P;!Rn)eY&0n zk`OIuJWL{)!8~N|WAgG;qy&UopLk(_GnuhwaNr*&(>IhNVR3$_2f-Jag-QrBK!ChNi%|GH-4CEGQ zt0Km5wC0npBnF22pf8x&d=a3Zh??BFQ%IJ#t7H@D5{$d>`eV4Pug61~y;kGD2NF6V zf}r`z$ggz2H*Z|3Rd}lKDy6I}M4*S&)k<&cY#Hy^%iCeaYv6CrLo_B#Zmtw3lqob&%Xc7!nDmh)#y8MG!1JzTj;RYWG?(J z=Q4butK8)b3qD+{YuB&$wHZn;inlQX);R@{8Rh#YovBdnH2z*l`afQP zr+qbXPbrViiqA;mAVwr6D&X|(fIz}XNk;Gr?ut8dgZZ}6p$Yb8(!Q`|3olSi{4b5% z0Aonx+hOhh*^;H+sHv(NZ6}@!AUxi3t%Sd<+IY84(>0|UA%Xwnz&gikq?6W#7B?v! zozNE9hG0`uQ#}+w9mB)>hhb<5^gA&eX ztY=s5-4zYKpGzE&x|rxH4ToEEawhGtECTTS=r_GjBIXsq)hl4P31lILDZVu5a9I;n z9O6ws?6vG?t^}vzfXf2Kwj?8!2?<{C$yrB-x%XlhGpNdpHV{SIVRK~Ov z)3)4YSu@7P5G^B6^Oypn(`k#lsj7%8$Ic1Z{`w({-=_z#MJo{aybZGQ^6_^XDGzc! 
zG9TbL{l)hPw(glF6Z<=3gNiT^z<^XfOq!N?Z9=a;|FMZ zKWuo}>(`_2uwkr%hg&k{L6?&GyY#_~f7(%yQ!sA#BF||MXq0@~%uVU@Ck9~;i2wp8 zHhctG0XPP%z#ydv*TIDoMo_cT%&kuuQyj{JhlVv;01Qb>Y~S~{QSUP{Hj6UtzoY){ zW~=5!81ScVl>)b~rwE&L+|Qe0v7tI}N4@3shl=P}W4$vDf^1r+g)hWClm7H+^REqb zbUp={ze;#i(O->KqhnzBixMDWzK8TO#>eyGEbn5~vMZ=c(v?A$+{VQfR{lHMZLep_ zHfYE)E;MY!m!N~NP{!?CeUONb?O6Y5nB_LPmW!Tx%2zNqGd2nmX%E{&sVDyaq;h_5 zJ0pNfRc~b3UB3XClElct8^%O`LitaewIr26!FfXCR;+_#bq_o;=>PKAUs^GC8>gK0 z)`8YeNku`gp4(StOmPIm|1Ohi*%x9{$3+9)@p&i{%= z8+0Pmli^f`t^_sX1QC-1d!qnbaTF9irCHK|wGLpNH{>V(REa z-bOVT6tU@$o(v+8yEjQnhPy0HCEYovgHWSLKQpR=W5~5n2=!m40H;3AaXUJA^0BqN zh{$s1PUcl_RzA(j(|Y3UY@{PN`j|DS8elGWOuE(*QBAezopr?9J+@_W^#bdp6!TG^ z(<@D+lz8hHq*gvvkdz&_IbSLFHkskun+tU%Km8;8{4}%tBlcQLeOWp2q0Rr{PQDF% zjxXOQt=Sd$v#e*zMWS1z+>uHkh{aNj!`o_C$e3_XX7{at13hBb#Czc6CQ!-#cON&p z`S%gNh`gK9>%!6IBZMmtEmBoeBMqkz`X7rx8PI=~oV*dGHXAX7l1_NUH~4j4S0`b= zSB!=!;6p3E1{4kLtlEKSrF!wA9GerQkiH6uMM*p-37jF)GKDA|OJBvaM3S>OBD0kR z3DeWEi#{UX!-C!lD#HEZ`HC{ztV40qGbbAi|Fq(6K_9jYsn#l+{_>Abg`M6RaXv48Q>v;Jz?{_B#D6H`+kARSioyHagjk{7Jc?DQp9W*`m|77YN9T;xDHlGr@a zMXI2q!?}u@^{?BvZxf3e_N~9jMxW=8A8#aNJK^U@cNuX2K};_IEDQI7&bR8}64GB$ z39}S3;pUu$-#kz%UGT3inC&g)_oHdR!$x==4X0<~)5v{!%!#zgB5*^~k;1F(K}%60 z1SLYDf^RN>=aBTMl4~DoYikvYAj+-SNq>Az%n1@s@BHoUu|=CD5s;yja+z=EbUFtr zj9ttcG8hB&eSZ;}b6MZhZgK$Yx#C_59T@U>Ss!b{cb!WbHQ;+WvF0wEjBF^yZ{4}$ zFTzSkC%Ey?dM2XiBJFzp%#y3Ffqk?@0X9lneo$RM5uvF(U;5>X6uPbGwrueL`}PqD z3$HTf)CNx42Q5(KLvX$6HEgccgv{7!kdU{F$<|bNC7!dCV2i(iyUNYf_0PAq0 zMbO0w(B{N1XBOmIoVcU}SG$sC7x)AclahQy*7>xMgooA-56B5&(a~$jJrEIIlH*FM zKX%3_U&_h?!-y0@ZgoL1aN_7uM+|%XF#!C++}K%oBFEh9aC(|oos`t8w>%(+6DIwW zNB{^NBZ&4Lz~LTJ`J_zc$4linB|U{nhHRo9zijni?+Hg zIexu#WXr+T=!HkW0-c6Ia8?yFjh3WwoFV4a)Kp>!ij;r09IXQ}opdZu?%)5* z(#oo)J_^DPY)aQ(N=iO(Uy6)^Ixq%6USmhjPJ%%gs&s zvaJ22Zd4hP3sf@hysqYtb}PX(9EiC4Q73~W)P{$N+#d0FHx{`v<*H9$G>wSGkOvaj z@twZ$z2%VD~VBK;zf1+TN6Hw;8q; zbgUa=@lNNW0fLMW*D{cP-u*&CL}`YYO1E+2$3PwW8ecfw+F>C*0i$xm!DsSBbjco~ zuUY;d!O>c7&D~8r7uv}002dOyII;eiNSMMdasdIknFjvWTr`tw!@dJGf;2L7$$06R 
z1fE=tLUN^rg++>ubFrQPk;btTeRi?o|3(x_9-w~c1@+QxN5FH67 zBBvL<7cLM?tNR~Zk!*k2EBXT~eeLHtX?$ zI$9z`GOTk>ZmzFq*4EbExRXnO_m5`j1T#%Lzz+3?atv2hU46i42B0Sof26Ii?}k5v zyZticH=K*g?;>vFH4u1VCZL5EA|_0Mp4?87IZ>-lFa@lfl^GT{W4O+7L3+3gT&)ZI zOnOzwgxt76gS}K1BEbPm0C6kCvF!_RHPbD5vfDuA1`oqJ7?0$hxE7Dx>fMOb67>!Z z$G8wgdNA(62S!FlC}l#uW^AxFfQMR5d~YP|?%cT(R-jxMFZSyeQU$*WJgN2M$&=io zqSbI{kfs^<_V~=V8!U%UklOk9T?BoNC=LU~Z?^^;1j)SNP6@OY$k^M9gHs_+Qpl?w zTZA8T=+VpaVp0M2qT}QL8W}Md!ldE~3b_tF$p=f;qR^;YF!PAc{(MWO9eKb0^1wXl z%mfds4cDV(U&K>Zy3y z{tQnf7Ah?oyLaZY6fiUw{3zb^rbixb^3;2;;e|_cl{}nhx{k#1#VrlmJw-BQ1%3 z)g)}4ctF*z{9S1nQrInGq8GJNv?{m*vm5%E-NfyFLnvp$fR931v>H?7Tyb7Ofr7FS zaa;$j7xm~{srGTD`g1vqIl|(5>{twsU&a|5qdSTVgLTr8LPkO>z1p26hbUc%9 zCI@T}4^KO+CXjt~U{ZuRzzK>*HNV>nIfQJoVd2U8+~)r5xa+E%3dd~+1O#GD;EQVH z$L&nei7q2Rv(Dmwv_Vy~MD{VK^I=`P?JrUZf!o9iZiOYZry@iKWQ?e&=zhQ(D%OK* zva_>$o6_G_o4B!R=!)8(Mz0&fT|Rt7aK>?(2U$;qcXC&LBUVj=FJI4gxb-)l+Lu-R zveQ)M8gLr3paBDzIS^s{%(n=F=7q;n;@0$tHhC>zAHXxo%a>mwyY()jJ+uHwE5b{A zKl<@J*3$319+V1)61hRj&(DwCd4#~L+;Rf^e><8P3w^j`MhZ9IBz_s{O@UOoD1)D@ z9K@@%!BJCOTI%%d84EE_BifKVX$ZNNvs>yf_8h9yr;E-)B8Z>lL8amD`+( zGiyI&lo&3GWAf!7Utcbhjqq0)K0uhdQjRtjRaMpM!xeA~xApa{y?uM^fh(W}@c@&{ z0u+nB+Q8Lca|XoKM^KP26?SdCpNpFmNYe&+g}%N%a(@^B$s|0(@BhOlcIO*dATSSy zErzxbqbJa28*CdC3MzYY;|}nXG%^94lL_4HN8Fyr7T%d72M>I2ZSRfaYTrhEqqB*f zk`z@$PTA$n-Zs5JVZr~PCEb|G{nrlk0ZNWXrcp016K z7{#Lfs^ls!bRILTTZw-hU9>9gQBa`zbA%n43i+DcrG>O!UYESH8{;Gh2xTa?=&kmT zE78CM1c$3jUZOU$989SrMGjH95{5F4g%7iKj`E=BS^|(;1r9G|1|LnI&w3>ZMk-+l9IpG`oijkW4+fLs?2X9z%vf_75H8nTj z2GZrN`u^57bOc&;R!P4>mm<0`&oRD>d)NqT1%-n8VS0MH4NcmTKy6=88vM%af=m1y?Nio zMVH(N2hJE1;x2rK>!rX|-^ObFYbPKNOp*ucboh~OfcG>lq(gtTEIHWF1MGb_uF zmWk=Aja>Jk3gdaJLS1(4+o*m;y|Y#Ylk;QSrip4en7{vkkrQt1)Q8 z>tb!l-A~{ptU}g?S{HUQ(TTaquh|w(b4>EVv7Nr7e4iz)A3B6A`oh%!A-{l8KZqYiim8tfW}V4r)Gl*nDl zh>6*a{acxBXG2I42pEb|Hx%!0KyMEtG__msv|+I1+DH_kQs|!imA@8}8Tl@(cTwxY z##2DHqgql-h}i)VW&~6d39nn87elq%0do+DuB?lPZf^fQ~K%uAaZZ8 zvd$1`fDR+8UWA0uf)r%!p2WdJoKK==)i&)x-VUa_!+Hz!CN>Z@>%PzO*TFbFD 
zobv@aXXi;{1u0&^xZ%a6<>b~8p-FUiV~&Fa0j(x-0)K(4tAD3YPj2nQJrQt%FyfAb zr5mLHGFW@CUG~BALZKw&EQp9N&fV^+iEGf*F8j7p**5F7#PK!c(lk(oXbw|hOhVfb z*}Pyxc2DI19mFyuG^Q9v&E*6JZcYf|Hdt;sCtxIMulV=DlAO3c(w4^z%Z;2m+V0_r zfeG9L#YJl2%!(B&OnS=MK=y{A;4YCttB~J`Z6oWdBJ6BZx}wu=}-vKKux+QSiky?)q3x0m&Mg!7X|E3_uQo(4!YG-k1vB zfom0|JLqsn$O#n7!!NAxm9XZo#li7l6_iseM%Rki57YEd6%Sm)mjd0NlP4*1Ra<)* z;vsqsC6b0={x*RMPg(*T<|ml$47YefDn`0mLBUqPkAIbpI9#~E2xG6d&c`$>Pg!zt zBcM4UP_RsIM)SSkr0x1LNg=jXtPimP6K^WnS5RxI=>l2PVMoO*6D#fe+dVYIzXia! z6$=5^!cn{mfZ{SyTQ)Nb3l(udiPb0yaV~OUGlClcMXCQZ3(7&CZJjLOaOiy4t&l%! z8t^Hf7cac*Kzz>-ICm}X$iD?Cr0Woc zozxyE(K?-(G;`PQ%X9=Am0wwzbeCR;8uL$J4>2^5(hBrRrTcLU>{dknCkeF5i{c{T z#n4NhaNy>;tSm9|1^xZRgf>3h$^n_MwlznTX1z9SDvb2yHOUhFA5HdhDr~}q^seB0c!r+4*Z^HFJ43!o)u6*s$}?j(P#&J25EV8 z{*UG~-2HF*et0sB**z_J|Y6dMEN4_|g(cQ=>n@Z^u8pUT2QoZ~1S*KxEvazW{ zz}%c=FMziN#l{K4J?hD-=@`AbP+CeW^X2hw_UYr`Bf!= zzd)+VEyJtS(8`QNId*ts7#i9X$D>10QchN~p>6f|?*>0V{QY+m8g-V_ zrR&#&2X5=4udk@`-rRYcK=qw>(^g)gRsuKMYHAfTN3FPk4H|B8^ytx#kRA6|`z&0z zP>^^OB_cus5RkNNLi1SKysW4kvp=|h5r8*8H?vuf9zB4;_@*(WC)Tk7-2fTRyO;Jd zG*lv4Zf=eEOS|^&mA}!ufB%4E$2>x>W)$qC4V`&*$*5zqq9)7p9JKa(Z|Lfehzy={ zdP$Y2_*nMTk!{pY@l1q9Rfg$^8vsKbSNE3?Zv=6`b5Wb(W%o{K_KRyL{p(WlQ~TiO zXz1E^cV5Z%NyAkXscuvyQ*R#JzI}tFGy;E0E6ug_iT7pbrtK}AmYTuk z|BNd<&vV4^ig6y5!MVOWvB(Ni&G$I|I}O0`uW3o!C=;mu4Aot|t12!7rU2_5n)#j^ zC-uoMLGhI@mkk!zFnJVn;EKiMOG9@89ChDtZyj2a@Bc zg}B6jvfU1>GS5Ij?m|B!*nIkmx9j<)Qu$B-u(<`)a^LrpCR{$5IL-y#sS3YPO02K+&g{ijaM(gFQen*ExCDu0IJm$FJ%8b zI?Q*d=O=dbY{Jeg2-5jZp(Njo`!l2H%NIT^ox}^$3$>66Mx%v6p%lb=Q}5TWALad> zBW20mV+Pzhnf`RAM$rc?hfHgP*h|skeR_7e{!)Y=bAJE5CwEUu(MsAgZBA9j227Bw zpdUk;s;Oz9Z{b**8J9VrdZb5T>C6otvXa{OXtp;&aqrkk`H-)0v@&{Lo4 z331+3ul4+P4O$v)1p!$ckeQkJ`8SUzHx@3R81>hW6B>KAgclw7S-8T-;c02lq0Ui( zl2uXY=-qyo&-drQ=xx8ACGesLka#`6Rj9)4s}7LcBw~9C{2s-LPyC1atar4v)u&^Y zj+K&aa@zerqSgT48NMTkx1Qgim4ENDS_6DKn)2Dchz>FFQ>X(~Ywv*rTcf|U-<=6e z=|0$$fWIjX?xjp%(P#7Q3tu1PpyDdf=87?Vhi0apA2T7=TUC=g&_$uvp~J!d7Q^9z zbBo@y7H+Vp+-DvbP;87BQK3P_?eLbvXV#o~^WvslxHG>)yivx|&e0rs66JU5-snYO 
z^n6KwPCPi+R>b_Y06cR%cDgLpgbfwv4NzFm54hbn&8^z;$D7YdJyzVGf6+{h9wO?W zLyl3CgMjBkV2URtl{&_y-!F!NI7ukIWc=SjQ=>BS*EVe$v36e#7=eaj|GjY*7JCZg zZtn+Y{qxU1vsg}%9$Z6IvrZ!Kf$|UGN4!cAX)^4Lf$zQ77cUGbZCSLhxw?&a)Qr7f zGWXm}F00-bP}-wFW6+95n&ale#>t_{D~R;K5V#5r=m|7c6nh56JB?-pqBx06w($Vm zR|wJ@`D)MJy;q+;?EoYoj**+6zmYrjsx-0m)2j1SB6Zw4UXUobASV9B`RXmOhCeGq&V&%EOhc()x0>z0GZGG;6;m`>H6Uqy)i=-|R7yRe&Xg z`tDLr&KQq2obQT5Ywt#@gGn2XXxpaEWuS|9^PO^XatvLf9Og|mXvvxD?s9TJNUTB- zDrK(4NAAQDKp?sPsVm2XwM*z(HYH)v-q){Q$xsLxV=;U2;^djPBn4>S2c)knbq+4) z%u`W-X&<*0lfZ&_bi;p(x;*;E+bEtI`dH=qWCu5D^-lfGNB#47`j%#eOogN^a zM43w3JSasN_U9GviX4uVch-+CLc(W0Z(bL|GSn2`Vf{7WFU7h*bDsk-psEm9XUvxN z$Cml($qBb+_>{e0p6@Am_0J@&1E;q}3A(KF2*MEs!qJnrhY=nkT^@2P3o5M)v`^&1 zn8&Sh1ED@QGmPFmbf6b|w1MJPpp9+S=iBkB!LYXRb@S1EhA)*5JlMZM)UvNwT^E_BcrDGk&3mxU(Vhw zc+eL7o@_X6s`iQ0ch0kNiiYPVX6z{_9c~ksRtyc(4w%4zC;*;kYVq?sM~nWJXC9ya z+|^;}4C9?H$Tvl706Zu+GuN?O&jIDzSpDfs0L7>bqS$+Kg3AzwFP_5_DSyE?7T)%3 zp&33O3xSL&5UB!jd(YX8Xq{cSrW} zR(&l%Ic+u+U4&n z;1wE+g|(*mG_wk2b{nbh05`nK9D3-o|G+57H0~qxRUmdDu>JVpgBiVk)Jc0>j3H9c96Wx%+ zG;xZx1oY7F)5nI6Lv8o?uEew@P^L>!>+f5#gjk71{sE=Fgw@ z&JD$8^eb^uU%!3%qD8r8=mBUHP2dfY{&vJWOhsE5Lc=Sit6aUOLSHRA2Y0JQbtPpD zU3ds>s-;5X;M&i@8qu#kWe*LuYk8x1#>3i?>;3;jeDoF83MK3|RyFiK{z18(no_0&Do^@|tahGo0 z)KTIC@^o$1Zcs;L_7-c`UJ!=^%9^R?FYl46(5?F0IEzp_9vw1(0TsdEDCm1tmE>8=*n@dL8>+VoLWATs zb0SaHu^L3Wt4v9`S4*6%;r+c7D>AGJaJakt4n+CC4qtn=sO=W6p+3uBk7vk67n)0d zN?aMdAe~2mS=6c*1O6h%A7;8Gw{a_U3sM#eQgtMr@15#5{7d%c+C9)*)UbX+>r*VgGd8a;J zN$n;|gDdy$O+juMOxL{$`XrcxUL8as_0|2A|CW76qCBDwrI$o&m>u&*e3SRXz1r~{ z0{hfg;>;-jdJ_&N&Il_3`!wO}tW&PzjvXALe2uwxTc!tSL~~?DgU!2_>f-O!Qe?rd zncSR8J6CkT&OYI-vYDH8?_2Dbq^hC6J^!>_XZ?7!ruCCrXu5UWe*FaUUNUo|Zr=Eg z2rCRTj9$Nfr>Fc8dbnHCEsr9AXEKBDSG|9j`tE6j?+40mM)vf0fnLXsHeY+Oi++9w+YXuWm8<@V4ZPHc^)iZ49a`(wt($QCL4tMQi3v^h zO72pWgOhCd+;xcq5>Aoz4mjRH)5b3AJ`y^8{rtF`=}SB6UyR(f%fQorOs+;+f}ZPI z$8R5==Hh^n@FWVWcH{+S5nk7D#*7)vJg_WVWntm9E;&*_mDnE2C_=&XxdNG~_MAQ) 
z>DYG?T5J~Av>n2=F1jG%;6yjo7r-Oix*`~EeadZK-1keY%DQ~rcb7OxOhJv=6%?dQt?oaHD%U;5U&C_zc$w707xS#3N9UAO>QxOaEVgNx zJ54`baV1t|V*O20bhbGi7XJ7nc$*XJeQh<4+KS4zBAvRcP55c|1Hdx=c-n;_4ZTi4x#4?nLS{6$Qyc zzdZ5&Hj4!ttd%eEB7;Qr#I+_Elp5B=jT<)>oUE`#QF0r_A4;z9X2Vu}RB+#Y&3MGy zDxshLnlW?M5Z~IN8Q)*1i~I_9ziHYNpPH&DXqu*J!}%6}PTt}?L3_)=*o9O`;g@X; zTFPKU-pvWdIEb5#z*L1FcsHNq+N)()FdITWwKx)Y1U^Hkc8o-qVD>44M83_{wvkf8 z>EbAh&@m|NfI7s5kP!3hX6_x)wm?QqV{VEl9oZ;m@>@p{%z6cSQ=?6rVE92ITh&o_ zi2whAFV#N8?b`ZBI?sA|am|^$&_ardj{bfzF`Yybrqm&V7Wp>{4k|kiKSu#7VnLaZ zL-TH_d;O3MNr2JXn4FwkVDrDf)CsuaDiGUJbWgY&FJ)ylpdl=*9N4GNW#qL%bhjq6 zt=!d!0mvf5KGOWIJy7L3UM~OWB$7HQ{iFR!7;W9Yy=X?*I#;m#|GVf^WjtB_8j<=E zKvbLMLo-AVFZzKw3l|nEuE*=`XPgp0{U}JFb}g+&HVR_&TUX{xkWa~=&h!z@+8Ok$+v%!NKFqX`Vmpf3$#8X31L+{qcYDTo`&`c#qm-i z>VIwXY5DoXomtliIZ^khDr6%Qy235=;NqHr5)uS9K5pN(Ae|maRzX(;J^&RM&H6{i z{PQVqMiM)rv6FJvtdW;#Qz9a%w_W)Ae9+mPVJsOifJ1o|q7RcXo$!k=N5(c*+sM4$ zBpw3AC7?qQzeyTQ$Bqi2dx3~B9Q0huZR>6cWgCc0gG(oneO$L}Z0bvMT9KcWoDqTp zt|5A`KyJLE75r+-vZcF1y7uIX0B96BAZG^+MI!~K*D-wmIg1ygll@+waTP7Pb2D+45MirL(Vz!PWcnRLvF0-G2IzEAnp;~(Pkc- zrS8U{CZveyn|hXYMr$a4^0x&SsPIo#NH(d6X7OLda_@QRBZ?8bz?n12Y0bsp!k@t4 zf2~3>D=Yt=PNmj}xgHWj>V#DgA+7we8CFtvn-|2NJAb|t%2v39v7FqoVdlyoVtT1= z0OPeojr6F+V#84U^QT4MZK%Zid(fDQW?uQ9Ro}1+B^?WPW(m56gp#f8VerY>ST7X_ zUsOeuhua>)ui`OibZ(}xCo#NzNAj_RbEQQ_F4oPnUB3C^j2~SkQk=U(K4kr8Y@5!( z6znpcL=xu^D#XbrR1GrUaeF%xnKWp7ayh}IpsqUw^7U{$S!|QeeKsvMZGdB$K|z*S zlO@%E!;scDC5ewYAMoO>N<4tmu2no+8Hs&4PKB1 z0yZ0M5-i?M93KuYccMrC+HZan$u@$i=KHH4tG&{8@dXuHr-T@#ej-dtEceO$W_&zX z0HX)FHX5hT8s+tg-ji#JN1&hoj7m_$Or|;1tvZ}EG9ovAR1K0?xY;Zr)bf4MWpn+t zqlwxxJN3|{Heb7MiVDB+O6hw6A)(_wnv)TDV*l6fF~jHS)kNaX;l{cDAos7oLPysn zkL8QRxZsdHZy9ZM%uirgp_mEsigulJZ*m*56YrpY5=irN&rpX|s|E=xOg&xj;Fd;O zzi-xkI};afCdt_(c11-;oBey$6a>i|Z%Uqc6Om0VdNXu*}+RVsn$qF2#W}ql4`2Ui1#~xUai?yb`{uMkUxCl7i-TecY^*^Iby~clPtjU zAvtS@sa2{@-x$a`#6hB#zJA(dzJvpd!~~Kz^lsBRLzZWD_GUUj5c1f24}Jrep+f#G z9341+%5-@QozMMW>}_g=Jhncrv<1Ho^OD_WGI5~)yQA`))%*9;;$r|l22+-pHX>({Fcf|tvUVSrneeT&!&6FD 
zyx;k#*Sf%`y_$woqV&5pbc19*%amoduzieJO2N3q>&73;)(ten{mGgXYHQl*`Jtzb zpcBA5n7@T0-~fz53;3C1y-7%*)0sLW5W_sYcq>Vzfk$fvouGp9LCUA*YlIuV5_HGyp07kSpNbM6;vp)+=LVyjOsWN1t|&L?Y^nH z%QtJiip80~FOX*rW_+cn$ngSV1~Ejo#`Gmw!8k6*|9`*P{vj*gY#qe^%W(q*7L#@@@=+|fkmrHlhp%vA&IZRE-k1CRBf*C~Tme)RL8OG%ToUOMoe1qsjYE| zWAH)ZVfYSWPQq(%ADc0dOqL$Gm%Ra|BEAfxftgs6jxo8B0&B^l{Z-bxKP@esH=S`) z66Glaz~HIS;|ra7lFD3X=eKx~Cm|iVa-|-ph5Oz8`}QdmJYn~{M~@xrZ+tqfIG#fS zO(?Ru$zdPI$gVQLpDL)**~dT>5hL<%)IE8dhgaxn@7DC-uyo3{m#Vco~h1d zS2(xvXHI#yqOLBVP5g|Y-_Mvjw)ESLC>D{Gj5gyOkvsK{JUJ&I%zb|+#g$s zsmJoz>ER*O^ZshnA-CqU?{v>|&iBMYc=^_?=GMyLGGL*>mk(GND#@4&W_VhsX>bvEkGuE6WNRL3=k;h*M7ij*nK0?o&{3 z%}!$qcAX4KD}Dpw@QVU--=w%<;Qicw?lsg{GO`@wd*Xy@R0x8XP(~LRmY2vmax{8U z^*nEPj9ZT$KbFXWD_rgWUeOj`&A-LOXxP~8*s+5RPd#iNcM-b;vcVAO&(zg{&2$YF zL4+5XakK?TN9#*lD!)Kwq?8MBnH<1C$5)t_!6@oYDC;l?bz<1DvCy<|n&k!ZhPMRF zv}?t#1CFtnIddP3j3(Ik3uckFP}J2j5?W+tymJ#G4he{zL+Q)tsGk0-R$aY7;nf)J z1R2WRA!Bb&Emc!Qlbk#{{TzP7tvi?Go=3gTIhPgsOrMG#y6LI*brpxV zL?^y2ppI{#_=GMpAw?!JDKs$P+wAQI`i`*14r0ua6IVWLA3U+*?^DJEZ zy)aN(J;lxpN`7kQqQQGCHy`dsc7fjcoFn?H6_R0%c3xyl418 zb__FzLQo|&29uN@%)#G_)z}qZUr#E=f*nY?{$IegW1lkv@Xd&uS=LoSQjP5lGzF

*^X7T-nzTNwk_1zTpmOLX-sG^J8f2>4w*+-|J?v0FHV)r?tf~(^eeY5u9 zf^I6q)MUIjaq%>-*1Ex#$&K`c5I#gNO>#WOZ}~{Wx`fdXm^yfB4Q$)CZDb{to__D% zlan{OR7KbvJoSbFJgPk5$;|)AKft9)(J1jON6J6kj;hZ``9x!1UbFJv5${p_?ezuK zuVO@|Ejdd$!+Tx$SWiZQAHu{;XmFt!mnGiRjbF>}Th}%JWur=l_Uod55eg-c5CD#A zv$$b!NYNIlN0fE@@kgkJ!Ths`v=Ei&Of29?JX`(l;c=2+!~MsLnft5|eFAox$w$h) z9IH}#y|HUGE1Vsk&rKr#0E!+&ZqP*~o252*@vz;W<6% zo;LI&>-Yt`YtqG@?{|i7EU2Cr*?jnS1@LhW2S8g_Z;J`n0Ul*;gy_72f^<~mU0C>U zyt-4ei&xDaoJkQ39PXKL}A`IH^mXP2Ba_@-h%` z#hcqIxOc=lE`R#2QbNaSBcaEUAWmNqdt?XdPL{$P>bk;8!4Yw7B5Z5hTa@hrS7fuZ zr5Rpbe(cPNWv_m^e8{-E$F%zAy3G0wEOs1Hd+#v1d}MZ^sfxD7w@x1iohWioO$||h zci(+sA*(mZwt`FngOv|? z2}6Br+d&D9fox6PwN2h;Q2gAIUj@6ip94s!hx-HFU$*g=m)EtV&<8W}xE zdgfbN*M);4T>Cg|_2&kRj=RL_5cpix7@0kK!7H3OTO%SW?#)BXhxj0nm7M8$ zju$K$(b9Z!{2jY_)*{v%4T&-n<<+Zkb;~@@&wQswc+SODpXza#Ugbo%lk7YKv?e?Z zm@pTe5tKn78LKVN&qNM^!Ns3mS)`iyDvee?1ZABKeXH6-zn+Ys{4X}-hzx1jhd zBnoxoaFqM{-(EgMYJ_7z-dS*Gkc!>FGYn&hG*HK?HIi(FkXc;;b?C4xFow#AC~2dz zjjKPuvgV|^3U;lvG&sn9B~CN2ghR-h|84fQB?iKADQFZ(RDE>Fy_OE0soz~vZip&* zmfS2vxOx;A;$J$u>QnT8MQ2dNlD<|v3(y16M$6Hs zjAyhpFM#tMt)MsLkG+>*-cw+I4?*V;IkOj7It2G=4_c{ly5>m;?s~&G795?fMZxb= zeu^DxhNT=a=#e-X{wROqa@kF8f1!I{!UTY@yAw>iqXoL66l9A90aN>#i8@Nk3UH-- z&=~%9s75{3of!-}>(Jg3UV`dwGr18qWd`n@L_4~@4&3A<$Pqp%UuXCD9Q}cBkCIm_ zy7;nR-9It4V=hf{{ERckq;GVrgMnD^q00PIwUHDQ?gjCBo=sADV)`;uo-7xQW#?u} zDdhDQC>`tkwz}UF*k(Bos;WZ6m>{ zEMmLKWdiV}3Qr)-@z=w>Ct1WMON;??(P+W{-C$Nq8?wH>;eOzdp+5io3YNZ}VRiD` zo3!tChZP66DQ95kFWguzR5s{)w$~91a$s0)S*kHff7@mx+7co~kyp2zh27nV#c)K&q79eKvRoHH!Ymng^uXzY+^zb+u4A)K~N zrjNIYM^Q^eBP0QtftAd0&FZjUJ}JlbTUiL(9#I#ZV0trb&mjx|Tm##(SpA|CWlve^ z#@QoQ(7?0KRCQLTcq>E9(x6iJ4Dbj6jJga6!yAsbjccrwlF;_A`ti97f)6n`iT8*x zR){j|D^Qun)amfV?V;uh(D2${))5=L|6M7=XNN;4Pf=w zWd^3@99s|0F71SZNnQmgo2s~xL9RhE)kC!p1ZV*0WFx;2%55gIJODraGc!j>auLsQ zY^UNb`i6Nhq%wAdwK(mQy0$!!d|f$6Zi)~_bWosDFd&8+l(7=W!YrqTY}GgmQx}Syp3AmR!Q6u;n?hlfOYy3IzW0Gk@#FW4H9@C1%oNF` zNQPFyG>Z^g+;iXz+2|ZQ_gGecBt*wa*Xu20?E)O;KuX+BakG=?1*BBlUg<}NM7b9x 
z5)nQWaJZ{r+U!ncNQ=EBZ>ZCv=@_jj<#q;T!#NKbb|xn+rWySYcXKNik~pU&43vWe zoH1qMtYG7vH$EXD5y}d{K%7Am)q}XQW50fONC;dn4H^t-s~P_Oh15m%dg3DF2U<<- zyqF&(qn6!3h;=06%o1LY85Dx76Jf zWEk}FrK@}-L?Yh_Q0k;SO`{uc+Sy4X+==gfnL9HsY?POQsi_W+e?6=xb2hgimJ%N+ zIEJD2IZmbiyKJjrS_McHmm7}UOEOK1D36QXm(};f@je_sPF8L%le)Em(`0e8_2P)9 zQcWc5fYtzEO9!exNzmCh!qObPi{$v7`AXm;+9K!S}6!V6xY zoDn3NElO6r6c?7d!9h*3#CObS3*l~QTSyZbi~U^jj1xPTvZ?b#OFo_bQso`xCbmvd zw2^o`mo24XCSeWA>-vu6Esm2OwOFct(Q5#(ECa3i_tB)c#m_oFV0iW4{U}^cT=Ntk z2v>s8QTbWh5T1Gc@%1BT?d=4+ZeT|etEy`EQdI%ZE9QF$gV9*tKT$o>2?bS51ND_D z)$Bc)-%buz7K?-7Z!cAwd}uvFo@G^_luvG=FyUN7UE5^HWfwemTSXc^r=#IK9T!!M+F&Em z*ao$rV}WsTT_k=*YBY(S0`*L)d{oxi{}RX9CwRHJ6s_OsFyq2vwSnO-L(YC8g13Zj z(mz}?v$R`uzLwc+kP7|}8((a?6D{RAJ^Q`6eP!EMb@R#Zq`w~#x**WH;6VW)j}%+! zCOKHGw{ET|9I?^3dk9z;hj~cPWu>L1kZzOR*5{yIm#fg&cw(=RN9WFo>hCI)!3p4? z#=KrXVtn+_baL6t?2t`sC+O)Lik^d&@R3kF>~O8m&m?@dm7Uy;clIjbD+2w6DxMA2 zO1_CX`z!ngR#+EQSdJ6NR{hCkOGSOH7-)@!xsj`=^=iy5OdUIG^JeXz{1bO~LkasK zeI$O^>2&TdB0Qf<0wjmEy!C`mZjaF!ikwqeI2jA`eZv5KTWHW+q^gsXrWmmgTb^rD zr?1JEi*iLCF(n^4iB$aZ3r{bGBfQE6q2Y)@?Aulm$yx#rk0|ojAgpIdBIwIYZwXJMB<<1F+{lU#E3?mKkP&AIl1saV z?>`I2m+`Z9jU)y^OWWp#@54DD0X86fhoIVNCnCu1b2VJ%8*>yPv3S9Nuv&XL*JVst zYp%6q*=E1&jhR%E`O z%+Ev2fZ9BiHfy2Bd8{q#&z~Q{O@Da%h*ID0AN54#P3v~`+O_!G8?UkcqaZw5#C7t{ z&+uNusMrpQTH|Gz%yyMv6hn%|m!I)kUq(hi68zLR0|33iDp1sF0qu^aa_hfmmIGbW zNcPTcdeN}ooHq+PL2on`jDRmKgzK?6zUIT~F?5R@!|q?1IlT<$vf&&=A>8kqI3RTq zn1o75DW@USd&<3JwOMFWL{+`S|VJa$2Jl6e!o`9USf*!shD)fOx0t_Up6p@XGN8FnWbE$@A36JzNpcFJgk_ z=Zp@UFwxg}RuRvF@>Z%@z*pO``uT$@2Nn?2ETG91fNX8&AacAf!LYn=l><~;t6NOx zd`%cAi2;Dc>fEMM!nFWQuOcN|d~D18>5lMGGQv)1J)UCrR!y+j*OLMg>;Wl&ss*iH z`{VOm#+ZrxzDMAV)x-KEG9`wevjJ!I9I|9gr*l{#mO)0W13%PrStg8JLw=vgRmod( zrgd<xmCHuj3@Bto{ z1P6)WMt8&)405lWa=DR3OS=!zlHvpc5>9}aN9FxH3(0xvRQ`ji!cT+Ti-kibupHpP z1wl&off7nQF;;C==5%zbc>lO9&Up!Xu`fD+V)mZz6(9T{QWQ)67YJlxp+|WVL}Vk` z6G&J;Qr^0ISO0x$YnpvZZ4R{|2Yq6==TAF2O)Bn>e?ROeS2738z3&4G6QnyC+mQuj ztA5dVMRiq`_>XLJJKAm{r&gYwZSEx**GWSlXDOJL&cncrNh9NWp&5(5bKi1!SQqt1 
zjn*L-yO-9@pOKS8AQ@mx;?B0%|3s3ZfYI86x{HeC*ui2x*Z_yv+A#|jC<~oai_D{Q zJNzP>F=Cgc?x71Dn1K{emJo|nj~t#26eDToRTOS+ZWQPoFbNN)tdzblfNV6-cv>GffDn!q#ZZM@48?2UfBdLodQSSb<(o7 zt;o3jj!BSQDH7~zUjROeWG}(Pdr!K~c;{nymU|T2d@G4}gRvY9*kV0%5jCEpWsDO{ z*e=3jq&TItvD6*=L2xk^&}d9alCSdhgBidxHyS>ca&Pxz|2+oxq8?5y+Ka` zx7)PPv!LPhmKWdZ5_w}0Meo?t{sd86jDE+45$1lSg<-?tVd)dp))DL|8mM@Oq#XrI zE1VeM!PMOjB6bmh%SOG9`C5csiAA3ov~dd-(ztT}g>;>;$G`~jfuqP^i#3(!{#B_& zLf3o=c^XY`u|Tg;5Rm6nA*INUVkGCoUv8Fo)e8N-Rs(}}iqr%z0Hk|gGe z024XFVm;a^^R2fu{NqqmQNLGLTkB>& z<9~IchN7zcPg4_ZE7GFarR?1)Rn;6P0XZv44jiB};P?1<3u5c>G$fWzAWw>(xUjiP zXz>E6)Af-uQ!Xe!D5USosBcIS2TAm7w0u;H*a^}B=ZG=_%Z>PV+$T+(DCH+UfXE`U z_)^2B}LS+(Ysi;oYJ^LSejRuArCTAskc=UdcRYZC0N^Z7D|?9m_sX z(G8bHY9ga$@^?|=GDR(6@_DL40Nd$9^R-wwQ;GXy?f3F*vf&jow%8uy*g#fM@ulu{ z=R(NZ<}y1IK@D^@Lh;+uiX%6cz6=m7yWdQFjXA_KbflZSgbQaX<{(Q}UF|D8=t4n5oXihvh zyeT45GOj|8`Z5Ux4I=+TJCrsiEoz~O_xPtWdpNt#ar=aAIcawJK5jjJaho)HZSA&2 z@H1C2$0vn*{_!?{Non^Z|7ih6FFla?V=*mAmnY(u<>#R&9TipVuQ6!(>n2^t@A6~O zUP7P*ilQ8?$f>5L7qP?l(~E{&YxN1J5+WHN8OHG(POPQeW2x_!FO{S}5=ufBkJWl` z&?fU##)qBXrk%{R>0_T&+EO}mhPux3=h?^tAl&}WwJLg*W_k_i_xqpnnHBrirRuN* z`D&OEwsY~e2f&l`u$Uufynh#w;=K6AsHz10;l52nO`Yqe6824 z0YwwPzu5mdp>bi4vbS$pcJG)NekhC$jXE%N9(|1nS_p^I0X{iFQm{yi*gp7s3*Xf8 zGv9hd?X79^wMTii&+0AO)or!SN3WeIT|@%8R-@I_)i`6*aO?3i&-Io4K}pz&b{?o=@{QV7 zxu{^;0Ka=q zzw*j)%kg)uH6z2)o$AL%z{xA<6@#4N zf?8#CHJ=*PvB<0(?`I(D1S)rj1!uK9y=vyI&zM#C*EfT&+sk%+K8YTmeW2Sa{VBFf zRs76^D_OFyel6?Mzo156|M52-nF7&xOxC#C->59400T~SJY3DdwH6Z6MBO^u#l`N$ zXRb2zPiD->pc2bV`757BTUGIlD_5sDuifZW5D}?5&?|+jNY$@ ziOT)*aj8!(c#z^3Qr)dfmpNc#LZNUTA^TPbAK=)EIQtGqlfk_ow+?P@Pki|ur(=%& zM_0q*L@pEjiw=^K%8VviM)-nKicJycQU{2U$uVP^N^m9BLqOV3Y9GOzI7~LO-2qX= z=FG}7Fxtr@0+Ldss1yBr-ITjlNJoe&XZf6US0P9^tV~8t+=Z`P1@{e10XbO?~woi5G@)F8+js~j|6@n|KyZt~grHEUzC~=uj zh8sHd0H=N}w6s(r-p@3?mDV&o@z$36T7S$syF`V8S)K=9Lk1W^-Wu%iEnet1e4Mql zIsdW0>E==aFO}dD-L>G!of|M>m}M5eOHTNl@pBRp9V99ShK5cEPj)DKWL(|$Rm^dp zAj6uX`}Tt;$s$nmyY0eizLJ|e)}wsQ=a<1En?v$old{^&OENMb82>W6k%qxxa~z)kN8v6Q_r708^nKU 
z<=&z9Y(GUuQ4!RBFKG^bZp<;!Kp~=Bk*~vWbf#j>;^=Ik=)>%)IM1P86_y1_Qzrot z2jMsfA^=TzoDi1PCfcB2Zcf7;y8Y&FCh=L5-bq9w`mp9QB32=S+Ai(y`p!p=dY-R~ zOQVfGp~!G+n`lQ8B1*Xjqxmt>^yVVe(~*C@h^nuJnjTz`;!9UUS)QGRa&-Z2U%wp|Ig~; z+UW~E54HKa4|g;Ug;vr{YVi%Uxf5Q{^hJZ}3CaawXK3VHu+T5v?eljLLPWFFb*7)u zFoPVOPI_d^oKIJ zumHu(0(`TI?8S4H_W))WpXM^pb@Ad}z!?cAzn&iPWuSNYt%$gvu#P>+xFihvh3<=3FktT7wRq;%Ui8|K*LRvg?70x)gC zu*BEw8cjtO-r|TI-rqk)0n7C8j5>dzI?g=|oSf!si&=47z*gF7{VrWbCtQqeejxE^ zQs?w3ZzftW@XE&hZ>R=3dPJ z=zrdmy?sQq2Yf>GoU4Ikbj`XmrO1po&wUw0=_%$jNpJy=_;&Br!4ba3oH|C}KaGby zHHqV&%mpLYnK7cepYPo>wd1E(bx3wjqAfd8;PgoO24jw!Em~czmsr`*e%PeEy2%iI zP1>cu?0_&|oKmp9c6Ved3Jti-9OLe_P3c7z0dHeU)!i%BsA7zkVBNPT5x< zVMyJ2`qC0a)cRcPusH30qJugMj^h6OAHCRGx6dqgj$6rL{hw2k!%?9i3~$hy%AP_E zV1MlH^90$PAHbi%SY&bd2o*_$ z1Ajw@>sa=8+032B2o=nMTEx?zy`&=m;hP61J4yM+gpYriNTBe8@L+$*Sr<~h^Xs|k zkAD1^#Nj7@Px@9onw%>=94_0B`+0HI9U=`Kl&Q8wA#0A!^)(+Dai)XqjtoD;QspG`n$UpvKYJmN zRQ|{@)%+s-vRWX5^%PPM0eM_Oz|-;u3#TJiEPk5dvbDB26cm$Jg!R7+V)S+~R|8)* zX4$Jet9RgCRwV`*H=W2QZ`*dSnak=iW5?E4fDx-Grkz{Xg(H4Rb)^Rw<<+7h9bR2X z^PJ?1?{;NOv7ciRIh1CXW5PVnDl#JH_}XI}Sc`|aTIHQ~o_Aelg~*pjd+C4d*s-}k z!QZrtyBm2Px1JhGo;+knNFT__4I=ZAsVsaHp#c@oz$QjfFlcWvUwvS27wz{>7gnv~ ze2AtZlg=q^(c{Dw=M!%kIiz{K$y+vYYJzRtHP4GawHFUux}#yAXx~Vfg{SY!gWEWF zxWTrMjg377)nrwg2D_=4eRJR7kTi~(9cODPO;i+>AD%kDNuwyBgFVDuB6&}#QvkYH zJX9GAJ=VOsT)TN?RliqneJ>nX6Lq%wf{)M1%%~Z8#m#L}drdC-;xE^6Ej8KC4>a3U zafY8aL_utED{K%*iKirEaB{+QlPXkh(~dw^U{f+)Ha8X+MDe{kFLWD-*>k#-fFdHw z67q~A_rTyM+9%Y*O~L7D_Rp}`2~oayY88hJ=C&KMvI(4e7L}WxzvS*Kam{gO5~@f;seY`-;KAn#ij8jhM0OGDPjBDawNl3kPY)A=Sq)dZioz_&>_3Bc2e=PhGM=;Y_}-K<7*oAPSr;mpXd`yg?r zR7I;O=DN6idUL1Kpdmwg93L~F0sm}tE4@Oll%^yqb}umY9|@A+_jA_jS*^s2joLKi zLfsrzsJ9-exAqTdErHL|rtOOU_4tG;`#1A_$E>N@eX4Q<%TL_xQYPoywXy2RO}3Lc{%sPP6EcHCc%|~(yHgO z-5kN20QS^4(j?#k<;rM0XaXQp;?LyYBrzy7`f>Wa`^n{!fHEEggN4+^1P-*8Y7w!Ys4(X_P91m1WxGv= z`2J8IPu^qD6Y>cOQ6t4%fWv_IN6MC;Hqp{<_4OY@GFzg6lDSi)D5&e%57}vb{$2j< zZ`TbRhDmi25E>eaYFGL5|6kQ;@`vp8Nlx3Kh!bWx2j*%UK~7U 
zuPMq#K04U6PO<9$7BH+&xnozn%^yvbpk3{lO}az;np4@QD$VWRsYg@}?cQtR7!jHU zj!9lK;MI@W2E?*a>iR3o&h*I}Qw_M22}k2@KY0Uf<;?MPe=xcuF+EAF22RE-PBZNr zj5~_Gy|F_2v8C0#+-z=83zmXE1uD+cO%ksr@uEFPKtvEHb58&Ma_TL!ckt~kGokyU zDw8v+tWG{AZa_@WcTUVPN0}YKRu-rss<{ykthumqV(yW(Bz|pqDaZ_*7&dj`>#kOj z=gu};>GnlU&q7x7pe2XI_Jns;@Hb)Lm<&JbWT?c$;3qnAJB{kd>3xlD|BV`EL)TP} z2Zx>rKtGnR_WaqijwqkHNJ@A1DIR%3Ty3S|wMok=XM zC^sY6`@Vz%&?&c&&kOJ*ZmE!0Pbzf|E!s15I2)G6i!!nx<2}1ZOu&7ZnQ$lGrW5MZ zu54~)lfr58)axmVaP8IY>1Yggu=g~jtzf4{b5Bc$#3|MoJ+kQT_lxzVRVz zJ)ufXF6r8@zd7}IS6w?|ivzNiEC4uX8Fx^3=}=7#GqJg5v~WbMNoibS>A2NO9GUgg z)xWmc;ZoAl6d?y~cNWTw960`=t3O+~J(^@V#C+J{z;x%u+pfFfORyQ5Tj5dDfLcMJ zKzfmI(&J5j2{x&PqTZzq0ghVVb;~V;^8q|lc@#1-rRe^opUaWxi*BHKhvA|a?SHCi zu?+UKT358biHr$eKrO(o@J)D~T*_CtrWu3Z6H0s zhWjL-vfw*8?Ahp-*A*ohhqhOr_Qp?R3MM7G@1a~wCD{lL= zf|Dsy*amIdwB0oDGMP~u$xEs;_WcJNhdb$hx7HB#+IQ6S&zr=KXK##Ak)-nQ5>$+T!Q_X zqCOF&9iq1XVF(%xYSTs2?N101;d5HAE{51TLF`gon&dN+WLGf8#8w5S0#$}T;2z@g zdaTeLreUK(f`S;uoa8j#Vx0Y*|BH3;)Ax;3%6wVkML|m8ilewj-2UXBf>wxqzI$r% z!-u2PRJgxJ7t?MIZm=QjMM?a5sh*@(M}p1Evd!&dfM0?ya7jQ25f%p^;1vvxtVP?D z{%-jVMCwC55U~R+%m|D>`CX7at=KtA>fz$*?8P?Zb){_e&)Wdcz)3uc`C<|ovX+Kf zegSRSdJUNbA$sXZTlg01uTH7ZH%75aExkvbvfPWwXw7KslrGzl%LvTJP$o`}v(o;b zr`cU8T#wBKa{!ZBhqMNLp(^_KYFu|M`LF-;D&v$wo)TR3$11*2=BS6hTTAy~^816y z#dTDd9c|iVgx5!BoN2~i*vS(Y*E#@`DpPLwmEnyg{`}2FL&S0HlI;e9RcpG*5Qf-3 zQrj|GgCj7qh{cC#u@O{u+)B(^9yu|#Jn3Q(PZ5&|6USXtGP&)m2n0IZ3)n&al>^&b z@eZ&F9M4}v#hpx~UNa`n#La^dWM6IcpP2-lU2d!%S^$)!QS&L1_a5CQP33|7K$LVP+owx z2q@@d`3r_Q2Cr!A>s_bvCOyYuwlAjTo*zPm;`>X8%;djcGG&ilW@nk-cdt(_S_sWUVoU$ z{6Bw{kp8k?RRx6Bd8*1U=*wl;pkc!~l4pR)6S6UnEFZZZuOELs-#5^5IUAP&l=4*_ zJ1R#>MmPX-92mM<-?K^n99H!%6So%r8k|5f^$ptnHtClSii)gsCml(p+n{yOnf{G8 zVRFi8LNH(+c{Vb8 z^VB=+XAv68Vl2C#oLu?!40BX?oKvg4o{G*NFj$fVK{^kKQUI%ifC+42VbxPJYC>)w zWQe8sQY0Y&H%E41IRX(X&ird%wp48BTHRLc0RTt|;ht?dZHq!R0JAcdeI&K=IMoTFz!Ysf5od6bR=Q)44D+f`BR+5x`%<#F z5=wrJ0A!&!YA3V#0Mtc7QP+eT?wAfDFubP1u92P6xDy`5(cQ!e5u#(tf_2EKkYqaF zsZD=VlYYBDj6jn%eH&0mZR>6HHW~nIHm5!csc=}riLhJ9pF01AGEyCD@rLwh6P6jy 
z7jyo>63H$Rv)8z8+AUkkASofEjh!y2k++%9>1`@diU>j|sUI*)l;u)SbU~y!9y&DA zZ92*7NNOaXkxDc=OSzF;EvLOX-1xbO7&w+sPJLN>`VF&+hY0Y%8955}CuLtmkma!B zm?e?k+Dr_NUjp*P)}=nbg8qpnP^tc)I{gBYk-FizbpDxgoQ=fT`~b==q4v(E=UKz zVL47bWjZ8x@NeNWn#xnBxKq=6;`mav*5}sae@6!MzD#bh1)ND}IE ztYM-=-my(Hau&kD0!_<}GOmqLN?7nM1(sQpW6A!PPtj*N?<6t>pQ?P#*-(XWWW z43_@df)rpT;-PCYx4zCo=aP?xP`8hgM~~;R=Snp#q+~oS?vwQ{McZyt3$_+PJXe+^ zJ;9&rMzVT1Gns#HJ$$M z{wz|^oN?>xyeF>L2sd*upu?3#{BqgqsA47yNW43v%Uhir*Nax6A#NxOL_Q+UNhrDC z;N*0mK+~`%U*vbjf`CL>fGaKKMoQ>3?lngP^8_RnL1uF5pHY=TR+t5P_ceT!9YI14 zuHb*uR$?{f5uYXqhsI93N`tQ0%ga>4V^DSm=W9kDKp`z42#@7W#k4aVml8cJVbRz_ zig{BN z*MKCmG$C!4VjmT2Bs>2qA1pS^%2Xf}Nr5QnQ!#}PaS~D+oz|E#gSff8Y-PMe=FjR8 zpn8^DRt8o6rv=a*dyJZxR325>PLVQF@oYEUmctem8xLTOs6=ti^Kg69&59!-8snfi zie%`moCFHVDiYBzwK7=|Qq~7+In_)BI` z!(c!ArJ`|9#vOyzWKW!H71(X?;5H-v`B-11kLc>Hb;rIF(+cdp3@qZ{zl_JC$s&U- zSmn^Ui7YRNFZ_|u*J|)0lgPhB0*BE8&yEW>6c5gzV)$xVGZ@M_Qa8|jS&UPz2R3NW z@%D2b78ko9q{dZaxFXugbrJq3o=5M<=tk z=g{3Uyl~h3tNlG`Rp3i<;9>tqB0s^1+z=Tp&b=vuW#~uSv3&=TDPmw?a0%EiWY}V& z)i-dC#eiBNLsl*UZB6w2hDenf=nP!684v^4Z0FA}vejh<8Xc z_9AHKzw^*=$(U0AOqIA{8JW$$XwJPPgjc~y`(@M(L{X}Uu3x4$@WsnLE+3G678GPJ zzBf%x6$KAy0|p0L-m7GJqfH$JTM`%cb&433rOiN*Bt;`<=rf19tR@R>jo;=W6i9{b z%A(R8ahvI+=T6upc-=?ZNchK+We%_xv`lD1L;(5h-8-Y{#^#;;FEKA#riP18dt05`TGJ+hp1{5t zTTFNtF$jU$S?g|$9}ggR>>=gBlQQeo_}*+ zuo#Efep~-F7270`<`}(#(_4nS$+Ei+lEjM><0UmoP|p78wU6bNKhn*4NyXy__~9ewEQ* z+ACMpH`mh9A&DW=EWlL1ssrW71+TA-Il_!SS5ir4TX5;`kg;U*hZ>H;SjCF`FAB|m z{=UuIEJLv^$O>bc*okW%Be`azhatc0Z3+vWVR>l}iqT8R$ zOBf%qmciO2x<{IZ#EOrVVmiU)c3Y@ehT!;7j>KPCh$+8QLr})3PdPeGg{>)$MK+iH7y$h&+D;jnl@Mbo-K^-YIJ$r>2&|E* ztz1dJ(1pkiOLoWq{MbReHOwq^*T?IJ1P9AZRu(C-JS!nX#eJ36v0JxaXz|93(f)G2 z1Uo_$OX{re&*9L1ojmt7{=jz46`uy8tJ&O&+LafSda>w`jR$>#I_Fbd%p}x-4q*c? 
z4L_B-&xhE(lyE}0Cb+ygN^MCKGp+2`ZGHOnOS+w%#bUEhnnJXjt_T8VB2fb2)~@>B zAL|#?+10}%^!Dw^Ynkdymbrygz~S*~q^67{%|13P>a;|rG5Ylx9t1Pcj)J9@HwJ0O zEuu0V%SHO%!jJXj0v(4lTMyVoY>hH@x0$A*BSZ}&mducT2jB4eKkc1)T+V6x_b10mi!Bvb z(xinc)$=}M?%#7iujl#a`Rn=P>E#bm-|PE5zvp=@pW}0U4rDhO%uB$C87}=vzgQL0 z*vk)}y`G95Z^gZj@2b>`q8pd!&RFT^_0-UWK$`FlbSIg=7o&;^D(-Wj2F3kgCq>o^ z;S+wdXWuN`nw&gjNQh9%>&YCaoUNsuBHk)ZnaZ(=-K)M5Qqw`PW9<|$F02dUHjOC+%FHu1FCIrIo#8XUMQIS4qP| zWo(FGEK+nIx+>k62i@7-`Th|)Z9;&|V|0MG-K%j$dIYtY3Bjv+LF10-KpL<4sdKrf zO;!Ic^=5(*F(nHC>z@!gUXTH^$Ib2_$+Vf)VAAw?awZ z<|WKUb742qiNs4}17sfbIW0@T`HQ|- z&U2`K|AcFBupiLbNF2~xwD^{88CZ<-8G>05Vf|mc?c(h;xGxNXZ#__?+~fdo%=QWv zlhXc{O0IDtrEvyR4Xj`4d`H_8m6BTP2a2V1KK1C2bUEzsQnqvo@80s!%{pZMoEw+7 z&W$cTU07UbyD+9yOFco$ZS?ZI(o3~#JEkrf5a@i@dbUzT`k9Sp@4_lp$JUyuhx$G9 zt9LpyK=V5l^}p@wc)xG>TKzMB8$Eq+zb9$+F9yGLJKoV{P-kE;c;~$vof3)Z0)^~e zgO{^qXn1k>01ZUt-2waFVZ4vt?Y_$@3HH zcUYI)>tF%?Sfr4>N0A7*`ITMH(=d-tO3pYOm5g9PZ9gC0^l5KF44 zE>g}?YyIH7_qt;{?dPlgm%(Q^IE(Dbr6e{0WbxU$2!rig_jeXd+vMRr$t5EF%D%|= ztf#3`({Odu-ZSA{dcW8|z6EhPIiA{*oHpccCVQ$C3JbZIg?^9<>F%;VdkoP73m5P* z&aaSltDuLmAT+oHqbK74@c14Klp(zXaM|Cx|&Gnbw=IX_5`~1Pn zUiEeP4#qj*f{73$24alE4WS`@(zw=m@nRWp6$x{c`p7ZyM#|fs^|B* zlRXO>m3{L5pwi8vp#}mXlr?u67+mngA^Kh5 zC#A?suo`;hIDCO)?q$+8jlAUHWujKF))Ys(NP<*Nec?i%=TB&6kyKlFG*phsppq{ z`n>vml)lfl1QWWqB=6+Zzt6R{f%_t!*JhSrH<}n~>*D2IEaAEgP$*7Jqxt5KK`bZ;^Kp zroRNc(8WxGL=tvjk>$u#o~gS`9(lQ4p5*86Z_ZpdkrnWwT_|^?S2RfTJ?~Da%)Q=h z$A7uzbb5BO!5_|iUs=8FE(m0P>ntH21zE_P*G*m+2M3E0^Xf2>Dl3*fiFCTQeGG3~ z7TU{HgYx%b61Pe`r$zZ;6050f>NObnB^3Lr z8yg$7a3^@)-Ivy2k11mdVx5JzTzYrW422#baQQRb|t(apQ6sD;kb!(9qXUQ3Y&H^X)8X z^4T8b#cEHLE@HP327 zn_Zvk)*h99DevO-FX{ou@!W{Q;KNCol_v>zG-l4MdVsBQUwlOCj`N-0{gfLy zb!}Un663)xUIos0yV<24|3#FYNLqiC`GAX>`}Xad2;gC*w3fa;=_!h}Gg@0QpziKx zVd|eki)ewApgEejK}?}52mf^p4`P!gVN0Bz)QC_jK(@Z?|{?|KFRa7jB5tolF zCkal*vO5zhFq_ai>->$7$Q%osyMOu9Ugoz69nMj0AEc;t`vLN7NdPTLY=T>iOj^kz(y44FClhxHyl+sSOc(apgYVyVb1=+C# z;?&%`^J&{p=86->k2eH(eh{GhWSdN^+QgRG-OaPJ(aa)*_~EtjkWTu>G}3o%ogd^y 
zu2gf^e8}GQOS>(fZx`5r(-B=Qt+S8X9D)vUm(V@&jfN<)%x~9mDP1THMl{oieEhti zDl6%2oky@q3Gop*5!x@g;#hF7ijh8UBRunl zEHcM3;=h+>p%mCh{-HW+q9~CAKI?T}t8Z*fqq~t#T!kGOS0N*h-N=3@MaVN1+{%p1 z&6G59T}pte*i;wF$hU(-LYzGOr!vsloc*Z%>-9cK z(ZNP$W>Vs;=m^*BJ94$q8E&=y6k3vzBUp#dg$Vn^kg3?ZVXPg&XWaU<%a+|8Io(OZ z0id?x{qD~A0}7ipI<{G@jP+A{U5D(Towek0`6c*bw_FR-@|xIG7&+XoJ4(xAeU={8 zSACst@t`=Q`8Jvxsgz{YAbknpwkZop~bge3;~GOoh|S zMRd8ScD{EAAApL8CEfpCF5-ZpjRoYKjL}m!FsN+m*8s5TcU1Sg@2)y0>>K5JVQx2` zPhD%6%P|V337i-48?t5jH<7`Dlv%3eRr%c7GfJPhvzRD%BG^$V~Cun8$sF?3#rIXiQfZtwW&95q6IS=`bw zj0&8m6OnS8kF8s%db|-2;10>N*#6kJ!rcORJ#vJTUgBQmQ8t0^Y%7+n(CaZ~QRQyU zN1jO%6Zpt!&<%dLgIY^DJmb(uWv-bK`0Hs%l#UI8i+`I|nx{Cl!sM0b@{xgg0{J43 z^2$9(9j9+EPJr36Q6)U{iFW}`KNmpb0SZeGZM~{I7D!j1I=sA_oKpCyuIA(2hp{%J zc+(k_bo&Qb9D3B?Ic?JugSZHpY=Q9owDbK4^E5->IG_~4i)q-GBuDGF#JV~VWlmFI zS9|YfGGo`Ub|xJzV%ME{ykQLt#9M6>8kPH zalP{An(Df`wG_u4YPX}Qy9i5&mWA8?>Je@=YXT?^YEgQg3RJF(hTs7P8RYB_2+-Dv zDEe4Erh9YCps&7~B3vc!ZX7-)Qr{z=v4*!6P8H$V^52W^>fJiph+%ZXJO~f#ATLro zuw#sW*4BaRna|}cOAJ{-uzRPlP>Z7@LY#4I!-=*RP7@RzjLh-5_sP%v-Sk!8XD>7E z&6sMFo`K4&qHba1sw_*1^g z9pMPkUC;^~Q!lrz6pmTA{006aem&;fwcYS3=2I_s$>PP@gtfYSRBieCG;OG;o(7gr z&bUZXaH*n_aD=Re9gqG%)XlNqs^4XIV65VP`*ovJW`)Mb$|R-P17j%;M zctFQ+qM=pw(ApOh8ZBbB_nB%1{T*$|;abj8;l`UZ9wFWorsXi~P<(S)(eq`$7PmjFu!b1`n6B{ch94{sf zX<^KE0>ZFMf@XKT$pFMW#>%R)$q$&FGzDP8qC_)7Am;YB{pL6skul+{;>qp#)g_jB zP1dre56%{`NktWC2@2aq%Hw7en~!vT0uF+$zbli{XP4_czGT6V1ZiaV3YOEu zld1n(jmWE@Fjy<~4tYkjX9>`vMW(kK_}+asaNFi>5n&f|5+235@<1xh*Lx>H1){Q{IG<{{x+w3*EvegLj4z_ zp_9kWZv~rCCt|UvOK){^1I!>aS4(mfV29?f0j`n@exR024&Hwwrfyp9y0$luy@AKI zDU5Z#^~%Z6H#>7|d+kWj31{@^Ee#L4*LWnc=IA80?wkGbntZy&8juQam?0rYkES3Y z>C_DYBAxs4T;*er!Caj#czU|meVC%F~fb1BxD%rG$%(-mY>RP0tP*sD=sH+;yD z?tb zZI&6gY-yO_4j+=e9M-})K&|Jxe7abW-Tf1WT|* zo8G@lqFzWxt?!=&4lOtt7b@;F-A90$yLcoku7``&PR%(10BogF%mN!c+*@=~TrxKN zQ|3RV9NjS2%dM6I9ioRkAL6xqgt)L|ILOAc^nS|Q=0FAbFM_2~h-^N9^p=K+2f+s7 z@|1TJ%13D16S{d&t*P>?*l28GlA#}1!$&8!w6rMNfOhj$I#6U~ej<25zE6%a6E1C& 
z$LOf*;xbg?=89^Zu4-wBlL@M(OpkK%2<~5S=gt}Zp4}AXw`H2KBm|2-$KHNtVBgX{ z!a!3q5>~=SDW;N8Mw^nT4|@@VVMdWic2In<#Nr>u3cHOB4~~l8NrOZw{=RZnBL{uv z*2hwyBw>O4f`ZC`02FY*9xCE@<_tZl_@%^37y(M97Iz+T8q=g(mNkn8*CRjv`kM%d zOS=%Xs~2`LSWkdfL_rp?fdkLue1bIGeHnn7T@z{(eDL52GC+U^x1AEKu|%Rh+PAgW z<|GvDl70x51Al{1#IV!eevreGXQsNm^dXy!XeisjVbLOg;;d6NdKubK8%MDq83UVT zFe@>aM-e+L+v6^;VUf^t06$eT!jnktcQj}VTOhnVD`R)8IwkW-G2D#Qt5@e#xtAofX)C-I4X2xN=6(J=tET)XLa!=v3w;25hrf zOpnvrwN}NJC(=7xN|p*YaBSzhJ4tO!!<ej*BVgmHhVQ00P=xKD___g9m##?=(NkCUQnhNlpnP zp_5vpEXHJb!_%A`v)aYLg)EWw>f7H?b-3f2X(DC&pMw|@mbVFfm2TP5Pm$!Ck*Y9R zP3=A^TCI1+9~Z_AU!Zx<%2JddL|9GWo_%u`Fw~J5Ua5lOPdsvQ(kSt0QQoIVdf7|A zjCxpb?_QaJrp5h`K8rb)c%e9W0`Y9B?45t6142<5))A0!=xTZiPO&fTUrg2D!oqM; z34d8^z>~_j6Q!pR#tTM7lg9oG8g|KM!2Wo}oQ|qSE!y>}sO6Od8!aD_Wxz%g4zDPD znjzO&Vp6pbhkjhZ`bCj?B{7#c`X`WIYfy{o4lP=4>M2%-=kpH}48JnyWx}UFIm&sVlL_x;BAAH4IVACZX^9?sSYSK zSH>V{1wwQ2WkZAAn`+)3@q(9gAbc{tp;@Xdd=jRG&d+q=MxC6Tvb6dow+d5AdMG_5 z7;+(2_6hf=1GyeUt}y{8k?)#0yu(d&i-`ePsW(I1Yy93y{VIz?e0+%$Ts5}Vy?pr! 
zkT+z?g4_|pYQDi81C-C!_ zSzVy#*;-w+(0%XV@O0G}4K@R`EE%_9yuxebTQ67~^rseYsX0^SS_Vo0XTE%Ekc`tg zHD%vXsr1FW?8lVtRA<#zS4U(1)>AUpELx@v*`k;TcXVD~M`38polUOwtgg$~p_;vZ zNg$xM^KCCV?lq}dtQnCzK_`;lD>1tmemb;0Zfp@o6whQRJ z1hSw-&Mu#@+W+bp;V57m!hXGm+3bPr+9T`FB809D2yg4Ctna>%Lo7qK-MDksmeybD1G>4d#oAtL))zT6(Dd)QyJNswJWlyXQMMm1X9IRNAac50N0N9Qgyzk4U1u3*#30ph0O?Lh)g-xOi@A2$Yvc`whxoDaJw%wd2_94Zo$xSyRxWpufE4G}S zU$gRD2}gx06-D1wzOH228Zq|;QJ9(b-QLW6iRQDj4LnzjaBR8x_oc5OF{wWJ=Amn* z1_Mc`?`$?1C+>4jE2XvF6_^AKL_g90V4cc6FWONBI{EdOS#M(9yS0%!0dLA{x(r|- zq{>kK3Crz#lX1__w!y~Q`VtY62i7laljm3Oao-Pq9z4^0)t2-BnzCT&%vSS*5eGx} zG7V7>g9TC_sL?$D0L7U%SfkvWN!C%_GFJf8Pj)shrlI6!&6xtxA9%}tNVm;i-IPvF zWTmucQB5z4;KY-YEFuBSM+&SoBdh+~R3~sY==ij~@_?!B)F$@yQ{Jr5`Ns1ib7SPi zhe`QpbvPbbE&vJ2uOe;(@-Nmdv#l;3eL5EdXGs>6?@iie{j9ET zJRXV!RqoK;9%hp+!dXe8E&*F*>L0%c3Ttxr*vuq2mG)%4%MdtG8Jcy?76JtNo5{dK z_Zn7=Atf-JGL><>&?DWPFX1>1pI25?*zoLN$foE#6gb{k$uQyLbQes;448@1wR_Fa z{!cY~kF|Ro=haXiLk-`2+m#C!rl-)q2V5BFqyEDWKa)ibRbHOGb-veg-<1FxmzjN~ zs(EgCIKf^Evj>KC?7<|vC5Jtwpdn_(BR`-I97k8UO%0JNU$Wyx7dpN*lBn&$@<0wt zgYu;3XHB_DIFwTwR@QN`vDqF~a(3+Aefwann27pfE9z|*w4QjB*~JNc3d{&iO!p1P z_`Ezn>f^P`!UG67PH

Zt?b5`pDE5=O1qn&Km&7?)i4+zDI!}5~w&*;Sp82x5$+A z4?^}l7GrjMO@=ej(#mXi=e>FAL6{dgyyVRaj|cd+b#`{jh+~A@4pxCG+ButdJ&$8B zV6Ur0w#3u(Jj_D$DpiExW~-(Jqa9;JRBf}@^<|yk?9SIqS5g!fy5k;{`L0)^ZEiDo z)`uwW6FU0z0I;bT4JUTC&GWkJ%r8nm>)q-iHXHW7grd(&jyOwV=T@XG@(>GvHSLA`7 zD>X!sO0xTf$e0>vnPY(JUHJ%^^1e;?&XP6Ee0Tc?O8ae(a(Z@R&2vW&iA3!VkLq`J zq+<(H?+h`omn*ii#o@(?N}pyx2Fz@TwU-}hM-(_>8by+5r`Y=;IF_{QqAz_+!iR}s zG+@ok;zu|;fO18YcTB1;k!AsAMzV1A#nYV-;7J9F$t(Z(=4sE3`bFIz6jd(X;F*TZ z%B=??4%x2Azb5;_!>D4pJuq^7@Rnav92y4$%~{0+*o>S#hO=_TsbKGEU+NnYiTd?( ziGw7i-=+@B>b3Oxkw@a+Bujs;>B&0y>$3;e*?o4$h&-0&%KU^$Y`@}=nxA&9Z*O@m zugFZ}Os1~t?)mYnGliWlB}((asS|(epKD~WoNkr-q2)F^cV1!!VAaUMo4@Ml@!*oT zbFr2+<(Zq(5a?-6>?9(t`d8n0+fPp>F`xPHQ|`lEY}4YzfBp)}EmoCGq(yw?YUh0~ z9{$+ZV94hSS_Bb;nqcW^_=!q6^j0k@yrJd3L+Ei1u zAqO_<=Cg?6R$l){&kvgq}zXh)X_){&q*)?g6H4q z`K8T+_@#K2+!3_G#79M4m8*UF;B$UKvR;y?SQ_^p;xrpT4d}W7ti9 zptI!WmT5DA4=C@fhh8cmm*@t7TL>qKRe!$Y?1_ z$h~{ZfMdy%n-ZIe#HK436cx!B-UhI<5Wf%)u|tubWlEuB5%wOF`@38w`nxqVjS_2r zdSJr+GD1E>uc*ftLotyEz95|2&&0q)Ue1CQv4S8 zQ?c0DU6h}1#tEYQ981I?aDsMnK5@)ulKi<=_9Xd|{J@`dcPqX|E`RcQpZ|0D?y!n8 zJWJ^*UZ7qp8__w-uM{I*&V&mQ2LOss57iJ*I8VExcFXQ}@vpjUXdxF*y<{V|ZVFz( z+*z~K!P|QlX!CH6r^~6USFeuUND9yNM6*wYvB-y|I2?gMswgp8j{noA94<;zK+9tt zMF?ED0@{5Fd|_)bbqy>V&?b)R0SU>47o+|bd_ANybF*EC^q3n4Cx1YH@qu$xPhj;P zGV0ZeWCw>q-F3XM6Xr{Z@1?dP(8F2LTS7RQsc3X{)k{` zL*>t}oHs{mTke~Yp6<7C^2E(ybw4ZjJXPghR&%AQj*cZ9C{}6t!93D|SdyS6{Dbrq zTtlm3->}_k?ktl?M+@DMO;G0qNri9XO|Rn}kY@_-r=C4wPJ()N_nNh9HB?n^C2VB> z(!~4^ipFr*4&@K-3CeaKz^=qwfMF!!DA!Z06Rs^JBqWAiiEG~B-Egi^YWP5mW#i^_R8NCUKy^zC)<4Ixafy~XwC$t$eL^s-8v45ypzFdm^@je~`r^Rk?B}w%8_kb(dXXBkrp87_ z%Xs(V(1`e0F zD5W7xMSef_!pZ+I>^;9Hkr*gqgFk)kHjKVM==0zIScf(E{C8!6dVKooF}t7tzD;XP zQg2Co)qi+RDK$TfQ{Cj2GRQ2Mt9#qt>gu`X?j0hPO#n=jLu{eh!^4H)t7)#;ugArm zNAePkI(wS(vv1or^<2L4zxd38bDN&pQ_ioo=wQ^z#qS51W_=Dx;=QR4e7=wWyj8$i zYfWwKbGM7XzLoOpz-?ncUocUb=L~g0EfbMEZLkMJ8@3X4MKn*Jm+Fok`xD`=>yp2j zkN(Rk3dbD-Bz1J`M&en7T1&{oCoSw_uKe$N)c)VEk)Tt!)T6g!X&4`hK(ez(AM-ET z8eg3G9F6?Ok#nih#wm8R$6rpV99l`M8q9+XKq_yQG1nSsKpy^0aT*pilP| 
zWx(P2{Nx6Tx~TVk`udOH;eYYr|Nim+`&a!xMJE6M;(vvW|4*%X^Fgg5`8S_mwZy!% aySGnq@AkU~imk-I8O>iZFZHKie)}J;`?2Bx literal 0 HcmV?d00001 diff --git a/code/docker-compose.yaml b/code/docker-compose.yaml new file mode 100644 index 000000000..f6b2bd443 --- /dev/null +++ b/code/docker-compose.yaml @@ -0,0 +1,962 @@ +# ================================================================== +# WARNING: This file is auto-generated by generate_docker_compose +# Do not modify this file directly. Instead, update the .env.example +# or docker-compose-template.yaml and regenerate this file. +# ================================================================== + +x-shared-env: &shared-api-worker-env + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} + SERVICE_API_URL: ${SERVICE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + APP_WEB_URL: ${APP_WEB_URL:-} + FILES_URL: ${FILES_URL:-} + LOG_LEVEL: ${LOG_LEVEL:-INFO} + LOG_FILE: ${LOG_FILE:-/app/logs/server.log} + LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} + LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} + LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} + LOG_TZ: ${LOG_TZ:-UTC} + DEBUG: ${DEBUG:-false} + FLASK_DEBUG: ${FLASK_DEBUG:-false} + SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} + INIT_PASSWORD: ${INIT_PASSWORD:-} + DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} + CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai} + OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1} + MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} + FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300} + ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60} + REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30} + APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0} + APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200} + DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0} + DIFY_PORT: ${DIFY_PORT:-5001} + SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1} + SERVER_WORKER_CLASS: 
${SERVER_WORKER_CLASS:-gevent} + SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10} + CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-} + GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360} + CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-} + CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false} + CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-} + CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-} + API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10} + API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60} + DB_USERNAME: ${DB_USERNAME:-postgres} + DB_PASSWORD: ${DB_PASSWORD:-difyai123456} + DB_HOST: ${DB_HOST:-db} + DB_PORT: ${DB_PORT:-5432} + DB_DATABASE: ${DB_DATABASE:-dify} + SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30} + SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600} + SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false} + POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100} + POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB} + POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} + POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} + POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} + REDIS_HOST: ${REDIS_HOST:-redis} + REDIS_PORT: ${REDIS_PORT:-6379} + REDIS_USERNAME: ${REDIS_USERNAME:-} + REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456} + REDIS_USE_SSL: ${REDIS_USE_SSL:-false} + REDIS_DB: ${REDIS_DB:-0} + REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false} + REDIS_SENTINELS: ${REDIS_SENTINELS:-} + REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-} + REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-} + REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-} + REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1} + REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false} + REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} + REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-} + CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1} + BROKER_USE_SSL: ${BROKER_USE_SSL:-false} + 
CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false} + CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-} + CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} + WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} + CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} + STORAGE_TYPE: ${STORAGE_TYPE:-opendal} + OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} + OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} + S3_ENDPOINT: ${S3_ENDPOINT:-} + S3_REGION: ${S3_REGION:-us-east-1} + S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} + S3_ACCESS_KEY: ${S3_ACCESS_KEY:-} + S3_SECRET_KEY: ${S3_SECRET_KEY:-} + S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} + AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai} + AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai} + AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container} + AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://.blob.core.windows.net} + GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name} + GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-} + ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name} + ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key} + ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key} + ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com} + ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1} + ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4} + ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path} + TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name} + TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key} + TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} + TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} + TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} + OCI_ENDPOINT: 
${OCI_ENDPOINT:-https://objectstorage.us-ashburn-1.oraclecloud.com} + OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} + OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} + OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key} + OCI_REGION: ${OCI_REGION:-us-ashburn-1} + HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name} + HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key} + HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key} + HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url} + VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name} + VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key} + VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key} + VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url} + VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region} + BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name} + BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key} + BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key} + BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url} + SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name} + SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key} + SUPABASE_URL: ${SUPABASE_URL:-your-server-url} + VECTOR_STORE: ${VECTOR_STORE:-weaviate} + WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080} + WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333} + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20} + QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false} + QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334} + MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530} + MILVUS_TOKEN: ${MILVUS_TOKEN:-} + MILVUS_USER: ${MILVUS_USER:-root} + MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus} + MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False} 
+ MYSCALE_HOST: ${MYSCALE_HOST:-myscale} + MYSCALE_PORT: ${MYSCALE_PORT:-8123} + MYSCALE_USER: ${MYSCALE_USER:-default} + MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-} + MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify} + MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-} + COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server} + COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} + COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} + COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} + COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} + PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector} + PGVECTOR_PORT: ${PGVECTOR_PORT:-5432} + PGVECTOR_USER: ${PGVECTOR_USER:-postgres} + PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} + PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} + PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1} + PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5} + PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs} + PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432} + PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres} + PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456} + PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify} + ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak} + ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk} + ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou} + ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456} + ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount} + ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword} + ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} + ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword} + ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com} + ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} + ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} + ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} + TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} + TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} + TIDB_VECTOR_USER: 
${TIDB_VECTOR_USER:-} + TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-} + TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify} + TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1} + TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify} + TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20} + TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false} + TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334} + TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify} + TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify} + TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1} + TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1} + TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} + TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} + TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} + CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} + CHROMA_PORT: ${CHROMA_PORT:-8000} + CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant} + CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} + CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} + CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} + ORACLE_HOST: ${ORACLE_HOST:-oracle} + ORACLE_PORT: ${ORACLE_PORT:-1521} + ORACLE_USER: ${ORACLE_USER:-dify} + ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify} + ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1} + RELYT_HOST: ${RELYT_HOST:-db} + RELYT_PORT: ${RELYT_PORT:-5432} + RELYT_USER: ${RELYT_USER:-postgres} + RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} + RELYT_DATABASE: ${RELYT_DATABASE:-postgres} + OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} + OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} + OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} + OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin} + OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true} + TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1} + TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify} + TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30} + TENCENT_VECTOR_DB_USERNAME: 
${TENCENT_VECTOR_DB_USERNAME:-dify} + TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} + TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} + TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} + ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} + ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} + ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} + ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + KIBANA_PORT: ${KIBANA_PORT:-5601} + BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} + BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} + BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} + BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify} + BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} + BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} + BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} + VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak} + VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk} + VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} + VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} + VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} + VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30} + VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30} + LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070} + LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm} + LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm} + OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase} + OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} + OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} + OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} + OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} + 
UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} + UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} + UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} + ETL_TYPE: ${ETL_TYPE:-dify} + UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} + UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-} + SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true} + PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} + CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024} + MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64} + UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} + UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} + UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} + SENTRY_DSN: ${SENTRY_DSN:-} + API_SENTRY_DSN: ${API_SENTRY_DSN:-} + API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public} + NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-} + NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-} + NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-} + MAIL_TYPE: ${MAIL_TYPE:-resend} + MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-} + RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com} + RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key} + SMTP_SERVER: ${SMTP_SERVER:-} + SMTP_PORT: ${SMTP_PORT:-465} + SMTP_USERNAME: ${SMTP_USERNAME:-} + SMTP_PASSWORD: ${SMTP_PASSWORD:-} + SMTP_USE_TLS: ${SMTP_USE_TLS:-true} + SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} + INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72} + RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5} + CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194} + CODE_EXECUTION_API_KEY: 
${CODE_EXECUTION_API_KEY:-dify-sandbox} + CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807} + CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808} + CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5} + CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20} + CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000} + CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} + CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} + CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} + CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10} + CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} + CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10} + TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000} + WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} + WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} + WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} + MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} + WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3} + WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} + HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} + HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} + SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} + SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + PGUSER: ${PGUSER:-${DB_USERNAME}} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}} + POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release} + SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + SANDBOX_HTTP_PROXY: 
${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} + WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true} + PGVECTOR_PGUSER: 
${PGVECTOR_PGUSER:-postgres} + PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m} + OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1} + OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1} + OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536} + OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536} + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_PORT: ${NGINX_PORT:-80} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} + NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} + NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} + NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} + NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} + NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} + NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com} + CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com} + CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-} + SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SSRF_DEFAULT_TIME_OUT: 
${SSRF_DEFAULT_TIME_OUT:-5} + SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5} + SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5} + SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5} + EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80} + EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443} + POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} + POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-} + POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-} + POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-} + POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-} + POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-} + CSP_WHITELIST: ${CSP_WHITELIST:-} + CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} + MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + +services: + # API service + api: + image: langgenius/dify-api:0.15.3 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'api' starts the API server. + MODE: api + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # worker service + # The Celery worker for processing the queue. + worker: + image: langgenius/dify-api:0.15.3 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'worker' starts the Celery worker for processing the queue. 
+ MODE: worker + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # Frontend web application. + web: + image: langgenius/dify-web:0.15.3 + restart: always + environment: + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + CSP_WHITELIST: ${CSP_WHITELIST:-} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + + # The postgres database. + db: + image: postgres:15-alpine + restart: always + environment: + PGUSER: ${PGUSER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456} + POSTGRES_DB: ${POSTGRES_DB:-dify} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + command: > + postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}' + -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}' + -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' + -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' + -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + volumes: + - ./volumes/db/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # The redis cache. + redis: + image: redis:6-alpine + restart: always + environment: + REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456} + volumes: + # Mount the redis data directory to the container. + - ./volumes/redis/data:/data + # Set the redis password when startup redis server. 
+ command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456} + healthcheck: + test: [ 'CMD', 'redis-cli', 'ping' ] + + # The DifySandbox + sandbox: + image: langgenius/dify-sandbox:0.2.10 + restart: always + environment: + # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. + API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + GIN_MODE: ${SANDBOX_GIN_MODE:-release} + WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + volumes: + - ./volumes/sandbox/dependencies:/dependencies + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ] + networks: + - ssrf_proxy_network + + # ssrf_proxy server + # for more information, please refer to + # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed + ssrf_proxy: + image: ubuntu/squid:latest + restart: always + volumes: + - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template + - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh + entrypoint: + [ + 'sh', + '-c', + "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i + 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && + /docker-entrypoint.sh" + ] + environment: + # pls clearly modify the squid env vars to fit your network environment. + HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + networks: + - ssrf_proxy_network + - default + + # Certbot service + # use `docker-compose --profile certbot up` to start the certbot service. 
+ certbot: + image: certbot/certbot + profiles: + - certbot + volumes: + - ./volumes/certbot/conf:/etc/letsencrypt + - ./volumes/certbot/www:/var/www/html + - ./volumes/certbot/logs:/var/log/letsencrypt + - ./volumes/certbot/conf/live:/etc/letsencrypt/live + - ./certbot/update-cert.template.txt:/update-cert.template.txt + - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh + environment: + - CERTBOT_EMAIL=${CERTBOT_EMAIL} + - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} + - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} + entrypoint: [ '/docker-entrypoint.sh' ] + command: [ 'tail', '-f', '/dev/null' ] + + # The nginx reverse proxy. + # used for reverse proxying the API service and Web service. + nginx: + image: nginx:latest + restart: always + volumes: + - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template + - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template + - ./nginx/https.conf.template:/etc/nginx/https.conf.template + - ./nginx/conf.d:/etc/nginx/conf.d + - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - ./nginx/ssl:/etc/ssl # cert dir (legacy) + - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container) + - ./volumes/certbot/conf:/etc/letsencrypt + - ./volumes/certbot/www:/var/www/html + entrypoint: + [ + 'sh', + '-c', + "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i + 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && + /docker-entrypoint.sh" + ] + environment: + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_PORT: ${NGINX_PORT:-80} + # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory + # and modify the env vars below in .env if HTTPS_ENABLED is true. 
+ NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} + NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} + NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} + NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} + NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} + NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} + NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-} + depends_on: + - api + - web + + # The Weaviate vector store. + weaviate: + image: semitechnologies/weaviate:1.19.0 + profiles: + - '' + - weaviate + restart: always + volumes: + # Mount the Weaviate data directory to the container. + - ./volumes/weaviate:/var/lib/weaviate + environment: + # The Weaviate configurations + # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information. + PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false} + DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + + # Qdrant vector store. 
+ # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.) + qdrant: + image: langgenius/qdrant:v1.7.3 + profiles: + - qdrant + restart: always + volumes: + - ./volumes/qdrant:/qdrant/storage + environment: + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + + # The Couchbase vector store. + couchbase-server: + build: ./couchbase-server + profiles: + - couchbase + restart: always + environment: + - CLUSTER_NAME=dify_search + - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator} + - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password} + - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings} + - COUCHBASE_BUCKET_RAMSIZE=512 + - COUCHBASE_RAM_SIZE=2048 + - COUCHBASE_EVENTING_RAM_SIZE=512 + - COUCHBASE_INDEX_RAM_SIZE=512 + - COUCHBASE_FTS_RAM_SIZE=1024 + hostname: couchbase-server + working_dir: /opt/couchbase + stdin_open: true + tty: true + entrypoint: [ "" ] + command: sh -c "/opt/couchbase/init/init-cbserver.sh" + volumes: + - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data + healthcheck: + # ensure bucket was created before proceeding + test: + [ + "CMD-SHELL", + "curl -s -f -u Administrator:password + http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit + 1" + ] + interval: 10s + retries: 10 + start_period: 30s + timeout: 10s + + # The pgvector vector database. + pgvector: + image: pgvector/pgvector:pg16 + profiles: + - pgvector + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. 
+ POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvector/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # pgvecto-rs vector store + pgvecto-rs: + image: tensorchord/pgvecto-rs:pg16-v0.3.0 + profiles: + - pgvecto-rs + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # Chroma vector database + chroma: + image: ghcr.io/chroma-core/chroma:0.5.20 + profiles: + - chroma + restart: always + volumes: + - ./volumes/chroma:/chroma/chroma + environment: + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + + # OceanBase vector database + oceanbase: + image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215 + profiles: + - oceanbase + restart: always + volumes: + - ./volumes/oceanbase/data:/root/ob + - ./volumes/oceanbase/conf:/root/.obd/cluster + - ./volumes/oceanbase/init.d:/root/boot/init.d + environment: + OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OB_SERVER_IP: '127.0.0.1' + + # Oracle vector database + oracle: + image: 
container-registry.oracle.com/database/free:latest + profiles: + - oracle + restart: always + volumes: + - source: oradata + type: volume + target: /opt/oracle/oradata + - ./startupscripts:/opt/oracle/scripts/startup + environment: + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + + # Milvus vector database services + etcd: + image: quay.io/coreos/etcd:v3.5.5 + profiles: + - milvus + environment: + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + volumes: + - ./volumes/milvus/etcd:/etcd + command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls + http://0.0.0.0:2379 --data-dir /etcd + healthcheck: + test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + minio: + image: minio/minio:RELEASE.2023-03-20T20-16-18Z + profiles: + - milvus + environment: + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + volumes: + - ./volumes/milvus/minio:/minio_data + command: minio server /minio_data --console-address ":9001" + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + milvus-standalone: + image: milvusdb/milvus:v2.5.0-beta + profiles: + - milvus + command: [ 'milvus', 'run', 'standalone' ] + environment: + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true} + volumes: + - ./volumes/milvus/milvus:/var/lib/milvus + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ] + interval: 30s + start_period: 90s + timeout: 20s + retries: 3 + 
depends_on: + - etcd + - minio + networks: + - milvus + + # Opensearch vector database + opensearch: + image: opensearchproject/opensearch:latest + profiles: + - opensearch + environment: + discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} + -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + ulimits: + memlock: + soft: ${OPENSEARCH_MEMLOCK_SOFT:--1} + hard: ${OPENSEARCH_MEMLOCK_HARD:--1} + nofile: + soft: ${OPENSEARCH_NOFILE_SOFT:-65536} + hard: ${OPENSEARCH_NOFILE_HARD:-65536} + volumes: + - ./volumes/opensearch/data:/usr/share/opensearch/data + networks: + - opensearch-net + + opensearch-dashboards: + image: opensearchproject/opensearch-dashboards:latest + profiles: + - opensearch + environment: + OPENSEARCH_HOSTS: '["https://opensearch:9200"]' + volumes: + - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml + networks: + - opensearch-net + depends_on: + - opensearch + + # MyScale vector database + myscale: + image: myscale/myscaledb:1.6.4 + profiles: + - myscale + restart: always + tty: true + volumes: + - ./volumes/myscale/data:/var/lib/clickhouse + - ./volumes/myscale/log:/var/log/clickhouse-server + - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml + + # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html + # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 + profiles: + - elasticsearch + - elasticsearch-ja + restart: always + volumes: + - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - dify_es01_data:/usr/share/elasticsearch/data + environment: 
+ ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + VECTOR_STORE: ${VECTOR_STORE:-} + cluster.name: dify-es-cluster + node.name: dify-es0 + discovery.type: single-node + xpack.license.self_generated.type: basic + xpack.security.enabled: 'true' + xpack.security.enrollment.enabled: 'false' + xpack.security.http.ssl.enabled: 'false' + deploy: + resources: + limits: + memory: 2g + entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ] + healthcheck: + test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ] + interval: 30s + timeout: 10s + retries: 50 + + # https://www.elastic.co/guide/en/kibana/current/docker.html + # https://www.elastic.co/guide/en/kibana/current/settings.html + kibana: + image: docker.elastic.co/kibana/kibana:8.14.3 + profiles: + - elasticsearch + depends_on: + - elasticsearch + restart: always + environment: + XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa + NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana + XPACK_SECURITY_ENABLED: 'true' + XPACK_SECURITY_ENROLLMENT_ENABLED: 'false' + XPACK_SECURITY_HTTP_SSL_ENABLED: 'false' + XPACK_FLEET_ISAIRGAPPED: 'true' + I18N_LOCALE: zh-CN + SERVER_PORT: '5601' + ELASTICSEARCH_HOSTS: http://elasticsearch:9200 + healthcheck: + test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ] + interval: 30s + timeout: 10s + retries: 3 + + # unstructured . + # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.) + unstructured: + image: downloads.unstructured.io/unstructured-io/unstructured-api:latest + profiles: + - unstructured + restart: always + volumes: + - ./volumes/unstructured:/app/data + +networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. 
+ ssrf_proxy_network: + driver: bridge + internal: true + milvus: + driver: bridge + opensearch-net: + driver: bridge + internal: true + +volumes: + oradata: + dify_es01_data: diff --git a/code/elasticsearch/docker-entrypoint.sh b/code/elasticsearch/docker-entrypoint.sh new file mode 100755 index 000000000..6669aec5a --- /dev/null +++ b/code/elasticsearch/docker-entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -e + +if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then + # Check if the ICU tokenizer plugin is installed + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then + printf '%s\n' "Installing the ICU tokenizer plugin" + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then + printf '%s\n' "Failed to install the ICU tokenizer plugin" + exit 1 + fi + fi + # Check if the Japanese language analyzer plugin is installed + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then + printf '%s\n' "Installing the Japanese language analyzer plugin" + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then + printf '%s\n' "Failed to install the Japanese language analyzer plugin" + exit 1 + fi + fi +fi + +# Run the original entrypoint script +exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh diff --git a/code/generate_docker_compose b/code/generate_docker_compose new file mode 100755 index 000000000..b5c0acefb --- /dev/null +++ b/code/generate_docker_compose @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +import os +import re +import sys + + +def parse_env_example(file_path): + """ + Parses the .env.example file and returns a dictionary with variable names as keys and default values as values. 
+ """ + env_vars = {} + with open(file_path, "r") as f: + for line_number, line in enumerate(f, 1): + line = line.strip() + # Ignore empty lines and comments + if not line or line.startswith("#"): + continue + # Use regex to parse KEY=VALUE + match = re.match(r"^([^=]+)=(.*)$", line) + if match: + key = match.group(1).strip() + value = match.group(2).strip() + # Remove possible quotes around the value + if (value.startswith('"') and value.endswith('"')) or ( + value.startswith("'") and value.endswith("'") + ): + value = value[1:-1] + env_vars[key] = value + else: + print(f"Warning: Unable to parse line {line_number}: {line}") + return env_vars + + +def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"): + """ + Generates a shared environment variables block as a YAML string. + """ + lines = [f"x-shared-env: &{anchor_name}"] + for key, default in env_vars.items(): + if key == "COMPOSE_PROFILES": + continue + # If default value is empty, use ${KEY:-} + if default == "": + lines.append(f" {key}: ${{{key}:-}}") + else: + # If default value contains special characters, wrap it in quotes + if re.search(r"[:\s]", default): + default = f"{default}" + lines.append(f" {key}: ${{{key}:-{default}}}") + return "\n".join(lines) + + +def insert_shared_env(template_path, output_path, shared_env_block, header_comments): + """ + Inserts the shared environment variables block and header comments into the template file, + removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file. 
+ """ + with open(template_path, "r") as f: + template_content = f.read() + + # Remove existing x-shared-env: &shared-api-worker-env lines + template_content = re.sub( + r"^x-shared-env: &shared-api-worker-env\s*\n?", + "", + template_content, + flags=re.MULTILINE, + ) + + # Prepare the final content with header comments and shared env block + final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}" + + with open(output_path, "w") as f: + f.write(final_content) + print(f"Generated {output_path}") + + +def main(): + env_example_path = ".env.example" + template_path = "docker-compose-template.yaml" + output_path = "docker-compose.yaml" + anchor_name = "shared-api-worker-env" # Can be modified as needed + + # Define header comments to be added at the top of docker-compose.yaml + header_comments = ( + "# ==================================================================\n" + "# WARNING: This file is auto-generated by generate_docker_compose\n" + "# Do not modify this file directly. 
Instead, update the .env.example\n" + "# or docker-compose-template.yaml and regenerate this file.\n" + "# ==================================================================\n" + ) + + # Check if required files exist + for path in [env_example_path, template_path]: + if not os.path.isfile(path): + print(f"Error: File {path} does not exist.") + sys.exit(1) + + # Parse .env.example file + env_vars = parse_env_example(env_example_path) + + if not env_vars: + print("Warning: No environment variables found in .env.example.") + + # Generate shared environment variables block + shared_env_block = generate_shared_env_block(env_vars, anchor_name) + + # Insert shared environment variables block and header comments into the template + insert_shared_env(template_path, output_path, shared_env_block, header_comments) + + +if __name__ == "__main__": + main() diff --git a/code/middleware.env.example b/code/middleware.env.example new file mode 100644 index 000000000..c4ce9f011 --- /dev/null +++ b/code/middleware.env.example @@ -0,0 +1,89 @@ +# ------------------------------ +# Environment Variables for db Service +# ------------------------------ +PGUSER=postgres +# The password for the default postgres user. +POSTGRES_PASSWORD=difyai123456 +# The name of the default postgres database. +POSTGRES_DB=dify +# postgres data directory +PGDATA=/var/lib/postgresql/data/pgdata +PGDATA_HOST_VOLUME=./volumes/db/data + +# Maximum number of connections to the database +# Default is 100 +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS +POSTGRES_MAX_CONNECTIONS=100 + +# Sets the amount of shared memory used for postgres's shared buffers. +# Default is 128MB +# Recommended value: 25% of available memory +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS +POSTGRES_SHARED_BUFFERS=128MB + +# Sets the amount of memory used by each database worker for working space. 
+# Default is 4MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM +POSTGRES_WORK_MEM=4MB + +# Sets the amount of memory reserved for maintenance activities. +# Default is 64MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM +POSTGRES_MAINTENANCE_WORK_MEM=64MB + +# Sets the planner's assumption about the effective cache size. +# Default is 4096MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE +POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB + +# ----------------------------- +# Environment Variables for redis Service +# ----------------------------- +REDIS_HOST_VOLUME=./volumes/redis/data +REDIS_PASSWORD=difyai123456 + +# ------------------------------ +# Environment Variables for sandbox Service +# ------------------------------ +SANDBOX_API_KEY=dify-sandbox +SANDBOX_GIN_MODE=release +SANDBOX_WORKER_TIMEOUT=15 +SANDBOX_ENABLE_NETWORK=true +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +SANDBOX_PORT=8194 + +# ------------------------------ +# Environment Variables for ssrf_proxy Service +# ------------------------------ +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox + +# ------------------------------ +# Environment Variables for weaviate Service +# ------------------------------ +WEAVIATE_QUERY_DEFAULTS_LIMIT=25 +WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true +WEAVIATE_DEFAULT_VECTORIZER_MODULE=none +WEAVIATE_CLUSTER_HOSTNAME=node1 +WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true +WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai +WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true +WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai +WEAVIATE_HOST_VOLUME=./volumes/weaviate + +# ------------------------------ +# Docker 
Compose Service Expose Host Port Configurations +# ------------------------------ +EXPOSE_POSTGRES_PORT=5432 +EXPOSE_REDIS_PORT=6379 +EXPOSE_SANDBOX_PORT=8194 +EXPOSE_SSRF_PROXY_PORT=3128 +EXPOSE_WEAVIATE_PORT=8080 diff --git a/code/nginx/conf.d/default.conf.template b/code/nginx/conf.d/default.conf.template new file mode 100644 index 000000000..9691122ce --- /dev/null +++ b/code/nginx/conf.d/default.conf.template @@ -0,0 +1,37 @@ +# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration. + +server { + listen ${NGINX_PORT}; + server_name ${NGINX_SERVER_NAME}; + + location /console/api { + proxy_pass http://api:5001; + include proxy.conf; + } + + location /api { + proxy_pass http://api:5001; + include proxy.conf; + } + + location /v1 { + proxy_pass http://api:5001; + include proxy.conf; + } + + location /files { + proxy_pass http://api:5001; + include proxy.conf; + } + + location / { + proxy_pass http://web:3000; + include proxy.conf; + } + + # placeholder for acme challenge location + ${ACME_CHALLENGE_LOCATION} + + # placeholder for https config defined in https.conf.template + ${HTTPS_CONFIG} +} diff --git a/code/nginx/docker-entrypoint.sh b/code/nginx/docker-entrypoint.sh new file mode 100755 index 000000000..d343cb3ef --- /dev/null +++ b/code/nginx/docker-entrypoint.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then + # Check if the certificate and key files for the specified domain exist + if [ -n "${CERTBOT_DOMAIN}" ] && \ + [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \ + [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then + SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" + SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" + else + SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}" + 
SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}" + fi + export SSL_CERTIFICATE_PATH + export SSL_CERTIFICATE_KEY_PATH + + # set the HTTPS_CONFIG environment variable to the content of the https.conf.template + HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template) + export HTTPS_CONFIG + # Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template + envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf +fi + +if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then + ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }' +else + ACME_CHALLENGE_LOCATION='' +fi +export ACME_CHALLENGE_LOCATION + +env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -) + +envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf +envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf + +envsubst < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf + +# Start Nginx using the default entrypoint +exec nginx -g 'daemon off;' \ No newline at end of file diff --git a/code/nginx/https.conf.template b/code/nginx/https.conf.template new file mode 100644 index 000000000..95ea36f46 --- /dev/null +++ b/code/nginx/https.conf.template @@ -0,0 +1,9 @@ +# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration. + +listen ${NGINX_SSL_PORT} ssl; +ssl_certificate ${SSL_CERTIFICATE_PATH}; +ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH}; +ssl_protocols ${NGINX_SSL_PROTOCOLS}; +ssl_prefer_server_ciphers on; +ssl_session_cache shared:SSL:10m; +ssl_session_timeout 10m; \ No newline at end of file diff --git a/code/nginx/nginx.conf.template b/code/nginx/nginx.conf.template new file mode 100644 index 000000000..32a571653 --- /dev/null +++ b/code/nginx/nginx.conf.template @@ -0,0 +1,34 @@ +# Please do not directly edit this file. 
Instead, modify the .env variables related to NGINX configuration. + +user nginx; +worker_processes ${NGINX_WORKER_PROCESSES}; + +error_log /var/log/nginx/error.log notice; +pid /var/run/nginx.pid; + + +events { + worker_connections 1024; +} + + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + + keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT}; + + #gzip on; + client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE}; + + include /etc/nginx/conf.d/*.conf; +} \ No newline at end of file diff --git a/code/nginx/proxy.conf.template b/code/nginx/proxy.conf.template new file mode 100644 index 000000000..117f80614 --- /dev/null +++ b/code/nginx/proxy.conf.template @@ -0,0 +1,11 @@ +# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration. 
+ +proxy_set_header Host $host; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header X-Forwarded-Port $server_port; +proxy_http_version 1.1; +proxy_set_header Connection ""; +proxy_buffering off; +proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT}; +proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT}; diff --git a/code/nginx/ssl/.gitkeep b/code/nginx/ssl/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/code/ssrf_proxy/docker-entrypoint.sh b/code/ssrf_proxy/docker-entrypoint.sh new file mode 100755 index 000000000..613897bb7 --- /dev/null +++ b/code/ssrf_proxy/docker-entrypoint.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Modified based on Squid OCI image entrypoint + +# This entrypoint aims to forward the squid logs to stdout to assist users of +# common container related tooling (e.g., kubernetes, docker-compose, etc) to +# access the service logs. + +# Moreover, it invokes the squid binary, leaving all the desired parameters to +# be provided by the "command" passed to the spawned container. If no command +# is provided by the user, the default behavior (as per the CMD statement in +# the Dockerfile) will be to use Ubuntu's default configuration [1] and run +# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided +# systemd unit. + +# [1] The default configuration is changed in the Dockerfile to allow local +# network connections. See the Dockerfile for further information. + +echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process" +if [ ! 
-f /etc/ssl/private/ssl-cert-snakeoil.key ]; then + /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1 +fi + +tail -F /var/log/squid/access.log 2>/dev/null & +tail -F /var/log/squid/error.log 2>/dev/null & +tail -F /var/log/squid/store.log 2>/dev/null & +tail -F /var/log/squid/cache.log 2>/dev/null & + +# Replace environment variables in the template and output to the squid.conf +echo "[ENTRYPOINT] replacing environment variables in the template" +awk '{ + while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) { + var = substr($0, RSTART+2, RLENGTH-3) + val = ENVIRON[var] + $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH) + } + print +}' /etc/squid/squid.conf.template > /etc/squid/squid.conf + +/usr/sbin/squid -Nz +echo "[ENTRYPOINT] starting squid" +/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1 diff --git a/code/ssrf_proxy/squid.conf.template b/code/ssrf_proxy/squid.conf.template new file mode 100644 index 000000000..676fe7379 --- /dev/null +++ b/code/ssrf_proxy/squid.conf.template @@ -0,0 +1,51 @@ +acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN) +acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN) +acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN) +acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines +acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN) +acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN) +acl localnet src fc00::/7 # RFC 4193 local private network range +acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines +acl SSL_ports port 443 +# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792 +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 1025-65535 # unregistered 
ports +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +acl CONNECT method CONNECT +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports +http_access allow localhost manager +http_access deny manager +http_access allow localhost +include /etc/squid/conf.d/*.conf +http_access deny all + +################################## Proxy Server ################################ +http_port ${HTTP_PORT} +coredump_dir ${COREDUMP_DIR} +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern ^gopher: 1440 0% 1440 +refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 +refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims +refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims +refresh_pattern \/InRelease$ 0 0% 0 refresh-ims +refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims +refresh_pattern . 0 20% 4320 + + +# cache_dir ufs /var/spool/squid 100 16 256 +# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks +# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default + +################################## Reverse Proxy To Sandbox ################################ +http_port ${REVERSE_PROXY_PORT} accel vhost +cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver +acl src_all src all +http_access allow src_all diff --git a/code/startupscripts/init.sh b/code/startupscripts/init.sh new file mode 100755 index 000000000..c6e6e1966 --- /dev/null +++ b/code/startupscripts/init.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +DB_INITIALIZED="/opt/oracle/oradata/dbinit" +#[ -f ${DB_INITIALIZED} ] && exit +#touch ${DB_INITIALIZED} +if [ -f ${DB_INITIALIZED} ]; then + echo 'File exists. Standards for have been Init' + exit +else + echo 'File does not exist. 
Standards for first time Start up this DB' + "$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script"; + touch ${DB_INITIALIZED} +fi diff --git a/code/startupscripts/init_user.script b/code/startupscripts/init_user.script new file mode 100755 index 000000000..7aa7c2804 --- /dev/null +++ b/code/startupscripts/init_user.script @@ -0,0 +1,10 @@ +show pdbs; +ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE; +alter session set container= freepdb1; +create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users; +grant DB_DEVELOPER_ROLE to dify; + +BEGIN +CTX_DDL.CREATE_PREFERENCE('my_chinese_vgram_lexer','CHINESE_VGRAM_LEXER'); +END; +/ diff --git a/code/tidb/config/pd.toml b/code/tidb/config/pd.toml new file mode 100644 index 000000000..042b251e4 --- /dev/null +++ b/code/tidb/config/pd.toml @@ -0,0 +1,4 @@ +# PD Configuration File reference: +# https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file +[replication] +max-replicas = 1 \ No newline at end of file diff --git a/code/tidb/config/tiflash-learner.toml b/code/tidb/config/tiflash-learner.toml new file mode 100644 index 000000000..5098829aa --- /dev/null +++ b/code/tidb/config/tiflash-learner.toml @@ -0,0 +1,13 @@ +# TiFlash tiflash-learner.toml Configuration File reference: +# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file + +log-file = "/logs/tiflash_tikv.log" + +[server] +engine-addr = "tiflash:4030" +addr = "0.0.0.0:20280" +advertise-addr = "tiflash:20280" +status-addr = "tiflash:20292" + +[storage] +data-dir = "/data/flash" diff --git a/code/tidb/config/tiflash.toml b/code/tidb/config/tiflash.toml new file mode 100644 index 000000000..30ac13efc --- /dev/null +++ b/code/tidb/config/tiflash.toml @@ -0,0 +1,19 @@ +# TiFlash tiflash.toml Configuration File reference: +# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file + +listen_host = "0.0.0.0" 
+path = "/data" + +[flash] +tidb_status_addr = "tidb:10080" +service_addr = "tiflash:4030" + +[flash.proxy] +config = "/tiflash-learner.toml" + +[logger] +errorlog = "/logs/tiflash_error.log" +log = "/logs/tiflash.log" + +[raft] +pd_addr = "pd0:2379" diff --git a/code/tidb/docker-compose.yaml b/code/tidb/docker-compose.yaml new file mode 100644 index 000000000..fa1577017 --- /dev/null +++ b/code/tidb/docker-compose.yaml @@ -0,0 +1,62 @@ +services: + pd0: + image: pingcap/pd:v8.5.1 + # ports: + # - "2379" + volumes: + - ./config/pd.toml:/pd.toml:ro + - ./volumes/data:/data + - ./volumes/logs:/logs + command: + - --name=pd0 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd0:2379 + - --advertise-peer-urls=http://pd0:2380 + - --initial-cluster=pd0=http://pd0:2380 + - --data-dir=/data/pd + - --config=/pd.toml + - --log-file=/logs/pd.log + restart: on-failure + tikv: + image: pingcap/tikv:v8.5.1 + volumes: + - ./volumes/data:/data + - ./volumes/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv:20160 + - --status-addr=tikv:20180 + - --data-dir=/data/tikv + - --pd=pd0:2379 + - --log-file=/logs/tikv.log + depends_on: + - "pd0" + restart: on-failure + tidb: + image: pingcap/tidb:v8.5.1 + # ports: + # - "4000:4000" + volumes: + - ./volumes/logs:/logs + command: + - --advertise-address=tidb + - --store=tikv + - --path=pd0:2379 + - --log-file=/logs/tidb.log + depends_on: + - "tikv" + restart: on-failure + tiflash: + image: pingcap/tiflash:v8.5.1 + volumes: + - ./config/tiflash.toml:/tiflash.toml:ro + - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro + - ./volumes/data:/data + - ./volumes/logs:/logs + command: + - --config=/tiflash.toml + depends_on: + - "tikv" + - "tidb" + restart: on-failure diff --git a/code/volumes/myscale/config/users.d/custom_users_config.xml b/code/volumes/myscale/config/users.d/custom_users_config.xml new file mode 100644 index 000000000..67f24b69e --- /dev/null 
+++ b/code/volumes/myscale/config/users.d/custom_users_config.xml @@ -0,0 +1,17 @@ + + + + + + ::1 + 127.0.0.1 + 10.0.0.0/8 + 172.16.0.0/12 + 192.168.0.0/16 + + default + default + 1 + + + \ No newline at end of file diff --git a/code/volumes/oceanbase/init.d/vec_memory.sql b/code/volumes/oceanbase/init.d/vec_memory.sql new file mode 100644 index 000000000..f4c283fdf --- /dev/null +++ b/code/volumes/oceanbase/init.d/vec_memory.sql @@ -0,0 +1 @@ +ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30; \ No newline at end of file diff --git a/code/volumes/opensearch/opensearch_dashboards.yml b/code/volumes/opensearch/opensearch_dashboards.yml new file mode 100644 index 000000000..f50d63bbb --- /dev/null +++ b/code/volumes/opensearch/opensearch_dashboards.yml @@ -0,0 +1,222 @@ +--- +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 + +# Description: +# Default configuration for OpenSearch Dashboards + +# OpenSearch Dashboards is served by a back end server. This setting specifies the port to use. +# server.port: 5601 + +# Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values. +# The default is 'localhost', which usually means remote machines will not be able to connect. +# To allow connections from remote users, set this parameter to a non-loopback address. +# server.host: "localhost" + +# Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy. +# Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath +# from requests it receives, and to prevent a deprecation warning at startup. +# This setting cannot end in a slash. +# server.basePath: "" + +# Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with +# `server.basePath` or require that they are rewritten by your reverse proxy. 
+# server.rewriteBasePath: false + +# The maximum payload size in bytes for incoming server requests. +# server.maxPayloadBytes: 1048576 + +# The OpenSearch Dashboards server's name. This is used for display purposes. +# server.name: "your-hostname" + +# The URLs of the OpenSearch instances to use for all your queries. +# opensearch.hosts: ["http://localhost:9200"] + +# OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and +# dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist. +# opensearchDashboards.index: ".opensearch_dashboards" + +# The default application to load. +# opensearchDashboards.defaultAppId: "home" + +# Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck. +# This settings should be used for large clusters or for clusters with ingest heavy nodes. +# It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes. +# +# It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting +# This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up +# e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id: +# Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here +# opensearch.optimizedHealthcheckId: "cluster_id" + +# If your OpenSearch is protected with basic authentication, these settings provide +# the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards +# index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which +# is proxied through the OpenSearch Dashboards server. 
+# opensearch.username: "opensearch_dashboards_system" +# opensearch.password: "pass" + +# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively. +# These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser. +# server.ssl.enabled: false +# server.ssl.certificate: /path/to/your/server.crt +# server.ssl.key: /path/to/your/server.key + +# Optional settings that provide the paths to the PEM-format SSL certificate and key files. +# These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when +# xpack.security.http.ssl.client_authentication in OpenSearch is set to required. +# opensearch.ssl.certificate: /path/to/your/client.crt +# opensearch.ssl.key: /path/to/your/client.key + +# Optional setting that enables you to specify a path to the PEM file for the certificate +# authority for your OpenSearch instance. +# opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ] + +# To disregard the validity of SSL certificates, change this setting's value to 'none'. +# opensearch.ssl.verificationMode: full + +# Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of +# the opensearch.requestTimeout setting. +# opensearch.pingTimeout: 1500 + +# Time in milliseconds to wait for responses from the back end or OpenSearch. This value +# must be a positive integer. +# opensearch.requestTimeout: 30000 + +# List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side +# headers, set this value to [] (an empty list). +# opensearch.requestHeadersWhitelist: [ authorization ] + +# Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten +# by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration. +# opensearch.customHeaders: {} + +# Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable. 
+# opensearch.shardTimeout: 30000 + +# Logs queries sent to OpenSearch. Requires logging.verbose set to true. +# opensearch.logQueries: false + +# Specifies the path where OpenSearch Dashboards creates the process ID file. +# pid.file: /var/run/opensearchDashboards.pid + +# Enables you to specify a file where OpenSearch Dashboards stores log output. +# logging.dest: stdout + +# Set the value of this setting to true to suppress all logging output. +# logging.silent: false + +# Set the value of this setting to true to suppress all logging output other than error messages. +# logging.quiet: false + +# Set the value of this setting to true to log all events, including system usage information +# and all requests. +# logging.verbose: false + +# Set the interval in milliseconds to sample system and process performance +# metrics. Minimum is 100ms. Defaults to 5000. +# ops.interval: 5000 + +# Specifies locale to be used for all localizable strings, dates and number formats. +# Supported languages are the following: English - en , by default , Chinese - zh-CN . +# i18n.locale: "en" + +# Set the allowlist to check input graphite Url. Allowlist is the default check list. +# vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite'] + +# Set the blocklist to check input graphite Url. Blocklist is an IP list. 
+# Below is an example for reference +# vis_type_timeline.graphiteBlockedIPs: [ +# //Loopback +# '127.0.0.0/8', +# '::1/128', +# //Link-local Address for IPv6 +# 'fe80::/10', +# //Private IP address for IPv4 +# '10.0.0.0/8', +# '172.16.0.0/12', +# '192.168.0.0/16', +# //Unique local address (ULA) +# 'fc00::/7', +# //Reserved IP address +# '0.0.0.0/8', +# '100.64.0.0/10', +# '192.0.0.0/24', +# '192.0.2.0/24', +# '198.18.0.0/15', +# '192.88.99.0/24', +# '198.51.100.0/24', +# '203.0.113.0/24', +# '224.0.0.0/4', +# '240.0.0.0/4', +# '255.255.255.255/32', +# '::/128', +# '2001:db8::/32', +# 'ff00::/8', +# ] +# vis_type_timeline.graphiteBlockedIPs: [] + +# opensearchDashboards.branding: +# logo: +# defaultUrl: "" +# darkModeUrl: "" +# mark: +# defaultUrl: "" +# darkModeUrl: "" +# loadingLogo: +# defaultUrl: "" +# darkModeUrl: "" +# faviconUrl: "" +# applicationTitle: "" + +# Set the value of this setting to true to capture region blocked warnings and errors +# for your map rendering services. +# map.showRegionBlockedWarning: false% + +# Set the value of this setting to false to suppress search usage telemetry +# for reducing the load of OpenSearch cluster. +# data.search.usageTelemetry.enabled: false + +# 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false' +# Set the value of this setting to false to disable VisBuilder +# functionality in Visualization. +# vis_builder.enabled: false + +# 2.4 New Experimental Feature +# Set the value of this setting to true to enable the experimental multiple data source +# support feature. Use with caution. +# data_source.enabled: false +# Set the value of these settings to customize crypto materials to encryption saved credentials +# in data sources. 
+# data_source.encryption.wrappingKeyName: 'changeme' +# data_source.encryption.wrappingKeyNamespace: 'changeme' +# data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + +# 2.6 New ML Commons Dashboards Feature +# Set the value of this setting to true to enable the ml commons dashboards +# ml_commons_dashboards.enabled: false + +# 2.12 New experimental Assistant Dashboards Feature +# Set the value of this setting to true to enable the assistant dashboards +# assistant.chat.enabled: false + +# 2.13 New Query Assistant Feature +# Set the value of this setting to false to disable the query assistant +# observability.query_assist.enabled: false + +# 2.14 Enable Ui Metric Collectors in Usage Collector +# Set the value of this setting to true to enable UI Metric collections +# usageCollection.uiMetric.enabled: false + +opensearch.hosts: [https://localhost:9200] +opensearch.ssl.verificationMode: none +opensearch.username: admin +opensearch.password: 'Qazwsxedc!@#123' +opensearch.requestHeadersWhitelist: [authorization, securitytenant] + +opensearch_security.multitenancy.enabled: true +opensearch_security.multitenancy.tenants.preferred: [Private, Global] +opensearch_security.readonly_mode.roles: [kibana_read_only] +# Use this setting if you are running opensearch-dashboards without https +opensearch_security.cookie.secure: false +server.host: '0.0.0.0' diff --git a/code/volumes/sandbox/conf/config.yaml b/code/volumes/sandbox/conf/config.yaml new file mode 100644 index 000000000..8c1a1deb5 --- /dev/null +++ b/code/volumes/sandbox/conf/config.yaml @@ -0,0 +1,14 @@ +app: + port: 8194 + debug: True + key: dify-sandbox +max_workers: 4 +max_requests: 50 +worker_timeout: 5 +python_path: /usr/local/bin/python3 +enable_network: True # please make sure there is no network risk in your environment +allowed_syscalls: # please leave it empty if you have no idea how seccomp works +proxy: + socks5: '' + http: '' + https: '' diff --git 
a/code/volumes/sandbox/conf/config.yaml.example b/code/volumes/sandbox/conf/config.yaml.example new file mode 100644 index 000000000..f92c19e51 --- /dev/null +++ b/code/volumes/sandbox/conf/config.yaml.example @@ -0,0 +1,35 @@ +app: + port: 8194 + debug: True + key: dify-sandbox +max_workers: 4 +max_requests: 50 +worker_timeout: 5 +python_path: /usr/local/bin/python3 +python_lib_path: + - /usr/local/lib/python3.10 + - /usr/lib/python3.10 + - /usr/lib/python3 + - /usr/lib/x86_64-linux-gnu + - /etc/ssl/certs/ca-certificates.crt + - /etc/nsswitch.conf + - /etc/hosts + - /etc/resolv.conf + - /run/systemd/resolve/stub-resolv.conf + - /run/resolvconf/resolv.conf + - /etc/localtime + - /usr/share/zoneinfo + - /etc/timezone + # add more paths if needed +python_pip_mirror_url: https://pypi.tuna.tsinghua.edu.cn/simple +nodejs_path: /usr/local/bin/node +enable_network: True +allowed_syscalls: + - 1 + - 2 + - 3 + # add all the syscalls which you require +proxy: + socks5: '' + http: '' + https: '' diff --git a/code/volumes/sandbox/dependencies/python-requirements.txt b/code/volumes/sandbox/dependencies/python-requirements.txt new file mode 100644 index 000000000..e69de29bb From e2c5b64506935d1810f370952961fd1eae2201f4 Mon Sep 17 00:00:00 2001 From: deos-coworking Date: Fri, 14 Feb 2025 18:23:24 +0800 Subject: [PATCH 3/6] Upd --- dify/code/docker-compose-template.yaml | 6 +-- dify/code/docker-compose.yaml | 59 +++++++++++++------------- 2 files changed, 32 insertions(+), 33 deletions(-) diff --git a/dify/code/docker-compose-template.yaml b/dify/code/docker-compose-template.yaml index 8aafc6188..c10c4d80d 100644 --- a/dify/code/docker-compose-template.yaml +++ b/dify/code/docker-compose-template.yaml @@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.15.2 + image: langgenius/dify-api:0.15.3 restart: always environment: # Use the shared environment variables. 
@@ -25,7 +25,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.15.2 + image: langgenius/dify-api:0.15.3 restart: always environment: # Use the shared environment variables. @@ -47,7 +47,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.15.2 + image: langgenius/dify-web:0.15.3 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/dify/code/docker-compose.yaml b/dify/code/docker-compose.yaml index 3f7d374b9..67207fd46 100644 --- a/dify/code/docker-compose.yaml +++ b/dify/code/docker-compose.yaml @@ -393,7 +393,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.15.2 + image: langgenius/dify-api:0.15.3 restart: always environment: # Use the shared environment variables. @@ -416,7 +416,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.15.2 + image: langgenius/dify-api:0.15.3 restart: always environment: # Use the shared environment variables. @@ -438,7 +438,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.15.2 + image: langgenius/dify-web:0.15.3 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -518,14 +518,7 @@ services: volumes: - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh - entrypoint: - [ - 'sh', - '-c', - "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i - 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && - /docker-entrypoint.sh" - ] + entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] environment: # pls clearly modify the squid env vars to fit your network environment. 
HTTP_PORT: ${SSRF_HTTP_PORT:-3128} @@ -572,14 +565,7 @@ services: - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container) - ./volumes/certbot/conf:/etc/letsencrypt - ./volumes/certbot/www:/var/www/html - entrypoint: - [ - 'sh', - '-c', - "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i - 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && - /docker-entrypoint.sh" - ] + entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] environment: NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} @@ -600,6 +586,9 @@ services: depends_on: - api - web + ports: + - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}' + - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}' # The TiDB vector store. # For production use, please refer to https://github.com/pingcap/tidb-docker-compose @@ -664,6 +653,7 @@ services: - COUCHBASE_INDEX_RAM_SIZE=512 - COUCHBASE_FTS_RAM_SIZE=1024 hostname: couchbase-server + container_name: couchbase-server working_dir: /opt/couchbase stdin_open: true tty: true @@ -673,13 +663,7 @@ services: - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data healthcheck: # ensure bucket was created before proceeding - test: - [ - "CMD-SHELL", - "curl -s -f -u Administrator:password - http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit - 1" - ] + test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ] interval: 10s retries: 10 start_period: 30s @@ -776,6 +760,7 @@ services: # Milvus vector database services etcd: + container_name: milvus-etcd image: quay.io/coreos/etcd:v3.5.5 profiles: - milvus @@ -786,8 +771,7 @@ services: ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} volumes: - ./volumes/milvus/etcd:/etcd - command: etcd 
-advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls - http://0.0.0.0:2379 --data-dir /etcd + command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd healthcheck: test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] interval: 30s @@ -797,6 +781,7 @@ services: - milvus minio: + container_name: milvus-minio image: minio/minio:RELEASE.2023-03-20T20-16-18Z profiles: - milvus @@ -815,6 +800,7 @@ services: - milvus milvus-standalone: + container_name: milvus-standalone image: milvusdb/milvus:v2.5.0-beta profiles: - milvus @@ -834,19 +820,22 @@ services: depends_on: - etcd - minio + ports: + - 19530:19530 + - 9091:9091 networks: - milvus # Opensearch vector database opensearch: + container_name: opensearch image: opensearchproject/opensearch:latest profiles: - opensearch environment: discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} - OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} - -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} ulimits: memlock: @@ -861,6 +850,7 @@ services: - opensearch-net opensearch-dashboards: + container_name: opensearch-dashboards image: opensearchproject/opensearch-dashboards:latest profiles: - opensearch @@ -875,6 +865,7 @@ services: # MyScale vector database myscale: + container_name: myscale image: myscale/myscaledb:1.6.4 profiles: - myscale @@ -884,11 +875,14 @@ services: - ./volumes/myscale/data:/var/lib/clickhouse - ./volumes/myscale/log:/var/log/clickhouse-server - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml + ports: + - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123} # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html # 
https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 + container_name: elasticsearch profiles: - elasticsearch - elasticsearch-ja @@ -906,6 +900,8 @@ services: xpack.security.enabled: 'true' xpack.security.enrollment.enabled: 'false' xpack.security.http.ssl.enabled: 'false' + ports: + - ${ELASTICSEARCH_PORT:-9200}:9200 deploy: resources: limits: @@ -921,6 +917,7 @@ services: # https://www.elastic.co/guide/en/kibana/current/settings.html kibana: image: docker.elastic.co/kibana/kibana:8.14.3 + container_name: kibana profiles: - elasticsearch depends_on: @@ -936,6 +933,8 @@ services: I18N_LOCALE: zh-CN SERVER_PORT: '5601' ELASTICSEARCH_HOSTS: http://elasticsearch:9200 + ports: + - ${KIBANA_PORT:-5601}:5601 healthcheck: test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ] interval: 30s From 58ab341fe2fdc75428bd889cedf3f8da00a112b1 Mon Sep 17 00:00:00 2001 From: deos-coworking Date: Fri, 14 Feb 2025 18:25:11 +0800 Subject: [PATCH 4/6] Revert "Upd" This reverts commit e2c5b64506935d1810f370952961fd1eae2201f4. --- dify/code/docker-compose-template.yaml | 6 +-- dify/code/docker-compose.yaml | 59 +++++++++++++------------- 2 files changed, 33 insertions(+), 32 deletions(-) diff --git a/dify/code/docker-compose-template.yaml b/dify/code/docker-compose-template.yaml index c10c4d80d..8aafc6188 100644 --- a/dify/code/docker-compose-template.yaml +++ b/dify/code/docker-compose-template.yaml @@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.15.3 + image: langgenius/dify-api:0.15.2 restart: always environment: # Use the shared environment variables. @@ -25,7 +25,7 @@ services: # worker service # The Celery worker for processing the queue. 
worker: - image: langgenius/dify-api:0.15.3 + image: langgenius/dify-api:0.15.2 restart: always environment: # Use the shared environment variables. @@ -47,7 +47,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.15.3 + image: langgenius/dify-web:0.15.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/dify/code/docker-compose.yaml b/dify/code/docker-compose.yaml index 67207fd46..3f7d374b9 100644 --- a/dify/code/docker-compose.yaml +++ b/dify/code/docker-compose.yaml @@ -393,7 +393,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.15.3 + image: langgenius/dify-api:0.15.2 restart: always environment: # Use the shared environment variables. @@ -416,7 +416,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.15.3 + image: langgenius/dify-api:0.15.2 restart: always environment: # Use the shared environment variables. @@ -438,7 +438,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.15.3 + image: langgenius/dify-web:0.15.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -518,7 +518,14 @@ services: volumes: - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh - entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + entrypoint: + [ + 'sh', + '-c', + "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i + 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && + /docker-entrypoint.sh" + ] environment: # pls clearly modify the squid env vars to fit your network environment. 
HTTP_PORT: ${SSRF_HTTP_PORT:-3128} @@ -565,7 +572,14 @@ services: - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container) - ./volumes/certbot/conf:/etc/letsencrypt - ./volumes/certbot/www:/var/www/html - entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + entrypoint: + [ + 'sh', + '-c', + "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i + 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && + /docker-entrypoint.sh" + ] environment: NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} @@ -586,9 +600,6 @@ services: depends_on: - api - web - ports: - - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}' - - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}' # The TiDB vector store. # For production use, please refer to https://github.com/pingcap/tidb-docker-compose @@ -653,7 +664,6 @@ services: - COUCHBASE_INDEX_RAM_SIZE=512 - COUCHBASE_FTS_RAM_SIZE=1024 hostname: couchbase-server - container_name: couchbase-server working_dir: /opt/couchbase stdin_open: true tty: true @@ -663,7 +673,13 @@ services: - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data healthcheck: # ensure bucket was created before proceeding - test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ] + test: + [ + "CMD-SHELL", + "curl -s -f -u Administrator:password + http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit + 1" + ] interval: 10s retries: 10 start_period: 30s @@ -760,7 +776,6 @@ services: # Milvus vector database services etcd: - container_name: milvus-etcd image: quay.io/coreos/etcd:v3.5.5 profiles: - milvus @@ -771,7 +786,8 @@ services: ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} volumes: - ./volumes/milvus/etcd:/etcd - command: etcd 
-advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd + command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls + http://0.0.0.0:2379 --data-dir /etcd healthcheck: test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] interval: 30s @@ -781,7 +797,6 @@ services: - milvus minio: - container_name: milvus-minio image: minio/minio:RELEASE.2023-03-20T20-16-18Z profiles: - milvus @@ -800,7 +815,6 @@ services: - milvus milvus-standalone: - container_name: milvus-standalone image: milvusdb/milvus:v2.5.0-beta profiles: - milvus @@ -820,22 +834,19 @@ services: depends_on: - etcd - minio - ports: - - 19530:19530 - - 9091:9091 networks: - milvus # Opensearch vector database opensearch: - container_name: opensearch image: opensearchproject/opensearch:latest profiles: - opensearch environment: discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} - OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} + -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} ulimits: memlock: @@ -850,7 +861,6 @@ services: - opensearch-net opensearch-dashboards: - container_name: opensearch-dashboards image: opensearchproject/opensearch-dashboards:latest profiles: - opensearch @@ -865,7 +875,6 @@ services: # MyScale vector database myscale: - container_name: myscale image: myscale/myscaledb:1.6.4 profiles: - myscale @@ -875,14 +884,11 @@ services: - ./volumes/myscale/data:/var/lib/clickhouse - ./volumes/myscale/log:/var/log/clickhouse-server - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml - ports: - - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123} # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html # 
https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 - container_name: elasticsearch profiles: - elasticsearch - elasticsearch-ja @@ -900,8 +906,6 @@ services: xpack.security.enabled: 'true' xpack.security.enrollment.enabled: 'false' xpack.security.http.ssl.enabled: 'false' - ports: - - ${ELASTICSEARCH_PORT:-9200}:9200 deploy: resources: limits: @@ -917,7 +921,6 @@ services: # https://www.elastic.co/guide/en/kibana/current/settings.html kibana: image: docker.elastic.co/kibana/kibana:8.14.3 - container_name: kibana profiles: - elasticsearch depends_on: @@ -933,8 +936,6 @@ services: I18N_LOCALE: zh-CN SERVER_PORT: '5601' ELASTICSEARCH_HOSTS: http://elasticsearch:9200 - ports: - - ${KIBANA_PORT:-5601}:5601 healthcheck: test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ] interval: 30s From b12c7aae20bdb10e7997f59cc1a798e9e0fd71c4 Mon Sep 17 00:00:00 2001 From: deos-coworking Date: Fri, 14 Feb 2025 18:27:43 +0800 Subject: [PATCH 5/6] Fix version --- dify/code/docker-compose-template.yaml | 6 +++--- dify/code/docker-compose.yaml | 15 ++++++++++++--- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/dify/code/docker-compose-template.yaml b/dify/code/docker-compose-template.yaml index 8aafc6188..c10c4d80d 100644 --- a/dify/code/docker-compose-template.yaml +++ b/dify/code/docker-compose-template.yaml @@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.15.2 + image: langgenius/dify-api:0.15.3 restart: always environment: # Use the shared environment variables. @@ -25,7 +25,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.15.2 + image: langgenius/dify-api:0.15.3 restart: always environment: # Use the shared environment variables. @@ -47,7 +47,7 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:0.15.2 + image: langgenius/dify-web:0.15.3 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/dify/code/docker-compose.yaml b/dify/code/docker-compose.yaml index 3f7d374b9..4407e0a42 100644 --- a/dify/code/docker-compose.yaml +++ b/dify/code/docker-compose.yaml @@ -393,7 +393,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.15.2 + image: langgenius/dify-api:0.15.3 restart: always environment: # Use the shared environment variables. @@ -416,7 +416,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.15.2 + image: langgenius/dify-api:0.15.3 restart: always environment: # Use the shared environment variables. @@ -438,7 +438,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.15.2 + image: langgenius/dify-web:0.15.3 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -664,6 +664,7 @@ services: - COUCHBASE_INDEX_RAM_SIZE=512 - COUCHBASE_FTS_RAM_SIZE=1024 hostname: couchbase-server + container_name: couchbase-server working_dir: /opt/couchbase stdin_open: true tty: true @@ -776,6 +777,7 @@ services: # Milvus vector database services etcd: + container_name: milvus-etcd image: quay.io/coreos/etcd:v3.5.5 profiles: - milvus @@ -797,6 +799,7 @@ services: - milvus minio: + container_name: milvus-minio image: minio/minio:RELEASE.2023-03-20T20-16-18Z profiles: - milvus @@ -815,6 +818,7 @@ services: - milvus milvus-standalone: + container_name: milvus-standalone image: milvusdb/milvus:v2.5.0-beta profiles: - milvus @@ -839,6 +843,7 @@ services: # Opensearch vector database opensearch: + container_name: opensearch image: opensearchproject/opensearch:latest profiles: - opensearch @@ -861,6 +866,7 @@ services: - opensearch-net opensearch-dashboards: + container_name: opensearch-dashboards image: opensearchproject/opensearch-dashboards:latest profiles: - 
opensearch @@ -875,6 +881,7 @@ services: # MyScale vector database myscale: + container_name: myscale image: myscale/myscaledb:1.6.4 profiles: - myscale @@ -889,6 +896,7 @@ services: # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 + container_name: elasticsearch profiles: - elasticsearch - elasticsearch-ja @@ -921,6 +929,7 @@ services: # https://www.elastic.co/guide/en/kibana/current/settings.html kibana: image: docker.elastic.co/kibana/kibana:8.14.3 + container_name: kibana profiles: - elasticsearch depends_on: From c740063a694881a5dc551aa81ca828f2d888ac1d Mon Sep 17 00:00:00 2001 From: deos-coworking Date: Sat, 15 Feb 2025 03:25:56 +0800 Subject: [PATCH 6/6] test new --- code/docker-compose-template.yaml | 59 +++++++++++++++++-- code/docker-compose.middleware.yaml | 29 ++++++++++ code/docker-compose.yaml | 76 ++++++++++++++++++++++--- code/middleware.env.example | 26 +++++++++ code/nginx/conf.d/default.conf.template | 5 ++ code/nginx/proxy.conf.template | 1 - code/ssrf_proxy/squid.conf.template | 1 - code/tidb/config/pd.toml | 4 -- code/tidb/config/tiflash-learner.toml | 13 ----- code/tidb/config/tiflash.toml | 19 ------- code/tidb/docker-compose.yaml | 62 -------------------- dify/update.js | 4 +- 12 files changed, 185 insertions(+), 114 deletions(-) delete mode 100644 code/tidb/config/pd.toml delete mode 100644 code/tidb/config/tiflash-learner.toml delete mode 100644 code/tidb/config/tiflash.toml delete mode 100644 code/tidb/docker-compose.yaml diff --git a/code/docker-compose-template.yaml b/code/docker-compose-template.yaml index fc4e7d9c8..ed0b259e1 100644 --- a/code/docker-compose-template.yaml +++ b/code/docker-compose-template.yaml @@ -2,16 +2,27 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.15.3 + image: langgenius/dify-api:1.0.0-beta.1 restart: always environment: # Use 
the shared environment variables. <<: *shared-api-worker-env # Startup mode, 'api' starts the API server. MODE: api + CONSOLE_API_URL: ${CONSOLE_API_URL:-http://localhost:5001} + CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-http://localhost:3000} SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_API_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + PLUGIN_API_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} + ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} depends_on: - db - redis @@ -25,7 +36,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.15.3 + image: langgenius/dify-api:1.0.0-beta.1 restart: always environment: # Use the shared environment variables. 
@@ -35,6 +46,12 @@ services: SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_API_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + PLUGIN_API_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-false} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} depends_on: - db - redis @@ -47,7 +64,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.15.3 + image: langgenius/dify-web:1.0.0-beta.1 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -56,8 +73,9 @@ services: NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} CSP_WHITELIST: ${CSP_WHITELIST:-} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} # The postgres database. db: @@ -118,6 +136,29 @@ services: networks: - ssrf_proxy_network + # plugin daemon + plugin_daemon: + image: langgenius/dify-plugin-daemon:0.0.1-local + restart: always + environment: + # Use the shared environment variables. 
+ <<: *shared-api-worker-env + DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} + SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} + SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} + DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} + DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} + PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} + ports: + - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}" + volumes: + - ./volumes/plugin_daemon:/app/storage + + # ssrf_proxy server # for more information, please refer to # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed @@ -199,6 +240,16 @@ services: - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}' - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}' + # The TiDB vector store. + # For production use, please refer to https://github.com/pingcap/tidb-docker-compose + tidb: + image: pingcap/tidb:v8.4.0 + profiles: + - tidb + command: + - --store=unistore + restart: always + # The Weaviate vector store. weaviate: image: semitechnologies/weaviate:1.19.0 diff --git a/code/docker-compose.middleware.yaml b/code/docker-compose.middleware.yaml index 11f530219..8a20fff15 100644 --- a/code/docker-compose.middleware.yaml +++ b/code/docker-compose.middleware.yaml @@ -64,6 +64,35 @@ services: networks: - ssrf_proxy_network + # plugin daemon + plugin_daemon: + image: langgenius/dify-plugin-daemon:0.0.1-local + restart: always + environment: + # Use the shared environment variables. 
+ DB_HOST: ${DB_HOST:-db} + DB_PORT: ${DB_PORT:-5432} + DB_USERNAME: ${DB_USER:-postgres} + DB_PASSWORD: ${DB_PASSWORD:-difyai123456} + DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} + REDIS_HOST: ${REDIS_HOST:-redis} + REDIS_PORT: ${REDIS_PORT:-6379} + REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456} + SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} + SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} + DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://host.docker.internal:5001} + DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} + PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} + ports: + - "${EXPOSE_PLUGIN_DAEMON_PORT:-5002}:${PLUGIN_DAEMON_PORT:-5002}" + - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}" + volumes: + - ./volumes/plugin_daemon:/app/storage + # ssrf_proxy server # for more information, please refer to # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed diff --git a/code/docker-compose.yaml b/code/docker-compose.yaml index f6b2bd443..b52f3db0f 100644 --- a/code/docker-compose.yaml +++ b/code/docker-compose.yaml @@ -377,10 +377,6 @@ x-shared-env: &shared-api-worker-env SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} - SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5} - SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5} - SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5} - SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5} EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80} EXPOSE_NGINX_SSL_PORT: 
${EXPOSE_NGINX_SSL_PORT:-443} POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} @@ -393,20 +389,47 @@ x-shared-env: &shared-api-worker-env CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify-plugin} + EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002} + PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002} + PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} + PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} + PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} + EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} + EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} + ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} + MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev} services: # API service api: - image: langgenius/dify-api:0.15.3 + image: langgenius/dify-api:1.0.0-beta.1 restart: always environment: # Use the shared environment variables. <<: *shared-api-worker-env # Startup mode, 'api' starts the API server. 
MODE: api + CONSOLE_API_URL: ${CONSOLE_API_URL:-http://localhost:5001} + CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-http://localhost:3000} SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_API_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + PLUGIN_API_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} + ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} depends_on: - db - redis @@ -420,7 +443,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.15.3 + image: langgenius/dify-api:1.0.0-beta.1 restart: always environment: # Use the shared environment variables. 
@@ -430,6 +453,12 @@ services: SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_API_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + PLUGIN_API_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-false} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} depends_on: - db - redis @@ -442,7 +471,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.15.3 + image: langgenius/dify-web:1.0.0-beta.1 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -451,8 +480,9 @@ services: NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} CSP_WHITELIST: ${CSP_WHITELIST:-} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} # The postgres database. db: @@ -513,6 +543,26 @@ services: networks: - ssrf_proxy_network + # plugin daemon + plugin_daemon: + image: langgenius/dify-plugin-daemon:0.0.1-local + restart: always + environment: + # Use the shared environment variables. 
+ <<: *shared-api-worker-env + DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} + SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} + SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} + DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} + DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} + PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} + volumes: + - ./volumes/plugin_daemon:/app/storage + # ssrf_proxy server # for more information, please refer to # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed @@ -605,6 +655,16 @@ services: - api - web + # The TiDB vector store. + # For production use, please refer to https://github.com/pingcap/tidb-docker-compose + tidb: + image: pingcap/tidb:v8.4.0 + profiles: + - tidb + command: + - --store=unistore + restart: always + # The Weaviate vector store. 
weaviate: image: semitechnologies/weaviate:1.19.0 diff --git a/code/middleware.env.example b/code/middleware.env.example index c4ce9f011..357a60fe1 100644 --- a/code/middleware.env.example +++ b/code/middleware.env.example @@ -87,3 +87,29 @@ EXPOSE_REDIS_PORT=6379 EXPOSE_SANDBOX_PORT=8194 EXPOSE_SSRF_PROXY_PORT=3128 EXPOSE_WEAVIATE_PORT=8080 + +# ------------------------------ +# Plugin Daemon Configuration +# ------------------------------ + +DB_PLUGIN_DATABASE=dify_plugin +EXPOSE_PLUGIN_DAEMON_PORT=5002 +PLUGIN_DAEMON_PORT=5002 +PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi +PLUGIN_DAEMON_URL=http://host.docker.internal:5002 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_PPROF_ENABLED=false +PLUGIN_WORKING_PATH=/app/storage/cwd + +ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id} + +PLUGIN_DEBUGGING_PORT=5003 +PLUGIN_DEBUGGING_HOST=0.0.0.0 +EXPOSE_PLUGIN_DEBUGGING_HOST=localhost +EXPOSE_PLUGIN_DEBUGGING_PORT=5003 + +PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +PLUGIN_DIFY_INNER_API_URL=http://api:5001 + +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace-plugin.dify.dev diff --git a/code/nginx/conf.d/default.conf.template b/code/nginx/conf.d/default.conf.template index 9691122ce..bf86c7073 100644 --- a/code/nginx/conf.d/default.conf.template +++ b/code/nginx/conf.d/default.conf.template @@ -24,6 +24,11 @@ server { include proxy.conf; } + location /e { + proxy_pass http://plugin_daemon:5002; + include proxy.conf; + } + location / { proxy_pass http://web:3000; include proxy.conf; diff --git a/code/nginx/proxy.conf.template b/code/nginx/proxy.conf.template index 117f80614..6b52d2351 100644 --- a/code/nginx/proxy.conf.template +++ b/code/nginx/proxy.conf.template @@ -3,7 +3,6 @@ proxy_set_header Host $host; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; -proxy_set_header X-Forwarded-Port $server_port; proxy_http_version 1.1; 
proxy_set_header Connection ""; proxy_buffering off; diff --git a/code/ssrf_proxy/squid.conf.template b/code/ssrf_proxy/squid.conf.template index 676fe7379..a0875a882 100644 --- a/code/ssrf_proxy/squid.conf.template +++ b/code/ssrf_proxy/squid.conf.template @@ -7,7 +7,6 @@ acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN) acl localnet src fc00::/7 # RFC 4193 local private network range acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines acl SSL_ports port 443 -# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792 acl Safe_ports port 80 # http acl Safe_ports port 21 # ftp acl Safe_ports port 443 # https diff --git a/code/tidb/config/pd.toml b/code/tidb/config/pd.toml deleted file mode 100644 index 042b251e4..000000000 --- a/code/tidb/config/pd.toml +++ /dev/null @@ -1,4 +0,0 @@ -# PD Configuration File reference: -# https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file -[replication] -max-replicas = 1 \ No newline at end of file diff --git a/code/tidb/config/tiflash-learner.toml b/code/tidb/config/tiflash-learner.toml deleted file mode 100644 index 5098829aa..000000000 --- a/code/tidb/config/tiflash-learner.toml +++ /dev/null @@ -1,13 +0,0 @@ -# TiFlash tiflash-learner.toml Configuration File reference: -# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file - -log-file = "/logs/tiflash_tikv.log" - -[server] -engine-addr = "tiflash:4030" -addr = "0.0.0.0:20280" -advertise-addr = "tiflash:20280" -status-addr = "tiflash:20292" - -[storage] -data-dir = "/data/flash" diff --git a/code/tidb/config/tiflash.toml b/code/tidb/config/tiflash.toml deleted file mode 100644 index 30ac13efc..000000000 --- a/code/tidb/config/tiflash.toml +++ /dev/null @@ -1,19 +0,0 @@ -# TiFlash tiflash.toml Configuration File reference: -# 
https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file - -listen_host = "0.0.0.0" -path = "/data" - -[flash] -tidb_status_addr = "tidb:10080" -service_addr = "tiflash:4030" - -[flash.proxy] -config = "/tiflash-learner.toml" - -[logger] -errorlog = "/logs/tiflash_error.log" -log = "/logs/tiflash.log" - -[raft] -pd_addr = "pd0:2379" diff --git a/code/tidb/docker-compose.yaml b/code/tidb/docker-compose.yaml deleted file mode 100644 index fa1577017..000000000 --- a/code/tidb/docker-compose.yaml +++ /dev/null @@ -1,62 +0,0 @@ -services: - pd0: - image: pingcap/pd:v8.5.1 - # ports: - # - "2379" - volumes: - - ./config/pd.toml:/pd.toml:ro - - ./volumes/data:/data - - ./volumes/logs:/logs - command: - - --name=pd0 - - --client-urls=http://0.0.0.0:2379 - - --peer-urls=http://0.0.0.0:2380 - - --advertise-client-urls=http://pd0:2379 - - --advertise-peer-urls=http://pd0:2380 - - --initial-cluster=pd0=http://pd0:2380 - - --data-dir=/data/pd - - --config=/pd.toml - - --log-file=/logs/pd.log - restart: on-failure - tikv: - image: pingcap/tikv:v8.5.1 - volumes: - - ./volumes/data:/data - - ./volumes/logs:/logs - command: - - --addr=0.0.0.0:20160 - - --advertise-addr=tikv:20160 - - --status-addr=tikv:20180 - - --data-dir=/data/tikv - - --pd=pd0:2379 - - --log-file=/logs/tikv.log - depends_on: - - "pd0" - restart: on-failure - tidb: - image: pingcap/tidb:v8.5.1 - # ports: - # - "4000:4000" - volumes: - - ./volumes/logs:/logs - command: - - --advertise-address=tidb - - --store=tikv - - --path=pd0:2379 - - --log-file=/logs/tidb.log - depends_on: - - "tikv" - restart: on-failure - tiflash: - image: pingcap/tiflash:v8.5.1 - volumes: - - ./config/tiflash.toml:/tiflash.toml:ro - - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro - - ./volumes/data:/data - - ./volumes/logs:/logs - command: - - --config=/tiflash.toml - depends_on: - - "tikv" - - "tidb" - restart: on-failure diff --git a/dify/update.js b/dify/update.js index faff6b0be..0dd5bd6e6 
100644 --- a/dify/update.js +++ b/dify/update.js @@ -1,6 +1,6 @@ import utils from "../utils.js"; -await utils.cloneOrPullRepo({ repo: "https://github.com/langgenius/dify.git" }); -await utils.copyDir("./repo/docker", "./code"); +// await utils.cloneOrPullRepo({ repo: "https://github.com/langgenius/dify.git" }); +// await utils.copyDir("./repo/docker", "./code"); await utils.removeContainerNames("./code/docker-compose.yaml"); await utils.removePorts("./code/docker-compose.yaml");