Chore/improve deployment flow (#4299)

Co-authored-by: 天魂 <365125264@qq.com>
Chenhe Gu
2024-06-28 17:37:52 +08:00
committed by GitHub
parent dd5f3873da
commit 488e3c3d56
41 changed files with 2382 additions and 8104 deletions


@@ -0,0 +1,13 @@
services:
# Chroma vector store.
chroma:
image: ghcr.io/chroma-core/chroma:0.5.1
restart: always
volumes:
- ./volumes/chroma:/chroma/chroma
environment:
CHROMA_SERVER_AUTHN_CREDENTIALS: difyai123456
CHROMA_SERVER_AUTHN_PROVIDER: chromadb.auth.token_authn.TokenAuthenticationServerProvider
IS_PERSISTENT: TRUE
ports:
- "8000:8000"


@@ -0,0 +1,109 @@
version: '3'
services:
# The postgres database.
db:
image: postgres:15-alpine
restart: always
environment:
# The password for the default postgres user.
POSTGRES_PASSWORD: difyai123456
# The name of the default postgres database.
POSTGRES_DB: dify
# postgres data directory
PGDATA: /var/lib/postgresql/data/pgdata
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
ports:
- "5432:5432"
# The redis cache.
redis:
image: redis:6-alpine
restart: always
volumes:
# Mount the redis data directory to the container.
- ./volumes/redis/data:/data
# Set the redis password when starting the redis server.
command: redis-server --requirepass difyai123456
ports:
- "6379:6379"
# The Weaviate vector store.
weaviate:
image: semitechnologies/weaviate:1.19.0
restart: always
volumes:
# Mount the Weaviate data directory to the container.
- ./volumes/weaviate:/var/lib/weaviate
environment:
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
QUERY_DEFAULTS_LIMIT: 25
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'false'
PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
DEFAULT_VECTORIZER_MODULE: 'none'
CLUSTER_HOSTNAME: 'node1'
AUTHENTICATION_APIKEY_ENABLED: 'true'
AUTHENTICATION_APIKEY_ALLOWED_KEYS: 'WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih'
AUTHENTICATION_APIKEY_USERS: 'hello@dify.ai'
AUTHORIZATION_ADMINLIST_ENABLED: 'true'
AUTHORIZATION_ADMINLIST_USERS: 'hello@dify.ai'
ports:
- "8080:8080"
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.1
restart: always
environment:
# The DifySandbox configurations
# Make sure to change this key to a strong one for your deployment.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY: dify-sandbox
GIN_MODE: 'release'
WORKER_TIMEOUT: 15
ENABLE_NETWORK: 'true'
HTTP_PROXY: 'http://ssrf_proxy:3128'
HTTPS_PROXY: 'http://ssrf_proxy:3128'
SANDBOX_PORT: 8194
volumes:
- ./volumes/sandbox/dependencies:/dependencies
networks:
- ssrf_proxy_network
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-ssrf_proxy-needed
ssrf_proxy:
image: ubuntu/squid:latest
restart: always
ports:
- "3128:3128"
- "8194:8194"
volumes:
# Please modify the squid.conf file to fit your network environment.
- ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf
networks:
- ssrf_proxy_network
- default
# Qdrant vector store.
# uncomment to use qdrant as vector store.
# (if uncommented, you need to comment out the weaviate service above,
# and set VECTOR_STORE to qdrant in the api & worker service.)
# qdrant:
# image: qdrant/qdrant:1.7.3
# restart: always
# volumes:
# - ./volumes/qdrant:/qdrant/storage
# environment:
# QDRANT_API_KEY: 'difyai123456'
# ports:
# - "6333:6333"
# - "6334:6334"
networks:
# Create a network shared by sandbox, api and ssrf_proxy that cannot access the outside.
ssrf_proxy_network:
driver: bridge
internal: true
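As the comment above notes, switching this stack from Weaviate to Qdrant means uncommenting the qdrant service and pointing the api and worker configuration at it. A sketch of the matching settings, using the values that appear in the main compose file later in this commit:

  # in the api / worker service environment
  VECTOR_STORE: qdrant
  QDRANT_URL: http://qdrant:6333
  QDRANT_API_KEY: difyai123456
  QDRANT_CLIENT_TIMEOUT: 20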


@@ -0,0 +1,64 @@
version: '3.5'
services:
etcd:
container_name: milvus-etcd
image: quay.io/coreos/etcd:v3.5.5
environment:
- ETCD_AUTO_COMPACTION_MODE=revision
- ETCD_AUTO_COMPACTION_RETENTION=1000
- ETCD_QUOTA_BACKEND_BYTES=4294967296
- ETCD_SNAPSHOT_COUNT=50000
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/etcd:/etcd
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck:
test: ["CMD", "etcdctl", "endpoint", "health"]
interval: 30s
timeout: 20s
retries: 3
minio:
container_name: milvus-minio
image: minio/minio:RELEASE.2023-03-20T20-16-18Z
environment:
MINIO_ACCESS_KEY: minioadmin
MINIO_SECRET_KEY: minioadmin
ports:
- "9001:9001"
- "9000:9000"
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/minio:/minio_data
command: minio server /minio_data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
milvus-standalone:
container_name: milvus-standalone
image: milvusdb/milvus:v2.3.1
command: ["milvus", "run", "standalone"]
environment:
ETCD_ENDPOINTS: etcd:2379
MINIO_ADDRESS: minio:9000
common.security.authorizationEnabled: true
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
interval: 30s
start_period: 90s
timeout: 20s
retries: 3
ports:
- "19530:19530"
- "9091:9091"
depends_on:
- "etcd"
- "minio"
networks:
default:
name: milvus
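If this Milvus stack is used as the vector store, the MILVUS_* variables in the api and worker services should point at it instead of the default 127.0.0.1. A sketch, assuming the Dify containers are attached to the `milvus` network defined above so the `milvus-standalone` hostname resolves:

  # in the api / worker service environment
  VECTOR_STORE: milvus
  MILVUS_HOST: milvus-standalone
  MILVUS_PORT: 19530
  MILVUS_USER: root
  MILVUS_PASSWORD: Milvus
  MILVUS_SECURE: 'false'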


@@ -0,0 +1,40 @@
services:
opensearch: # This is also the hostname of the container within the Docker network (i.e. https://opensearch/)
image: opensearchproject/opensearch:latest # Specifying the latest available image - modify if you want a specific version
container_name: opensearch
environment:
- discovery.type=single-node
- bootstrap.memory_lock=true # Disable JVM heap memory swapping
- "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx1024m" # Set min and max JVM heap sizes to at least 50% of system RAM
- OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 # Sets the demo admin user password when using demo configuration, required for OpenSearch 2.12 and later
ulimits:
memlock:
soft: -1 # Set memlock to unlimited (no soft or hard limit)
hard: -1
nofile:
soft: 65536 # Maximum number of open files for the opensearch user - set to at least 65536
hard: 65536
volumes:
- ./volumes/opensearch/data:/usr/share/opensearch/data # Mounts the local opensearch data directory into the container
ports:
- 9200:9200 # REST API
- 9600:9600 # Performance Analyzer
networks:
- opensearch-net # All of the containers will join the same Docker bridge network
opensearch-dashboards:
image: opensearchproject/opensearch-dashboards:latest # Make sure the version of opensearch-dashboards matches the version of opensearch installed on other nodes
container_name: opensearch-dashboards
ports:
- 5601:5601 # Map host port 5601 to container port 5601
expose:
- "5601" # Expose port 5601 for web access to OpenSearch Dashboards
environment:
OPENSEARCH_HOSTS: '["https://opensearch:9200"]' # Define the OpenSearch nodes that OpenSearch Dashboards will query
volumes:
- ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
networks:
- opensearch-net
networks:
opensearch-net:
driver: bridge
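To use this OpenSearch node as the vector store, the OPENSEARCH_* variables in the api and worker services need to match the admin password set above; OPENSEARCH_SECURE stays 'true' because the node serves HTTPS. A sketch, assuming the Dify containers can resolve the `opensearch` hostname:

  # in the api / worker service environment
  VECTOR_STORE: opensearch
  OPENSEARCH_HOST: opensearch
  OPENSEARCH_PORT: 9200
  OPENSEARCH_USER: admin
  # must match OPENSEARCH_INITIAL_ADMIN_PASSWORD above
  OPENSEARCH_PASSWORD: 'Qazwsxedc!@#123'
  OPENSEARCH_SECURE: 'true'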


@@ -0,0 +1,17 @@
services:
# oracle 23 ai vector store.
oracle:
image: container-registry.oracle.com/database/free:latest
restart: always
ports:
- 1521:1521
volumes:
- type: volume
source: oradata_vector
target: /opt/oracle/oradata
- ./startupscripts:/opt/oracle/scripts/startup
environment:
- ORACLE_PWD=Dify123456
- ORACLE_CHARACTERSET=AL32UTF8
volumes:
oradata_vector:
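The startup script added later in this commit creates a `dify` user inside FREEPDB1, which matches the ORACLE_* variables already present in the main compose file. A sketch of the relevant api / worker settings, assuming `oracle` is an accepted VECTOR_STORE value and the `oracle` hostname is reachable from the Dify containers:

  # in the api / worker service environment
  VECTOR_STORE: oracle
  ORACLE_HOST: oracle
  ORACLE_PORT: 1521
  ORACLE_USER: dify
  ORACLE_PASSWORD: dify
  ORACLE_DATABASE: FREEPDB1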


@@ -0,0 +1,23 @@
services:
# The pgvecto-rs database.
pgvecto-rs:
image: tensorchord/pgvecto-rs:pg16-v0.2.0
restart: always
environment:
PGUSER: postgres
# The password for the default postgres user.
POSTGRES_PASSWORD: difyai123456
# The name of the default postgres database.
POSTGRES_DB: dify
# postgres data directory
PGDATA: /var/lib/postgresql/data/pgdata
volumes:
- ./volumes/pgvectors/data:/var/lib/postgresql/data
# expose db (postgresql) port to host
ports:
- "5431:5432"
healthcheck:
test: [ "CMD", "pg_isready" ]
interval: 1s
timeout: 3s
retries: 30


@@ -0,0 +1,23 @@
services:
# The pgvector vector store.
pgvector:
image: pgvector/pgvector:pg16
restart: always
environment:
PGUSER: postgres
# The password for the default postgres user.
POSTGRES_PASSWORD: difyai123456
# The name of the default postgres database.
POSTGRES_DB: dify
# postgres data directory
PGDATA: /var/lib/postgresql/data/pgdata
volumes:
- ./volumes/pgvector/data:/var/lib/postgresql/data
# expose db (postgresql) port to host
ports:
- "5433:5432"
healthcheck:
test: [ "CMD", "pg_isready" ]
interval: 1s
timeout: 3s
retries: 30

(Binary image file added; 62 KiB, not shown.)


@@ -0,0 +1,12 @@
services:
# Qdrant vector store.
qdrant:
image: langgenius/qdrant:v1.7.3
restart: always
volumes:
- ./volumes/qdrant:/qdrant/storage
environment:
QDRANT_API_KEY: 'difyai123456'
ports:
- "6333:6333"
- "6334:6334"


@@ -0,0 +1,588 @@
version: '3'
services:
# API service
api:
image: langgenius/dify-api:0.6.11
restart: always
environment:
# Startup mode, 'api' starts the API server.
MODE: api
# The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
LOG_LEVEL: INFO
# enable DEBUG mode to output more logs
# DEBUG : true
# A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
# The base URL of console application web frontend, refers to the Console base URL of WEB service if console domain is
# different from api or web app domain.
# example: http://cloud.dify.ai
CONSOLE_WEB_URL: ''
# Password for admin user initialization.
# If left unset, admin user will not be prompted for a password when creating the initial admin account.
INIT_PASSWORD: ''
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
# different from api or web app domain.
# example: http://cloud.dify.ai
CONSOLE_API_URL: ''
# The URL prefix for Service API endpoints, refers to the base URL of the current API service if api domain is
# different from console domain.
# example: http://api.dify.ai
SERVICE_API_URL: ''
# The URL prefix for Web APP frontend, refers to the Web App base URL of WEB service if web app domain is different from
# console or api domain.
# example: http://udify.app
APP_WEB_URL: ''
# File preview or download URL prefix.
# Used to display file previews or download URLs on the front-end, or as multi-modal inputs;
# the URL is signed and has an expiration time.
FILES_URL: ''
# File Access Time specifies a time interval in seconds for the file to be accessed.
# The default value is 300 seconds.
FILES_ACCESS_TIMEOUT: 300
# When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
MIGRATION_ENABLED: 'true'
# The configurations of postgres database connection.
# It is consistent with the configuration in the 'db' service below.
DB_USERNAME: postgres
DB_PASSWORD: difyai123456
DB_HOST: db
DB_PORT: 5432
DB_DATABASE: dify
# The configurations of redis connection.
# It is consistent with the configuration in the 'redis' service below.
REDIS_HOST: redis
REDIS_PORT: 6379
REDIS_USERNAME: ''
REDIS_PASSWORD: difyai123456
REDIS_USE_SSL: 'false'
# use redis db 0 for redis cache
REDIS_DB: 0
# The configurations of celery broker.
# Use redis as the broker, and redis db 1 for celery broker.
CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1
# Specifies the allowed origins for cross-origin requests to the Web API, e.g. https://dify.app or * for all origins.
WEB_API_CORS_ALLOW_ORIGINS: '*'
# Specifies the allowed origins for cross-origin requests to the console API, e.g. https://cloud.dify.ai or * for all origins.
CONSOLE_CORS_ALLOW_ORIGINS: '*'
# CSRF Cookie settings
# Controls whether a cookie is sent with cross-site requests,
# providing some protection against cross-site request forgery attacks
#
# Default: `SameSite=Lax, Secure=false, HttpOnly=true`
# This default configuration supports same-origin requests using either HTTP or HTTPS,
# but does not support cross-origin requests. It is suitable for local debugging purposes.
#
# If you want to enable cross-origin support,
# you must use the HTTPS protocol and set the configuration to `SameSite=None, Secure=true, HttpOnly=true`.
#
# The type of storage to use for storing user files. Supported values are `local`, `s3`, `azure-blob`, `google-storage`, `aliyun-oss` and `tencent-cos`. Default: `local`
STORAGE_TYPE: local
# The path to the local storage directory, either relative to the root path of the API service code or an absolute path. Default: `storage` (e.g. `/home/john/storage`).
# only available when STORAGE_TYPE is `local`.
STORAGE_LOCAL_PATH: storage
# The S3 storage configurations, only available when STORAGE_TYPE is `s3`.
S3_USE_AWS_MANAGED_IAM: 'false'
S3_ENDPOINT: 'https://xxx.r2.cloudflarestorage.com'
S3_BUCKET_NAME: 'difyai'
S3_ACCESS_KEY: 'ak-difyai'
S3_SECRET_KEY: 'sk-difyai'
S3_REGION: 'us-east-1'
# The Azure Blob storage configurations, only available when STORAGE_TYPE is `azure-blob`.
AZURE_BLOB_ACCOUNT_NAME: 'difyai'
AZURE_BLOB_ACCOUNT_KEY: 'difyai'
AZURE_BLOB_CONTAINER_NAME: 'difyai-container'
AZURE_BLOB_ACCOUNT_URL: 'https://<your_account_name>.blob.core.windows.net'
# The Google storage configurations, only available when STORAGE_TYPE is `google-storage`.
GOOGLE_STORAGE_BUCKET_NAME: 'your-bucket-name'
# if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
# The Alibaba Cloud OSS configurations, only available when STORAGE_TYPE is `aliyun-oss`
ALIYUN_OSS_BUCKET_NAME: 'your-bucket-name'
ALIYUN_OSS_ACCESS_KEY: 'your-access-key'
ALIYUN_OSS_SECRET_KEY: 'your-secret-key'
ALIYUN_OSS_ENDPOINT: 'https://oss-ap-southeast-1-internal.aliyuncs.com'
ALIYUN_OSS_REGION: 'ap-southeast-1'
ALIYUN_OSS_AUTH_VERSION: 'v4'
# The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
TENCENT_COS_BUCKET_NAME: 'your-bucket-name'
TENCENT_COS_SECRET_KEY: 'your-secret-key'
TENCENT_COS_SECRET_ID: 'your-secret-id'
TENCENT_COS_REGION: 'your-region'
TENCENT_COS_SCHEME: 'your-scheme'
# The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`, `chroma`, `opensearch` and `tidb_vector`.
VECTOR_STORE: weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
WEAVIATE_ENDPOINT: http://weaviate:8080
# The Weaviate API key.
WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
QDRANT_URL: http://qdrant:6333
# The Qdrant API key.
QDRANT_API_KEY: difyai123456
# The Qdrant client timeout setting.
QDRANT_CLIENT_TIMEOUT: 20
# Whether the Qdrant client uses gRPC mode.
QDRANT_GRPC_ENABLED: 'false'
# The Qdrant server gRPC port.
QDRANT_GRPC_PORT: 6334
# Milvus configuration Only available when VECTOR_STORE is `milvus`.
# The milvus host.
MILVUS_HOST: 127.0.0.1
# The milvus port.
MILVUS_PORT: 19530
# The milvus username.
MILVUS_USER: root
# The milvus password.
MILVUS_PASSWORD: Milvus
# The milvus tls switch.
MILVUS_SECURE: 'false'
# relyt configurations
RELYT_HOST: db
RELYT_PORT: 5432
RELYT_USER: postgres
RELYT_PASSWORD: difyai123456
RELYT_DATABASE: postgres
# pgvector configurations
PGVECTOR_HOST: pgvector
PGVECTOR_PORT: 5432
PGVECTOR_USER: postgres
PGVECTOR_PASSWORD: difyai123456
PGVECTOR_DATABASE: dify
# tidb vector configurations
TIDB_VECTOR_HOST: tidb
TIDB_VECTOR_PORT: 4000
TIDB_VECTOR_USER: xxx.root
TIDB_VECTOR_PASSWORD: xxxxxx
TIDB_VECTOR_DATABASE: dify
# oracle configurations
ORACLE_HOST: oracle
ORACLE_PORT: 1521
ORACLE_USER: dify
ORACLE_PASSWORD: dify
ORACLE_DATABASE: FREEPDB1
# Chroma configuration
CHROMA_HOST: 127.0.0.1
CHROMA_PORT: 8000
CHROMA_TENANT: default_tenant
CHROMA_DATABASE: default_database
CHROMA_AUTH_PROVIDER: chromadb.auth.token_authn.TokenAuthClientProvider
CHROMA_AUTH_CREDENTIALS: xxxxxx
# Mail configuration, supported types: resend, smtp
MAIL_TYPE: ''
# Default sender email address to use, if not otherwise specified
MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply <no-reply@dify.ai>)'
SMTP_SERVER: ''
SMTP_PORT: 465
SMTP_USERNAME: ''
SMTP_PASSWORD: ''
SMTP_USE_TLS: 'true'
SMTP_OPPORTUNISTIC_TLS: 'false'
# the api-key for resend (https://resend.com)
RESEND_API_KEY: ''
RESEND_API_URL: https://api.resend.com
# The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
SENTRY_DSN: ''
# The sample rate for Sentry events. Default: `1.0`
SENTRY_TRACES_SAMPLE_RATE: 1.0
# The sample rate for Sentry profiles. Default: `1.0`
SENTRY_PROFILES_SAMPLE_RATE: 1.0
# Notion import configuration; supports public and internal integration types
NOTION_INTEGRATION_TYPE: public
NOTION_CLIENT_SECRET: your-client-secret
NOTION_CLIENT_ID: your-client-id
NOTION_INTERNAL_SECRET: your-internal-secret
# The sandbox service endpoint.
CODE_EXECUTION_ENDPOINT: "http://sandbox:8194"
CODE_EXECUTION_API_KEY: dify-sandbox
CODE_MAX_NUMBER: 9223372036854775807
CODE_MIN_NUMBER: -9223372036854775808
CODE_MAX_STRING_LENGTH: 80000
TEMPLATE_TRANSFORM_MAX_LENGTH: 80000
CODE_MAX_STRING_ARRAY_LENGTH: 30
CODE_MAX_OBJECT_ARRAY_LENGTH: 30
CODE_MAX_NUMBER_ARRAY_LENGTH: 1000
# SSRF Proxy server
SSRF_PROXY_HTTP_URL: 'http://ssrf_proxy:3128'
SSRF_PROXY_HTTPS_URL: 'http://ssrf_proxy:3128'
# Indexing configuration
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: 1000
depends_on:
- db
- redis
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
# uncomment to expose dify-api port to host
# ports:
# - "5001:5001"
networks:
- ssrf_proxy_network
- default
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.6.11
restart: always
environment:
CONSOLE_WEB_URL: ''
# Startup mode, 'worker' starts the Celery worker for processing the queue.
MODE: worker
# --- All the configurations below are the same as those in the 'api' service. ---
# The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
LOG_LEVEL: INFO
# A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
# same as the API service
SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
# The configurations of postgres database connection.
# It is consistent with the configuration in the 'db' service below.
DB_USERNAME: postgres
DB_PASSWORD: difyai123456
DB_HOST: db
DB_PORT: 5432
DB_DATABASE: dify
# The configurations of redis cache connection.
REDIS_HOST: redis
REDIS_PORT: 6379
REDIS_USERNAME: ''
REDIS_PASSWORD: difyai123456
REDIS_DB: 0
REDIS_USE_SSL: 'false'
# The configurations of celery broker.
CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1
# The type of storage to use for storing user files. Supported values are `local`, `s3`, `azure-blob`, `google-storage`, `aliyun-oss` and `tencent-cos`. Default: `local`
STORAGE_TYPE: local
STORAGE_LOCAL_PATH: storage
# The S3 storage configurations, only available when STORAGE_TYPE is `s3`.
S3_USE_AWS_MANAGED_IAM: 'false'
S3_ENDPOINT: 'https://xxx.r2.cloudflarestorage.com'
S3_BUCKET_NAME: 'difyai'
S3_ACCESS_KEY: 'ak-difyai'
S3_SECRET_KEY: 'sk-difyai'
S3_REGION: 'us-east-1'
# The Azure Blob storage configurations, only available when STORAGE_TYPE is `azure-blob`.
AZURE_BLOB_ACCOUNT_NAME: 'difyai'
AZURE_BLOB_ACCOUNT_KEY: 'difyai'
AZURE_BLOB_CONTAINER_NAME: 'difyai-container'
AZURE_BLOB_ACCOUNT_URL: 'https://<your_account_name>.blob.core.windows.net'
# The Google storage configurations, only available when STORAGE_TYPE is `google-storage`.
GOOGLE_STORAGE_BUCKET_NAME: 'your-bucket-name'
# if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
# The Alibaba Cloud OSS configurations, only available when STORAGE_TYPE is `aliyun-oss`
ALIYUN_OSS_BUCKET_NAME: 'your-bucket-name'
ALIYUN_OSS_ACCESS_KEY: 'your-access-key'
ALIYUN_OSS_SECRET_KEY: 'your-secret-key'
ALIYUN_OSS_ENDPOINT: 'https://oss-ap-southeast-1-internal.aliyuncs.com'
ALIYUN_OSS_REGION: 'ap-southeast-1'
ALIYUN_OSS_AUTH_VERSION: 'v4'
# The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
TENCENT_COS_BUCKET_NAME: 'your-bucket-name'
TENCENT_COS_SECRET_KEY: 'your-secret-key'
TENCENT_COS_SECRET_ID: 'your-secret-id'
TENCENT_COS_REGION: 'your-region'
TENCENT_COS_SCHEME: 'your-scheme'
# The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`, `chroma`, `opensearch` and `tidb_vector`.
VECTOR_STORE: weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
WEAVIATE_ENDPOINT: http://weaviate:8080
# The Weaviate API key.
WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
QDRANT_URL: http://qdrant:6333
# The Qdrant API key.
QDRANT_API_KEY: difyai123456
# The Qdrant client timeout setting.
QDRANT_CLIENT_TIMEOUT: 20
# Whether the Qdrant client uses gRPC mode.
QDRANT_GRPC_ENABLED: 'false'
# The Qdrant server gRPC port.
QDRANT_GRPC_PORT: 6334
# Milvus configuration Only available when VECTOR_STORE is `milvus`.
# The milvus host.
MILVUS_HOST: 127.0.0.1
# The milvus port.
MILVUS_PORT: 19530
# The milvus username.
MILVUS_USER: root
# The milvus password.
MILVUS_PASSWORD: Milvus
# The milvus tls switch.
MILVUS_SECURE: 'false'
# Mail configuration, supported types: resend, smtp
MAIL_TYPE: ''
# Default sender email address to use, if not otherwise specified
MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply <no-reply@dify.ai>)'
SMTP_SERVER: ''
SMTP_PORT: 465
SMTP_USERNAME: ''
SMTP_PASSWORD: ''
SMTP_USE_TLS: 'true'
SMTP_OPPORTUNISTIC_TLS: 'false'
# the api-key for resend (https://resend.com)
RESEND_API_KEY: ''
RESEND_API_URL: https://api.resend.com
# relyt configurations
RELYT_HOST: db
RELYT_PORT: 5432
RELYT_USER: postgres
RELYT_PASSWORD: difyai123456
RELYT_DATABASE: postgres
# tencent configurations
TENCENT_VECTOR_DB_URL: http://127.0.0.1
TENCENT_VECTOR_DB_API_KEY: dify
TENCENT_VECTOR_DB_TIMEOUT: 30
TENCENT_VECTOR_DB_USERNAME: dify
TENCENT_VECTOR_DB_DATABASE: dify
TENCENT_VECTOR_DB_SHARD: 1
TENCENT_VECTOR_DB_REPLICAS: 2
# OpenSearch configuration
OPENSEARCH_HOST: 127.0.0.1
OPENSEARCH_PORT: 9200
OPENSEARCH_USER: admin
OPENSEARCH_PASSWORD: admin
OPENSEARCH_SECURE: 'true'
# pgvector configurations
PGVECTOR_HOST: pgvector
PGVECTOR_PORT: 5432
PGVECTOR_USER: postgres
PGVECTOR_PASSWORD: difyai123456
PGVECTOR_DATABASE: dify
# tidb vector configurations
TIDB_VECTOR_HOST: tidb
TIDB_VECTOR_PORT: 4000
TIDB_VECTOR_USER: xxx.root
TIDB_VECTOR_PASSWORD: xxxxxx
TIDB_VECTOR_DATABASE: dify
# oracle configurations
ORACLE_HOST: oracle
ORACLE_PORT: 1521
ORACLE_USER: dify
ORACLE_PASSWORD: dify
ORACLE_DATABASE: FREEPDB1
# Chroma configuration
CHROMA_HOST: 127.0.0.1
CHROMA_PORT: 8000
CHROMA_TENANT: default_tenant
CHROMA_DATABASE: default_database
CHROMA_AUTH_PROVIDER: chromadb.auth.token_authn.TokenAuthClientProvider
CHROMA_AUTH_CREDENTIALS: xxxxxx
# Notion import configuration; supports public and internal integration types
NOTION_INTEGRATION_TYPE: public
NOTION_CLIENT_SECRET: your-client-secret
NOTION_CLIENT_ID: your-client-id
NOTION_INTERNAL_SECRET: your-internal-secret
# Indexing configuration
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: 1000
depends_on:
- db
- redis
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
networks:
- ssrf_proxy_network
- default
# Frontend web application.
web:
image: langgenius/dify-web:0.6.11
restart: always
environment:
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
# different from api or web app domain.
# example: http://cloud.dify.ai
CONSOLE_API_URL: ''
# The URL for Web APP api server, refers to the Web App base URL of WEB service if web app domain is different from
# console or api domain.
# example: http://udify.app
APP_API_URL: ''
# The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
SENTRY_DSN: ''
# uncomment to expose dify-web port to host
# ports:
# - "3000:3000"
# The postgres database.
db:
image: postgres:15-alpine
restart: always
environment:
PGUSER: postgres
# The password for the default postgres user.
POSTGRES_PASSWORD: difyai123456
# The name of the default postgres database.
POSTGRES_DB: dify
# postgres data directory
PGDATA: /var/lib/postgresql/data/pgdata
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
# Notice: if you use Windows with WSL2, postgres may not work properly due to the NTFS issue; you can use a Docker named volume instead of a host bind mount.
# If you use the following config, you also need to uncomment the volumes configuration at the end of the file.
# - postgres:/var/lib/postgresql/data
# uncomment to expose db(postgresql) port to host
# ports:
# - "5432:5432"
healthcheck:
test: [ "CMD", "pg_isready" ]
interval: 1s
timeout: 3s
retries: 30
# The redis cache.
redis:
image: redis:6-alpine
restart: always
volumes:
# Mount the redis data directory to the container.
- ./volumes/redis/data:/data
# Set the redis password when starting the redis server.
command: redis-server --requirepass difyai123456
healthcheck:
test: [ "CMD", "redis-cli", "ping" ]
# uncomment to expose redis port to host
# ports:
# - "6379:6379"
# The Weaviate vector store.
weaviate:
image: semitechnologies/weaviate:1.19.0
restart: always
volumes:
# Mount the Weaviate data directory to the container.
- ./volumes/weaviate:/var/lib/weaviate
environment:
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
QUERY_DEFAULTS_LIMIT: 25
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'false'
PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
DEFAULT_VECTORIZER_MODULE: 'none'
CLUSTER_HOSTNAME: 'node1'
AUTHENTICATION_APIKEY_ENABLED: 'true'
AUTHENTICATION_APIKEY_ALLOWED_KEYS: 'WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih'
AUTHENTICATION_APIKEY_USERS: 'hello@dify.ai'
AUTHORIZATION_ADMINLIST_ENABLED: 'true'
AUTHORIZATION_ADMINLIST_USERS: 'hello@dify.ai'
# uncomment to expose weaviate port to host
# ports:
# - "8080:8080"
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.1
restart: always
environment:
# The DifySandbox configurations
# Make sure to change this key to a strong one for your deployment.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY: dify-sandbox
GIN_MODE: 'release'
WORKER_TIMEOUT: 15
ENABLE_NETWORK: 'true'
HTTP_PROXY: 'http://ssrf_proxy:3128'
HTTPS_PROXY: 'http://ssrf_proxy:3128'
SANDBOX_PORT: 8194
volumes:
- ./volumes/sandbox/dependencies:/dependencies
networks:
- ssrf_proxy_network
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-ssrf_proxy-needed
ssrf_proxy:
image: ubuntu/squid:latest
restart: always
volumes:
# Please modify the squid.conf file to fit your network environment.
- ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf
networks:
- ssrf_proxy_network
- default
# Qdrant vector store.
# uncomment to use qdrant as vector store.
# (if uncommented, you need to comment out the weaviate service above,
# and set VECTOR_STORE to qdrant in the api & worker service.)
# qdrant:
# image: langgenius/qdrant:v1.7.3
# restart: always
# volumes:
# - ./volumes/qdrant:/qdrant/storage
# environment:
# QDRANT_API_KEY: 'difyai123456'
# # uncomment to expose qdrant port to host
# # ports:
# # - "6333:6333"
# # - "6334:6334"
# The pgvector vector database.
# Uncomment to use pgvector as the vector store.
# pgvector:
# image: pgvector/pgvector:pg16
# restart: always
# environment:
# PGUSER: postgres
# # The password for the default postgres user.
# POSTGRES_PASSWORD: difyai123456
# # The name of the default postgres database.
# POSTGRES_DB: dify
# # postgres data directory
# PGDATA: /var/lib/postgresql/data/pgdata
# volumes:
# - ./volumes/pgvector/data:/var/lib/postgresql/data
# # uncomment to expose db(postgresql) port to host
# # ports:
# # - "5433:5432"
# healthcheck:
# test: [ "CMD", "pg_isready" ]
# interval: 1s
# timeout: 3s
# retries: 30
# The oracle vector database.
# Uncomment to use Oracle 23 AI as the vector store. You also need to uncomment the oradata volumes block at the end of the file.
# oracle:
# image: container-registry.oracle.com/database/free:latest
# restart: always
# ports:
# - 1521:1521
# volumes:
# - type: volume
# source: oradata
# target: /opt/oracle/oradata
# - ./startupscripts:/opt/oracle/scripts/startup
# environment:
# - ORACLE_PWD=Dify123456
# - ORACLE_CHARACTERSET=AL32UTF8
# The nginx reverse proxy.
# used for reverse proxying the API service and Web service.
nginx:
image: nginx:latest
restart: always
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf
- ./nginx/proxy.conf:/etc/nginx/proxy.conf
- ./nginx/conf.d:/etc/nginx/conf.d
#- ./nginx/ssl:/etc/ssl
depends_on:
- api
- web
ports:
- "80:80"
#- "443:443"
# Notice: if you use Windows with WSL2, postgres may not work properly due to the NTFS issue; you can use a Docker named volume instead of a host bind mount.
# volumes:
#   postgres:
networks:
# Create a network shared by sandbox, api and ssrf_proxy that cannot access the outside.
ssrf_proxy_network:
driver: bridge
internal: true
#volumes:
# oradata:
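For the Windows/WSL2 case flagged in the comments above, a named Docker volume can replace the bind mount for postgres data. A minimal sketch of the two changes involved (db service volume plus the top-level volumes block), leaving the rest of the file unchanged:

  # db service: use a named volume instead of the ./volumes/db/data bind mount
  db:
    volumes:
      - postgres:/var/lib/postgresql/data
  # top-level named volume declaration
  volumes:
    postgres: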


@@ -0,0 +1,38 @@
server {
listen 80;
server_name _;
location /console/api {
proxy_pass http://api:5001;
include proxy.conf;
}
location /api {
proxy_pass http://api:5001;
include proxy.conf;
}
location /v1 {
proxy_pass http://api:5001;
include proxy.conf;
}
location /files {
proxy_pass http://api:5001;
include proxy.conf;
}
location / {
proxy_pass http://web:3000;
include proxy.conf;
}
# If you want to support HTTPS, please uncomment the code snippet below
#listen 443 ssl;
#ssl_certificate ./../ssl/your_cert_file.cer;
#ssl_certificate_key ./../ssl/your_cert_key.key;
#ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
#ssl_prefer_server_ciphers on;
#ssl_session_cache shared:SSL:10m;
#ssl_session_timeout 10m;
}


@@ -0,0 +1,32 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
client_max_body_size 15M;
include /etc/nginx/conf.d/*.conf;
}


@@ -0,0 +1,8 @@
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_read_timeout 3600s;
proxy_send_timeout 3600s;


@@ -0,0 +1 @@


@@ -0,0 +1,5 @@
show pdbs;
ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
alter session set container= freepdb1;
create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
grant DB_DEVELOPER_ROLE to dify;


@@ -0,0 +1,222 @@
---
# Copyright OpenSearch Contributors
# SPDX-License-Identifier: Apache-2.0
# Description:
# Default configuration for OpenSearch Dashboards
# OpenSearch Dashboards is served by a back end server. This setting specifies the port to use.
# server.port: 5601
# Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
# server.host: "localhost"
# Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
# server.basePath: ""
# Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# server.rewriteBasePath: false
# The maximum payload size in bytes for incoming server requests.
# server.maxPayloadBytes: 1048576
# The OpenSearch Dashboards server's name. This is used for display purposes.
# server.name: "your-hostname"
# The URLs of the OpenSearch instances to use for all your queries.
# opensearch.hosts: ["http://localhost:9200"]
# OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and
# dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist.
# opensearchDashboards.index: ".opensearch_dashboards"
# The default application to load.
# opensearchDashboards.defaultAppId: "home"
# Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck.
# This settings should be used for large clusters or for clusters with ingest heavy nodes.
# It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes.
#
# It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting
# This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up
# e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id:
# Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here
# opensearch.optimizedHealthcheckId: "cluster_id"
# If your OpenSearch is protected with basic authentication, these settings provide
# the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards
# index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which
# is proxied through the OpenSearch Dashboards server.
# opensearch.username: "opensearch_dashboards_system"
# opensearch.password: "pass"
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser.
# server.ssl.enabled: false
# server.ssl.certificate: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when
# xpack.security.http.ssl.client_authentication in OpenSearch is set to required.
# opensearch.ssl.certificate: /path/to/your/client.crt
# opensearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your OpenSearch instance.
# opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
# opensearch.ssl.verificationMode: full
# Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of
# the opensearch.requestTimeout setting.
# opensearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or OpenSearch. This value
# must be a positive integer.
# opensearch.requestTimeout: 30000
# List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side
# headers, set this value to [] (an empty list).
# opensearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration.
# opensearch.customHeaders: {}
# Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable.
# opensearch.shardTimeout: 30000
# Logs queries sent to OpenSearch. Requires logging.verbose set to true.
# opensearch.logQueries: false
# Specifies the path where OpenSearch Dashboards creates the process ID file.
# pid.file: /var/run/opensearchDashboards.pid
# Enables you to specify a file where OpenSearch Dashboards stores log output.
# logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
# logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
# logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
# logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
# ops.interval: 5000
# Specifies locale to be used for all localizable strings, dates and number formats.
# Supported languages are the following: English - en , by default , Chinese - zh-CN .
# i18n.locale: "en"
# Set the allowlist to check input graphite Url. Allowlist is the default check list.
# vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite']
# Set the blocklist to check input graphite Url. Blocklist is an IP list.
# Below is an example for reference
# vis_type_timeline.graphiteBlockedIPs: [
# //Loopback
# '127.0.0.0/8',
# '::1/128',
# //Link-local Address for IPv6
# 'fe80::/10',
# //Private IP address for IPv4
# '10.0.0.0/8',
# '172.16.0.0/12',
# '192.168.0.0/16',
# //Unique local address (ULA)
# 'fc00::/7',
# //Reserved IP address
# '0.0.0.0/8',
# '100.64.0.0/10',
# '192.0.0.0/24',
# '192.0.2.0/24',
# '198.18.0.0/15',
# '192.88.99.0/24',
# '198.51.100.0/24',
# '203.0.113.0/24',
# '224.0.0.0/4',
# '240.0.0.0/4',
# '255.255.255.255/32',
# '::/128',
# '2001:db8::/32',
# 'ff00::/8',
# ]
# vis_type_timeline.graphiteBlockedIPs: []
# opensearchDashboards.branding:
# logo:
# defaultUrl: ""
# darkModeUrl: ""
# mark:
# defaultUrl: ""
# darkModeUrl: ""
# loadingLogo:
# defaultUrl: ""
# darkModeUrl: ""
# faviconUrl: ""
# applicationTitle: ""
# Set the value of this setting to true to capture region blocked warnings and errors
# for your map rendering services.
# map.showRegionBlockedWarning: false
# Set the value of this setting to false to suppress search usage telemetry
# for reducing the load of OpenSearch cluster.
# data.search.usageTelemetry.enabled: false
# 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false'
# Set the value of this setting to false to disable VisBuilder
# functionality in Visualization.
# vis_builder.enabled: false
# 2.4 New Experimental Feature
# Set the value of this setting to true to enable the experimental multiple data source
# support feature. Use with caution.
# data_source.enabled: false
# Set the value of these settings to customize crypto materials to encryption saved credentials
# in data sources.
# data_source.encryption.wrappingKeyName: 'changeme'
# data_source.encryption.wrappingKeyNamespace: 'changeme'
# data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# 2.6 New ML Commons Dashboards Feature
# Set the value of this setting to true to enable the ml commons dashboards
# ml_commons_dashboards.enabled: false
# 2.12 New experimental Assistant Dashboards Feature
# Set the value of this setting to true to enable the assistant dashboards
# assistant.chat.enabled: false
# 2.13 New Query Assistant Feature
# Set the value of this setting to false to disable the query assistant
# observability.query_assist.enabled: false
# 2.14 Enable Ui Metric Collectors in Usage Collector
# Set the value of this setting to true to enable UI Metric collections
# usageCollection.uiMetric.enabled: false
opensearch.hosts: [https://localhost:9200]
opensearch.ssl.verificationMode: none
opensearch.username: admin
opensearch.password: 'Qazwsxedc!@#123'
opensearch.requestHeadersWhitelist: [authorization, securitytenant]
opensearch_security.multitenancy.enabled: true
opensearch_security.multitenancy.tenants.preferred: [Private, Global]
opensearch_security.readonly_mode.roles: [kibana_read_only]
# Use this setting if you are running opensearch-dashboards without https
opensearch_security.cookie.secure: false
server.host: '0.0.0.0'


@@ -0,0 +1,49 @@
acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
acl localnet src fc00::/7 # RFC 4193 local private network range
acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
acl SSL_ports port 443
acl Safe_ports port 80 # http
acl Safe_ports port 21 # ftp
acl Safe_ports port 443 # https
acl Safe_ports port 70 # gopher
acl Safe_ports port 210 # wais
acl Safe_ports port 1025-65535 # unregistered ports
acl Safe_ports port 280 # http-mgmt
acl Safe_ports port 488 # gss-http
acl Safe_ports port 591 # filemaker
acl Safe_ports port 777 # multiling http
acl CONNECT method CONNECT
http_access deny !Safe_ports
http_access deny CONNECT !SSL_ports
http_access allow localhost manager
http_access deny manager
http_access allow localhost
include /etc/squid/conf.d/*.conf
http_access deny all
################################## Proxy Server ################################
http_port 3128
coredump_dir /var/spool/squid
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern . 0 20% 4320
# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
################################## Reverse Proxy To Sandbox ################################
http_port 8194 accel vhost
cache_peer sandbox parent 8194 0 no-query originserver
acl src_all src all
http_access allow src_all