#
# docker-compose base file used for local development, hobby deploys, and other compose use cases.
#

services:
    proxy:
        image: caddy
        entrypoint: sh
        restart: always
        command: -c 'set -x && echo "$$CADDYFILE" > /etc/caddy/Caddyfile && exec caddy run -c /etc/caddy/Caddyfile'
        volumes:
            - /root/.caddy
        environment:
            CADDY_TLS_BLOCK: ''
            CADDY_HOST: 'http://localhost:8000'
            CADDYFILE: |
                {
                    ${CADDY_TLS_BLOCK:-}
                }
                ${CADDY_HOST:-http://localhost:8000} {
                    @replay-capture {
                        path /s
                        path /s/
                        path /s/*
                    }

                    @capture {
                        path /e
                        path /e/
                        path /e/*
                        path /i/v0
                        path /i/v0/
                        path /i/v0/*
                        path /batch
                        path /batch/
                        path /batch/*
                        path /capture
                        path /capture/
                        path /capture/*
                    }

                    @flags {
                        path /flags
                        path /flags/
                        path /flags/*
                    }

                    @webhooks {
                        path /public/webhooks
                        path /public/webhooks/
                        path /public/webhooks/*
                        path /public/m/
                        path /public/m/*
                    }

                    handle @capture {
                        reverse_proxy capture:3000
                    }

                    handle @replay-capture {
                        reverse_proxy replay-capture:3000
                    }

                    handle @flags {
                        reverse_proxy feature-flags:3001
                    }

                    handle @webhooks {
                        reverse_proxy plugins:6738
                    }

                    handle {
                        reverse_proxy web:8000
                    }
                }
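    # The Caddyfile above is assembled at compose time: ${CADDY_HOST} and ${CADDY_TLS_BLOCK} are
    # interpolated from the shell environment (or an .env file) before the entrypoint writes the
    # result to /etc/caddy/Caddyfile. A minimal sketch of a TLS-enabled override, assuming
    # posthog.example.com points at this host (illustrative values, not part of this file):
    #
    #   CADDY_HOST='https://posthog.example.com'
    #   CADDY_TLS_BLOCK='email admin@example.com'
    #
    # With those exported, Caddy serves the site block on that hostname and provisions
    # certificates automatically; left unset, the http://localhost:8000 default applies.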
    db:
        image: postgres:12-alpine
        restart: on-failure
        environment:
            POSTGRES_USER: posthog
            POSTGRES_DB: posthog
            POSTGRES_PASSWORD: posthog
        healthcheck:
            test: ['CMD-SHELL', 'pg_isready -U posthog']
            interval: 5s
            timeout: 5s
        volumes:
            - ./docker/postgres-init-scripts:/docker-entrypoint-initdb.d
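    # For inspecting the database directly, the credentials above make a quick psql session
    # straightforward (a sketch; any SQL client on the compose network works the same way):
    #
    #   docker compose exec db psql -U posthog posthog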
    redis:
        image: redis:6.2.7-alpine
        restart: on-failure
        command: redis-server --maxmemory-policy allkeys-lru --maxmemory 200mb
        healthcheck:
            test: ['CMD', 'redis-cli', 'ping']
            interval: 3s
            timeout: 10s
            retries: 10

    redis7:
        image: redis:7.2-alpine
        restart: on-failure
        command: redis-server --maxmemory-policy allkeys-lru --maxmemory 200mb
        healthcheck:
            test: ['CMD', 'redis-cli', 'ping']
            interval: 3s
            timeout: 10s
            retries: 10

    clickhouse:
        #
        # Note: please keep the default version in sync across
        # `posthog` and the `charts-clickhouse` repos
        #
        image: ${CLICKHOUSE_SERVER_IMAGE:-clickhouse/clickhouse-server:25.8.11.66}
        restart: on-failure
        environment:
            CLICKHOUSE_SKIP_USER_SETUP: 1
            KAFKA_HOSTS: 'kafka:9092'

    zookeeper:
        image: zookeeper:3.7.0
        restart: on-failure

    kafka:
        image: docker.redpanda.com/redpandadata/redpanda:v25.1.9
        restart: on-failure
        command:
            - redpanda
            - start
            - --kafka-addr internal://0.0.0.0:9092,external://0.0.0.0:19092
            - --advertise-kafka-addr internal://kafka:9092,external://localhost:19092
            - --pandaproxy-addr internal://0.0.0.0:8082,external://0.0.0.0:18082
            - --advertise-pandaproxy-addr internal://kafka:8082,external://localhost:18082
            - --schema-registry-addr internal://0.0.0.0:8081,external://0.0.0.0:18081
            - --rpc-addr kafka:33145
            - --advertise-rpc-addr kafka:33145
            - --mode dev-container
            - --smp 2
            - --memory 3G
            - --reserve-memory 500M
            - --overprovisioned
            - --set redpanda.empty_seed_starts_cluster=false
            - --seeds kafka:33145
            - --set redpanda.auto_create_topics_enabled=true
        volumes:
            - redpanda-data:/var/lib/redpanda/data
        environment:
            ALLOW_PLAINTEXT_LISTENER: 'true'
        healthcheck:
            test: curl -f http://localhost:9644/v1/status/ready || exit 1
            interval: 3s
            timeout: 10s
            retries: 10
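    # Redpanda is run with two Kafka listeners: the internal one is advertised as kafka:9092 for
    # services on the compose network, the external one as localhost:19092 for clients on the
    # host. A rough usage sketch, assuming port 19092 is published to the host by whichever
    # overlay you run (this base file does not publish it):
    #
    #   rpk topic list --brokers localhost:19092
    #
    # Services elsewhere in this file keep using kafka:9092.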
    kafka_ui:
        image: provectuslabs/kafka-ui:latest
        restart: on-failure
        environment:
            KAFKA_CLUSTERS_0_NAME: local
            KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
            KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://kafka:8081
            DYNAMIC_CONFIG_ENABLED: 'true'

    objectstorage:
        image: minio/minio:RELEASE.2025-04-22T22-12-26Z
        restart: on-failure
        environment:
            MINIO_ROOT_USER: object_storage_root_user
            MINIO_ROOT_PASSWORD: object_storage_root_password
        entrypoint: sh
        command: -c 'mkdir -p /data/posthog && minio server --address ":19000" --console-address ":19001" /data' # create the 'posthog' bucket before starting the service

    maildev:
        image: maildev/maildev:2.0.5
        restart: on-failure
        healthcheck:
            test: wget -qO- http://127.0.0.1:1080/healthz || exit 1
            interval: 3s
            timeout: 1s
            retries: 3

    flower:
        image: mher/flower:2.0.0
        restart: on-failure
        environment:
            FLOWER_PORT: 5555
            CELERY_BROKER_URL: redis://redis:6379
    worker: &worker
        command: ./bin/docker-worker-celery --with-scheduler
        restart: on-failure
        environment: &worker_env
            OTEL_SDK_DISABLED: 'true'
            DISABLE_SECURE_SSL_REDIRECT: 'true'
            IS_BEHIND_PROXY: 'true'
            DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            CLICKHOUSE_HOST: 'clickhouse'
            CLICKHOUSE_DATABASE: 'posthog'
            CLICKHOUSE_SECURE: 'false'
            CLICKHOUSE_VERIFY: 'false'
            CLICKHOUSE_API_USER: 'api'
            CLICKHOUSE_API_PASSWORD: 'apipass'
            CLICKHOUSE_APP_USER: 'app'
            CLICKHOUSE_APP_PASSWORD: 'apppass'
            API_QUERIES_PER_TEAM: '{"1": 100}'
            KAFKA_HOSTS: 'kafka'
            REDIS_URL: 'redis://redis:6379/'
            PGHOST: db
            PGUSER: posthog
            PGPASSWORD: posthog
            DEPLOYMENT: hobby
            CDP_API_URL: 'http://plugins:6738'
            FLAGS_REDIS_ENABLED: false
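    # `&worker` and `&worker_env` above are plain YAML anchors: services below pull in the whole
    # worker definition with `<<: *worker` (and the environment map with `<<: *worker_env`) and
    # then override individual keys. A minimal sketch of the pattern, with a hypothetical
    # service name and command:
    #
    #   my-batch-job:
    #       <<: *worker
    #       command: python manage.py my_command
    #       environment:
    #           <<: *worker_env
    #           EXTRA_SETTING: 'value'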
    web:
        <<: *worker
        command: ./bin/start-backend & ./bin/start-frontend
        restart: on-failure

    capture:
        image: ghcr.io/posthog/posthog/capture:master
        build:
            context: rust/
            args:
                BIN: capture
        restart: on-failure
        environment:
            ADDRESS: '0.0.0.0:3000'
            KAFKA_TOPIC: 'events_plugin_ingestion'
            KAFKA_HOSTS: 'kafka:9092'
            REDIS_URL: 'redis://redis:6379/'
            CAPTURE_MODE: events
            RUST_LOG: 'info,rdkafka=warn'

    replay-capture:
        image: ghcr.io/posthog/posthog/capture:master
        build:
            context: rust/
            args:
                BIN: capture
        restart: on-failure
        environment:
            ADDRESS: '0.0.0.0:3000'
            KAFKA_TOPIC: 'session_recording_snapshot_item_events'
            KAFKA_HOSTS: 'kafka:9092'
            REDIS_URL: 'redis://redis:6379/'
            CAPTURE_MODE: recordings

    capture-logs:
        image: ghcr.io/posthog/posthog/capture-logs:master
        build:
            context: rust/
            args:
                BIN: capture-logs
        restart: on-failure
        environment:
            BIND_HOST: '0.0.0.0'
            BIND_PORT: '4318'
            RUST_LOG: info,rdkafka=warn
            RUST_BACKTRACE: '1'
            KAFKA_HOSTS: kafka:9092
            JWT_SECRET: '<randomly generated secret key>'
            KAFKA_TOPIC: logs_ingestion
        networks:
            - otel_network
            - default

    property-defs-rs:
        image: ghcr.io/posthog/posthog/property-defs-rs:master
        build:
            context: rust/
            args:
                BIN: property-defs-rs
        restart: on-failure
        environment:
            DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            KAFKA_HOSTS: 'kafka:9092'
            SKIP_WRITES: 'false'
            SKIP_READS: 'false'
            FILTER_MODE: 'opt-out'

    feature-flags:
        image: ghcr.io/posthog/posthog/feature-flags:master
        build:
            context: rust/
            args:
                BIN: feature-flags
        restart: on-failure
        volumes:
            - ./share:/share
        environment:
            WRITE_DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            READ_DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            MAXMIND_DB_PATH: '/share/GeoLite2-City.mmdb'
            # Shared Redis for non-critical path (analytics, billing, cookieless)
            REDIS_URL: 'redis://redis:6379/'
            # Optional: Use separate Redis URL for read replicas
            # REDIS_READER_URL: 'redis://redis-replica:6379/'
            # Optional: Increase Redis timeout (default is 100ms)
            # REDIS_TIMEOUT_MS: 200
            # Dedicated Redis database for critical path (team cache + flags cache).
            # Hobby deployments start in Mode 1 (shared-only). Developers override in docker-compose.dev.yml for Mode 2.
            # FLAGS_REDIS_URL: 'redis://redis:6379/1'
            # Optional: Use separate Flags Redis URL for read replicas
            # FLAGS_REDIS_READER_URL: 'redis://redis-replica:6379/1'
            ADDRESS: '0.0.0.0:3001'
            RUST_LOG: 'info'
            COOKIELESS_REDIS_HOST: redis7
            COOKIELESS_REDIS_PORT: 6379
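    # To move a deployment to Mode 2 (dedicated flags cache), as the comments above describe, an
    # override file such as docker-compose.dev.yml can set the FLAGS_* variables for this service.
    # A minimal sketch, assuming Redis database 1 is reserved for the flags cache:
    #
    #   services:
    #       feature-flags:
    #           environment:
    #               FLAGS_REDIS_URL: 'redis://redis:6379/1'
    #
    # The base file leaves these unset so hobby deploys stay in Mode 1 (shared Redis only).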
    plugins:
        command: ./bin/plugin-server --no-restart-loop
        restart: on-failure
        environment:
            DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            PERSONS_DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            BEHAVIORAL_COHORTS_DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            KAFKA_HOSTS: 'kafka:9092'
            REDIS_URL: 'redis://redis:6379/'
            CLICKHOUSE_HOST: 'clickhouse'
            CLICKHOUSE_DATABASE: 'posthog'
            CLICKHOUSE_SECURE: 'false'
            CLICKHOUSE_VERIFY: 'false'
            COOKIELESS_REDIS_HOST: redis7
            COOKIELESS_REDIS_PORT: 6379

    livestream:
        image: 'ghcr.io/posthog/livestream:main'
        restart: on-failure
        depends_on:
            kafka:
                condition: service_started

    migrate:
        <<: *worker
        command: sh -c "
            python manage.py migrate
            && python manage.py migrate_clickhouse
            && python manage.py run_async_migrations
            "
        restart: 'no'
        deploy:
            replicas: 0
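    # `deploy.replicas: 0` keeps migrate (and asyncmigrationscheck below) from starting with the
    # rest of the stack; they are meant to be run on demand. A typical one-off invocation, as a
    # sketch:
    #
    #   docker compose run --rm migrate
    #
    # which applies the Postgres, ClickHouse, and async migrations in sequence via the command above.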
    asyncmigrationscheck:
        <<: *worker
        command: python manage.py run_async_migrations --check
        restart: 'no'
        deploy:
            replicas: 0
        environment:
            <<: *worker_env
            SKIP_ASYNC_MIGRATIONS_SETUP: 0

    # Temporal containers
    elasticsearch:
        environment:
            - cluster.routing.allocation.disk.threshold_enabled=true
            - cluster.routing.allocation.disk.watermark.low=512mb
            - cluster.routing.allocation.disk.watermark.high=256mb
            - cluster.routing.allocation.disk.watermark.flood_stage=128mb
            - discovery.type=single-node
            - ES_JAVA_OPTS=-Xms256m -Xmx256m
            - xpack.security.enabled=false
        image: elasticsearch:7.17.28
        expose:
            - 9200
        volumes:
            - /var/lib/elasticsearch/data

    temporal:
        restart: on-failure
        environment:
            - DB=postgresql
            - DB_PORT=5432
            - POSTGRES_USER=posthog
            - POSTGRES_PWD=posthog
            - POSTGRES_SEEDS=db
            - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml
            - ENABLE_ES=true
            - ES_SEEDS=elasticsearch
            - ES_VERSION=v7
        image: temporalio/auto-setup:1.20.0
        ports:
            - 7233:7233
        labels:
            kompose.volume.type: configMap
        volumes:
            - ./docker/temporal/dynamicconfig:/etc/temporal/config/dynamicconfig
        depends_on:
            db:
                condition: service_healthy
            elasticsearch:
                condition: service_started

    temporal-admin-tools:
        environment:
            - TEMPORAL_CLI_ADDRESS=temporal:7233
        image: temporalio/admin-tools:1.20.0
        stdin_open: true
        tty: true

    temporal-ui:
        depends_on:
            - temporal
        environment:
            - TEMPORAL_ADDRESS=temporal:7233
            - TEMPORAL_CORS_ORIGINS=http://localhost:3000
            - TEMPORAL_CSRF_COOKIE_INSECURE=true
            - TEMPORAL_CODEC_ENDPOINT=http://localhost:8000
        image: temporalio/ui:2.31.2
        ports:
            - 8081:8080

    temporal-django-worker:
        <<: *worker
        command: ./bin/temporal-django-worker
        restart: on-failure
        environment:
            <<: *worker_env
            TEMPORAL_HOST: temporal

    cyclotron-janitor:
        image: ghcr.io/posthog/posthog/cyclotron-janitor:master
        build:
            context: rust/
            args:
                BIN: cyclotron-janitor
        restart: on-failure
        environment:
            DATABASE_URL: 'postgres://posthog:posthog@db:5432/cyclotron'
            KAFKA_HOSTS: 'kafka:9092'
            KAFKA_TOPIC: 'clickhouse_app_metrics2'
        depends_on:
            db:
                condition: service_healthy
            kafka:
                condition: service_started

    otel-collector:
        image: otel/opentelemetry-collector-contrib:latest
        container_name: otel-collector-local
        command: [--config=/etc/otel-collector-config.yaml]
        user: '0:0' # have to run as root to read the docker logs
        volumes:
            - ./otel-collector-config.dev.yaml:/etc/otel-collector-config.yaml
            # mount docker containers dir so we can ship the docker logs to local posthog
            - /var/lib/docker/containers:/var/lib/docker/containers:ro
        ports:
            - '4317:4317' # OTLP gRPC receiver (mapped to host)
            - '4318:4318' # OTLP HTTP receiver (mapped to host)
            - '13133:13133' # health_check extension
            - '55679:55679' # zpages extension
        depends_on:
            - jaeger
        networks:
            - otel_network
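    # Anything running on the host can export telemetry to this collector through the published
    # OTLP ports using the standard SDK environment variables. A sketch, with an illustrative
    # service name:
    #
    #   OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317   # gRPC; use :4318 for OTLP/HTTP
    #   OTEL_SERVICE_NAME=my-local-app
    #
    # Assuming the mounted collector config exports traces to jaeger, they show up in the Jaeger
    # UI at http://localhost:16686 (see the jaeger service below).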
    echo_server:
        image: docker.io/library/caddy:2
        container_name: echo-server
        restart: on-failure
        ports:
            - '18081:8081'
        volumes:
            - ./rust/docker/echo-server/Caddyfile:/etc/caddy/Caddyfile

    jaeger:
        image: jaegertracing/all-in-one:latest
        container_name: jaeger-local
        ports:
            - '16686:16686' # Jaeger UI
            - '14268:14268' # Accepts jaeger.thrift directly from clients (optional for this flow)
            - '14250:14250' # Accepts model.proto (optional for this flow)
        networks:
            - otel_network

    localstack:
        container_name: '${LOCALSTACK_DOCKER_NAME:-localstack-main}'
        image: localstack/localstack
        ports:
            - '127.0.0.1:4566:4566' # LocalStack Gateway
            - '127.0.0.1:4510-4559:4510-4559' # external services port range
        environment:
            - DEBUG=${DEBUG:-0}
        volumes:
            - '/var/run/docker.sock:/var/run/docker.sock'
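    # LocalStack emulates AWS APIs behind the single gateway port 4566. A rough usage sketch with
    # the AWS CLI pointed at the emulator (the bucket name is illustrative):
    #
    #   aws --endpoint-url=http://localhost:4566 s3 mb s3://test-bucket
    #   aws --endpoint-url=http://localhost:4566 s3 ls
    #
    # Credentials can be any dummy values; LocalStack does not validate them by default.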
    seaweedfs:
        container_name: '${SEAWEEDFS_DOCKER_NAME:-seaweedfs-main}'
        image: chrislusf/seaweedfs:latest
        ports:
            - '127.0.0.1:8333:8333' # S3 API
            - '127.0.0.1:9333:9333' # Master server (for admin UI)
        command: server -s3 -s3.port=8333 -dir=/data

networks:
    otel_network:
        driver: bridge

volumes:
    redpanda-data: