#
# docker-compose base file used for local development, hobby deploys, and other compose use cases.
#
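# A typical invocation layers this base file under an environment-specific
# override (file names here are illustrative; use whichever overrides your
# setup defines):
#
#   docker compose -f docker-compose.base.yml -f docker-compose.dev.yml up -d
#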
services:
  proxy:
    image: caddy
    entrypoint: sh
    restart: always
    command: -c 'set -x && echo "$$CADDYFILE" > /etc/caddy/Caddyfile && exec caddy run -c /etc/caddy/Caddyfile'
    volumes:
      - /root/.caddy
    environment:
      CADDY_TLS_BLOCK: ''
      CADDY_HOST: 'http://localhost:8000'
      CADDYFILE: |
        {
          ${CADDY_TLS_BLOCK:-}
        }
        ${CADDY_HOST:-'http://localhost:8000'} {
          @replay-capture {
            path /s
            path /s/
            path /s/*
          }
          @capture {
            path /e
            path /e/
            path /e/*
            path /i/v0
            path /i/v0/
            path /i/v0/*
            path /batch
            path /batch/
            path /batch/*
            path /capture
            path /capture/
            path /capture/*
          }
          @flags {
            path /flags
            path /flags/
            path /flags/*
          }
          @webhooks {
            path /public/webhooks
            path /public/webhooks/
            path /public/webhooks/*
            path /public/m/
            path /public/m/*
          }
          handle @capture {
            reverse_proxy capture:3000
          }
          handle @replay-capture {
            reverse_proxy replay-capture:3000
          }
          handle @flags {
            reverse_proxy feature-flags:3001
          }
          handle @webhooks {
            reverse_proxy plugins:6738
          }
          handle {
            reverse_proxy web:8000
          }
        }
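  # Once the stack is up, the matchers above can be sanity-checked from the
  # host (sketch; assumes the default CADDY_HOST of http://localhost:8000):
  #   curl -i http://localhost:8000/flags/   # routed to feature-flags:3001
  #   curl -i http://localhost:8000/e/       # routed to capture:3000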
  db:
    image: postgres:12-alpine
    restart: on-failure
    environment:
      POSTGRES_USER: posthog
      POSTGRES_DB: posthog
      POSTGRES_PASSWORD: posthog
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U posthog']
      interval: 5s
      timeout: 5s
    volumes:
      - ./docker/postgres-init-scripts:/docker-entrypoint-initdb.d
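  # The credentials above match the DATABASE_URL used by the app services. If
  # an override file publishes port 5432, the database is reachable from the
  # host, e.g. (illustrative):
  #   psql postgres://posthog:posthog@localhost:5432/posthog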
  redis:
    image: redis:6.2.7-alpine
    restart: on-failure
    command: redis-server --maxmemory-policy allkeys-lru --maxmemory 200mb
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']
      interval: 3s
      timeout: 10s
      retries: 10
  redis7:
    image: redis:7.2-alpine
    restart: on-failure
    command: redis-server --maxmemory-policy allkeys-lru --maxmemory 200mb
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']
      interval: 3s
      timeout: 10s
      retries: 10
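  # `redis7` above backs the cookieless-tracking features; see
  # COOKIELESS_REDIS_HOST in the feature-flags and plugins services below.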
  clickhouse:
    #
    # Note: please keep the default version in sync across
    # `posthog` and the `charts-clickhouse` repos
    #
    image: ${CLICKHOUSE_SERVER_IMAGE:-clickhouse/clickhouse-server:25.6.9.98}
    restart: on-failure
    environment:
      CLICKHOUSE_SKIP_USER_SETUP: 1
  zookeeper:
    image: zookeeper:3.7.0
    restart: on-failure
  kafka:
    image: docker.redpanda.com/redpandadata/redpanda:v25.1.9
    restart: on-failure
    command:
      - redpanda
      - start
      - --kafka-addr internal://0.0.0.0:9092,external://0.0.0.0:19092
      - --advertise-kafka-addr internal://kafka:9092,external://localhost:19092
      - --pandaproxy-addr internal://0.0.0.0:8082,external://0.0.0.0:18082
      - --advertise-pandaproxy-addr internal://kafka:8082,external://localhost:18082
      - --schema-registry-addr internal://0.0.0.0:8081,external://0.0.0.0:18081
      - --rpc-addr kafka:33145
      - --advertise-rpc-addr kafka:33145
      - --mode dev-container
      - --smp 1
      - --memory 1G
      - --reserve-memory 200M
      - --overprovisioned
      - --set redpanda.empty_seed_starts_cluster=false
      - --seeds kafka:33145
      - --set redpanda.auto_create_topics_enabled=true
    environment:
      ALLOW_PLAINTEXT_LISTENER: 'true'
    healthcheck:
      test: curl -f http://localhost:9644/v1/status/ready || exit 1
      interval: 3s
      timeout: 10s
      retries: 10
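  # Redpanda is Kafka-API compatible and bundles the `rpk` CLI, which is handy
  # for inspecting the broker (sketch; service name as defined in this file):
  #   docker compose exec kafka rpk topic list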
  kafka_ui:
    image: provectuslabs/kafka-ui:latest
    restart: on-failure
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://kafka:8081
      DYNAMIC_CONFIG_ENABLED: 'true'
  objectstorage:
    image: minio/minio:RELEASE.2025-04-22T22-12-26Z
    restart: on-failure
    environment:
      MINIO_ROOT_USER: object_storage_root_user
      MINIO_ROOT_PASSWORD: object_storage_root_password
    entrypoint: sh
    command: -c 'mkdir -p /data/posthog && minio server --address ":19000" --console-address ":19001" /data' # create the 'posthog' bucket before starting the service
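  # Per the --address/--console-address flags above, MinIO serves its S3 API
  # on :19000 and the web console on :19001; publish those ports in an
  # override to reach them from the host.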
  maildev:
    image: maildev/maildev:2.0.5
    restart: on-failure
    healthcheck:
      test: wget -qO- http://127.0.0.1:1080/healthz || exit 1
      interval: 3s
      timeout: 1s
      retries: 3
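  # MailDev's defaults: web UI on 1080 (probed by the healthcheck above) and
  # SMTP on 1025; pointing the app's email settings at maildev:1025 typically
  # captures outgoing mail locally.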
  flower:
    image: mher/flower:2.0.0
    restart: on-failure
    environment:
      FLOWER_PORT: 5555
      CELERY_BROKER_URL: redis://redis:6379
  worker: &worker
    command: ./bin/docker-worker-celery --with-scheduler
    restart: on-failure
    environment: &worker_env
      OTEL_SDK_DISABLED: 'true'
      DISABLE_SECURE_SSL_REDIRECT: 'true'
      IS_BEHIND_PROXY: 'true'
      DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
      CLICKHOUSE_HOST: 'clickhouse'
      CLICKHOUSE_DATABASE: 'posthog'
      CLICKHOUSE_SECURE: 'false'
      CLICKHOUSE_VERIFY: 'false'
      CLICKHOUSE_API_USER: 'api'
      CLICKHOUSE_API_PASSWORD: 'apipass'
      CLICKHOUSE_APP_USER: 'app'
      CLICKHOUSE_APP_PASSWORD: 'apppass'
      API_QUERIES_PER_TEAM: '{"1": 100}'
      KAFKA_HOSTS: 'kafka'
      REDIS_URL: 'redis://redis:6379/'
      PGHOST: db
      PGUSER: posthog
      PGPASSWORD: posthog
      DEPLOYMENT: hobby
      CDP_API_URL: 'http://plugins:6738'
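  # `&worker` and `&worker_env` are YAML anchors: the services below merge
  # them in with `<<: *worker` / `<<: *worker_env` and override only the keys
  # that differ (see `web`, `migrate`, and `temporal-django-worker`).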
  web:
    <<: *worker
    command: ./bin/start-backend & ./bin/start-frontend
    restart: on-failure
  capture:
    image: ghcr.io/posthog/posthog/capture:master
    build:
      context: rust/
      args:
        BIN: capture
    restart: on-failure
    environment:
      ADDRESS: '0.0.0.0:3000'
      KAFKA_TOPIC: 'events_plugin_ingestion'
      KAFKA_HOSTS: 'kafka:9092'
      REDIS_URL: 'redis://redis:6379/'
      CAPTURE_MODE: events
      RUST_LOG: 'info,rdkafka=warn'
  replay-capture:
    image: ghcr.io/posthog/posthog/capture:master
    build:
      context: rust/
      args:
        BIN: capture
    restart: on-failure
    environment:
      ADDRESS: '0.0.0.0:3000'
      KAFKA_TOPIC: 'session_recording_snapshot_item_events'
      KAFKA_HOSTS: 'kafka:9092'
      REDIS_URL: 'redis://redis:6379/'
      CAPTURE_MODE: recordings
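  # capture and replay-capture build the same Rust binary; CAPTURE_MODE and
  # KAFKA_TOPIC select whether an instance ingests analytics events or
  # session recording snapshots.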
  property-defs-rs:
    image: ghcr.io/posthog/posthog/property-defs-rs:master
    build:
      context: rust/
      args:
        BIN: property-defs-rs
    restart: on-failure
    environment:
      DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
      KAFKA_HOSTS: 'kafka:9092'
      SKIP_WRITES: 'false'
      SKIP_READS: 'false'
      FILTER_MODE: 'opt-out'
  feature-flags:
    image: ghcr.io/posthog/posthog/feature-flags:master
    build:
      context: rust/
      args:
        BIN: feature-flags
    restart: on-failure
    volumes:
      - ./share:/share
    environment:
      WRITE_DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
      READ_DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
      MAXMIND_DB_PATH: '/share/GeoLite2-City.mmdb'
      REDIS_URL: 'redis://redis:6379/'
      # Optional: Use separate Redis URLs for read/write separation
      # REDIS_READER_URL: 'redis://redis-replica:6379/'
      # REDIS_WRITER_URL: 'redis://redis-primary:6379/'
      # Optional: Increase Redis timeout (default is 100ms)
      # REDIS_TIMEOUT_MS: 200
      ADDRESS: '0.0.0.0:3001'
      RUST_LOG: 'info'
      COOKIELESS_REDIS_HOST: redis7
      COOKIELESS_REDIS_PORT: 6379
  plugins:
    command: ./bin/plugin-server --no-restart-loop
    restart: on-failure
    environment:
      DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
      PERSONS_DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
      KAFKA_HOSTS: 'kafka:9092'
      REDIS_URL: 'redis://redis:6379/'
      CLICKHOUSE_HOST: 'clickhouse'
      CLICKHOUSE_DATABASE: 'posthog'
      CLICKHOUSE_SECURE: 'false'
      CLICKHOUSE_VERIFY: 'false'
      COOKIELESS_REDIS_HOST: redis7
      COOKIELESS_REDIS_PORT: 6379
  livestream:
    image: 'ghcr.io/posthog/livestream:main'
    restart: on-failure
    depends_on:
      kafka:
        condition: service_started
  migrate:
    <<: *worker
    command: sh -c "
      python manage.py migrate
      && python manage.py migrate_clickhouse
      && python manage.py run_async_migrations
      "
    restart: 'no'
    deploy:
      replicas: 0
  asyncmigrationscheck:
    <<: *worker
    command: python manage.py run_async_migrations --check
    restart: 'no'
    deploy:
      replicas: 0
    environment:
      <<: *worker_env
      SKIP_ASYNC_MIGRATIONS_SETUP: 0
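  # With `deploy.replicas: 0`, the two one-shot services above are skipped by
  # `docker compose up` and are instead invoked on demand, e.g. (illustrative):
  #   docker compose run --rm migrate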
  # Temporal containers
  elasticsearch:
    environment:
      - cluster.routing.allocation.disk.threshold_enabled=true
      - cluster.routing.allocation.disk.watermark.low=512mb
      - cluster.routing.allocation.disk.watermark.high=256mb
      - cluster.routing.allocation.disk.watermark.flood_stage=128mb
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
      - xpack.security.enabled=false
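      # When given as absolute byte values, Elasticsearch disk watermarks are
      # minimum *free* space thresholds, which is why low (512mb) > high
      # (256mb) > flood_stage (128mb) above.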
    image: elasticsearch:7.17.28
    expose:
      - 9200
    volumes:
      - /var/lib/elasticsearch/data
  temporal:
    restart: on-failure
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=posthog
      - POSTGRES_PWD=posthog
      - POSTGRES_SEEDS=db
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml
      - ENABLE_ES=true
      - ES_SEEDS=elasticsearch
      - ES_VERSION=v7
    image: temporalio/auto-setup:1.20.0
    ports:
      - 7233:7233
    labels:
      kompose.volume.type: configMap
    volumes:
      - ./docker/temporal/dynamicconfig:/etc/temporal/config/dynamicconfig
    depends_on:
      db:
        condition: service_healthy
      elasticsearch:
        condition: service_started
  temporal-admin-tools:
    environment:
      - TEMPORAL_CLI_ADDRESS=temporal:7233
    image: temporalio/admin-tools:1.20.0
    stdin_open: true
    tty: true
  temporal-ui:
    depends_on:
      - temporal
    environment:
      - TEMPORAL_ADDRESS=temporal:7233
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
      - TEMPORAL_CSRF_COOKIE_INSECURE=true
      - TEMPORAL_CODEC_ENDPOINT=http://localhost:8000
    image: temporalio/ui:2.31.2
    ports:
      - 8081:8080
  temporal-django-worker:
    <<: *worker
    command: ./bin/temporal-django-worker
    restart: on-failure
    environment:
      <<: *worker_env
      TEMPORAL_HOST: temporal
  cyclotron-janitor:
    image: ghcr.io/posthog/posthog/cyclotron-janitor:master
    build:
      context: rust/
      args:
        BIN: cyclotron-janitor
    restart: on-failure
    environment:
      DATABASE_URL: 'postgres://posthog:posthog@db:5432/cyclotron'
      KAFKA_HOSTS: 'kafka:9092'
      KAFKA_TOPIC: 'clickhouse_app_metrics2'
    depends_on:
      db:
        condition: service_healthy
      kafka:
        condition: service_started
  otel-collector:
    image: otel/opentelemetry-collector-contrib:latest
    container_name: otel-collector-local
    command: [--config=/etc/otel-collector-config.yaml]
    volumes:
      - ./otel-collector-config.dev.yaml:/etc/otel-collector-config.yaml
    ports:
      - '4317:4317' # OTLP gRPC receiver (mapped to host)
      - '4318:4318' # OTLP HTTP receiver (mapped to host)
      - '13133:13133' # health_check extension
      - '55679:55679' # zpages extension
    depends_on:
      - jaeger
    networks:
      - otel_network
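  # Host applications can export telemetry to the collector over OTLP via the
  # ports published above, e.g. with the standard SDK environment variable:
  #   OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317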
  echo_server:
    image: docker.io/library/caddy:2
    container_name: echo-server
    restart: on-failure
    ports:
      - '18081:8081'
    volumes:
      - ./rust/docker/echo-server/Caddyfile:/etc/caddy/Caddyfile
  jaeger:
    image: jaegertracing/all-in-one:latest
    container_name: jaeger-local
    ports:
      - '16686:16686' # Jaeger UI
      - '14268:14268' # Accepts jaeger.thrift directly from clients (optional for this flow)
      - '14250:14250' # Accepts model.proto (optional for this flow)
    networks:
      - otel_network
networks:
  otel_network:
    driver: bridge