# FIXME: define a frontend & backend network, and only expose backend services to the frontend (nginx)

networks:
  backend:
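
# A hedged sketch of the split that FIXME describes (not wired up here; the
# `frontend` network and the `internal: true` flag are additions, not part of
# the current deployment):
#
# networks:
#   frontend:          # ingress-facing; only nginx joins it
#   backend:
#     internal: true   # backend services become unreachable from outside
#
# nginx would then join both networks and keep `ports:` as the sole way in,
# while every other service lists only `backend`.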

secrets:
  postgres_password:
    file: secrets/postgres/postgres_password
  synapse_signing_key:
    file: secrets/synapse/signing.key
  livekit_api_key:
    file: secrets/livekit/livekit_api_key
  livekit_secret_key:
    file: secrets/livekit/livekit_secret_key
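
# Note: compose mounts each file-backed secret into its consuming containers at
# /run/secrets/<secret_name>, which is why postgres below reads
# POSTGRES_PASSWORD_FILE from /run/secrets/postgres_password.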

services:
  # XXX: consider factoring secret generation out of the compose.yml

  # dependencies for optionally generating default configs + secrets
  generate-synapse-secrets:
    image: ghcr.io/element-hq/synapse:latest
    user: $USER_ID:$GROUP_ID
    restart: "no"
    volumes:
      - ${VOLUME_PATH}/data/synapse:/data:rw
      - ${VOLUME_PATH}/init/generate-synapse-secrets.sh:/entrypoint.sh
    env_file: .env
    environment:
      SYNAPSE_CONFIG_DIR: /data
      SYNAPSE_CONFIG_PATH: /data/homeserver.yaml.default
      SYNAPSE_SERVER_NAME: ${DOMAIN}
      SYNAPSE_REPORT_STATS: ${REPORT_STATS}
    entrypoint: "/entrypoint.sh"
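    # Note the .default suffix: the generated homeserver.yaml.default is only
    # mined for secrets by the init service's templating below; Synapse itself
    # loads /data/homeserver.yaml.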

  generate-mas-secrets:
    restart: "no"
    image: ghcr.io/element-hq/matrix-authentication-service:latest
    user: $USER_ID:$GROUP_ID
    volumes:
      - ${VOLUME_PATH}/data/mas:/data:rw
    # FIXME: stop this regenerating a spurious default config on every run.
    # We can't use the same guard-script approach as synapse (unless we use a
    # debug image of MAS), as the MAS image is distroless and has no bash.
    command: "config generate -o /data/config.yaml.default"
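    # A hedged sketch of one way around the distroless limitation, mirroring the
    # dockerfile_inline trick livekit-jwt uses at the bottom of this file
    # (untested; the mas-cli binary path inside the image is an assumption):
    #
    #   build:
    #     dockerfile_inline: |
    #       FROM ghcr.io/element-hq/matrix-authentication-service:latest AS mas
    #       FROM alpine:latest
    #       RUN apk add --no-cache bash
    #       COPY --from=mas /usr/local/bin/mas-cli /usr/local/bin/mas-cli
    #   entrypoint: ["/bin/bash", "-c", "[ -f /data/config.yaml.default ] || mas-cli config generate -o /data/config.yaml.default"]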

  # dependency for templating /data-template into /data (having extracted any
  # secrets from any default generated configs)
  init:
    build: init
    user: $USER_ID:$GROUP_ID
    restart: "no"
    volumes:
      - ${VOLUME_PATH}/secrets:/secrets
      - ${VOLUME_PATH}/data:/data
      - ${VOLUME_PATH}/data-template:/data-template
      - ${VOLUME_PATH}/init/init.sh:/init.sh
    command: "/init.sh"
    env_file: .env
    depends_on:
      generate-synapse-secrets:
        condition: service_completed_successfully
      generate-mas-secrets:
        condition: service_completed_successfully

  nginx:
    image: nginx:latest
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
      - "8448:8448"
    # shut down fast so we can iterate rapidly on compose.yml
    stop_grace_period: 0s
    volumes:
      - ${VOLUME_PATH}/data/nginx/conf.d:/etc/nginx/conf.d
      - ${VOLUME_PATH}/data/nginx/www:/var/www
      - ${VOLUME_PATH}/data/ssl:/etc/nginx/ssl
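    # run nginx in the foreground while a side loop reloads it every 6h, so
    # certificates renewed by the certbot service below get picked up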
command: "/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
|
|
networks:
|
|
backend:
|
|
aliases: # so our containers can resolve the LB
|
|
- $DOMAIN
|
|
- $HOMESERVER_FQDN
|
|
- $ELEMENT_WEB_FQDN
|
|
- $ELEMENT_CALL_FQDN
|
|
- $MAS_FQDN
|
|
depends_on:
|
|
init:
|
|
condition: service_completed_successfully
|
|
synapse:
|
|
condition: service_started
|
|
|
|
  certbot:
    image: certbot/certbot:latest
    restart: unless-stopped
    volumes:
      - ${VOLUME_PATH}/data/certbot/conf:/etc/letsencrypt
      - ${VOLUME_PATH}/data/certbot/www:/var/www/certbot
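    # retry renewal every 12h (certbot itself is a no-op unless a certificate is
    # close to expiry); the TERM trap lets compose stop the container promptly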
entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
|
|
depends_on:
|
|
init:
|
|
condition: service_completed_successfully
|
|
nginx:
|
|
condition: service_started
|
|
|
|
  postgres:
    image: postgres:latest
    restart: unless-stopped
    volumes:
      - ${VOLUME_PATH}/data/postgres:/var/lib/postgresql/data:rw
      - ${VOLUME_PATH}/scripts/create-multiple-postgresql-databases.sh:/docker-entrypoint-initdb.d/create-multiple-postgresql-databases.sh
    networks:
      - backend
    environment:
      POSTGRES_MULTIPLE_DATABASES: synapse,mas
      POSTGRES_USER: matrix # FIXME: use different usernames & passwords for the synapse and MAS DBs.
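      # A hedged sketch of what that FIXME implies; the init script would need
      # extending to run something like the following per database (role names
      # here are illustrative):
      #   CREATE USER synapse_user WITH PASSWORD '...';
      #   CREATE DATABASE synapse OWNER synapse_user;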
      POSTGRES_PASSWORD_FILE: /run/secrets/postgres_password
      POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C
      PGDATA: /var/lib/postgresql/data/data # otherwise it clashes with .gitkeep in the parent dir
    secrets:
      - postgres_password
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U matrix"]
      start_period: "1s"
      interval: "1s"
      timeout: "5s"
    depends_on:
      init:
        condition: service_completed_successfully

  redis:
    image: redis:latest
    restart: unless-stopped
    # healthcheck:
    #   test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
    #   interval: 1s
    #   timeout: 3s
    #   retries: 5
    networks:
      - backend

  synapse:
    image: ghcr.io/element-hq/synapse:latest
    user: $USER_ID:$GROUP_ID
    restart: unless-stopped
    volumes:
      - ${VOLUME_PATH}/data/synapse:/data:rw
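      # presumably mounted to override the system CA bundle, so TLS connections
      # to the nginx-aliased hostnames are trusted from inside the container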
      - ${VOLUME_PATH}/data/nginx/ssl/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt
    # ports:
    #   - 8008:8008
    networks:
      - backend
    environment:
      SYNAPSE_CONFIG_DIR: /data
      SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
    secrets:
      - synapse_signing_key
    depends_on:
      redis:
        condition: service_started
      postgres:
        condition: service_healthy
      init:
        condition: service_completed_successfully

  synapse-generic-worker-1:
    image: ghcr.io/element-hq/synapse:latest
    user: $USER_ID:$GROUP_ID
    restart: unless-stopped
    entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
    healthcheck:
      test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
      start_period: "5s"
      interval: "15s"
      timeout: "5s"
    networks:
      - backend
    volumes:
      - ${VOLUME_PATH}/data/synapse:/data:rw
      - ${VOLUME_PATH}/data/nginx/ssl/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt
    environment:
      SYNAPSE_WORKER: synapse.app.generic_worker
    # Expose a port if required so your reverse proxy can send requests to this worker.
    # Which port depends on how the http listener is defined in the worker configuration file.
    # ports:
    #   - 8081:8081
    secrets:
      - synapse_signing_key
    depends_on:
      - synapse

  synapse-federation-sender-1:
    image: ghcr.io/element-hq/synapse:latest
    user: $USER_ID:$GROUP_ID
    restart: unless-stopped
    entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
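    # presumably this worker's config defines no HTTP listener, so there is
    # nothing to probe: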
    healthcheck:
      disable: true
    networks:
      - backend
    volumes:
      - ${VOLUME_PATH}/data/synapse:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
      - ${VOLUME_PATH}/data/nginx/ssl/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt
    environment:
      SYNAPSE_WORKER: synapse.app.federation_sender
    # ports:
    #   - 8082:8082
    secrets:
      - synapse_signing_key
    depends_on:
      - synapse

  mas:
    image: ghcr.io/element-hq/matrix-authentication-service:latest
    restart: unless-stopped
    # ports:
    #   - 8083:8080
    volumes:
      - ${VOLUME_PATH}/data/mas:/data:rw
      - ${VOLUME_PATH}/data/nginx/ssl/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt
    networks:
      - backend
    # FIXME: do we also need to sync the db?
    command: "server --config=/data/config.yaml"
    depends_on:
      postgres:
        condition: service_healthy
      init:
        condition: service_completed_successfully

  # as a basic local MTA
  mailhog:
    image: mailhog/mailhog:latest
    restart: unless-stopped
    ports:
      - 8025:8025
      # - 1025:1025
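      # 8025 is the MailHog web UI; its default SMTP listener on 1025 stays
      # reachable via the backend network without being published to the host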
    networks:
      - backend

  element-web:
    image: vectorim/element-web:latest
    restart: unless-stopped
    # ports:
    #   - 8080:80
    healthcheck:
      test: ["CMD-SHELL", "curl -fSs http://localhost:8080/version || exit 1"]
      start_period: "5s"
      interval: "15s"
      timeout: "5s"
    networks:
      - backend
    volumes:
      - ${VOLUME_PATH}/data/element-web/config.json:/app/config.json
    depends_on:
      init:
        condition: service_completed_successfully

  element-call:
    image: ghcr.io/element-hq/element-call
    restart: unless-stopped
    # ports:
    #   - 8082:80
    networks:
      - backend
    volumes:
      - ${VOLUME_PATH}/data/element-call/config.json:/app/config.json
    depends_on:
      init:
        condition: service_completed_successfully

  livekit:
    image: livekit/livekit-server:latest
    restart: unless-stopped
    volumes:
      - ${VOLUME_PATH}/data/livekit/config.yaml:/etc/livekit.yaml
    command: --config /etc/livekit.yaml --node-ip ${LIVEKIT_NODE_IP}
    ports:
      # - 7880:7880 # HTTP listener
      - 7881:7881 # TCP WebRTC transport, advertised via SDP

      # TODO: expose livekit-turn on TCP & UDP 443 via nginx.
      # At the least this would allow UDP TURN on port 443 for better perf.

      # You can't expose a massive port range here, as Docker literally sets up
      # 10,000 userland proxy listeners, which takes forever and will clash with
      # any existing high-numbered ports.
      # So for now, tunnel everything via TCP 7881. FIXME!
      #- 50000-60000:50000-60000/tcp # TCP media
      #- 50000-60000:50000-60000/udp # UDP media
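    # A hedged sketch of the nginx side of that TODO (stream module; untested,
    # and 5349 as LiveKit's TURN/TLS port is an assumption that depends on
    # data/livekit/config.yaml). It would also have to coexist with the existing
    # HTTPS listener on 443, e.g. via a second IP or ssl_preread:
    #
    #   stream {
    #     upstream livekit_turn { server livekit:5349; }
    #     server { listen 443; proxy_pass livekit_turn; }
    #   }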
    networks:
      - backend
    depends_on:
      init:
        condition: service_completed_successfully
      redis:
        condition: service_started

  livekit-jwt:
    build:
      # evil hack to pull in bash so we can run an entrypoint.sh
      # FIXME: it's a bit wasteful; the alternative would be to modify lk-jwt-service to pick up secrets from disk.
      # Another alternative would be to factor secret generation out of compose.yml and create a .env up front.
      dockerfile_inline: |
        FROM ghcr.io/element-hq/lk-jwt-service:latest-ci AS builder
        FROM alpine:latest
        RUN apk update && apk add bash
        COPY --from=builder /lk-jwt-service /
    restart: unless-stopped
    volumes:
      - ${VOLUME_PATH}/data/nginx/ssl/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt
      - ${VOLUME_PATH}/scripts/livekit-jwt-entrypoint.sh:/entrypoint.sh
    entrypoint: /entrypoint.sh
    env_file: .env
    deploy:
      restart_policy:
        condition: on-failure
    networks:
      - backend
    secrets:
      - livekit_api_key
      - livekit_secret_key
    depends_on:
      init:
        condition: service_completed_successfully
      livekit:
        condition: service_started