# Changelog:
# - Add GET /api/clients/crashed endpoint (process_status=crashed or stale heartbeat)
# - Add restart_app command action with same lifecycle + lockout as reboot_host
# - Scheduler: crash auto-recovery loop (CRASH_RECOVERY_ENABLED flag, lockout, MQTT publish)
# - Scheduler: unconditional command expiry sweep per poll cycle (sweep_expired_commands)
# - Listener: subscribe to infoscreen/+/service_failed; persist service_failed_at + unit
# - Listener: extract broker_connection block from health payload; persist reconnect_count + last_disconnect_at
# - DB migration b1c2d3e4f5a6: service_failed_at, service_failed_unit, mqtt_reconnect_count, mqtt_last_disconnect_at on clients
# - Add GET /api/clients/service_failed and POST /api/clients/<uuid>/clear_service_failed
# - Monitoring overview API: include mqtt_reconnect_count + mqtt_last_disconnect_at per client
# - Frontend: orange service-failed alert panel (hidden when empty, auto-refresh, acknowledge action)
# - Frontend: MQTT reconnect count + last disconnect in client detail panel
# - MQTT auth hardening: listener/scheduler/server use env credentials; broker enforces allow_anonymous false
# - Client command lifecycle foundation: ClientCommand model, reboot_host/shutdown_host, full ACK lifecycle
# - Docs: TECH-CHANGELOG, DEV-CHANGELOG, MQTT_EVENT_PAYLOAD_GUIDE, copilot-instructions updated
# - Add implementation-plans/, RESTART_VALIDATION_CHECKLIST.md, TODO.md
# docker-compose.yml — 248 lines, 7.5 KiB (YAML)
# Shared bridge network joining every service below.
networks:
  infoscreen-net:
    driver: bridge
services:
  # MQTT listener: consumes device events from the broker and persists
  # them to the database; calls the API server for derived actions.
  listener:
    build:
      context: .
      dockerfile: listener/Dockerfile
    image: infoscreen-listener:latest
    container_name: infoscreen-listener
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
      mqtt:
        condition: service_healthy
    environment:
      # NOTE(review): DB_CONN and DB_URL carry the identical value —
      # presumably different modules read different variable names;
      # confirm before consolidating to one.
      - DB_CONN=mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}
      - DB_URL=mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}
      - API_BASE_URL=http://server:8000
      - MQTT_BROKER_HOST=${MQTT_BROKER_HOST:-mqtt}
      - MQTT_BROKER_PORT=${MQTT_BROKER_PORT:-1883}
      - MQTT_USER=${MQTT_USER}
      - MQTT_PASSWORD=${MQTT_PASSWORD}
      - ENV=${ENV:-development}
      - FLASK_SECRET_KEY=${FLASK_SECRET_KEY:-dev-secret-key-change-in-production}
      - DEFAULT_SUPERADMIN_USERNAME=${DEFAULT_SUPERADMIN_USERNAME:-superadmin}
      - DEFAULT_SUPERADMIN_PASSWORD=${DEFAULT_SUPERADMIN_PASSWORD}
    # REMOVED: source bind-mount is for development only.
    networks:
      - infoscreen-net
proxy:
|
|
image: nginx:1.25 # 🔧 GEÄNDERT: Spezifische Version
|
|
container_name: infoscreen-proxy
|
|
ports:
|
|
- "80:80"
|
|
- "443:443"
|
|
volumes:
|
|
- ./nginx.conf:/etc/nginx/nginx.conf:ro # 🔧 GEÄNDERT: Relativer Pfad
|
|
- ./certs:/etc/nginx/certs:ro # 🔧 GEÄNDERT: Relativer Pfad
|
|
# Mount media volume so nginx can serve uploaded files
|
|
- media-data:/opt/infoscreen/server/media:ro
|
|
depends_on:
|
|
- server
|
|
- dashboard
|
|
networks:
|
|
- infoscreen-net
|
|
|
|
db:
|
|
image: mariadb:11.2 # 🔧 GEÄNDERT: Spezifische Version
|
|
container_name: infoscreen-db
|
|
restart: unless-stopped
|
|
environment:
|
|
MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASSWORD}
|
|
MYSQL_DATABASE: ${DB_NAME}
|
|
MYSQL_USER: ${DB_USER}
|
|
MYSQL_PASSWORD: ${DB_PASSWORD}
|
|
volumes:
|
|
- db-data:/var/lib/mysql
|
|
ports:
|
|
- "3306:3306"
|
|
networks:
|
|
- infoscreen-net
|
|
healthcheck:
|
|
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
|
|
interval: 30s
|
|
timeout: 5s
|
|
retries: 3
|
|
start_period: 30s
|
|
|
|
mqtt:
|
|
image: eclipse-mosquitto:2.0.21 # ✅ GUT: Version ist bereits spezifisch
|
|
container_name: infoscreen-mqtt
|
|
restart: unless-stopped
|
|
command: >
|
|
sh -c 'set -eu;
|
|
: "$${MQTT_USER:?MQTT_USER not set}";
|
|
: "$${MQTT_PASSWORD:?MQTT_PASSWORD not set}";
|
|
touch /mosquitto/config/passwd;
|
|
chmod 600 /mosquitto/config/passwd;
|
|
mosquitto_passwd -b /mosquitto/config/passwd "$${MQTT_USER}" "$${MQTT_PASSWORD}";
|
|
if [ -n "$${MQTT_CANARY_USER:-}" ] && [ -n "$${MQTT_CANARY_PASSWORD:-}" ]; then
|
|
mosquitto_passwd -b /mosquitto/config/passwd "$${MQTT_CANARY_USER}" "$${MQTT_CANARY_PASSWORD}";
|
|
fi;
|
|
exec mosquitto -c /mosquitto/config/mosquitto.conf'
|
|
volumes:
|
|
- ./mosquitto/config:/mosquitto/config
|
|
- ./mosquitto/data:/mosquitto/data
|
|
- ./mosquitto/log:/mosquitto/log
|
|
ports:
|
|
- "1883:1883" # Standard MQTT
|
|
- "9001:9001" # WebSocket (falls benötigt)
|
|
environment:
|
|
- MQTT_USER=${MQTT_USER}
|
|
- MQTT_PASSWORD=${MQTT_PASSWORD}
|
|
- MQTT_CANARY_USER=${MQTT_CANARY_USER:-}
|
|
- MQTT_CANARY_PASSWORD=${MQTT_CANARY_PASSWORD:-}
|
|
networks:
|
|
- infoscreen-net
|
|
healthcheck:
|
|
test:
|
|
[
|
|
"CMD-SHELL",
|
|
"mosquitto_pub -h localhost -u $$MQTT_USER -P $$MQTT_PASSWORD -t test -m 'health' || exit 1",
|
|
]
|
|
interval: 30s
|
|
timeout: 5s
|
|
retries: 3
|
|
start_period: 10s
|
|
|
|
server:
|
|
build:
|
|
context: .
|
|
dockerfile: server/Dockerfile
|
|
image: infoscreen-api:latest
|
|
container_name: infoscreen-api
|
|
restart: unless-stopped
|
|
depends_on:
|
|
db:
|
|
condition: service_healthy
|
|
mqtt:
|
|
condition: service_healthy
|
|
environment:
|
|
DB_CONN: "mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}"
|
|
FLASK_ENV: ${FLASK_ENV}
|
|
ENV_FILE: ${ENV_FILE}
|
|
MQTT_BROKER_URL: ${MQTT_BROKER_URL}
|
|
MQTT_USER: ${MQTT_USER}
|
|
MQTT_PASSWORD: ${MQTT_PASSWORD}
|
|
REDIS_URL: "${REDIS_URL:-redis://redis:6379/0}"
|
|
GOTENBERG_URL: "${GOTENBERG_URL:-http://gotenberg:3000}"
|
|
ports:
|
|
- "8000:8000"
|
|
networks:
|
|
- infoscreen-net
|
|
volumes:
|
|
- media-data:/app/server/media
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
|
|
interval: 30s
|
|
timeout: 5s
|
|
retries: 3
|
|
start_period: 40s
|
|
|
|
# ✅ GEÄNDERT: Dashboard jetzt mit Node.js/React statt Python/Dash
|
|
dashboard:
|
|
build:
|
|
context: ./dashboard
|
|
dockerfile: Dockerfile
|
|
args:
|
|
- VITE_API_URL=${API_URL}
|
|
image: infoscreen-dashboard:latest
|
|
container_name: infoscreen-dashboard
|
|
restart: unless-stopped
|
|
depends_on:
|
|
server:
|
|
condition: service_healthy
|
|
environment:
|
|
- NODE_ENV=production
|
|
- VITE_API_URL=${API_URL}
|
|
# 🔧 ENTFERNT: Port wird in Produktion nicht direkt freigegeben, Zugriff via Proxy
|
|
networks:
|
|
- infoscreen-net
|
|
healthcheck:
|
|
# 🔧 GEÄNDERT: Healthcheck prüft den Nginx-Server im Container
|
|
test: ["CMD", "curl", "-f", "http://localhost/"]
|
|
interval: 30s
|
|
timeout: 5s
|
|
retries: 3
|
|
# 🔧 ERHÖHT: Gibt dem Backend mehr Zeit zum Starten, bevor dieser
|
|
# Container als "gesund" markiert wird.
|
|
start_period: 60s
|
|
|
|
scheduler:
|
|
build:
|
|
context: .
|
|
dockerfile: scheduler/Dockerfile
|
|
image: infoscreen-scheduler:latest
|
|
container_name: infoscreen-scheduler
|
|
restart: unless-stopped
|
|
depends_on:
|
|
# HINZUGEFÜGT: Stellt sicher, dass die DB vor dem Scheduler startet
|
|
db:
|
|
condition: service_healthy
|
|
mqtt:
|
|
condition: service_healthy
|
|
environment:
|
|
# HINZUGEFÜGT: Datenbank-Verbindungsstring
|
|
- DB_CONN=mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}
|
|
- MQTT_BROKER_HOST=${MQTT_BROKER_HOST:-mqtt}
|
|
- MQTT_BROKER_PORT=${MQTT_BROKER_PORT:-1883}
|
|
- MQTT_USER=${MQTT_USER}
|
|
- MQTT_PASSWORD=${MQTT_PASSWORD}
|
|
- POLL_INTERVAL_SECONDS=${POLL_INTERVAL_SECONDS:-30}
|
|
- POWER_INTENT_PUBLISH_ENABLED=${POWER_INTENT_PUBLISH_ENABLED:-false}
|
|
- POWER_INTENT_HEARTBEAT_ENABLED=${POWER_INTENT_HEARTBEAT_ENABLED:-true}
|
|
- POWER_INTENT_EXPIRY_MULTIPLIER=${POWER_INTENT_EXPIRY_MULTIPLIER:-3}
|
|
- POWER_INTENT_MIN_EXPIRY_SECONDS=${POWER_INTENT_MIN_EXPIRY_SECONDS:-90}
|
|
- CRASH_RECOVERY_ENABLED=${CRASH_RECOVERY_ENABLED:-false}
|
|
- CRASH_RECOVERY_GRACE_SECONDS=${CRASH_RECOVERY_GRACE_SECONDS:-180}
|
|
- CRASH_RECOVERY_LOCKOUT_MINUTES=${CRASH_RECOVERY_LOCKOUT_MINUTES:-15}
|
|
networks:
|
|
- infoscreen-net
|
|
|
|
volumes:
|
|
- ./scheduler:/app/scheduler
|
|
|
|
redis:
|
|
image: redis:7-alpine
|
|
container_name: infoscreen-redis
|
|
restart: unless-stopped
|
|
networks:
|
|
- infoscreen-net
|
|
|
|
gotenberg:
|
|
image: gotenberg/gotenberg:8
|
|
container_name: infoscreen-gotenberg
|
|
restart: unless-stopped
|
|
networks:
|
|
- infoscreen-net
|
|
|
|
worker:
|
|
build:
|
|
context: .
|
|
dockerfile: server/Dockerfile
|
|
image: infoscreen-worker:latest
|
|
container_name: infoscreen-worker
|
|
restart: unless-stopped
|
|
depends_on:
|
|
- redis
|
|
- gotenberg
|
|
- db
|
|
environment:
|
|
DB_CONN: "mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}"
|
|
REDIS_URL: "${REDIS_URL:-redis://redis:6379/0}"
|
|
GOTENBERG_URL: "${GOTENBERG_URL:-http://gotenberg:3000}"
|
|
PYTHONPATH: /app
|
|
command: ["rq", "worker", "conversions"]
|
|
networks:
|
|
- infoscreen-net
|
|
|
|
# Named volumes shared across services.
volumes:
  # NOTE(review): server-pip-cache is not referenced by any service in
  # this file — confirm it is still needed before removing.
  server-pip-cache:
  db-data:
  media-data: