additions and corrections for

deployment instructions
This commit is contained in:
2025-09-04 16:45:29 +00:00
parent 86b1bdbd91
commit 76629b8e30
23 changed files with 561 additions and 57 deletions

View File

@@ -11,6 +11,18 @@
},
"dependencies": {
"@syncfusion/ej2-base": "^30.2.6",
"@syncfusion/ej2-buttons": "^30.2.4",
"@syncfusion/ej2-calendars": "^30.2.4",
"@syncfusion/ej2-dropdowns": "^30.2.6",
"@syncfusion/ej2-grids": "^30.2.6",
"@syncfusion/ej2-icons": "^30.2.4",
"@syncfusion/ej2-inputs": "^30.2.6",
"@syncfusion/ej2-kanban": "^30.2.4",
"@syncfusion/ej2-layouts": "^30.2.4",
"@syncfusion/ej2-lists": "^30.2.4",
"@syncfusion/ej2-navigations": "^30.2.7",
"@syncfusion/ej2-notifications": "^30.2.4",
"@syncfusion/ej2-popups": "^30.2.4",
"@syncfusion/ej2-react-buttons": "^30.1.37",
"@syncfusion/ej2-react-calendars": "^30.1.37",
"@syncfusion/ej2-react-dropdowns": "^30.1.37",
@@ -23,6 +35,7 @@
"@syncfusion/ej2-react-notifications": "^30.1.37",
"@syncfusion/ej2-react-popups": "^30.1.37",
"@syncfusion/ej2-react-schedule": "^30.1.37",
"@syncfusion/ej2-splitbuttons": "^30.2.4",
"cldr-data": "^36.0.4",
"lucide-react": "^0.522.0",
"react": "^19.1.0",

View File

@@ -11,6 +11,42 @@ importers:
'@syncfusion/ej2-base':
specifier: ^30.2.6
version: 30.2.6
'@syncfusion/ej2-buttons':
specifier: ^30.2.4
version: 30.2.4
'@syncfusion/ej2-calendars':
specifier: ^30.2.4
version: 30.2.4
'@syncfusion/ej2-dropdowns':
specifier: ^30.2.6
version: 30.2.6
'@syncfusion/ej2-grids':
specifier: ^30.2.6
version: 30.2.6
'@syncfusion/ej2-icons':
specifier: ^30.2.4
version: 30.2.4
'@syncfusion/ej2-inputs':
specifier: ^30.2.6
version: 30.2.6
'@syncfusion/ej2-kanban':
specifier: ^30.2.4
version: 30.2.4
'@syncfusion/ej2-layouts':
specifier: ^30.2.4
version: 30.2.4
'@syncfusion/ej2-lists':
specifier: ^30.2.4
version: 30.2.4
'@syncfusion/ej2-navigations':
specifier: ^30.2.7
version: 30.2.7
'@syncfusion/ej2-notifications':
specifier: ^30.2.4
version: 30.2.4
'@syncfusion/ej2-popups':
specifier: ^30.2.4
version: 30.2.4
'@syncfusion/ej2-react-buttons':
specifier: ^30.1.37
version: 30.2.4
@@ -47,6 +83,9 @@ importers:
'@syncfusion/ej2-react-schedule':
specifier: ^30.1.37
version: 30.2.7
'@syncfusion/ej2-splitbuttons':
specifier: ^30.2.4
version: 30.2.4
cldr-data:
specifier: ^36.0.4
version: 36.0.4

View File

@@ -25,10 +25,10 @@ import CustomEventModal from './components/CustomEventModal';
import { fetchMediaById } from './apiClients';
import { Presentation, Globe, Video, MessageSquare, School } from 'lucide-react';
import { renderToStaticMarkup } from 'react-dom/server';
import caGregorian from './cldr-data/ca-gregorian.json';
import numbers from './cldr-data/numbers.json';
import timeZoneNames from './cldr-data/timeZoneNames.json';
import numberingSystems from './cldr-data/numberingSystems.json';
import caGregorian from './cldr/ca-gregorian.json';
import numbers from './cldr/numbers.json';
import timeZoneNames from './cldr/timeZoneNames.json';
import numberingSystems from './cldr/numberingSystems.json';
// Typ für Gruppe ergänzen
type Group = {

376
deployment.md Normal file
View File

@@ -0,0 +1,376 @@
# Infoscreen Deployment Guide
Komplette Anleitung für das Deployment des Infoscreen-Systems auf einem Ubuntu-Server mit GitHub Container Registry.
## 📋 Übersicht
- **Phase 0**: Docker Installation (optional)
- **Phase 1**: Images bauen und zur Registry pushen
- **Phase 2**: Ubuntu-Server Installation
- **Phase 3**: System-Konfiguration und Start
---
## 🐳 Phase 0: Docker Installation (optional)
Falls Docker noch nicht installiert ist, wählen Sie eine der folgenden Optionen:
### Option A: Ubuntu Repository (schnell)
```bash
# Standard Ubuntu Docker-Pakete
sudo apt update
# Hinweis: im Standard-Ubuntu-Repository heißt das Compose-Paket "docker-compose-v2"
# ("docker-compose-plugin" gibt es nur im offiziellen Docker-Repository, siehe Option B)
sudo apt install docker.io docker-compose-v2 -y
sudo systemctl enable docker
sudo systemctl start docker
```
### Option B: Offizielle Docker-Installation (empfohlen)
```bash
# Alte Docker-Versionen entfernen
sudo apt remove docker docker-engine docker.io containerd runc -y
# Abhängigkeiten installieren
sudo apt update
sudo apt install ca-certificates curl gnupg lsb-release -y
# Docker GPG-Key hinzufügen
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
# Docker Repository hinzufügen
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
# Docker installieren (neueste Version)
sudo apt update
sudo apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
# Docker aktivieren und starten
sudo systemctl enable docker
sudo systemctl start docker
# User zur Docker-Gruppe hinzufügen
sudo usermod -aG docker $USER
# Neuanmeldung für Gruppenänderung erforderlich
exit
# Neu einloggen via SSH
```
### Docker-Installation testen
```bash
# Test-Container ausführen
docker run hello-world
# Docker-Version prüfen
docker --version
docker compose version
```
---
## 🏗️ Phase 1: Images bauen und pushen (Entwicklungsmaschine)
### 1. GitHub Container Registry Login
```bash
# GitHub Personal Access Token mit write:packages Berechtigung erstellen
echo $GITHUB_TOKEN | docker login ghcr.io -u robbstarkaustria --password-stdin
# Oder interaktiv:
docker login ghcr.io
# Username: robbstarkaustria
# Password: [GITHUB_TOKEN]
```
### 2. Images bauen und taggen
```bash
cd /workspace
# Server-Image bauen
docker build -f server/Dockerfile -t ghcr.io/robbstarkaustria/infoscreen-api:latest .
# Dashboard-Image bauen
docker build -f dashboard/Dockerfile -t ghcr.io/robbstarkaustria/infoscreen-dashboard:latest .
# Listener-Image bauen (falls vorhanden)
docker build -f listener/Dockerfile -t ghcr.io/robbstarkaustria/infoscreen-listener:latest .
# Scheduler-Image bauen (falls vorhanden)
docker build -f scheduler/Dockerfile -t ghcr.io/robbstarkaustria/infoscreen-scheduler:latest .
```
### 3. Images zur Registry pushen
```bash
# Alle Images pushen
docker push ghcr.io/robbstarkaustria/infoscreen-api:latest
docker push ghcr.io/robbstarkaustria/infoscreen-dashboard:latest
docker push ghcr.io/robbstarkaustria/infoscreen-listener:latest
docker push ghcr.io/robbstarkaustria/infoscreen-scheduler:latest
# Status prüfen
docker images | grep ghcr.io
```
---
## 🖥️ Phase 2: Ubuntu-Server Installation
### 4. Ubuntu Server vorbereiten
```bash
sudo apt update && sudo apt upgrade -y
# Grundlegende Tools installieren
sudo apt install git curl wget -y
# Docker installieren (siehe Phase 0)
```
### 5. Deployment-Dateien übertragen
```bash
# Deployment-Ordner erstellen
mkdir -p ~/infoscreen-deployment
cd ~/infoscreen-deployment
# Dateien vom Dev-System kopieren (über SCP)
scp user@dev-machine:/workspace/docker-compose.prod.yml .
scp user@dev-machine:/workspace/.env .
scp user@dev-machine:/workspace/nginx.conf .
scp -r user@dev-machine:/workspace/certs ./
scp -r user@dev-machine:/workspace/mosquitto ./
# Alternative: Deployment-Paket verwenden
# Auf Dev-Maschine (/workspace):
# tar -czf infoscreen-deployment.tar.gz docker-compose.prod.yml .env nginx.conf certs/ mosquitto/
# scp infoscreen-deployment.tar.gz user@server:~/
# Auf Server: tar -xzf infoscreen-deployment.tar.gz
```
### 6. Mosquitto-Konfiguration vorbereiten
```bash
# Falls mosquitto-Ordner noch nicht vollständig vorhanden:
mkdir -p mosquitto/{config,data,log}
# Mosquitto-Konfiguration erstellen (falls nicht übertragen)
cat > mosquitto/config/mosquitto.conf << 'EOF'
# -----------------------------
# Netzwerkkonfiguration
# -----------------------------
listener 1883
allow_anonymous true
# password_file /mosquitto/config/passwd
# WebSocket (optional)
listener 9001
protocol websockets
# -----------------------------
# Persistence & Pfade
# -----------------------------
persistence true
persistence_location /mosquitto/data/
log_dest file /mosquitto/log/mosquitto.log
EOF
# Berechtigungen für Mosquitto setzen
sudo chown -R 1883:1883 mosquitto/data mosquitto/log
chmod 755 mosquitto/config mosquitto/data mosquitto/log
```
### 7. Environment-Variablen anpassen
```bash
# .env für Produktionsumgebung anpassen
nano .env
# Wichtige Anpassungen:
# API_URL=http://YOUR_SERVER_IP:8000
# DB_HOST=db (sollte bereits korrekt sein)
# Alle Passwörter für Produktion ändern
```
---
## 🚀 Phase 3: System-Start und Konfiguration
### 8. Images von Registry pullen
```bash
# GitHub Container Registry Login (falls private Repository)
echo $GITHUB_TOKEN | docker login ghcr.io -u robbstarkaustria --password-stdin
# Images pullen
docker compose -f docker-compose.prod.yml pull
```
### 9. System starten
```bash
# Container starten
docker compose -f docker-compose.prod.yml up -d
# Status prüfen (-f nicht vergessen, sonst sucht Docker nach docker-compose.yml statt der Prod-Datei)
docker compose -f docker-compose.prod.yml ps
docker compose -f docker-compose.prod.yml logs -f
```
### 10. Firewall konfigurieren
```bash
sudo ufw enable
sudo ufw allow ssh
sudo ufw allow 80/tcp
sudo ufw allow 443/tcp
sudo ufw allow 1883/tcp # MQTT
sudo ufw allow 9001/tcp # MQTT WebSocket
# ⚠️ Sicherheit: mosquitto.conf erlaubt "allow_anonymous true" — 1883/9001 nur freigeben,
# wenn externer MQTT-Zugriff wirklich benötigt wird (sonst diese beiden Regeln weglassen)
sudo ufw status
```
### 11. Installation validieren
```bash
# Health-Checks
curl http://localhost/api/health
curl https://localhost -k # -k für selbstsignierte Zertifikate
# Container-Status
docker compose -f docker-compose.prod.yml ps
# Logs bei Problemen anzeigen
docker compose -f docker-compose.prod.yml logs server
docker compose -f docker-compose.prod.yml logs dashboard
docker compose -f docker-compose.prod.yml logs mqtt
```
### 12. Automatischer Start (optional)
```bash
# Systemd-Service erstellen
sudo tee /etc/systemd/system/infoscreen.service > /dev/null << 'EOF'
[Unit]
Description=Infoscreen Application
Requires=docker.service
After=docker.service
[Service]
Type=oneshot
RemainAfterExit=yes
# Hinweis: $USER wird im 'EOF'-Heredoc nicht expandiert und systemd löst die Variable
# in WorkingDirectory ebenfalls nicht auf — tatsächlichen Benutzernamen eintragen:
WorkingDirectory=/home/YOUR_USERNAME/infoscreen-deployment
ExecStart=/usr/bin/docker compose -f docker-compose.prod.yml up -d
ExecStop=/usr/bin/docker compose -f docker-compose.prod.yml down
TimeoutStartSec=300
[Install]
WantedBy=multi-user.target
EOF
# Service aktivieren
sudo systemctl enable infoscreen.service
sudo systemctl start infoscreen.service
```
---
## 🌐 Zugriff auf die Anwendung
Nach erfolgreichem Deployment ist die Anwendung unter folgenden URLs erreichbar:
- **HTTPS Dashboard**: `https://YOUR_SERVER_IP`
- **HTTP Dashboard**: `http://YOUR_SERVER_IP` (Redirect zu HTTPS)
- **API**: `http://YOUR_SERVER_IP/api/`
- **MQTT**: `YOUR_SERVER_IP:1883`
- **MQTT WebSocket**: `YOUR_SERVER_IP:9001`
---
## 🔧 Troubleshooting
### Container-Status prüfen
```bash
# Alle Container anzeigen
docker compose ps
# Spezifische Logs anzeigen
docker compose logs -f [service-name]
# Container einzeln neustarten
docker compose restart [service-name]
```
### System neustarten
```bash
# Komplett neu starten
docker compose down
docker compose up -d
# Images neu pullen
docker compose pull
docker compose up -d
```
### Häufige Probleme
| Problem | Lösung |
|---------|--------|
| Container startet nicht | `docker compose logs [service]` prüfen |
| Ports bereits belegt | `sudo netstat -tulpn \| grep :80` prüfen |
| Keine Berechtigung | User zu docker-Gruppe hinzufügen |
| DB-Verbindung fehlschlägt | Environment-Variablen in `.env` prüfen |
| Mosquitto startet nicht | Ordner-Berechtigungen für `1883:1883` setzen |
---
## 📊 Docker-Version Vergleich
| Aspekt | Ubuntu Repository | Offizielle Installation |
|--------|------------------|------------------------|
| **Installation** | ✅ Schnell (1 Befehl) | ⚠️ Mehrere Schritte |
| **Version** | ⚠️ Oft älter | ✅ Neueste Version |
| **Updates** | ✅ Via apt | ✅ Via apt (nach Setup) |
| **Stabilität** | ✅ Getestet | ✅ Aktuell |
| **Features** | ⚠️ Möglicherweise eingeschränkt | ✅ Alle Features |
**Empfehlung:** Für Produktion die offizielle Docker-Installation verwenden.
---
## 📝 Wartung
### Regelmäßige Updates
```bash
# Images aktualisieren
docker compose pull
docker compose up -d
# System-Updates
sudo apt update && sudo apt upgrade -y
```
### Backup
```bash
# Container-Daten sichern
docker compose down
sudo tar -czf infoscreen-backup-$(date +%Y%m%d).tar.gz mosquitto/data/ certs/
# Backup wiederherstellen
sudo tar -xzf infoscreen-backup-YYYYMMDD.tar.gz
docker compose up -d
```
---
**Das Infoscreen-System ist jetzt vollständig über die GitHub Container Registry deploybar!**

View File

@@ -1,5 +1,3 @@
version: '3.8'
networks:
infoscreen-net:
driver: bridge
@@ -46,7 +44,7 @@ services:
container_name: infoscreen-mqtt
restart: unless-stopped
volumes:
- ./mosquitto.conf:/mosquitto/config/mosquitto.conf:ro
- ./mosquitto/config/mosquitto.conf:/mosquitto/config/mosquitto.conf:ro
ports:
- "1883:1883"
- "9001:9001"
@@ -61,7 +59,7 @@ services:
# Verwende fertige Images statt Build
server:
image: ghcr.io/robbstarkaustria/infoscreen-api:latest # Oder wo auch immer Ihre Images liegen
image: ghcr.io/robbstarkaustria/infoscreen-api:latest
container_name: infoscreen-api
restart: unless-stopped
depends_on:
@@ -71,6 +69,11 @@ services:
condition: service_healthy
environment:
DB_CONN: "mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}"
DB_USER: ${DB_USER}
DB_PASSWORD: ${DB_PASSWORD}
DB_NAME: ${DB_NAME}
DB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD}
DB_HOST: db
FLASK_ENV: production
MQTT_BROKER_URL: mqtt://mqtt:1883
MQTT_USER: ${MQTT_USER}
@@ -83,6 +86,10 @@ services:
timeout: 5s
retries: 3
start_period: 40s
command: >
bash -c "alembic -c /app/server/alembic.ini upgrade head &&
python /app/server/init_defaults.py &&
exec gunicorn server.wsgi:app --bind 0.0.0.0:8000"
dashboard:
image: ghcr.io/robbstarkaustria/infoscreen-dashboard:latest # Oder wo auch immer Ihre Images liegen
@@ -107,7 +114,11 @@ services:
mqtt:
condition: service_healthy
environment:
DB_URL: mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}
DB_CONN: "mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}"
DB_USER: ${DB_USER}
DB_PASSWORD: ${DB_PASSWORD}
DB_NAME: ${DB_NAME}
DB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD}
networks:
- infoscreen-net
@@ -122,6 +133,10 @@ services:
condition: service_healthy
environment:
DB_CONN: "mysql+pymysql://${DB_USER}:${DB_PASSWORD}@db/${DB_NAME}"
DB_USER: ${DB_USER}
DB_PASSWORD: ${DB_PASSWORD}
DB_NAME: ${DB_NAME}
DB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD}
MQTT_BROKER_URL: mqtt
MQTT_PORT: 1883
networks:

View File

@@ -3,9 +3,11 @@ FROM python:3.13-slim
WORKDIR /app
COPY requirements.txt ./
COPY listener/requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
COPY listener/ ./listener
COPY models/ ./models
CMD ["python", "listener.py"]
ENV PYTHONPATH=/app
CMD ["python", "listener/listener.py"]

View File

@@ -1,18 +1,17 @@
import os
import json
import logging
import threading
import time
# ...requests entfernt...
import datetime
import paho.mqtt.client as mqtt
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models.models import Client
from dotenv import load_dotenv
load_dotenv("/workspace/.env")
if os.getenv("ENV", "development") == "development":
from dotenv import load_dotenv
load_dotenv(".env")
# ENV-abhängige Konfiguration
ENV = os.getenv("ENV", "development")
@@ -28,8 +27,6 @@ logging.basicConfig(level=logging.DEBUG,
engine = create_engine(DB_URL)
Session = sessionmaker(bind=engine)
# ...externe Zeitsynchronisation entfernt...
# MQTT-Callback
@@ -82,6 +79,16 @@ def on_message(client, userdata, msg):
except Exception as e:
logging.error(f"Fehler bei Verarbeitung: {e}")
topic_parts = msg.topic.split('/')
if len(topic_parts) == 3 and topic_parts[0] == "infoscreen" and topic_parts[1] == "request_group_id":
client_id = topic_parts[2]
session = Session()
client_obj = session.query(Client).filter_by(uuid=client_id).first()
group_id = client_obj.group_id if client_obj else None
session.close()
response_topic = f"infoscreen/response_group_id/{client_id}"
client.publish(response_topic, json.dumps({"group_id": group_id}))
def main():
mqtt_client = mqtt.Client(protocol=mqtt.MQTTv311, callback_api_version=2)
@@ -89,8 +96,9 @@ def main():
mqtt_client.connect("mqtt", 1883)
mqtt_client.subscribe("infoscreen/discovery")
mqtt_client.subscribe("infoscreen/+/heartbeat")
mqtt_client.subscribe("infoscreen/request_group_id/#")
logging.info(
"Listener gestartet und abonniert auf infoscreen/discovery und infoscreen/+/heartbeat")
"Listener gestartet und abonniert auf infoscreen/discovery, infoscreen/+/heartbeat und infoscreen/request_group_id/#")
mqtt_client.loop_forever()

View File

@@ -1,6 +1,8 @@
FROM python:3.13-slim
WORKDIR /app
COPY requirements.txt .
COPY scheduler/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
CMD ["python", "scheduler.py"]
COPY scheduler/ ./scheduler
COPY models/ ./models
ENV PYTHONPATH=/app
CMD ["python", "scheduler/scheduler.py"]

View File

@@ -2,7 +2,7 @@
import os
import logging
from scheduler.db_utils import get_active_events
from db_utils import get_active_events
import paho.mqtt.client as mqtt
import json
import datetime

View File

@@ -13,7 +13,7 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/*
# Kopiert nur die requirements.txt, um den Docker-Cache optimal zu nutzen
COPY requirements.txt .
COPY /server/requirements.txt .
# Installiert die Python-Pakete in ein separates Verzeichnis
RUN pip install --no-cache-dir --prefix="/install" -r requirements.txt
@@ -27,7 +27,7 @@ WORKDIR /app
# Installiert nur die für die Laufzeit notwendigen Systemabhängigkeiten
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
libmariadb-dev-compat locales \
libmariadb-dev-compat locales curl \
&& rm -rf /var/lib/apt/lists/*
# --- Locale konfigurieren ---
@@ -41,7 +41,8 @@ COPY --from=builder /install /usr/local
# --- Applikationscode ---
# Kopiert den Server-Code in das Arbeitsverzeichnis
COPY server/ .
COPY server/ ./server
COPY models/ ./models
# --- Non-Root User anlegen und Rechte setzen ---
ARG USER_ID=1000
@@ -56,5 +57,5 @@ USER infoscreen_taa
EXPOSE 8000
# --- Startbefehl für Gunicorn ---
CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:8000", "wsgi:app"]
CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:8000", "server.wsgi:app"]

View File

@@ -1,5 +1,10 @@
# isort: skip_file
from alembic import context
from sqlalchemy import pool
from sqlalchemy import engine_from_config
from logging.config import fileConfig
from dotenv import load_dotenv
from models.models import Base
import os
import sys
sys.path.insert(0, '/workspace')
@@ -8,12 +13,6 @@ print("models dir exists:", os.path.isdir('/workspace/models'))
print("models/models.py exists:", os.path.isfile('/workspace/models/models.py'))
print("models/__init__.py exists:",
os.path.isfile('/workspace/models/__init__.py'))
from models.models import Base
from dotenv import load_dotenv
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
print("sys.path:", sys.path)
print("models dir exists:", os.path.isdir('/workspace/models'))
@@ -27,12 +26,17 @@ env_path = os.path.abspath(os.path.join(
print(f"Loading environment variables from: {env_path}")
load_dotenv(env_path)
DB_CONN = os.getenv("DB_CONN")
if DB_CONN:
DATABASE_URL = DB_CONN
else:
# Datenbank-Zugangsdaten aus .env
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST = os.getenv("DB_HOST", "localhost")
DB_HOST = os.getenv("DB_HOST", "db") # Default jetzt 'db'
DB_PORT = os.getenv("DB_PORT", "3306")
DB_NAME = os.getenv("DB_NAME")
DATABASE_URL = f"mysql+pymysql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
@@ -43,10 +47,6 @@ config = context.config
if config.config_file_name is not None:
fileConfig(config.config_file_name)
DATABASE_URL = (
f"mysql+pymysql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
)
print(f"Using DATABASE_URL: {DATABASE_URL}")
config.set_main_option("sqlalchemy.url", DATABASE_URL)

View File

@@ -1,14 +1,23 @@
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from dotenv import load_dotenv
import os
# Umgebungsvariablen
# Nur im Dev-Modus .env laden
if os.getenv("ENV", "development") == "development":
load_dotenv(dotenv_path=os.path.join(
os.path.dirname(__file__), '..', '.env'))
# Prod: DB_CONN direkt aus Umgebungsvariable (von Compose gesetzt)
DB_URL = os.getenv("DB_CONN")
if not DB_URL:
# Dev: DB-URL aus Einzelwerten bauen
DB_USER = os.getenv("DB_USER", "infoscreen_admin")
DB_PASSWORD = os.getenv("DB_PASSWORD", "KqtpM7wmNd&mFKs")
DB_HOST = os.getenv("DB_HOST", "db")
DB_HOST = os.getenv("DB_HOST", "db") # IMMER 'db' als Host im Container!
DB_NAME = os.getenv("DB_NAME", "infoscreen_by_taa")
DB_URL = f"mysql+pymysql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}/{DB_NAME}"
print(f"Using DB_URL: {DB_URL}") # Debug-Ausgabe
engine = create_engine(DB_URL, echo=False)
Session = sessionmaker(bind=engine)

38
server/init_defaults.py Normal file
View File

@@ -0,0 +1,38 @@
"""Idempotent DB bootstrap: seed the default client group and the admin user.

Run as a one-shot script (see docker-compose command: ``python /app/server/init_defaults.py``).
Safe to run repeatedly — every insert is guarded by an existence check.
"""
from sqlalchemy import create_engine, text
import os
from dotenv import load_dotenv
import bcrypt

# Dev convenience: load .env if present; in containers the variables
# come from the compose environment instead.
load_dotenv()


def _ensure_default_group(conn) -> None:
    """Create the fallback client group (id=1) if it does not exist yet."""
    count = conn.execute(
        text("SELECT COUNT(*) FROM client_groups WHERE id=1")).scalar()
    if count == 0:
        conn.execute(
            text(
                "INSERT INTO client_groups (id, name, is_active) VALUES (1, 'Nicht zugeordnet', 1)")
        )
        print("✅ Default-Gruppe mit id=1 angelegt.")


def _ensure_admin_user(conn) -> None:
    """Create the default admin account if it does not exist yet."""
    admin_user = os.getenv("DEFAULT_ADMIN_USERNAME", "infoscreen_admin")
    # NOTE(review): hard-coded fallback password is a security risk — set
    # DEFAULT_ADMIN_PASSWORD in production and change this account's password.
    admin_pw = os.getenv("DEFAULT_ADMIN_PASSWORD", "Info_screen_admin25!")
    # Hash exactly like init_db.py so password verification stays consistent.
    hashed_pw = bcrypt.hashpw(admin_pw.encode(
        'utf-8'), bcrypt.gensalt()).decode('utf-8')
    count = conn.execute(text(
        "SELECT COUNT(*) FROM users WHERE username=:username"),
        {"username": admin_user}).scalar()
    if count == 0:
        # role 1 = admin (adjust if the user model changes)
        conn.execute(
            text("INSERT INTO users (username, password_hash, role, is_active) VALUES (:username, :password_hash, 1, 1)"),
            {"username": admin_user, "password_hash": hashed_pw}
        )
        print(f"✅ Admin-Benutzer '{admin_user}' angelegt.")


def main() -> None:
    """Connect to the database and seed default rows (idempotent)."""
    # DB_PORT is now honored instead of the previously hard-coded 3306.
    db_port = os.getenv("DB_PORT", "3306")
    db_url = (
        f"mysql+pymysql://{os.getenv('DB_USER')}:{os.getenv('DB_PASSWORD')}"
        f"@{os.getenv('DB_HOST')}:{db_port}/{os.getenv('DB_NAME')}"
    )
    # AUTOCOMMIT: each guarded INSERT is applied immediately, no explicit txn.
    engine = create_engine(db_url, isolation_level="AUTOCOMMIT")
    with engine.connect() as conn:
        _ensure_default_group(conn)
        _ensure_admin_user(conn)


if __name__ == "__main__":
    main()

View File

@@ -5,3 +5,4 @@ PyMySQL>=1.1.1
python-dotenv>=1.1.0
SQLAlchemy>=2.0.41
flask
gunicorn

View File

@@ -1,4 +1,4 @@
from database import Session
from server.database import Session
from models.models import Client, ClientGroup
from flask import Blueprint, request, jsonify
import sys

View File

@@ -1,6 +1,6 @@
from re import A
from flask import Blueprint, request, jsonify, send_from_directory
from database import Session
from server.database import Session
from models.models import EventMedia, MediaType
import os

View File

@@ -1,5 +1,5 @@
from flask import Blueprint, request, jsonify
from database import Session
from server.database import Session
from models.models import Event, EventMedia, MediaType
from datetime import datetime, timezone
from sqlalchemy import and_

View File

@@ -1,7 +1,7 @@
from models.models import Client
# Neue Route: Liefert alle Gruppen mit zugehörigen Clients und deren Alive-Status
from database import Session
from server.database import Session
from models.models import ClientGroup
from flask import Blueprint, request, jsonify
from sqlalchemy import func

View File

@@ -3,7 +3,7 @@ from server.routes.eventmedia import eventmedia_bp
from server.routes.events import events_bp
from server.routes.groups import groups_bp
from server.routes.clients import clients_bp
from database import Session, engine
from server.database import Session, engine
from flask import Flask, jsonify, send_from_directory, request
import glob
import os