Pool sensor v2: VCC monitoring, database resilience, receiver improvements

- Added voltage monitoring table and storage pipeline
- Extended pool payload to 17 bytes with VCC field (protocol v2); the layout is sketched below
- Improved database connection pool resilience (reduced pool size, aggressive recycling, pool disposal on failure)
- Added environment variable support for database configuration
- Fixed receiver MQTT deprecation warning (CallbackAPIVersion.VERSION2)
- Silenced excessive RSSI status logging in receiver
- Added reset flag tracking and reporting
- Updated Docker compose with DB config and log rotation limits
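
For orientation, here is a minimal standalone sketch of the two payload layouts the receiver now accepts. The struct format strings and magic bytes match the diff below; the sample field values and the body of the XOR helper are illustrative:

```python
# Sanity-check of the two pool payload layouts (little-endian; trailing byte is an XOR checksum).
import struct

V2_FMT = '<BBBBHhhHHHB'  # magic1, magic2, version, nodeId, seq, t_ds10, t_bme10, hum10, pres1, vcc_mv, crc
V1_FMT = '<BBBBHhhHHB'   # same layout without the vcc_mv field
assert struct.calcsize(V2_FMT) == 17
assert struct.calcsize(V1_FMT) == 15

def crc8_xor(data: bytes) -> int:
    # Simple byte-wise XOR, as the receiver's crc8_xor docstring describes.
    c = 0
    for b in data:
        c ^= b
    return c

# Illustrative v2 frame: protocol version 2, node 1, seq 7,
# 21.5 C (DS18B20), 21.8 C (BME280), 45.5 %, 1013.2 hPa, 3300 mV.
body = struct.pack('<BBBBHhhHHH', 0x42, 0x99, 0x02, 1, 7, 215, 218, 455, 10132, 3300)
frame = body + bytes([crc8_xor(body)])
assert len(frame) == 17
```

Feeding `frame` to the `decode_pool_payload` shown in the diff should yield a v2 decode with `crc_valid=True`.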
2026-01-25 11:25:15 +00:00
parent d1c1f63cb9
commit f55c1fe6f1
9 changed files with 1512 additions and 101 deletions


@@ -19,11 +19,15 @@ from sqlalchemy.engine import URL
# from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timezone
from data_tables import Sensor, TemperatureInside,TemperatureOutside, HumidityOutside, HumidityInside, AirPressure, Wind, Precipitation
from data_tables import Sensor, TemperatureInside,TemperatureOutside, HumidityOutside, HumidityInside, AirPressure, Wind, Precipitation, Voltage, Base
# Load .env file so environment variables from .env are available at startup
load_dotenv()
# Strip quotes from password if they were included (defensive)
DB_PASSWORD_RAW = os.getenv("DB_PASSWORD", "cfCUswMHfK82!")
DB_PASSWORD = DB_PASSWORD_RAW.strip("'\"") # Remove surrounding quotes if present
# Configure logging with environment-based log level
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
logger = logging.getLogger(__name__)
@@ -73,6 +77,7 @@ sensor_ids = []
sensor_names = []
sensor_by_name_room = {}
pool_sensors_cache = {}
pool_reset_flags_seen = {}
# Track sensor failure states to avoid repeated logging
sensor_failure_logged = {}
@@ -164,11 +169,58 @@ sqlite_conn.commit()
logger.info(f"SQLite database initialized at: {sqlite_db_path}")
# Database connection (hardcoded for private intranet)
DB_HOST = "192.168.43.102"
DB_PORT = 3306
sql_engine = create_engine('mysql+mysqlconnector://weatherdata:cfCU$swM!HfK82%*@192.168.43.102/weatherdata',
connect_args={"connection_timeout": 5})
# Database connection configuration (read from environment variables or use defaults)
DB_HOST = os.getenv("DB_HOST", "192.168.43.102")
DB_PORT = int(os.getenv("DB_PORT", "3306"))
DB_USER = os.getenv("DB_USER", "weatherdata")
DB_PASSWORD_RAW = os.getenv("DB_PASSWORD", "cfCUswMHfK82!")
DB_PASSWORD = DB_PASSWORD_RAW.strip("'\"") # Remove any surrounding quotes
DB_NAME = os.getenv("DB_NAME", "weatherdata")
DB_CONNECT_TIMEOUT = int(os.getenv("DB_CONNECT_TIMEOUT", "5"))
# Log database configuration at startup
logger.info(f"DB config: host={DB_HOST}:{DB_PORT}, user={DB_USER}, db={DB_NAME}")
if DB_PASSWORD:
logger.debug(f"DB_PASSWORD length: {len(DB_PASSWORD)}, chars: {[c for c in DB_PASSWORD]}")
# Build connection URL (password will be passed separately in connect_args for proper handling)
db_url = f"mysql+mysqlconnector://{DB_USER}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
# Create engine with connection pool resilience
# Key settings to handle intermittent network issues:
# - pool_pre_ping: Verify connection is alive before using
# - pool_recycle: Aggressively recycle to avoid stale connections
# - pool_size: Conservative pool to avoid resource exhaustion
# - max_overflow: Limited to prevent connection thrashing
# - pool_reset_on_return: Use "none" to avoid failed rollback on dead connections
sql_engine = create_engine(
db_url,
connect_args={
"user": DB_USER,
"password": DB_PASSWORD, # Pass password separately to avoid URL encoding issues
"host": DB_HOST,
"port": DB_PORT,
"database": DB_NAME,
"connection_timeout": DB_CONNECT_TIMEOUT,
"autocommit": False, # Let SQLAlchemy manage transactions properly
"raise_on_warnings": False, # Suppress MySQL warnings that clutter logs
},
pool_pre_ping=True, # Test connection before using it (detects stale connections)
pool_recycle=300, # Recycle connections every 5 minutes (aggressive, handles server timeouts)
pool_timeout=10, # Wait up to 10 seconds to get a connection from pool
pool_size=3, # Keep only 3 steady connections (was 5)
max_overflow=5, # Allow only 5 overflow connections (was 10, prevents thrashing)
pool_reset_on_return="none", # Avoid rollback on return to prevent "Lost connection" errors
echo=False, # Set to True for SQL logging if debugging
echo_pool=False, # Set to True for connection pool logging if debugging
)
# Ensure tables exist (safe: creates only missing ones)
try:
Base.metadata.create_all(sql_engine)
logger.info("Verified/created database tables")
except Exception as e:
logger.warning(f"Could not auto-create tables: {e}")
# DB availability tracking for resilient mode
db_available = False
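
As an aside, the pool settings above can be exercised in isolation; this sketch uses an in-memory SQLite engine as a stand-in for the MySQL service:

```python
# Minimal sketch of the resilience settings in isolation, using an in-memory
# SQLite engine as a stand-in for the MySQL service.
from sqlalchemy import create_engine, text

engine = create_engine(
    "sqlite://",         # placeholder URL; the receiver uses mysql+mysqlconnector
    pool_pre_ping=True,  # emit a lightweight ping before reusing a pooled connection
    pool_recycle=300,    # retire connections older than 5 minutes
)
with engine.connect() as conn:
    conn.execute(text("SELECT 1"))
engine.dispose()         # what the receiver does after a failed probe: drop all pooled sockets
```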
@@ -234,10 +286,32 @@ def parse_radio_frame(byte_data):
return None
PAYLOAD_SIZE = 15 # bytes in pool payload
POOL_PAYLOAD_FORMATS = [
{
"size": 17, # New payload with VCC
"struct": "<BBBBHhhHHHB",
"includes_vcc": True,
"label": "v2_vcc",
},
{
"size": 15, # Legacy payload without VCC
"struct": "<BBBBHhhHHB",
"includes_vcc": False,
"label": "v1_legacy",
},
]
MIN_POOL_PAYLOAD_SIZE = min(fmt["size"] for fmt in POOL_PAYLOAD_FORMATS)
MAGIC1 = 0x42
MAGIC2 = 0x99
# Reset flags use AVR MCUSR bit mapping in the version byte's upper nibble
RESET_FLAG_MAP = [
(0x1, "PORF (power-on)"),
(0x2, "EXTRF (external reset)"),
(0x4, "BORF (brown-out)"),
(0x8, "WDRF (watchdog)"),
]
def crc8_xor(data: bytes) -> int:
"""Simple XOR checksum used by the pool payload."""
@@ -247,67 +321,121 @@ def crc8_xor(data: bytes) -> int:
return c
def decode_pool_payload(candidate_bytes: bytes, expected_seq: Optional[int] = None):
"""Scan a byte stream for a plausible pool payload.
def parse_version_and_reset_flags(version_byte: int):
"""Return protocol version (low nibble) and decoded reset flags (high nibble)."""
protocol_version = version_byte & 0x0F
reset_flags = (version_byte >> 4) & 0x0F
reset_causes = [desc for bit, desc in RESET_FLAG_MAP if reset_flags & bit]
return protocol_version, reset_flags, reset_causes
Slides a 15-byte window, validates with CRC, version/nodeId, and range checks,
and scores candidates. Returns the best decoded dict or None.
def decode_pool_payload(candidate_bytes: bytes, expected_seq: Optional[int] = None):
"""Scan a byte stream for a plausible pool payload (v1 legacy + v2 with VCC).
Slides a window across the stream for each supported payload size, validates CRC,
performs plausibility checks, and scores candidates. Returns the best decoded dict
or None. VCC is decoded for the new format but ignored when storing for now.
"""
# Drop leading preamble (0xAA) if present
while candidate_bytes.startswith(b"\xaa"):
candidate_bytes = candidate_bytes[1:]
best = None
best_score = -1
best_score = float('-inf')
for offset in range(0, len(candidate_bytes) - PAYLOAD_SIZE + 1):
chunk = candidate_bytes[offset:offset + PAYLOAD_SIZE]
try:
magic1, magic2, version, nodeId, seq, t_ds10, t_bme10, hum10, pres1, crc_received = struct.unpack(
'<BBBBHhhHHB', chunk
)
except struct.error:
continue
for offset in range(0, len(candidate_bytes) - MIN_POOL_PAYLOAD_SIZE + 1):
for fmt in POOL_PAYLOAD_FORMATS:
if offset + fmt["size"] > len(candidate_bytes):
continue
crc_calculated = crc8_xor(chunk[:-1])
if crc_calculated != crc_received:
continue
chunk = candidate_bytes[offset:offset + fmt["size"]]
try:
if fmt["includes_vcc"]:
(
magic1,
magic2,
version_byte,
nodeId,
seq,
t_ds10,
t_bme10,
hum10,
pres1,
vcc_mv,
crc_received,
) = struct.unpack(fmt["struct"], chunk)
else:
(
magic1,
magic2,
version_byte,
nodeId,
seq,
t_ds10,
t_bme10,
hum10,
pres1,
crc_received,
) = struct.unpack(fmt["struct"], chunk)
vcc_mv = None
except struct.error:
continue
if version != 1 or nodeId != 1:
continue
crc_calculated = crc8_xor(chunk[:-1])
crc_valid = crc_calculated == crc_received
# Plausibility checks (unit scaled)
if not (-300 <= t_ds10 <= 600): # -30.0 to 60.0°C
continue
if not (-300 <= t_bme10 <= 600):
continue
if not (0 <= hum10 <= 1000): # 0.0 to 100.0%
continue
if not (8000 <= pres1 <= 11000): # 800.0 to 1100.0 hPa
continue
protocol_version, reset_flags, reset_causes = parse_version_and_reset_flags(version_byte)
score = 0
if magic1 == MAGIC1 and magic2 == MAGIC2:
score += 2
if expected_seq is not None and seq == expected_seq:
score += 1
# CRC already validated; reward shorter offset to prefer first valid
score -= offset * 0.001
# Accept protocol version 1 (legacy) and 2 (future) to tolerate FW bumps
if protocol_version not in (1, 2) or nodeId != 1:
continue
if score > best_score:
best_score = score
best = {
"offset": offset,
"magic_ok": magic1 == MAGIC1 and magic2 == MAGIC2,
"version": version,
"nodeId": nodeId,
"sequence": seq,
"t_ds_c": t_ds10 / 10.0,
"t_bme_c": t_bme10 / 10.0,
"humidity": hum10 / 10.0,
"pressure_hpa": pres1 / 10.0,
"crc_valid": True,
}
# Plausibility checks (unit scaled)
if not (-300 <= t_ds10 <= 600): # -30.0 to 60.0°C
continue
if not (-300 <= t_bme10 <= 600):
continue
if not (0 <= hum10 <= 1000): # 0.0 to 100.0%
continue
if not (8000 <= pres1 <= 11000): # 800.0 to 1100.0 hPa
continue
if vcc_mv is not None and not (1000 <= vcc_mv <= 5000):
continue
score = 0
if magic1 == MAGIC1 and magic2 == MAGIC2:
score += 2
if expected_seq is not None and seq == expected_seq:
score += 1
if fmt["includes_vcc"]:
score += 0.5 # Prefer new payload when both are valid
if crc_valid:
score += 3
else:
score -= 3 # Keep but penalize invalid CRC
# Reward smaller offsets so the earliest plausible match wins ties
score -= offset * 0.001
if score > best_score:
best_score = score
best = {
"offset": offset,
"magic_ok": magic1 == MAGIC1 and magic2 == MAGIC2,
"version": protocol_version,
"version_byte": version_byte,
"reset_flags": reset_flags,
"reset_causes": reset_causes,
"nodeId": nodeId,
"sequence": seq,
"t_ds_c": t_ds10 / 10.0,
"t_bme_c": t_bme10 / 10.0,
"humidity": hum10 / 10.0,
"pressure_hpa": pres1 / 10.0,
"vcc_mv": vcc_mv,
"crc_valid": crc_valid,
"crc_expected": crc_calculated,
"format": fmt["label"],
}
return best
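
A worked example of the version-byte split used above (the byte value is chosen for illustration):

```python
# Worked example: version byte 0x82 -> protocol v2, watchdog reset recorded.
RESET_FLAG_MAP = [
    (0x1, "PORF (power-on)"),
    (0x2, "EXTRF (external reset)"),
    (0x4, "BORF (brown-out)"),
    (0x8, "WDRF (watchdog)"),
]

version_byte = 0x82
protocol_version = version_byte & 0x0F    # -> 2 (low nibble)
reset_flags = (version_byte >> 4) & 0x0F  # -> 0x8 (high nibble)
causes = [desc for bit, desc in RESET_FLAG_MAP if reset_flags & bit]
print(protocol_version, hex(reset_flags), causes)
# 2 0x8 ['WDRF (watchdog)']
```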
@@ -416,7 +544,7 @@ def get_sensor_keys(sensor_type):
'Oregon-v1': ['temperature_C', 'battery_ok'],
'Oregon-THGR122N': ['temperature_C', 'humidity', 'battery_ok'],
'inFactory-TH': ['temperature_C', 'humidity', 'battery_ok'],
'BME280': ['temperature_C', 'humidity', 'pressure_rel'], # Pool BME280
'BME280': ['temperature_C', 'humidity', 'pressure_rel', 'vcc_mv'], # Pool BME280 includes VCC
'DS18B20': ['temperature_C'], # Pool DS18B20
}
# Fallback for unknown types - try to match by substring
@@ -448,7 +576,11 @@ def save_json_locally(json_dict):
def ensure_db_connection(force: bool = False) -> bool:
"""Try to establish DB connectivity with throttling. Returns True if DB is reachable."""
"""Try to establish DB connectivity with throttling. Returns True if DB is reachable.
This function tests the connection and reinitializes the session if needed.
On failure, it disposes the pool to force reconnection next attempt.
"""
global db_available, last_db_check, session
now = time.time()
if not force and (now - last_db_check) < DB_RETRY_SECONDS:
@@ -456,6 +588,7 @@ def ensure_db_connection(force: bool = False) -> bool:
last_db_check = now
try:
# Test connection with explicit timeout
with sql_engine.connect() as conn:
conn.execute(text('SELECT 1'))
if not db_available:
@@ -463,12 +596,21 @@ def ensure_db_connection(force: bool = False) -> bool:
db_available = True
# Recreate session to ensure fresh connections
session = Session()
except Exception as e:
except exc.OperationalError as e:
# Connection failed - dispose pool to force fresh connections on next attempt
sql_engine.dispose()
if db_available:
logger.warning(f"Lost database connectivity: {e}")
else:
logger.info(f"Database still unreachable: {e}")
db_available = False
except Exception as e:
sql_engine.dispose()
if db_available:
logger.warning(f"Unexpected database error: {type(e).__name__}: {e}")
else:
logger.info(f"Database still unreachable: {type(e).__name__}: {e}")
db_available = False
return db_available
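
Condensed, the probe/throttle/dispose pattern this function implements looks like the following standalone sketch (the retry interval and the SQLite stand-in engine are assumptions):

```python
# Condensed stand-alone version of the probe pattern (retry interval and
# SQLite engine are assumptions for the sketch).
import time
from sqlalchemy import create_engine, exc, text

DB_RETRY_SECONDS = 60
engine = create_engine("sqlite://", pool_pre_ping=True)
_last_check, _available = 0.0, False

def ensure_db(force: bool = False) -> bool:
    global _last_check, _available
    now = time.time()
    if not force and (now - _last_check) < DB_RETRY_SECONDS:
        return _available            # throttled: reuse the last verdict
    _last_check = now
    try:
        with engine.connect() as conn:
            conn.execute(text("SELECT 1"))
        _available = True
    except exc.SQLAlchemyError:
        engine.dispose()             # drop pooled sockets; reconnect fresh next time
        _available = False
    return _available
```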
@@ -477,7 +619,7 @@ def on_connect(client, userdata, flags, reason_code, properties):
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(MQTT_TOPIC_PREFIX)
print(f"Connected with result code {reason_code}")
logger.info(f"Connected with result code {reason_code}")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
@@ -513,8 +655,8 @@ def on_message(client, userdata, msg):
except ValueError:
if LOG_MALFORMED_HEX:
malformed_hex_logger.info(f"Invalid hex: {hex_data}")
print(f"Invalid hex data: {hex_data}")
print(d)
logger.debug(f"Invalid hex data: {hex_data}")
logger.debug(f"Full message: {d}")
warte = ''
return
@@ -522,8 +664,8 @@ def on_message(client, userdata, msg):
if LOG_MALFORMED_HEX and candidate_meta.get("source") == "raw":
malformed_hex_logger.info(f"Pool using raw bytes (no sync match): {byte_data.hex()}")
print(f"Raw bytes ({len(byte_data)}): {byte_data.hex()}")
print(
logger.debug(f"Raw bytes ({len(byte_data)}): {byte_data.hex()}")
logger.debug(
f"Candidate payload ({len(candidate_bytes)}), source={candidate_meta.get('source')}: "
f"{candidate_bytes.hex()}"
)
@@ -541,17 +683,34 @@ def on_message(client, userdata, msg):
if not decoded:
if LOG_MALFORMED_HEX:
malformed_hex_logger.info(f"No valid payload found: {candidate_bytes.hex()}")
print("No valid pool payload found in candidate bytes")
logger.debug("No valid pool payload found in candidate bytes")
warte = ''
return
print(
f"Decoded payload at offset {decoded['offset']}: seq={decoded['sequence']}, "
f"t_ds={decoded['t_ds_c']}C, t_bme={decoded['t_bme_c']}C, "
f"hum={decoded['humidity']}%, pres={decoded['pressure_hpa']}hPa, "
f"magic_ok={decoded['magic_ok']}"
logger.debug(
f"Decoded payload at offset {decoded['offset']} ({decoded.get('format','')})"
f": seq={decoded['sequence']}, t_ds={decoded['t_ds_c']}C, "
f"t_bme={decoded['t_bme_c']}C, hum={decoded['humidity']}%, "
f"pres={decoded['pressure_hpa']}hPa, vcc={decoded.get('vcc_mv','n/a')}mV, "
f"magic_ok={decoded['magic_ok']}, crc_valid={decoded['crc_valid']}, "
f"crc_exp={decoded.get('crc_expected')}, reset_flags=0x{decoded['reset_flags']:X}"
)
reset_flags = decoded.get('reset_flags', 0)
reset_causes = decoded.get('reset_causes', [])
last_flags = pool_reset_flags_seen.get(decoded['nodeId'])
if last_flags != reset_flags:
causes_text = ", ".join(reset_causes) if reset_causes else "none set"
reset_msg = (
f"Pool node {decoded['nodeId']} MCU reset flags 0x{reset_flags:X}: {causes_text}"
)
if reset_flags & 0x0C: # BORF or WDRF -> warn
logger.warning(reset_msg)
else:
logger.info(reset_msg)
pool_reset_flags_seen[decoded['nodeId']] = reset_flags
original_time = d.get('time', datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S'))
bme_msg = {
@@ -562,6 +721,7 @@ def on_message(client, userdata, msg):
'temperature_C': decoded['t_bme_c'],
'humidity': decoded['humidity'],
'pressure_rel': decoded['pressure_hpa'],
'vcc_mv': decoded.get('vcc_mv'),
'mic': 'CRC'
}
@@ -575,7 +735,7 @@ def on_message(client, userdata, msg):
}
for msg_data in [bme_msg, ds_msg]:
print(f"Received message from {msg_data['model']}: \n {msg_data}")
logger.debug(f"Received message from {msg_data['model']}: {msg_data}")
sensor_id = msg_data['id']
sensor_key = f"{msg_data['model']}_{sensor_id}"
@@ -588,7 +748,7 @@ def on_message(client, userdata, msg):
return
else:
# Process non-pool sensors
print(f"Received message from {model}: \n {d}")
logger.debug(f"Received message from {model}: {d}")
id = d['id']
if model == 'Bresser-6in1':
if d['flags'] == 0:
@@ -602,7 +762,7 @@ def on_message(client, userdata, msg):
# Define a function to update the data in the database
def update_data(utc_time, mqtt_id, temperature_c, humidity, pressure_rel, battery, average_speed, direction, gust, rain_mm):
def update_data(utc_time, mqtt_id, temperature_c, humidity, pressure_rel, battery, average_speed, direction, gust, rain_mm, vcc_mv=None):
values = {
"utc_time": utc_time,
"mqtt_id": mqtt_id,
@@ -613,7 +773,8 @@ def update_data(utc_time, mqtt_id, temperature_c, humidity, pressure_rel, batter
"average_speed": average_speed,
"direction": direction,
"gust": gust,
"rain_mm": rain_mm
"rain_mm": rain_mm,
"vcc_mv": vcc_mv,
}
if ensure_db_connection():
new_data_queue.append(values)
@@ -782,7 +943,7 @@ def handle_pool_nodeid_change(old_node_id, new_mqtt_id):
refresh_sensor_cache()
def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, battery, average_speed, direction, gust, rain_mm):
def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, battery, average_speed, direction, gust, rain_mm, vcc_mv=None):
mqtt_name, mqtt_id = mqtt_name_id.split('_', 1) # Use maxsplit=1 to handle IDs with underscores
# Get the sensor object from the database (with auto-update for battery changes)
@@ -798,7 +959,8 @@ def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, b
'average_speed': average_speed,
'direction': direction,
'gust': gust,
'rain_mm': rain_mm
'rain_mm': rain_mm,
'vcc_mv': vcc_mv,
})
return
@@ -816,7 +978,8 @@ def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, b
'average_speed': average_speed,
'direction': direction,
'gust': gust,
'rain_mm': rain_mm
'rain_mm': rain_mm,
'vcc_mv': vcc_mv,
})
return
@@ -825,9 +988,14 @@ def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, b
# Update the sensor's battery level
sensor.battery = battery
# Update last contact time for pool sensors
# Update last contact time
# Pool sensors: update every contact (critical for monitoring)
# Other sensors: only update if >5 minutes to reduce DB writes
now = datetime.now(timezone.utc)
if mqtt_name == 'pool':
sensor.last_contact = datetime.now(timezone.utc)
sensor.last_contact = now
elif sensor.last_contact is None or (now - sensor.last_contact.replace(tzinfo=timezone.utc)).total_seconds() > 300:
sensor.last_contact = now
# Update the temperature data
if temperature_c is not None:
@@ -868,6 +1036,14 @@ def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, b
air_pressure = AirPressure(timestamp=utc_time, sensor_id=sensor.id, pressure_rel=pressure_rel)
session.add(air_pressure)
# Store voltage if provided (associate with this sensor)
if vcc_mv is not None:
try:
voltage_entry = Voltage(timestamp=utc_time, sensor_id=sensor.id, vcc_mv=int(vcc_mv))
session.add(voltage_entry)
except Exception as e:
logger.warning(f"Failed to store voltage for {mqtt_name_id}: {e}")
if average_speed is not None or gust is not None or direction is not None:
wind_value = session.query(Wind).filter_by(sensor_id=sensor.id).order_by(Wind.timestamp.desc()).first()
if wind_value is None or (average_speed is not None and wind_value.average_speed != average_speed) or (gust is not None and wind_value.gust != gust) or (direction is not None and wind_value.direction != direction):
@@ -903,7 +1079,10 @@ def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, b
session.commit()
except exc.SQLAlchemyError as e:
logger.error(f"SQLAlchemyError on commit: {e}")
session.rollback()
try:
session.rollback()
except Exception:
pass # Ignore rollback errors if connection is lost
save_json_locally({
'utc_time': utc_time,
'mqtt_id': mqtt_name_id,
@@ -914,11 +1093,15 @@ def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, b
'average_speed': average_speed,
'direction': direction,
'gust': gust,
'rain_mm': rain_mm
'rain_mm': rain_mm,
'vcc_mv': vcc_mv,
})
except Exception as e:
logger.error(f"Error on commit: {e}")
session.rollback()
try:
session.rollback()
except Exception:
pass # Ignore rollback errors if connection is lost
save_json_locally({
'utc_time': utc_time,
'mqtt_id': mqtt_name_id,
@@ -929,7 +1112,8 @@ def store_in_db(utc_time, mqtt_name_id, temperature_c, humidity, pressure_rel, b
'average_speed': average_speed,
'direction': direction,
'gust': gust,
'rain_mm': rain_mm
'rain_mm': rain_mm,
'vcc_mv': vcc_mv,
})
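
The Voltage rows written above come from a model defined in data_tables.py, which is not part of this diff; a plausible shape, with column types inferred from the call sites and therefore assumptions, would be:

```python
# Plausible shape of the Voltage model (the real class lives in data_tables.py;
# column types are assumptions inferred from Voltage(timestamp=..., sensor_id=..., vcc_mv=...)).
from sqlalchemy import Column, DateTime, ForeignKey, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Voltage(Base):
    __tablename__ = 'voltage'
    id = Column(Integer, primary_key=True)
    timestamp = Column(DateTime, nullable=False)
    sensor_id = Column(Integer, ForeignKey('sensor.id'), nullable=False)
    vcc_mv = Column(Integer, nullable=False)  # supply voltage in millivolts
```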
@@ -988,12 +1172,12 @@ def debug_sended_data(seen_messages, averages, sensor):
if not debug:
return
print(f'Averages for {sensor}:')
logger.debug(f'Averages for {sensor}:')
for key, value in averages.items():
print(f"{key}: {value}")
logger.debug(f"{key}: {value}")
print(f"Remaining data {sensor}:")
print(seen_messages[sensor])
logger.debug(f"Remaining data {sensor}:")
logger.debug(f"{seen_messages[sensor]}")
def process_sensor_data(utc_time, sensor_key, data_list, keys_to_average, mqtt_id_override=None):
"""Helper function to process any sensor data consistently"""
@@ -1021,7 +1205,8 @@ def process_sensor_data(utc_time, sensor_key, data_list, keys_to_average, mqtt_i
averages.get('wind_avg_m_s'),
averages.get('wind_dir_deg'),
averages.get('wind_max_m_s'),
averages.get('rain_mm')
averages.get('rain_mm'),
averages.get('vcc_mv')
)
debug_sended_data({sensor_key: remaining}, averages, sensor_key)
return remaining
@@ -1093,6 +1278,8 @@ def get_and_delete_json_data():
# Function to synchronize the data
def sync_data():
global session
if not ensure_db_connection(force=True):
# MariaDB not available - store in SQLite
while new_data_queue:
@@ -1114,14 +1301,17 @@ def sync_data():
store_in_db(data['utc_time'], data['mqtt_id'], data.get('temperature_c'),
data.get('humidity'), data.get('pressure_rel'), data.get('battery', 1),
data.get('average_speed'), data.get('direction'),
data.get('gust'), data.get('rain_mm'))
data.get('gust'), data.get('rain_mm'), data.get('vcc_mv'))
if not local_data_written:
utc_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S %Z")
logger.info(f"{utc_time}: Restoring data from local SQLite backup to MariaDB")
local_data_written = True
except exc.SQLAlchemyError as e:
logger.error(f"SQLAlchemyError syncing local data: {e}")
session.rollback()
try:
session.rollback()
except Exception:
pass # Ignore rollback errors if connection is lost
# Put the record back into SQLite
save_json_locally(data)
except Exception as e:
@@ -1135,10 +1325,13 @@ def sync_data():
if isinstance(data, dict) and 'mqtt_id' in data:
store_in_db(data['utc_time'], data['mqtt_id'], data['temperature_c'], data['humidity'],
data['pressure_rel'], data['battery'], data['average_speed'], data['direction'],
data['gust'], data['rain_mm'])
data['gust'], data['rain_mm'], data.get('vcc_mv'))
except exc.SQLAlchemyError as e:
logger.error(f"SQLAlchemyError: {e}")
session.rollback()
try:
session.rollback()
except Exception:
pass # Ignore rollback errors if connection is lost
save_json_locally(data)
except Exception as e:
logger.error(f"Error writing data: {e}")
@@ -1259,7 +1452,7 @@ if __name__ == '__main__':
else:
logger.warning(f"Starting without database; will cache data locally and retry every {DB_RETRY_SECONDS}s")
print('start data collection')
logger.info('Starting data collection')
try:
while True:
# Periodically retry DB connection if currently down