feat(academic-periods): period selector, active period API, holiday indicators; UI polish; bump version

Dashboard:

Add Syncfusion academic period dropdown next to group selector
Navigate scheduler to today's month/day within selected period year on change
Show adjacent holiday plan badge; keep "holidays in view" counter on the right
Compact dropdown widths for a tighter toolbar
Default blocking of scheduling on holidays; block entries styled like all-day; black text styling
API:

Add academic periods routes: list, get active, set active (POST), for_date
Register blueprint in wsgi
Holidays:

Support TXT/CSV upload; headerless TXT uses columns 2-4; region remains null
Docs:

Update shared Copilot instructions with academic periods endpoints and dashboard integration details
This commit is contained in:
2025-09-21 14:35:38 +00:00
parent 41194000a4
commit eaf6e32446
9 changed files with 640 additions and 44 deletions

View File

@@ -24,9 +24,14 @@ def list_holidays():
@holidays_bp.route("/upload", methods=["POST"])
def upload_holidays():
"""
Accepts a CSV file upload (multipart/form-data) with columns like:
name,start_date,end_date,region
Dates can be in ISO (YYYY-MM-DD) or common European format (DD.MM.YYYY).
Accepts a CSV/TXT file upload (multipart/form-data).
Supported formats:
1) Headered CSV with columns (case-insensitive): name, start_date, end_date[, region]
- Dates: YYYY-MM-DD, DD.MM.YYYY, YYYY/MM/DD, or YYYYMMDD
2) Headerless CSV/TXT lines with columns:
[internal, name, start_yyyymmdd, end_yyyymmdd, optional_internal]
- Only columns 2-4 are used; 1 and 5 are ignored.
"""
if "file" not in request.files:
return jsonify({"error": "No file part"}), 400
@@ -35,26 +40,36 @@ def upload_holidays():
return jsonify({"error": "No selected file"}), 400
try:
content = file.read().decode("utf-8", errors="ignore")
# Try to auto-detect delimiter; default ','
raw = file.read()
# Try UTF-8 first (strict), then cp1252, then latin-1 as last resort
try:
content = raw.decode("utf-8")
except UnicodeDecodeError:
try:
content = raw.decode("cp1252")
except UnicodeDecodeError:
content = raw.decode("latin-1", errors="replace")
sniffer = csv.Sniffer()
dialect = None
try:
dialect = sniffer.sniff(content[:1024])
sample = content[:2048]
# Some files may contain a lot of quotes; allow Sniffer to guess delimiter
dialect = sniffer.sniff(sample)
except Exception:
pass
reader = csv.DictReader(io.StringIO(
content), dialect=dialect) if dialect else csv.DictReader(io.StringIO(content))
required = {"name", "start_date", "end_date"}
if not required.issubset(set(h.lower() for h in reader.fieldnames or [])):
return jsonify({"error": "CSV must contain headers: name, start_date, end_date"}), 400
def parse_date(s: str):
s = (s or "").strip()
if not s:
return None
# Try ISO first
# Numeric YYYYMMDD
if s.isdigit() and len(s) == 8:
try:
return datetime.strptime(s, "%Y%m%d").date()
except ValueError:
pass
# Common formats
for fmt in ("%Y-%m-%d", "%d.%m.%Y", "%Y/%m/%d"):
try:
return datetime.strptime(s, fmt).date()
@@ -65,16 +80,18 @@ def upload_holidays():
session = Session()
inserted = 0
updated = 0
for row in reader:
# Normalize headers to lower-case keys
norm = {k.lower(): (v or "").strip() for k, v in row.items()}
name = norm.get("name")
start_date = parse_date(norm.get("start_date"))
end_date = parse_date(norm.get("end_date"))
region = norm.get("region") or None
if not name or not start_date or not end_date:
continue
# First, try headered CSV via DictReader
dict_reader = csv.DictReader(io.StringIO(
content), dialect=dialect) if dialect else csv.DictReader(io.StringIO(content))
fieldnames_lower = [h.lower() for h in (dict_reader.fieldnames or [])]
has_required_headers = {"name", "start_date",
"end_date"}.issubset(set(fieldnames_lower))
def upsert(name: str, start_date, end_date, region=None):
nonlocal inserted, updated
if not name or not start_date or not end_date:
return
existing = (
session.query(SchoolHoliday)
.filter(
@@ -86,9 +103,7 @@ def upload_holidays():
)
.first()
)
if existing:
# Optionally update region or source_file_name
existing.region = region
existing.source_file_name = file.filename
updated += 1
@@ -102,6 +117,41 @@ def upload_holidays():
))
inserted += 1
if has_required_headers:
for row in dict_reader:
norm = {k.lower(): (v or "").strip() for k, v in row.items()}
name = norm.get("name")
try:
start_date = parse_date(norm.get("start_date"))
end_date = parse_date(norm.get("end_date"))
except ValueError:
# Skip rows with unparseable dates
continue
region = (norm.get("region")
or None) if "region" in norm else None
upsert(name, start_date, end_date, region)
else:
# Fallback: headerless rows -> use columns [1]=name, [2]=start, [3]=end
reader = csv.reader(io.StringIO(
content), dialect=dialect) if dialect else csv.reader(io.StringIO(content))
for row in reader:
if not row:
continue
# tolerate varying column counts (4 or 5); ignore first and optional last
cols = [c.strip() for c in row]
if len(cols) < 4:
# Not enough data
continue
name = cols[1].strip().strip('"')
start_raw = cols[2]
end_raw = cols[3]
try:
start_date = parse_date(start_raw)
end_date = parse_date(end_raw)
except ValueError:
continue
upsert(name, start_date, end_date, None)
session.commit()
session.close()
return jsonify({"success": True, "inserted": inserted, "updated": updated})