commit c952a3b56e8fcd0be300bfc32a981987ff81af57 Author: chrisryn Date: Fri Jan 30 21:03:25 2026 -0600 Initial commit: Homelab Infrastructure Dashboard Features: - Real-time Proxmox cluster monitoring (nodes, LXC containers) - Camera integration with go2rtc streams - Arr stack download progress monitoring - PBS backup status - Docker container status - Uptime Kuma service health - FastAPI backend with HTMX frontend diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d80f23c --- /dev/null +++ b/.gitignore @@ -0,0 +1,21 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +venv/ +ENV/ +.env + +# IDE +.vscode/ +.idea/ +*.swp +*.swo + +# Logs +*.log + +# Local config +config.local.py diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/config.py b/app/config.py new file mode 100644 index 0000000..4209d87 --- /dev/null +++ b/app/config.py @@ -0,0 +1,177 @@ +"""Enhanced Dashboard configuration v2.""" +from dataclasses import dataclass, field +from typing import Optional, List, Dict +import json +import os + +@dataclass +class Service: + name: str + url: str + ip: str + port: int + category: str + icon: str = "server" + favorite: bool = False + critical: bool = False + group: Optional[str] = None + +# Proxmox configuration +PROXMOX_NODES = [ + {"name": "empire", "ip": "192.168.1.75", "port": 8006}, + {"name": "republic", "ip": "192.168.1.80", "port": 8006}, + {"name": "Jedi", "ip": "192.168.1.40", "port": 8006}, + {"name": "hoth", "ip": "192.168.1.76", "port": 8006}, +] + +PROXMOX_API_TOKEN = "root@pam!dashboard" +PROXMOX_API_SECRET = "edb7e222-f45d-4b15-8089-0c7eee397ab5" + +# PBS configuration +PBS_URL = "https://192.168.1.159:8007" +PBS_API_TOKEN = "root@pam!dashboard" +PBS_API_SECRET = "edb7e222-f45d-4b15-8089-0c7eee397ab5" + +# OPNsense configuration +OPNSENSE_URL = "https://192.168.1.1" +OPNSENSE_API_KEY = "" +OPNSENSE_API_SECRET = "" + +# Prometheus configuration +PROMETHEUS_URL = "http://192.168.1.163:9090" + +# Camera configuration +GO2RTC_URL = "http://192.168.1.241:1985" +CAMERAS = ["FPE", "Front_Porch", "Driveway", "Driveway_door", "Backyard", "House_side", "Street_side", "Porch_Downstairs"] + +# Sabnzbd configuration +SABNZBD_URL = "http://192.168.1.66:7777" +SABNZBD_API_KEY = "" + +# Uptime Kuma configuration +UPTIME_KUMA_URL = "http://192.168.1.155:3001" +UPTIME_KUMA_STATUS_PAGE = "uptime" + +# Docker hosts +DOCKER_HOSTS = [ + {"name": "c3p0", "ip": "192.168.1.54", "port": 2375}, + {"name": "frigate", "ip": "192.168.1.241", "port": 2375}, +] + +# Service groups +SERVICE_GROUPS = { + "Arr Stack": ["Radarr", "Sonarr", "Lidarr", "Prowlarr", "Sabnzbd"], + "Media Players": ["Jellyfin", "Jellyseerr", "Tdarr"], + "Cameras": ["Frigate", "go2rtc"], +} + +# Icon SVG paths +SERVICE_ICONS = { + "shield": '', + "globe": '', + "archive": '', + "lock": '', + "film": '', + "star": '', + "video": '', + "tv": '', + "music": '', + "search": '', + "download": '', + "cog": '', + "chart": '', + "heartbeat": '', + "key": '', + "git": '', + "workflow": '', + "book": '', + "image": '', + "brain": '', + "home": '', + "camera": '', + "message": '', + "server": '', + "database": '', +} + +# Services +SERVICES = [ + Service("OPNsense", "https://192.168.1.1", "192.168.1.1", 443, "Infrastructure", "shield", critical=True), + Service("NPM", "https://192.168.1.38:81", "192.168.1.38", 81, "Infrastructure", "globe", critical=True), + Service("PBS", "https://192.168.1.159:8007", "192.168.1.159", 8007, "Infrastructure", "archive", 
critical=True), + Service("Authentik", "https://auth.deathstar-home.one", "192.168.1.200", 9000, "Infrastructure", "lock"), + Service("Jellyfin", "https://jellyfin.deathstar-home.one", "192.168.1.49", 8096, "Media", "film", favorite=True, group="Media Players"), + Service("Jellyseerr", "https://request.deathstar-home.one", "192.168.1.12", 5055, "Media", "star", favorite=True, group="Media Players"), + Service("Radarr", "https://radarr.deathstar-home.one", "192.168.1.56", 7878, "Media", "film", group="Arr Stack"), + Service("Sonarr", "https://sonarr.deathstar-home.one", "192.168.1.62", 8989, "Media", "tv", group="Arr Stack"), + Service("Lidarr", "https://lidarr.deathstar-home.one", "192.168.1.23", 8686, "Media", "music", group="Arr Stack"), + Service("Prowlarr", "https://prowlarr.deathstar-home.one", "192.168.1.58", 9696, "Media", "search", group="Arr Stack"), + Service("Sabnzbd", "https://sabnzbd.deathstar-home.one", "192.168.1.66", 7777, "Media", "download", group="Arr Stack"), + Service("Tdarr", "https://tdarr.deathstar-home.one", "192.168.1.182", 8265, "Media", "cog", group="Media Players"), + Service("Grafana", "https://grafana.deathstar-home.one", "192.168.1.163", 3000, "Monitoring", "chart", favorite=True), + Service("Uptime Kuma", "https://status.deathstar-home.one", "192.168.1.155", 3001, "Monitoring", "heartbeat"), + Service("Vaultwarden", "https://vault.deathstar-home.one", "192.168.1.154", 80, "Apps", "key", favorite=True), + Service("Gitea", "https://git.deathstar-home.one", "192.168.1.135", 3000, "Apps", "git"), + Service("n8n", "https://n8n.deathstar-home.one", "192.168.1.254", 5678, "Apps", "workflow"), + Service("BookStack", "https://docs.deathstar-home.one", "192.168.1.194", 80, "Apps", "book"), + Service("Immich", "https://cloud.deathstar-home.one", "192.168.1.54", 2283, "Apps", "image", favorite=True), + Service("Open WebUI", "https://ai.deathstar-home.one", "192.168.1.63", 8080, "Apps", "brain"), + Service("Home Assistant", "https://astro.deathstar-home.one", "192.168.1.50", 8123, "Home", "home", favorite=True, critical=True), + Service("Frigate", "https://frigate.deathstar-home.one", "192.168.1.241", 5000, "Home", "camera", group="Cameras"), + Service("go2rtc", "http://192.168.1.241:1985", "192.168.1.241", 1985, "Home", "video", group="Cameras"), + Service("Matrix", "https://chat.deathstar-home.one", "192.168.1.162", 8080, "Home", "message"), +] + +CATEGORIES = { + "Infrastructure": {"color": "blue", "icon": "server"}, + "Media": {"color": "purple", "icon": "film"}, + "Monitoring": {"color": "amber", "icon": "activity"}, + "Apps": {"color": "emerald", "icon": "grid"}, + "Home": {"color": "cyan", "icon": "home"}, +} + +SETTINGS_FILE = "/opt/dashboard/settings.json" +DEFAULT_SETTINGS = { + "refresh_interval": 30, + "theme": "dark", + "favorites": ["Jellyfin", "Jellyseerr", "Grafana", "Vaultwarden", "Immich", "Home Assistant"], + "collapsed_categories": [], + "show_response_times": True, + "show_icons": True, +} + +def load_settings(): + try: + if os.path.exists(SETTINGS_FILE): + with open(SETTINGS_FILE, 'r') as f: + return {**DEFAULT_SETTINGS, **json.load(f)} + except: + pass + return DEFAULT_SETTINGS.copy() + +def save_settings(settings): + try: + with open(SETTINGS_FILE, 'w') as f: + json.dump(settings, f, indent=2) + return True + except: + return False + +def get_services_by_category(): + categories = {} + for service in SERVICES: + if service.category not in categories: + categories[service.category] = [] + categories[service.category].append(service) + return 
categories + +def get_favorites(): + settings = load_settings() + fav_names = settings.get("favorites", []) + return [s for s in SERVICES if s.name in fav_names or s.favorite] + +def get_critical_services(): + return [s for s in SERVICES if s.critical] + +def get_service_icon(icon_name): + return SERVICE_ICONS.get(icon_name, SERVICE_ICONS["server"]) diff --git a/app/main.py b/app/main.py new file mode 100644 index 0000000..76c6104 --- /dev/null +++ b/app/main.py @@ -0,0 +1,265 @@ +"""Enhanced FastAPI dashboard v2 with all integrations.""" +from fastapi import FastAPI, Request, Form +from fastapi.staticfiles import StaticFiles +from fastapi.templating import Jinja2Templates +from fastapi.responses import HTMLResponse, StreamingResponse, JSONResponse, RedirectResponse +import httpx +from datetime import datetime + +from app.config import ( + SERVICES, PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET, + PBS_URL, PBS_API_TOKEN, PBS_API_SECRET, + CATEGORIES, GO2RTC_URL, CAMERAS, SABNZBD_URL, SABNZBD_API_KEY, + UPTIME_KUMA_URL, UPTIME_KUMA_STATUS_PAGE, DOCKER_HOSTS, + PROMETHEUS_URL, SERVICE_ICONS, SERVICE_GROUPS, + get_services_by_category, get_favorites, get_critical_services, get_service_icon, + load_settings, save_settings, DEFAULT_SETTINGS +) +from app.services.health import ( + check_all_services, get_all_proxmox_metrics, get_camera_list, + get_sabnzbd_queue, get_uptime_kuma_status, get_docker_containers, + get_docker_container_counts, get_pbs_status, get_storage_pools, + get_recent_events, get_cluster_uptime, get_prometheus_metrics +) + +app = FastAPI(title="DeathStar Dashboard", version="2.0.0") +app.mount("/static", StaticFiles(directory="static"), name="static") +templates = Jinja2Templates(directory="app/templates") + +# Add custom filters +templates.env.globals["get_service_icon"] = get_service_icon +templates.env.globals["SERVICE_ICONS"] = SERVICE_ICONS + +last_check = {"time": None} + +@app.get("/", response_class=HTMLResponse) +async def dashboard(request: Request): + """Main dashboard page.""" + services_status = await check_all_services(SERVICES) + nodes_status = await get_all_proxmox_metrics(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + docker_counts = await get_docker_container_counts(DOCKER_HOSTS) + settings = load_settings() + + online_count = sum(1 for s in services_status.values() if s.status == "online") + total_count = len(services_status) + critical_down = [s.name for s in get_critical_services() + if services_status.get(s.name) and services_status[s.name].status != "online"] + cluster_uptime = get_cluster_uptime(nodes_status) + + last_check["time"] = datetime.now() + + return templates.TemplateResponse("dashboard.html", { + "request": request, + "services_by_category": get_services_by_category(), + "services_status": services_status, + "nodes_status": nodes_status, + "favorites": get_favorites(), + "categories": CATEGORIES, + "online_count": online_count, + "total_count": total_count, + "critical_down": critical_down, + "last_check": last_check["time"].strftime("%H:%M:%S") if last_check["time"] else "Never", + "cameras": CAMERAS, + "go2rtc_url": GO2RTC_URL, + "docker_counts": docker_counts, + "cluster_uptime": cluster_uptime, + "settings": settings, + "service_groups": SERVICE_GROUPS, + }) + +@app.get("/health") +async def health(): + return {"status": "healthy"} + +@app.get("/api/services", response_class=HTMLResponse) +async def api_services(request: Request): + """HTMX endpoint for services.""" + services_status = await check_all_services(SERVICES) + 
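+    # HTMX fetches this route and swaps the rendered partial into the services grid in place,
+    # so only this fragment re-renders on each refresh cycle.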
return templates.TemplateResponse("partials/services.html", { + "request": request, + "services_by_category": get_services_by_category(), + "services_status": services_status, + "categories": CATEGORIES, + "service_groups": SERVICE_GROUPS, + }) + +@app.get("/api/nodes", response_class=HTMLResponse) +async def api_nodes(request: Request): + """HTMX endpoint for Proxmox nodes.""" + nodes_status = await get_all_proxmox_metrics(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + cluster_uptime = get_cluster_uptime(nodes_status) + return templates.TemplateResponse("partials/nodes.html", { + "request": request, + "nodes_status": nodes_status, + "cluster_uptime": cluster_uptime, + }) + +@app.get("/api/nodes-expanded", response_class=HTMLResponse) +async def api_nodes_expanded(request: Request): + """HTMX endpoint for expanded Proxmox nodes with VMs/containers.""" + nodes_status = await get_all_proxmox_metrics(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + return templates.TemplateResponse("partials/nodes_expanded.html", { + "request": request, + "nodes_status": nodes_status, + }) + +@app.get("/api/favorites", response_class=HTMLResponse) +async def api_favorites(request: Request): + """HTMX endpoint for favorites.""" + services_status = await check_all_services(SERVICES) + return templates.TemplateResponse("partials/favorites.html", { + "request": request, + "favorites": get_favorites(), + "services_status": services_status, + }) + +@app.get("/api/cameras", response_class=HTMLResponse) +async def api_cameras(request: Request): + """HTMX endpoint for cameras.""" + return templates.TemplateResponse("partials/cameras.html", { + "request": request, + "cameras": CAMERAS, + "go2rtc_url": GO2RTC_URL, + }) + +@app.get("/api/downloads", response_class=HTMLResponse) +async def api_downloads(request: Request): + """HTMX endpoint for downloads.""" + queue = await get_sabnzbd_queue(SABNZBD_URL, SABNZBD_API_KEY) + return templates.TemplateResponse("partials/downloads.html", { + "request": request, + "queue": queue, + }) + +@app.get("/api/status-banner", response_class=HTMLResponse) +async def api_status_banner(request: Request): + """HTMX endpoint for status banner.""" + services_status = await check_all_services(SERVICES) + critical_down = [s.name for s in get_critical_services() + if services_status.get(s.name) and services_status[s.name].status != "online"] + online_count = sum(1 for s in services_status.values() if s.status == "online") + last_check["time"] = datetime.now() + + return templates.TemplateResponse("partials/status_banner.html", { + "request": request, + "critical_down": critical_down, + "online_count": online_count, + "total_count": len(services_status), + "last_check": last_check["time"].strftime("%H:%M:%S"), + }) + +@app.get("/api/docker", response_class=HTMLResponse) +async def api_docker(request: Request): + """HTMX endpoint for Docker containers.""" + containers = await get_docker_containers(DOCKER_HOSTS) + counts = await get_docker_container_counts(DOCKER_HOSTS) + return templates.TemplateResponse("partials/docker.html", { + "request": request, + "containers": containers, + "hosts": DOCKER_HOSTS, + "counts": counts, + }) + +@app.get("/api/uptime", response_class=HTMLResponse) +async def api_uptime(request: Request): + """HTMX endpoint for Uptime Kuma status.""" + uptime_data = await get_uptime_kuma_status(UPTIME_KUMA_URL, UPTIME_KUMA_STATUS_PAGE) + return templates.TemplateResponse("partials/uptime.html", { + "request": request, + "uptime": uptime_data, + "uptime_kuma_url": 
UPTIME_KUMA_URL, + }) + +@app.get("/api/pbs", response_class=HTMLResponse) +async def api_pbs(request: Request): + """HTMX endpoint for PBS status.""" + pbs_status = await get_pbs_status(PBS_URL, PBS_API_TOKEN, PBS_API_SECRET) + return templates.TemplateResponse("partials/pbs.html", { + "request": request, + "pbs": pbs_status, + }) + +@app.get("/api/storage", response_class=HTMLResponse) +async def api_storage(request: Request): + """HTMX endpoint for storage pools.""" + pools = await get_storage_pools(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + return templates.TemplateResponse("partials/storage.html", { + "request": request, + "pools": pools, + }) + +@app.get("/api/events", response_class=HTMLResponse) +async def api_events(request: Request): + """HTMX endpoint for recent events.""" + events = get_recent_events() + return templates.TemplateResponse("partials/events.html", { + "request": request, + "events": events[-10:], + }) + +@app.get("/api/camera-snapshot/{camera}") +async def camera_snapshot(camera: str): + """Proxy camera snapshot from go2rtc.""" + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(f"{GO2RTC_URL}/api/frame.jpeg?src={camera}") + if response.status_code == 200: + return StreamingResponse( + iter([response.content]), + media_type="image/jpeg", + headers={"Cache-Control": "no-cache"} + ) + except: + pass + return StreamingResponse(iter([b""]), media_type="image/jpeg") + +@app.get("/api/status") +async def api_status_json(): + """JSON endpoint for all status data.""" + services_status = await check_all_services(SERVICES) + nodes_status = await get_all_proxmox_metrics(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + + return { + "services": {name: {"status": s.status, "response_time_ms": s.response_time_ms} + for name, s in services_status.items()}, + "nodes": [{"name": n.name, "status": n.status, "cpu": n.cpu_percent, + "memory": n.memory_percent, "disk": n.disk_percent, "uptime_hours": n.uptime_hours} + for n in nodes_status] + } + +# Settings page +@app.get("/settings", response_class=HTMLResponse) +async def settings_page(request: Request): + """Settings page.""" + settings = load_settings() + return templates.TemplateResponse("settings.html", { + "request": request, + "settings": settings, + "all_services": SERVICES, + "categories": CATEGORIES, + }) + +@app.post("/settings") +async def save_settings_handler(request: Request): + """Save settings.""" + form = await request.form() + settings = load_settings() + + # Update settings from form + settings["refresh_interval"] = int(form.get("refresh_interval", 30)) + settings["theme"] = form.get("theme", "dark") + settings["show_response_times"] = form.get("show_response_times") == "on" + settings["show_icons"] = form.get("show_icons") == "on" + + # Handle favorites (multi-select) + favorites = form.getlist("favorites") + if favorites: + settings["favorites"] = favorites + + save_settings(settings) + return RedirectResponse(url="/settings?saved=1", status_code=303) + +@app.get("/api/settings", response_class=JSONResponse) +async def get_settings_api(): + """Get settings as JSON.""" + return load_settings() diff --git a/app/routers/__init__.py b/app/routers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/services/__init__.py b/app/services/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/services/health.py b/app/services/health.py new file mode 100644 index 0000000..2ceefaf --- /dev/null +++ b/app/services/health.py @@ 
-0,0 +1,391 @@
+"""Enhanced services module v2 with PBS, VM/LXC, storage pools, events."""
+import asyncio
+from typing import Dict, Any, Optional, List
+import httpx
+from dataclasses import dataclass, field
+from datetime import datetime
+from collections import deque
+
+@dataclass
+class HealthStatus:
+    name: str
+    status: str
+    response_time_ms: Optional[float] = None
+    error: Optional[str] = None
+
+@dataclass
+class NodeStatus:
+    name: str
+    ip: str
+    status: str
+    cpu_percent: Optional[float] = None
+    memory_percent: Optional[float] = None
+    memory_used_gb: Optional[float] = None
+    memory_total_gb: Optional[float] = None
+    disk_percent: Optional[float] = None
+    uptime_hours: Optional[float] = None
+    vms: List[Dict] = field(default_factory=list)
+    containers: List[Dict] = field(default_factory=list)
+
+@dataclass
+class DockerContainer:
+    name: str
+    status: str
+    state: str
+    image: str
+    host: str
+
+@dataclass
+class UptimeMonitor:
+    id: int
+    name: str
+    status: int
+    ping: Optional[int] = None
+    heartbeats: Optional[List[Dict]] = None
+
+@dataclass
+class PBSStatus:
+    status: str
+    datastore_usage: List[Dict] = field(default_factory=list)
+    last_backup: Optional[str] = None
+    total_size_gb: float = 0
+    used_size_gb: float = 0
+
+@dataclass
+class StoragePool:
+    name: str
+    node: str
+    total_gb: float
+    used_gb: float
+    avail_gb: float
+    percent_used: float
+    pool_type: str
+
+@dataclass
+class StatusEvent:
+    timestamp: datetime
+    service: str
+    old_status: str
+    new_status: str
+
+# Recent events storage (in-memory, last 20)
+recent_events: deque = deque(maxlen=20)
+last_status_cache: Dict[str, str] = {}
+
+SERVICE_CHECK_OVERRIDES = {
+    "OPNsense": ("https://192.168.1.1:8443/", 10.0),
+    "Vaultwarden": ("https://vault.deathstar-home.one/", 5.0),
+    "Immich": ("http://192.168.1.54:2283/", 5.0),
+}
+
+async def check_service(client: httpx.AsyncClient, service) -> HealthStatus:
+    """Check if a service is reachable."""
+    global last_status_cache, recent_events
+
+    if service.name in SERVICE_CHECK_OVERRIDES:
+        check_url, timeout = SERVICE_CHECK_OVERRIDES[service.name]
+    else:
+        https_ports = [443, 8006, 8007, 8443, 9443]
+        scheme = "https" if service.port in https_ports else "http"
+        check_url = f"{scheme}://{service.ip}:{service.port}/"
+        timeout = 5.0
+
+    start = asyncio.get_running_loop().time()
+    try:
+        response = await client.get(check_url, timeout=timeout, follow_redirects=True)
+        elapsed = (asyncio.get_running_loop().time() - start) * 1000
+        new_status = "online" if response.status_code < 500 else "degraded"
+        result = HealthStatus(name=service.name, status=new_status, response_time_ms=round(elapsed, 1))
+    except Exception:
+        # Timeouts, refused connections, and TLS failures all count as offline.
+        new_status = "offline"
+        result = HealthStatus(name=service.name, status="offline")
+
+    # Track status changes
+    old_status = last_status_cache.get(service.name)
+    if old_status and old_status != new_status:
+        recent_events.append(StatusEvent(
+            timestamp=datetime.now(),
+            service=service.name,
+            old_status=old_status,
+            new_status=new_status
+        ))
+    last_status_cache[service.name] = new_status
+
+    return result
+
+async def check_all_services(services) -> Dict[str, HealthStatus]:
+    """Check all services concurrently."""
+    async with httpx.AsyncClient(verify=False, timeout=10.0) as client:
+        tasks = [check_service(client, s) for s in services]
+        results = await asyncio.gather(*tasks)
+        return {r.name: r for r in results}
+
+async def get_proxmox_node_metrics(client: httpx.AsyncClient, node: Dict, token: str, secret: str) -> NodeStatus:
+    """Get Proxmox node metrics including VMs and containers."""
+    base_url = f"https://{node['ip']}:{node['port']}/api2/json"
+    headers = {"Authorization": f"PVEAPIToken={token}={secret}"}
+
+    result = NodeStatus(name=node["name"], ip=node["ip"], status="offline")
+
+    try:
+        # Get node status
+        response = await client.get(f"{base_url}/nodes/{node['name']}/status", headers=headers, timeout=5.0)
+        if response.status_code == 200:
+            data = response.json()["data"]
+            cpu = data.get("cpu", 0) * 100
+            mem_used = data.get("memory", {}).get("used", 0)
+            mem_total = data.get("memory", {}).get("total", 1)
+            mem_pct = (mem_used / mem_total) * 100 if mem_total else 0
+            disk_used = data.get("rootfs", {}).get("used", 0)
+            disk_total = data.get("rootfs", {}).get("total", 1)
+            disk_pct = (disk_used / disk_total) * 100 if disk_total else 0
+            uptime_sec = data.get("uptime", 0)
+
+            result.status = "online"
+            result.cpu_percent = round(cpu, 1)
+            result.memory_percent = round(mem_pct, 1)
+            result.memory_used_gb = round(mem_used / (1024**3), 1)
+            result.memory_total_gb = round(mem_total / (1024**3), 1)
+            result.disk_percent = round(disk_pct, 1)
+            result.uptime_hours = round(uptime_sec / 3600, 1)
+
+            # Get VMs
+            vm_response = await client.get(f"{base_url}/nodes/{node['name']}/qemu", headers=headers, timeout=5.0)
+            if vm_response.status_code == 200:
+                for vm in vm_response.json().get("data", []):
+                    result.vms.append({
+                        "vmid": vm.get("vmid"),
+                        "name": vm.get("name", f"VM {vm.get('vmid')}"),
+                        "status": vm.get("status"),
+                        "mem": round(vm.get("mem", 0) / (1024**3), 1) if vm.get("mem") else 0,
+                        "cpu": round(vm.get("cpu", 0) * 100, 1) if vm.get("cpu") else 0,
+                    })
+
+            # Get containers
+            ct_response = await client.get(f"{base_url}/nodes/{node['name']}/lxc", headers=headers, timeout=5.0)
+            if ct_response.status_code == 200:
+                for ct in ct_response.json().get("data", []):
+                    result.containers.append({
+                        "vmid": ct.get("vmid"),
+                        "name": ct.get("name", f"CT {ct.get('vmid')}"),
+                        "status": ct.get("status"),
+                        "mem": round(ct.get("mem", 0) / (1024**3), 1) if ct.get("mem") else 0,
+                        "cpu": round(ct.get("cpu", 0) * 100, 1) if ct.get("cpu") else 0,
+                    })
+    except Exception:
+        # Any API failure leaves the node marked offline.
+        pass
+
+    return result
+
+async def get_all_proxmox_metrics(nodes, token: str, secret: str) -> List[NodeStatus]:
+    """Get metrics for all Proxmox nodes."""
+    async with httpx.AsyncClient(verify=False) as client:
+        tasks = [get_proxmox_node_metrics(client, n, token, secret) for n in nodes]
+        return await asyncio.gather(*tasks)
+
+async def get_pbs_status(url: str, token: str, secret: str) -> PBSStatus:
+    """Get PBS backup server status."""
+    result = PBSStatus(status="offline")
+    # PBS separates the token id and secret with a colon (unlike PVE's "=").
+    headers = {"Authorization": f"PBSAPIToken={token}:{secret}"}
+
+    try:
+        async with httpx.AsyncClient(verify=False, timeout=10.0) as client:
+            # Get datastore status
+            ds_response = await client.get(f"{url}/api2/json/status/datastore-usage", headers=headers)
+            if ds_response.status_code == 200:
+                result.status = "online"
+                for ds in ds_response.json().get("data", []):
+                    total = ds.get("total", 0)
+                    used = ds.get("used", 0)
+                    result.datastore_usage.append({
+                        "name": ds.get("store"),
+                        "total_gb": round(total / (1024**3), 1),
+                        "used_gb": round(used / (1024**3), 1),
+                        "percent": round((used / total) * 100, 1) if total else 0,
+                    })
+                    result.total_size_gb += total / (1024**3)
+                    result.used_size_gb += used / (1024**3)
+
+            # Try to get last backup task
+            tasks_response = await client.get(f"{url}/api2/json/nodes/localhost/tasks", headers=headers)
+            if tasks_response.status_code == 200:
+                tasks = tasks_response.json().get("data", [])
+                backup_tasks = [t for t in tasks if t.get("type") == "backup"]
+                if backup_tasks:
+                    # Pick the most recent run rather than relying on API ordering.
+                    last = max(backup_tasks, key=lambda t: t.get("starttime", 0))
+                    result.last_backup = datetime.fromtimestamp(last.get("starttime", 0)).strftime("%Y-%m-%d %H:%M")
+    except Exception:
+        pass
+
+    return result
+
+async def get_storage_pools(nodes, token: str, secret: str) -> List[StoragePool]:
+    """Get storage pool info from all Proxmox nodes."""
+    pools = []
+    headers = {"Authorization": f"PVEAPIToken={token}={secret}"}
+
+    async with httpx.AsyncClient(verify=False, timeout=10.0) as client:
+        for node in nodes:
+            try:
+                url = f"https://{node['ip']}:{node['port']}/api2/json/nodes/{node['name']}/storage"
+                response = await client.get(url, headers=headers)
+                if response.status_code == 200:
+                    for storage in response.json().get("data", []):
+                        if storage.get("enabled") and storage.get("total"):
+                            total = storage.get("total", 0)
+                            used = storage.get("used", 0)
+                            avail = storage.get("avail", 0)
+                            pools.append(StoragePool(
+                                name=storage.get("storage"),
+                                node=node["name"],
+                                total_gb=round(total / (1024**3), 1),
+                                used_gb=round(used / (1024**3), 1),
+                                avail_gb=round(avail / (1024**3), 1),
+                                percent_used=round((used / total) * 100, 1) if total else 0,
+                                pool_type=storage.get("type", "unknown"),
+                            ))
+            except Exception:
+                pass
+
+    return pools
+
+async def get_docker_containers(hosts: List[Dict]) -> List[DockerContainer]:
+    """Get Docker containers via docker-socket-proxy."""
+    containers = []
+    async with httpx.AsyncClient(timeout=5.0) as client:
+        for host in hosts:
+            try:
+                url = f"http://{host['ip']}:{host['port']}/containers/json?all=true"
+                response = await client.get(url)
+                if response.status_code == 200:
+                    for c in response.json():
+                        name = c.get("Names", ["/unknown"])[0].lstrip("/")
+                        if name == "docker-socket-proxy":
+                            continue
+                        containers.append(DockerContainer(
+                            name=name,
+                            status=c.get("Status", ""),
+                            state=c.get("State", "unknown"),
+                            image=c.get("Image", "").split("/")[-1].split(":")[0],
+                            host=host["name"]
+                        ))
+            except Exception:
+                pass
+    return containers
+
+async def get_docker_container_counts(hosts: List[Dict]) -> Dict[str, int]:
+    """Get container counts per host."""
+    counts = {}
+    async with httpx.AsyncClient(timeout=5.0) as client:
+        for host in hosts:
+            try:
+                url = f"http://{host['ip']}:{host['port']}/containers/json"
+                response = await client.get(url)
+                if response.status_code == 200:
+                    # Exclude the docker-socket-proxy container itself from the running count
+                    count = len([c for c in response.json() if "docker-socket-proxy" not in c.get("Names", [""])[0]])
+                    counts[host["name"]] = count
+            except Exception:
+                counts[host["name"]] = 0
+    return counts
+
+async def get_uptime_kuma_status(url: str, status_page: str = "uptime") -> Dict:
+    """Get Uptime Kuma status."""
+    result = {"monitors": [], "summary": {"up": 0, "down": 0, "total": 0}}
+    try:
+        async with httpx.AsyncClient(timeout=5.0) as client:
+            hb_response = await client.get(f"{url}/api/status-page/heartbeat/{status_page}")
+            info_response = await client.get(f"{url}/api/status-page/{status_page}")
+
+            if hb_response.status_code == 200 and info_response.status_code == 200:
+                heartbeats = hb_response.json().get("heartbeatList", {})
+                info = info_response.json()
+
+                for group in info.get("publicGroupList", []):
+                    for monitor in group.get("monitorList", []):
+                        monitor_id = str(monitor.get("id"))
+                        monitor_heartbeats = heartbeats.get(monitor_id, [])
+                        latest_status = 0
+                        latest_ping = None
+                        if monitor_heartbeats:
+                            latest = monitor_heartbeats[-1]
+                            latest_status = latest.get("status", 0)
+                            latest_ping = latest.get("ping")
+                        recent_hb = monitor_heartbeats[-20:] if monitor_heartbeats else []
+                        result["monitors"].append(UptimeMonitor(
+                            id=monitor.get("id"),
+                            name=monitor.get("name"),
+                            status=latest_status,
+                            ping=latest_ping,
+                            heartbeats=[{"status": h.get("status", 0), "ping": h.get("ping")} for h in recent_hb]
+                        ))
+                        if latest_status == 1:
+                            result["summary"]["up"] += 1
+                        else:
+                            result["summary"]["down"] += 1
+                        result["summary"]["total"] += 1
+    except Exception:
+        pass
+    return result
+
+async def get_prometheus_metrics(url: str, queries: Dict[str, str]) -> Dict[str, Any]:
+    """Query Prometheus for metrics."""
+    results = {}
+    try:
+        async with httpx.AsyncClient(timeout=5.0) as client:
+            for name, query in queries.items():
+                response = await client.get(f"{url}/api/v1/query", params={"query": query})
+                if response.status_code == 200:
+                    data = response.json().get("data", {}).get("result", [])
+                    if data:
+                        results[name] = float(data[0].get("value", [0, 0])[1])
+    except Exception:
+        pass
+    return results
+
+async def get_camera_list(go2rtc_url: str) -> List[str]:
+    """Get camera list from go2rtc."""
+    try:
+        async with httpx.AsyncClient(timeout=5.0) as client:
+            response = await client.get(f"{go2rtc_url}/api/streams")
+            if response.status_code == 200:
+                return list(response.json().keys())
+    except Exception:
+        pass
+    return []
+
+async def get_sabnzbd_queue(url: str, api_key: str = "") -> Dict:
+    """Get Sabnzbd download queue."""
+    try:
+        async with httpx.AsyncClient(timeout=5.0) as client:
+            params = {"mode": "queue", "output": "json"}
+            if api_key:
+                params["apikey"] = api_key
+            response = await client.get(f"{url}/api", params=params)
+            if response.status_code == 200:
+                data = response.json().get("queue", {})
+                return {
+                    "speed": data.get("speed", "0 B/s"),
+                    "size_left": data.get("sizeleft", "0 B"),
+                    "eta": data.get("timeleft", "Unknown"),
+                    "downloading": len(data.get("slots", [])),
+                    "items": [
+                        {"name": s.get("filename", "Unknown")[:40], "progress": float(s.get("percentage", 0))}
+                        for s in data.get("slots", [])[:3]
+                    ]
+                }
+    except Exception:
+        pass
+    return {"speed": "N/A", "downloading": 0, "items": []}
+
+def get_recent_events() -> List[StatusEvent]:
+    """Get recent status change events."""
+    return list(recent_events)
+
+def get_cluster_uptime(nodes: List[NodeStatus]) -> float:
+    """Calculate total cluster uptime in hours."""
+    total = 0
+    for node in nodes:
+        if node.uptime_hours:
+            total += node.uptime_hours
+    return round(total, 1)
diff --git a/app/templates/base.html b/app/templates/base.html
new file mode 100644
index 0000000..9eca0d7
--- /dev/null
+++ b/app/templates/base.html
@@ -0,0 +1,145 @@
+ + + + + + DeathStar Homelab + + + + + + +
+ {% block content %}{% endblock %} +
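+<!-- Per-page content renders into this block; the dashboard cards keep themselves fresh by re-fetching the /api/* partial routes defined in app/main.py. -->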
+ + + + + + + diff --git a/app/templates/dashboard.html b/app/templates/dashboard.html new file mode 100644 index 0000000..d275fd9 --- /dev/null +++ b/app/templates/dashboard.html @@ -0,0 +1,134 @@ +{% extends "base.html" %} + +{% block content %} + +
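+{# Each card below includes its partial for the first paint, then refreshes in place from the matching /api/* endpoint (status banner, favorites, nodes, services, pbs, events, downloads, cameras, docker, uptime). #}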
+
+
+

DeathStar Homelab

+ + + + + +
+
+ + {{ online_count }}/{{ total_count }} online + Updated: {{ last_check }} +
+
+
+ {% include "partials/status_banner.html" %} +
+
+ + +
+
+ +

Quick Access

+
+
+ {% include "partials/favorites.html" %} +
+
+ + +
+
+ +

Proxmox Cluster: NewHope

{{ cluster_uptime }}h combined node uptime +
+
+ {% include "partials/nodes.html" %} +
+
+ + +
+ +
+
+ {% include "partials/services.html" %} +
+
+ + +
+ +
+
+ +

Backups

+
+
+
Loading...
+
+
+ + +
+
+ +

Recent Events

+
+
+
Loading...
+
+
+ + +
+
+ +

Downloads

+
+
+ {% include "partials/downloads.html" %} +
+
+ + +
+
+ +

Cameras

+
+
+ {% include "partials/cameras.html" %} +
+
+ + +
+
+ +

Docker

+ {% for name, count in docker_counts.items() %}{{ count }}{% if not loop.last %}+{% endif %}{% endfor %} containers +
+
+
Loading...
+
+
+ + +
+
+ +

Uptime

+
+
+
Loading...
+
+
+
+
+{% endblock %} diff --git a/app/templates/partials/cameras.html b/app/templates/partials/cameras.html new file mode 100644 index 0000000..45fb348 --- /dev/null +++ b/app/templates/partials/cameras.html @@ -0,0 +1,22 @@ +
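+{# Only the first four cameras render inline. Snapshots are expected to come from the /api/camera-snapshot/{camera} proxy in app/main.py, which pulls frame.jpeg from go2rtc. #}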
+ {% for camera in cameras[:4] %} +
+ {{ camera }} + +
{{ camera }}
+
+ {% endfor %} +
+{% if cameras | length > 4 %} + + View all {{ cameras | length }} cameras + +{% endif %} diff --git a/app/templates/partials/docker.html b/app/templates/partials/docker.html new file mode 100644 index 0000000..a988848 --- /dev/null +++ b/app/templates/partials/docker.html @@ -0,0 +1,20 @@ +
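+{# Container lists come from each host's docker-socket-proxy over plain HTTP; the proxy container itself is filtered out in app/services/health.py. #}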
+ {% for host in hosts %} +
+
+ {{ host.name }} + {{ counts.get(host.name, 0) }} running +
+
+ {% for container in containers if container.host == host.name %} +
+ + {{ container.name }} +
+ {% else %} + No containers + {% endfor %} +
+
+ {% endfor %} +
diff --git a/app/templates/partials/downloads.html b/app/templates/partials/downloads.html new file mode 100644 index 0000000..ab19634 --- /dev/null +++ b/app/templates/partials/downloads.html @@ -0,0 +1,40 @@ +
+ {% if queue and queue.downloading > 0 %} +
+ {{ queue.downloading }} active + {{ queue.speed }} +
+ + {% if queue.items %} +
+ {% for item in queue.items %} +
+
+ {{ item.name }} + {{ item.progress | round(1) }}% +
+
+
+
+
+ {% endfor %} +
+ {% endif %} + + {% if queue.eta and queue.eta != 'Unknown' %} +
+ ETA: {{ queue.eta }} + {{ queue.size_left }} left +
+ {% endif %} + {% else %} +
+ + No active downloads +
+ {% endif %} + + + Open Sabnzbd + +
diff --git a/app/templates/partials/events.html b/app/templates/partials/events.html new file mode 100644 index 0000000..e0eca24 --- /dev/null +++ b/app/templates/partials/events.html @@ -0,0 +1,16 @@ +
+ {% if events %} +
+ {% for event in events | reverse %} +
+ + {{ event.service }} + {{ event.new_status }} + {{ event.timestamp.strftime('%H:%M') }} +
+ {% endfor %} +
+ {% else %} +
No recent events
+ {% endif %} +
diff --git a/app/templates/partials/favorites.html b/app/templates/partials/favorites.html new file mode 100644 index 0000000..cf036c5 --- /dev/null +++ b/app/templates/partials/favorites.html @@ -0,0 +1,13 @@ +
+ {% for service in favorites %} + {% set status = services_status.get(service.name) %} + + + {{ service.name }} + {% if status and status.response_time_ms %} + {{ status.response_time_ms }}ms + {% endif %} + + {% endfor %} +
diff --git a/app/templates/partials/nodes.html b/app/templates/partials/nodes.html new file mode 100644 index 0000000..b2c8b64 --- /dev/null +++ b/app/templates/partials/nodes.html @@ -0,0 +1,66 @@ +
+ {% for node in nodes_status %} +
+
+
+ + {{ node.name }} +
+ {% if node.uptime_hours %} + {{ (node.uptime_hours / 24) | round(1) }}d + {% endif %} +
+ + {% if node.status == 'online' %} +
+
+ CPU + {{ node.cpu_percent | default(0) | round(1) }}% +
+
+
+
+
+ RAM + {% if node.memory_used_gb %}{{ node.memory_used_gb }}{% else %}?{% endif %}/{% if node.memory_total_gb %}{{ node.memory_total_gb }}{% else %}?{% endif %}GB +
+
+
+
+
+ Disk + {{ node.disk_percent | default(0) | round(1) }}% +
+
+
+ + + + {% else %} +
Offline
+ {% endif %} +
+ {% endfor %} +
+{% if cluster_uptime %} +
Cluster total: {{ cluster_uptime }}h
+{% endif %} diff --git a/app/templates/partials/pbs.html b/app/templates/partials/pbs.html new file mode 100644 index 0000000..56f5514 --- /dev/null +++ b/app/templates/partials/pbs.html @@ -0,0 +1,31 @@ +
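+{# Datastore usage comes from PBS /api2/json/status/datastore-usage; "Last" is the newest backup task found in the server's task list. #}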
+ {% if pbs.status == 'online' %} +
+ Online + {% if pbs.last_backup %} + Last: {{ pbs.last_backup }} + {% endif %} +
+ {% if pbs.datastore_usage %} +
+ {% for ds in pbs.datastore_usage %} +
+
+ {{ ds.name }} + {{ ds.used_gb }}/{{ ds.total_gb }}GB +
+
+
+
+
+ {% endfor %} +
+ {% endif %} + {% else %} +
+ + PBS Offline +
+ {% endif %} + Open PBS +
diff --git a/app/templates/partials/services.html b/app/templates/partials/services.html new file mode 100644 index 0000000..0cda695 --- /dev/null +++ b/app/templates/partials/services.html @@ -0,0 +1,60 @@ +{% for category_name, category_services in services_by_category.items() %} +{% set cat_info = categories.get(category_name, {}) %} + +{% endfor %} diff --git a/app/templates/partials/status_banner.html b/app/templates/partials/status_banner.html new file mode 100644 index 0000000..2a67274 --- /dev/null +++ b/app/templates/partials/status_banner.html @@ -0,0 +1,17 @@ +{% if critical_down %} +
+
+ + + Critical services down: {{ critical_down | join(", ") }} + +
+ View details +
+{% else %} +
+ + All critical services operational + {{ online_count }}/{{ total_count }} services online +
+{% endif %} diff --git a/app/templates/partials/uptime.html b/app/templates/partials/uptime.html new file mode 100644 index 0000000..d95ca71 --- /dev/null +++ b/app/templates/partials/uptime.html @@ -0,0 +1,45 @@ +
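+{# Monitor data comes from Uptime Kuma's public status-page API (heartbeat list plus page config); status 1 counts as up, and the mini-bars scale height from recent ping values, clamped to 20-100%. #}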
+ {% if uptime.monitors %} +
+ {{ uptime.summary.up }}/{{ uptime.summary.total }} up + {% if uptime.summary.down > 0 %} + {{ uptime.summary.down }} down + {% endif %} +
+
+ {% for monitor in uptime.monitors[:8] %} +
+ + {{ monitor.name }} + + {% if monitor.heartbeats %} +
+ {% for hb in monitor.heartbeats[-12:] %} + {% set ping_height = ((hb.ping or 50) / 5) %} + {% if ping_height > 100 %}{% set ping_height = 100 %}{% endif %} + {% if ping_height < 20 %}{% set ping_height = 20 %}{% endif %} +
+ {% endfor %} +
+ {% endif %} + {% if monitor.ping %} + {{ monitor.ping }}ms + {% endif %} +
+ {% endfor %} +
+ {% if uptime.monitors | length > 8 %} +
+{{ uptime.monitors | length - 8 }} more
+ {% endif %} + {% else %} +
+ + No uptime data +
+ {% endif %} + + + Open Uptime Kuma + +
diff --git a/app/templates/settings.html b/app/templates/settings.html new file mode 100644 index 0000000..7853876 --- /dev/null +++ b/app/templates/settings.html @@ -0,0 +1,92 @@ + + + + + + Settings - DeathStar Homelab + + + + + +
+
+

Settings

+ Back to Dashboard +
+ + {% if request.query_params.get('saved') %} +
Settings saved successfully!
+ {% endif %} + +
+ +
+

General

+
+
+ + +
+
+ + +
+
+
+ + +
+

Display Options

+
+ + +
+
+ + +
+

Favorites

+

Select services to show in Quick Access

+
+ {% for service in all_services %} + + {% endfor %} +
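+{# Note: the save handler in app/main.py replaces the favorites list only when at least one box is checked, so clearing every checkbox keeps the previous selection. #}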
+
+ +
+ Cancel + +
+
+
+ + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..0c6581d --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +fastapi==0.115.0 +uvicorn[standard]==0.32.0 +jinja2==3.1.4 +httpx==0.27.2 +python-dotenv==1.0.1