commit 89cdb022f3b5af8b15fac2fd6e8c80cf0c97c0c7
Author: Dashboard
Date:   Mon Feb 2 20:27:05 2026 +0000

    Initial commit: Homelab Dashboard with YAML configuration

    Features:
    - Service health monitoring with response times
    - Proxmox cluster integration (nodes, VMs, containers)
    - PBS backup server monitoring
    - Camera viewer with WebRTC (go2rtc)
    - Docker container monitoring
    - Uptime Kuma integration
    - Mobile-friendly responsive design
    - YAML-based configuration for easy setup

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4a9089b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,30 @@
+# Secrets - NEVER commit these
+secrets.yaml
+*.secret
+
+# Python
+__pycache__/
+*.py[cod]
+*.class
+venv/
+.venv/
+*.egg-info/
+.eggs/
+
+# User settings
+settings.json
+config.yaml
+
+# Logs
+*.log
+logs/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# OS
+.DS_Store
+Thumbs.db
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3fdaa60
--- /dev/null
+++ b/README.md
@@ -0,0 +1,161 @@
+# Homelab Dashboard
+
+A modern, responsive dashboard for monitoring your homelab infrastructure.
+
+## Features
+
+- **Service Monitoring**: Track health and response times for all your services
+- **Proxmox Integration**: View cluster status, nodes, VMs, and containers
+- **PBS Integration**: Monitor Proxmox Backup Server status
+- **Camera Viewer**: WebRTC-based live camera feeds (via go2rtc)
+- **Docker Monitoring**: Track containers across multiple Docker hosts
+- **Uptime Kuma Integration**: Display uptime stats from your Uptime Kuma monitors
+- **Mobile-Friendly**: Responsive design works on all devices
+
+## Quick Start
+
+### 1. Copy and Set Up
+
+```bash
+# Copy the dashboard to your server
+scp -r dashboard/ root@your-server:/opt/
+
+# Install dependencies
+apt install python3-venv
+cd /opt/dashboard
+python3 -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt
+```
+
+### 2. Configure
+
+```bash
+# Copy the example configs
+cp config.yaml.example config.yaml
+cp secrets.yaml.example secrets.yaml
+
+# Edit config.yaml with your services
+nano config.yaml
+
+# Add your API credentials to secrets.yaml
+nano secrets.yaml
+
+# Protect the secrets file
+chmod 600 secrets.yaml
+```
+
+### 3. Run
+
+```bash
+# Test manually
+source venv/bin/activate
+uvicorn app.main:app --host 0.0.0.0 --port 8000
+
+# Or create a systemd service (see below)
+```
+
+## Configuration
+
+### config.yaml
+
+The main configuration file defines:
+
+- **dashboard**: Title, theme, refresh interval
+- **proxmox**: Cluster nodes to monitor
+- **pbs**: Proxmox Backup Server connection
+- **cameras**: go2rtc camera streams
+- **docker**: Docker hosts to monitor
+- **uptime_kuma**: Uptime monitoring integration
+- **categories**: Service category colors and icons
+- **services**: All services to display and monitor
+- **service_groups**: Logical groupings for services
+
+See `config.yaml.example` for full documentation.
+
+### secrets.yaml
+
+Store sensitive credentials separately:
+
+- Proxmox API tokens
+- PBS API tokens
+- OPNsense API keys
+- Service-specific API keys
+
+**Important**: Never commit `secrets.yaml` to version control!
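+
+The shipped `.gitignore` already excludes `secrets.yaml`. If `/opt/dashboard` is a git checkout, a quick sanity check (assumes `git` is installed on the server):
+
+```bash
+# Prints the matching .gitignore rule; no output means the file is NOT ignored
+git check-ignore -v secrets.yaml
+```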
+ +### Service Configuration + +Each service can have: + +```yaml +- name: MyService + url: https://myservice.example.com # URL to open on click + ip: 192.168.1.100 # IP for health checks + port: 8080 # Port for health checks + category: Apps # Category grouping + icon: server # Icon name + favorite: true # Show in favorites + critical: true # Mark as critical + group: "My Group" # Service group + health_check: # Optional custom health check + url: https://custom-check-url/ + timeout: 10 +``` + +### Available Icons + +server, shield, globe, archive, lock, film, star, video, tv, music, +search, download, cog, chart, heartbeat, key, git, workflow, book, +image, brain, home, camera, message, database + +## Systemd Service + +Create `/etc/systemd/system/dashboard.service`: + +```ini +[Unit] +Description=Homelab Dashboard +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/dashboard +ExecStart=/opt/dashboard/venv/bin/uvicorn app.main:app --host 0.0.0.0 --port 8000 +Restart=always +RestartSec=5 + +[Install] +WantedBy=multi-user.target +``` + +Then enable and start: + +```bash +systemctl daemon-reload +systemctl enable dashboard +systemctl start dashboard +``` + +## Reverse Proxy (Nginx/NPM) + +Example Nginx Proxy Manager configuration: + +- **Domain**: dashboard.yourdomain.com +- **Forward Host**: 192.168.1.x (dashboard IP) +- **Forward Port**: 8000 +- **WebSocket Support**: Enable (for live updates) + +## Requirements + +- Python 3.10+ +- FastAPI +- Uvicorn +- HTTPX +- Jinja2 +- PyYAML + +## License + +MIT License - Feel free to use and modify for your homelab! diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/config.py b/app/config.py new file mode 100644 index 0000000..bfea474 --- /dev/null +++ b/app/config.py @@ -0,0 +1,280 @@ +"""Dashboard configuration loader - supports YAML config files.""" +from dataclasses import dataclass, field +from typing import Optional, List, Dict, Any +import json +import os +import yaml + +# Base paths +BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +CONFIG_FILE = os.path.join(BASE_DIR, "config.yaml") +SECRETS_FILE = os.path.join(BASE_DIR, "secrets.yaml") +SETTINGS_FILE = os.path.join(BASE_DIR, "settings.json") + +@dataclass +class Service: + name: str + url: str + ip: str + port: int + category: str + icon: str = "server" + favorite: bool = False + critical: bool = False + group: Optional[str] = None + health_check: Optional[Dict] = None + +@dataclass +class HealthCheckConfig: + url: str + timeout: float = 5.0 + +# Icon SVG paths +SERVICE_ICONS = { + "shield": '', + "globe": '', + "archive": '', + "lock": '', + "film": '', + "star": '', + "video": '', + "tv": '', + "music": '', + "search": '', + "download": '', + "cog": '', + "chart": '', + "heartbeat": '', + "key": '', + "git": '', + "workflow": '', + "book": '', + "image": '', + "brain": '', + "home": '', + "camera": '', + "message": '', + "server": '', + "database": '', +} + +# ============================================================ +# Configuration Loading +# ============================================================ + +def load_yaml(path: str) -> Dict: + """Load YAML file, return empty dict if not found.""" + if os.path.exists(path): + try: + with open(path, 'r') as f: + return yaml.safe_load(f) or {} + except Exception as e: + print(f"Warning: Failed to load {path}: {e}") + return {} + +def load_config() -> Dict: + """Load main config and merge with secrets.""" + config = 
load_yaml(CONFIG_FILE) + secrets = load_yaml(SECRETS_FILE) + + # Merge secrets into config + if secrets: + for key in ['proxmox', 'pbs', 'opnsense', 'sabnzbd']: + if key in secrets and key in config: + config[key].update(secrets[key]) + elif key in secrets: + config[key] = secrets[key] + + return config + +# Load configuration +_config = load_config() + +# ============================================================ +# Configuration Values (with defaults for backward compatibility) +# ============================================================ + +# Dashboard settings +DASHBOARD_TITLE = _config.get('dashboard', {}).get('title', 'Homelab Dashboard') +REFRESH_INTERVAL = _config.get('dashboard', {}).get('refresh_interval', 30) + +# Proxmox configuration +_proxmox = _config.get('proxmox', {}) +PROXMOX_ENABLED = _proxmox.get('enabled', True) +PROXMOX_NODES = _proxmox.get('nodes', []) +PROXMOX_API_TOKEN = _proxmox.get('api_token', '') +PROXMOX_API_SECRET = _proxmox.get('api_secret', '') + +# PBS configuration +_pbs = _config.get('pbs', {}) +PBS_ENABLED = _pbs.get('enabled', True) +PBS_URL = _pbs.get('url', '') +PBS_API_TOKEN = _pbs.get('api_token', '') +PBS_API_SECRET = _pbs.get('api_secret', '') + +# OPNsense configuration +_opnsense = _config.get('opnsense', {}) +OPNSENSE_URL = _opnsense.get('url', '') +OPNSENSE_API_KEY = _opnsense.get('api_key', '') +OPNSENSE_API_SECRET = _opnsense.get('api_secret', '') + +# Prometheus configuration +PROMETHEUS_URL = _config.get('prometheus', {}).get('url', '') + +# Camera configuration +_cameras = _config.get('cameras', {}) +CAMERAS_ENABLED = _cameras.get('enabled', False) +GO2RTC_URL = _cameras.get('go2rtc_url', '') +CAMERAS = _cameras.get('streams', []) + +# Sabnzbd configuration +_sabnzbd = _config.get('sabnzbd', {}) +SABNZBD_URL = _sabnzbd.get('url', '') +SABNZBD_API_KEY = _sabnzbd.get('api_key', '') + +# Uptime Kuma configuration +_uptime = _config.get('uptime_kuma', {}) +UPTIME_KUMA_URL = _uptime.get('url', '') +UPTIME_KUMA_STATUS_PAGE = _uptime.get('status_page', 'default') + +# Docker hosts +_docker = _config.get('docker', {}) +DOCKER_ENABLED = _docker.get('enabled', False) +DOCKER_HOSTS = _docker.get('hosts', []) + +# Categories +CATEGORIES = _config.get('categories', { + "Infrastructure": {"color": "blue", "icon": "server"}, + "Media": {"color": "purple", "icon": "film"}, + "Monitoring": {"color": "amber", "icon": "chart"}, + "Apps": {"color": "emerald", "icon": "cog"}, + "Home": {"color": "cyan", "icon": "home"}, +}) + +# Service groups +SERVICE_GROUPS = _config.get('service_groups', {}) + +# Load services from config +def _load_services() -> List[Service]: + """Load services from YAML config.""" + services = [] + for svc in _config.get('services', []): + health_check = None + if 'health_check' in svc: + health_check = svc['health_check'] + + services.append(Service( + name=svc['name'], + url=svc['url'], + ip=svc['ip'], + port=svc['port'], + category=svc['category'], + icon=svc.get('icon', 'server'), + favorite=svc.get('favorite', False), + critical=svc.get('critical', False), + group=svc.get('group'), + health_check=health_check, + )) + return services + +SERVICES = _load_services() + +# Build SERVICE_CHECK_OVERRIDES from health_check configs +SERVICE_CHECK_OVERRIDES = {} +for svc in SERVICES: + if svc.health_check: + SERVICE_CHECK_OVERRIDES[svc.name] = ( + svc.health_check.get('url', f"http://{svc.ip}:{svc.port}/"), + svc.health_check.get('timeout', 5.0) + ) + +# ============================================================ +# Settings (user 
preferences, stored in settings.json) +# ============================================================ + +DEFAULT_SETTINGS = { + "refresh_interval": REFRESH_INTERVAL, + "theme": _config.get('dashboard', {}).get('theme', 'dark'), + "favorites": _config.get('favorites', []), + "collapsed_categories": [], + "show_response_times": _config.get('dashboard', {}).get('show_response_times', True), + "show_icons": _config.get('dashboard', {}).get('show_icons', True), +} + +def load_settings(): + try: + if os.path.exists(SETTINGS_FILE): + with open(SETTINGS_FILE, 'r') as f: + return {**DEFAULT_SETTINGS, **json.load(f)} + except: + pass + return DEFAULT_SETTINGS.copy() + +def save_settings(settings): + try: + with open(SETTINGS_FILE, 'w') as f: + json.dump(settings, f, indent=2) + return True + except: + return False + +# ============================================================ +# Helper Functions +# ============================================================ + +def get_services_by_category(): + categories = {} + for service in SERVICES: + if service.category not in categories: + categories[service.category] = [] + categories[service.category].append(service) + return categories + +def get_favorites(): + settings = load_settings() + fav_names = settings.get("favorites", []) + return [s for s in SERVICES if s.name in fav_names or s.favorite] + +def get_critical_services(): + return [s for s in SERVICES if s.critical] + +def get_service_icon(icon_name): + return SERVICE_ICONS.get(icon_name, SERVICE_ICONS["server"]) + +def reload_config(): + """Reload configuration from files (for runtime updates).""" + global _config, SERVICES, SERVICE_CHECK_OVERRIDES, CATEGORIES, SERVICE_GROUPS + global PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET + global PBS_URL, PBS_API_TOKEN, PBS_API_SECRET + global GO2RTC_URL, CAMERAS, DOCKER_HOSTS + + _config = load_config() + SERVICES = _load_services() + + # Rebuild overrides + SERVICE_CHECK_OVERRIDES.clear() + for svc in SERVICES: + if svc.health_check: + SERVICE_CHECK_OVERRIDES[svc.name] = ( + svc.health_check.get('url', f"http://{svc.ip}:{svc.port}/"), + svc.health_check.get('timeout', 5.0) + ) + + # Update other config values + _proxmox = _config.get('proxmox', {}) + PROXMOX_NODES = _proxmox.get('nodes', []) + PROXMOX_API_TOKEN = _proxmox.get('api_token', '') + PROXMOX_API_SECRET = _proxmox.get('api_secret', '') + + _pbs = _config.get('pbs', {}) + PBS_URL = _pbs.get('url', '') + PBS_API_TOKEN = _pbs.get('api_token', '') + PBS_API_SECRET = _pbs.get('api_secret', '') + + _cameras = _config.get('cameras', {}) + GO2RTC_URL = _cameras.get('go2rtc_url', '') + CAMERAS = _cameras.get('streams', []) + + DOCKER_HOSTS = _config.get('docker', {}).get('hosts', []) + CATEGORIES = _config.get('categories', CATEGORIES) + SERVICE_GROUPS = _config.get('service_groups', {}) diff --git a/app/main.py b/app/main.py new file mode 100644 index 0000000..9377d62 --- /dev/null +++ b/app/main.py @@ -0,0 +1,298 @@ +"""Enhanced FastAPI dashboard v2 with all integrations.""" +from fastapi import FastAPI, Request, Form +from fastapi.staticfiles import StaticFiles +from fastapi.templating import Jinja2Templates +from fastapi.responses import HTMLResponse, StreamingResponse, JSONResponse, RedirectResponse +import httpx +from datetime import datetime + +from app.config import ( + SERVICES, PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET, + PBS_URL, PBS_API_TOKEN, PBS_API_SECRET, + CATEGORIES, GO2RTC_URL, CAMERAS, SABNZBD_URL, SABNZBD_API_KEY, + UPTIME_KUMA_URL, UPTIME_KUMA_STATUS_PAGE, 
DOCKER_HOSTS, + PROMETHEUS_URL, SERVICE_ICONS, SERVICE_GROUPS, + get_services_by_category, get_favorites, get_critical_services, get_service_icon, + load_settings, save_settings, DEFAULT_SETTINGS +) +from app.services.health import ( + check_all_services, get_all_proxmox_metrics, get_camera_list, + get_sabnzbd_queue, get_uptime_kuma_status, get_docker_containers, + get_docker_container_counts, get_pbs_status, get_storage_pools, + get_recent_events, get_cluster_uptime, get_prometheus_metrics +) + +app = FastAPI(title="DeathStar Dashboard", version="2.0.0") +app.mount("/static", StaticFiles(directory="static"), name="static") +templates = Jinja2Templates(directory="app/templates") + +# Add custom filters +templates.env.globals["get_service_icon"] = get_service_icon +templates.env.globals["SERVICE_ICONS"] = SERVICE_ICONS + +last_check = {"time": None} + +@app.get("/", response_class=HTMLResponse) +async def dashboard(request: Request): + """Main dashboard page.""" + services_status = await check_all_services(SERVICES) + nodes_status = await get_all_proxmox_metrics(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + docker_counts = await get_docker_container_counts(DOCKER_HOSTS) + settings = load_settings() + + online_count = sum(1 for s in services_status.values() if s.status == "online") + total_count = len(services_status) + critical_down = [s.name for s in get_critical_services() + if services_status.get(s.name) and services_status[s.name].status != "online"] + cluster_uptime = get_cluster_uptime(nodes_status) + + last_check["time"] = datetime.now() + + return templates.TemplateResponse("dashboard.html", { + "request": request, + "services_by_category": get_services_by_category(), + "services_status": services_status, + "nodes_status": nodes_status, + "favorites": get_favorites(), + "categories": CATEGORIES, + "online_count": online_count, + "total_count": total_count, + "critical_down": critical_down, + "last_check": last_check["time"].strftime("%H:%M:%S") if last_check["time"] else "Never", + "cameras": CAMERAS, + "go2rtc_url": GO2RTC_URL, + "docker_counts": docker_counts, + "cluster_uptime": cluster_uptime, + "settings": settings, + "service_groups": SERVICE_GROUPS, + }) + +@app.get("/health") +async def health(): + return {"status": "healthy"} + +@app.get("/api/services", response_class=HTMLResponse) +async def api_services(request: Request): + """HTMX endpoint for services.""" + services_status = await check_all_services(SERVICES) + return templates.TemplateResponse("partials/services.html", { + "request": request, + "services_by_category": get_services_by_category(), + "services_status": services_status, + "categories": CATEGORIES, + "service_groups": SERVICE_GROUPS, + }) + +@app.get("/api/nodes", response_class=HTMLResponse) +async def api_nodes(request: Request): + """HTMX endpoint for Proxmox nodes.""" + nodes_status = await get_all_proxmox_metrics(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + cluster_uptime = get_cluster_uptime(nodes_status) + return templates.TemplateResponse("partials/nodes.html", { + "request": request, + "nodes_status": nodes_status, + "cluster_uptime": cluster_uptime, + }) + +@app.get("/api/nodes-expanded", response_class=HTMLResponse) +async def api_nodes_expanded(request: Request): + """HTMX endpoint for expanded Proxmox nodes with VMs/containers.""" + nodes_status = await get_all_proxmox_metrics(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + return templates.TemplateResponse("partials/nodes_expanded.html", { + "request": request, + 
"nodes_status": nodes_status, + }) + +@app.get("/api/favorites", response_class=HTMLResponse) +async def api_favorites(request: Request): + """HTMX endpoint for favorites.""" + services_status = await check_all_services(SERVICES) + return templates.TemplateResponse("partials/favorites.html", { + "request": request, + "favorites": get_favorites(), + "services_status": services_status, + }) + +@app.get("/api/cameras", response_class=HTMLResponse) +async def api_cameras(request: Request): + """HTMX endpoint for cameras.""" + return templates.TemplateResponse("partials/cameras.html", { + "request": request, + "cameras": CAMERAS, + "go2rtc_url": GO2RTC_URL, + }) + +@app.get("/api/downloads", response_class=HTMLResponse) +async def api_downloads(request: Request): + """HTMX endpoint for downloads.""" + queue = await get_sabnzbd_queue(SABNZBD_URL, SABNZBD_API_KEY) + return templates.TemplateResponse("partials/downloads.html", { + "request": request, + "queue": queue, + }) + +@app.get("/api/status-banner", response_class=HTMLResponse) +async def api_status_banner(request: Request): + """HTMX endpoint for status banner.""" + services_status = await check_all_services(SERVICES) + critical_down = [s.name for s in get_critical_services() + if services_status.get(s.name) and services_status[s.name].status != "online"] + online_count = sum(1 for s in services_status.values() if s.status == "online") + last_check["time"] = datetime.now() + + return templates.TemplateResponse("partials/status_banner.html", { + "request": request, + "critical_down": critical_down, + "online_count": online_count, + "total_count": len(services_status), + "last_check": last_check["time"].strftime("%H:%M:%S"), + }) + +@app.get("/api/docker", response_class=HTMLResponse) +async def api_docker(request: Request): + """HTMX endpoint for Docker containers.""" + containers = await get_docker_containers(DOCKER_HOSTS) + counts = await get_docker_container_counts(DOCKER_HOSTS) + return templates.TemplateResponse("partials/docker.html", { + "request": request, + "containers": containers, + "hosts": DOCKER_HOSTS, + "counts": counts, + }) + +@app.get("/api/uptime", response_class=HTMLResponse) +async def api_uptime(request: Request): + """HTMX endpoint for Uptime Kuma status.""" + uptime_data = await get_uptime_kuma_status(UPTIME_KUMA_URL, UPTIME_KUMA_STATUS_PAGE) + return templates.TemplateResponse("partials/uptime.html", { + "request": request, + "uptime": uptime_data, + "uptime_kuma_url": UPTIME_KUMA_URL, + }) + +@app.get("/api/pbs", response_class=HTMLResponse) +async def api_pbs(request: Request): + """HTMX endpoint for PBS status.""" + pbs_status = await get_pbs_status(PBS_URL, PBS_API_TOKEN, PBS_API_SECRET) + return templates.TemplateResponse("partials/pbs.html", { + "request": request, + "pbs": pbs_status, + }) + +@app.get("/api/storage", response_class=HTMLResponse) +async def api_storage(request: Request): + """HTMX endpoint for storage pools.""" + pools = await get_storage_pools(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET) + return templates.TemplateResponse("partials/storage.html", { + "request": request, + "pools": pools, + }) + +@app.get("/api/events", response_class=HTMLResponse) +async def api_events(request: Request): + """HTMX endpoint for recent events.""" + events = get_recent_events() + return templates.TemplateResponse("partials/events.html", { + "request": request, + "events": events[-10:], + }) + +@app.get("/api/camera-snapshot/{camera}") +async def camera_snapshot(camera: str): + """Proxy camera snapshot 
from go2rtc."""
+    try:
+        async with httpx.AsyncClient(timeout=5.0) as client:
+            response = await client.get(f"{GO2RTC_URL}/api/frame.jpeg?src={camera}")
+            if response.status_code == 200:
+                return StreamingResponse(
+                    iter([response.content]),
+                    media_type="image/jpeg",
+                    headers={"Cache-Control": "no-cache"}
+                )
+    except Exception:
+        pass
+    # Fall back to an empty JPEG if go2rtc is unreachable or returns an error
+    return StreamingResponse(iter([b""]), media_type="image/jpeg")
+
+@app.post("/api/webrtc")
+async def webrtc_proxy(request: Request, src: str):
+    """Proxy WebRTC offers to go2rtc."""
+    try:
+        body = await request.body()
+        async with httpx.AsyncClient(timeout=10.0) as client:
+            response = await client.post(
+                f"{GO2RTC_URL}/api/webrtc?src={src}",
+                content=body,
+                headers={"Content-Type": "application/sdp"}
+            )
+            from fastapi import Response
+            return Response(
+                content=response.content,
+                status_code=response.status_code,
+                media_type="application/sdp"
+            )
+    except Exception as e:
+        from fastapi import Response
+        return Response(content=str(e), status_code=500)
+
+@app.get("/api/status")
+async def api_status_json():
+    """JSON endpoint for all status data."""
+    services_status = await check_all_services(SERVICES)
+    nodes_status = await get_all_proxmox_metrics(PROXMOX_NODES, PROXMOX_API_TOKEN, PROXMOX_API_SECRET)
+
+    return {
+        "services": {name: {"status": s.status, "response_time_ms": s.response_time_ms}
+                     for name, s in services_status.items()},
+        "nodes": [{"name": n.name, "status": n.status, "cpu": n.cpu_percent,
+                   "memory": n.memory_percent, "disk": n.disk_percent, "uptime_hours": n.uptime_hours}
+                  for n in nodes_status]
+    }
+
+# Full-page camera viewer
+@app.get("/cameras", response_class=HTMLResponse)
+async def cameras_page(request: Request):
+    """Full camera viewer page - mobile friendly."""
+    return templates.TemplateResponse("cameras.html", {
+        "request": request,
+        "cameras": CAMERAS,
+        "go2rtc_url": GO2RTC_URL,
+        "now": int(datetime.now().timestamp()),
+    })
+
+# Settings pages
+@app.get("/settings", response_class=HTMLResponse)
+async def settings_page(request: Request):
+    """Settings page."""
+    settings = load_settings()
+    return templates.TemplateResponse("settings.html", {
+        "request": request,
+        "settings": settings,
+        "all_services": SERVICES,
+        "categories": CATEGORIES,
+    })
+
+@app.post("/settings")
+async def save_settings_handler(request: Request):
+    """Save settings."""
+    form = await request.form()
+    settings = load_settings()
+
+    # Update settings from form
+    settings["refresh_interval"] = int(form.get("refresh_interval", 30))
+    settings["theme"] = form.get("theme", "dark")
+    settings["show_response_times"] = form.get("show_response_times") == "on"
+    settings["show_icons"] = form.get("show_icons") == "on"
+
+    # Handle favorites (multi-select)
+    favorites = form.getlist("favorites")
+    if favorites:
+        settings["favorites"] = favorites
+
+    save_settings(settings)
+    return RedirectResponse(url="/settings?saved=1", status_code=303)
+
+@app.get("/api/settings", response_class=JSONResponse)
+async def get_settings_api():
+    """Get settings as JSON."""
+    return load_settings()
diff --git a/app/routers/__init__.py b/app/routers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/app/services/__init__.py b/app/services/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/app/services/health.py b/app/services/health.py
new file mode 100644
index 0000000..167ccdd
--- /dev/null
+++ b/app/services/health.py
@@ -0,0 +1,391 @@
+"""Enhanced services module v2 with PBS, VM/LXC, storage pools, events."""
+import asyncio
+from typing import Dict, Any, Optional, List +import httpx +from dataclasses import dataclass, field +from datetime import datetime +from collections import deque + +@dataclass +class HealthStatus: + name: str + status: str + response_time_ms: Optional[float] = None + error: Optional[str] = None + +@dataclass +class NodeStatus: + name: str + ip: str + status: str + cpu_percent: Optional[float] = None + memory_percent: Optional[float] = None + memory_used_gb: Optional[float] = None + memory_total_gb: Optional[float] = None + disk_percent: Optional[float] = None + uptime_hours: Optional[float] = None + vms: List[Dict] = field(default_factory=list) + containers: List[Dict] = field(default_factory=list) + +@dataclass +class DockerContainer: + name: str + status: str + state: str + image: str + host: str + +@dataclass +class UptimeMonitor: + id: int + name: str + status: int + ping: Optional[int] = None + heartbeats: Optional[List[Dict]] = None + +@dataclass +class PBSStatus: + status: str + datastore_usage: List[Dict] = field(default_factory=list) + last_backup: Optional[str] = None + total_size_gb: float = 0 + used_size_gb: float = 0 + +@dataclass +class StoragePool: + name: str + node: str + total_gb: float + used_gb: float + avail_gb: float + percent_used: float + pool_type: str + +@dataclass +class StatusEvent: + timestamp: datetime + service: str + old_status: str + new_status: str + +# Recent events storage (in-memory, last 20) +recent_events: deque = deque(maxlen=20) +last_status_cache: Dict[str, str] = {} + +SERVICE_CHECK_OVERRIDES = { + "OPNsense": ("https://192.168.1.1:8443/", 10.0), + "Vaultwarden": ("https://vault.deathstar-home.one/", 5.0), + "Immich": ("http://192.168.1.54:2283/", 5.0), +} + +async def check_service(client: httpx.AsyncClient, service) -> HealthStatus: + """Check if a service is reachable.""" + global last_status_cache, recent_events + + if service.name in SERVICE_CHECK_OVERRIDES: + check_url, timeout = SERVICE_CHECK_OVERRIDES[service.name] + else: + https_ports = [443, 8006, 8007, 8443, 9443] + scheme = "https" if service.port in https_ports else "http" + check_url = f"{scheme}://{service.ip}:{service.port}/" + timeout = 5.0 + + start = asyncio.get_event_loop().time() + try: + response = await client.get(check_url, timeout=timeout, follow_redirects=True) + elapsed = (asyncio.get_event_loop().time() - start) * 1000 + new_status = "online" if response.status_code < 500 else "degraded" + result = HealthStatus(name=service.name, status=new_status, response_time_ms=round(elapsed, 1)) + except: + new_status = "offline" + result = HealthStatus(name=service.name, status="offline") + + # Track status changes + old_status = last_status_cache.get(service.name) + if old_status and old_status != new_status: + recent_events.append(StatusEvent( + timestamp=datetime.now(), + service=service.name, + old_status=old_status, + new_status=new_status + )) + last_status_cache[service.name] = new_status + + return result + +async def check_all_services(services) -> Dict[str, HealthStatus]: + """Check all services concurrently.""" + async with httpx.AsyncClient(verify=False, timeout=10.0) as client: + tasks = [check_service(client, s) for s in services] + results = await asyncio.gather(*tasks) + return {r.name: r for r in results} + +async def get_proxmox_node_metrics(client: httpx.AsyncClient, node: Dict, token: str, secret: str) -> NodeStatus: + """Get Proxmox node metrics including VMs and containers.""" + base_url = f"https://{node['ip']}:{node['port']}/api2/json" + headers = 
{"Authorization": f"PVEAPIToken={token}={secret}"} + + result = NodeStatus(name=node["name"], ip=node["ip"], status="offline") + + try: + # Get node status + response = await client.get(f"{base_url}/nodes/{node['name']}/status", headers=headers, timeout=5.0) + if response.status_code == 200: + data = response.json()["data"] + cpu = data.get("cpu", 0) * 100 + mem_used = data.get("memory", {}).get("used", 0) + mem_total = data.get("memory", {}).get("total", 1) + mem_pct = (mem_used / mem_total) * 100 if mem_total else 0 + disk_used = data.get("rootfs", {}).get("used", 0) + disk_total = data.get("rootfs", {}).get("total", 1) + disk_pct = (disk_used / disk_total) * 100 if disk_total else 0 + uptime_sec = data.get("uptime", 0) + + result.status = "online" + result.cpu_percent = round(cpu, 1) + result.memory_percent = round(mem_pct, 1) + result.memory_used_gb = round(mem_used / (1024**3), 1) + result.memory_total_gb = round(mem_total / (1024**3), 1) + result.disk_percent = round(disk_pct, 1) + result.uptime_hours = round(uptime_sec / 3600, 1) + + # Get VMs + vm_response = await client.get(f"{base_url}/nodes/{node['name']}/qemu", headers=headers, timeout=5.0) + if vm_response.status_code == 200: + for vm in vm_response.json().get("data", []): + result.vms.append({ + "vmid": vm.get("vmid"), + "name": vm.get("name", f"VM {vm.get('vmid')}"), + "status": vm.get("status"), + "mem": round(vm.get("mem", 0) / (1024**3), 1) if vm.get("mem") else 0, + "cpu": round(vm.get("cpu", 0) * 100, 1) if vm.get("cpu") else 0, + }) + + # Get containers + ct_response = await client.get(f"{base_url}/nodes/{node['name']}/lxc", headers=headers, timeout=5.0) + if ct_response.status_code == 200: + for ct in ct_response.json().get("data", []): + result.containers.append({ + "vmid": ct.get("vmid"), + "name": ct.get("name", f"CT {ct.get('vmid')}"), + "status": ct.get("status"), + "mem": round(ct.get("mem", 0) / (1024**3), 1) if ct.get("mem") else 0, + "cpu": round(ct.get("cpu", 0) * 100, 1) if ct.get("cpu") else 0, + }) + except: + pass + + return result + +async def get_all_proxmox_metrics(nodes, token: str, secret: str) -> List[NodeStatus]: + """Get metrics for all Proxmox nodes.""" + async with httpx.AsyncClient(verify=False) as client: + tasks = [get_proxmox_node_metrics(client, n, token, secret) for n in nodes] + return await asyncio.gather(*tasks) + +async def get_pbs_status(url: str, token: str, secret: str) -> PBSStatus: + """Get PBS backup server status.""" + result = PBSStatus(status="offline") + headers = {"Authorization": f"PBSAPIToken={token}:{secret}"} + + try: + async with httpx.AsyncClient(verify=False, timeout=10.0) as client: + # Get datastore status + ds_response = await client.get(f"{url}/api2/json/status/datastore-usage", headers=headers) + if ds_response.status_code == 200: + result.status = "online" + for ds in ds_response.json().get("data", []): + total = ds.get("total", 0) + used = ds.get("used", 0) + result.datastore_usage.append({ + "name": ds.get("store"), + "total_gb": round(total / (1024**3), 1), + "used_gb": round(used / (1024**3), 1), + "percent": round((used / total) * 100, 1) if total else 0, + }) + result.total_size_gb += total / (1024**3) + result.used_size_gb += used / (1024**3) + + # Try to get last backup task + tasks_response = await client.get(f"{url}/api2/json/nodes/localhost/tasks", headers=headers) + if tasks_response.status_code == 200: + tasks = tasks_response.json().get("data", []) + backup_tasks = [t for t in tasks if t.get("type") == "backup"] + if backup_tasks: + last = 
backup_tasks[0] + result.last_backup = datetime.fromtimestamp(last.get("starttime", 0)).strftime("%Y-%m-%d %H:%M") + except: + pass + + return result + +async def get_storage_pools(nodes, token: str, secret: str) -> List[StoragePool]: + """Get storage pool info from all Proxmox nodes.""" + pools = [] + headers = {"Authorization": f"PVEAPIToken={token}={secret}"} + + async with httpx.AsyncClient(verify=False, timeout=10.0) as client: + for node in nodes: + try: + url = f"https://{node['ip']}:{node['port']}/api2/json/nodes/{node['name']}/storage" + response = await client.get(url, headers=headers) + if response.status_code == 200: + for storage in response.json().get("data", []): + if storage.get("enabled") and storage.get("total"): + total = storage.get("total", 0) + used = storage.get("used", 0) + avail = storage.get("avail", 0) + pools.append(StoragePool( + name=storage.get("storage"), + node=node["name"], + total_gb=round(total / (1024**3), 1), + used_gb=round(used / (1024**3), 1), + avail_gb=round(avail / (1024**3), 1), + percent_used=round((used / total) * 100, 1) if total else 0, + pool_type=storage.get("type", "unknown"), + )) + except: + pass + + return pools + +async def get_docker_containers(hosts: List[Dict]) -> List[DockerContainer]: + """Get Docker containers via docker-socket-proxy.""" + containers = [] + async with httpx.AsyncClient(timeout=5.0) as client: + for host in hosts: + try: + url = f"http://{host['ip']}:{host['port']}/containers/json?all=true" + response = await client.get(url) + if response.status_code == 200: + for c in response.json(): + name = c.get("Names", ["/unknown"])[0].lstrip("/") + if name == "docker-socket-proxy": + continue + containers.append(DockerContainer( + name=name, + status=c.get("Status", ""), + state=c.get("State", "unknown"), + image=c.get("Image", "").split("/")[-1].split(":")[0], + host=host["name"] + )) + except: + pass + return containers + +async def get_docker_container_counts(hosts: List[Dict]) -> Dict[str, int]: + """Get container counts per host.""" + counts = {} + async with httpx.AsyncClient(timeout=5.0) as client: + for host in hosts: + try: + url = f"http://{host['ip']}:{host['port']}/containers/json" + response = await client.get(url) + if response.status_code == 200: + # Subtract 1 for docker-socket-proxy + count = len([c for c in response.json() if "docker-socket-proxy" not in c.get("Names", [""])[0]]) + counts[host["name"]] = count + except: + counts[host["name"]] = 0 + return counts + +async def get_uptime_kuma_status(url: str, status_page: str = "uptime") -> Dict: + """Get Uptime Kuma status.""" + result = {"monitors": [], "summary": {"up": 0, "down": 0, "total": 0}} + try: + async with httpx.AsyncClient(timeout=5.0) as client: + hb_response = await client.get(f"{url}/api/status-page/heartbeat/{status_page}") + info_response = await client.get(f"{url}/api/status-page/{status_page}") + + if hb_response.status_code == 200 and info_response.status_code == 200: + heartbeats = hb_response.json().get("heartbeatList", {}) + info = info_response.json() + + for group in info.get("publicGroupList", []): + for monitor in group.get("monitorList", []): + monitor_id = str(monitor.get("id")) + monitor_heartbeats = heartbeats.get(monitor_id, []) + latest_status = 0 + latest_ping = None + if monitor_heartbeats: + latest = monitor_heartbeats[-1] + latest_status = latest.get("status", 0) + latest_ping = latest.get("ping") + recent_hb = monitor_heartbeats[-20:] if monitor_heartbeats else [] + result["monitors"].append(UptimeMonitor( + 
id=monitor.get("id"), + name=monitor.get("name"), + status=latest_status, + ping=latest_ping, + heartbeats=[{"status": h.get("status", 0), "ping": h.get("ping")} for h in recent_hb] + )) + if latest_status == 1: + result["summary"]["up"] += 1 + else: + result["summary"]["down"] += 1 + result["summary"]["total"] += 1 + except: + pass + return result + +async def get_prometheus_metrics(url: str, queries: Dict[str, str]) -> Dict[str, Any]: + """Query Prometheus for metrics.""" + results = {} + try: + async with httpx.AsyncClient(timeout=5.0) as client: + for name, query in queries.items(): + response = await client.get(f"{url}/api/v1/query", params={"query": query}) + if response.status_code == 200: + data = response.json().get("data", {}).get("result", []) + if data: + results[name] = float(data[0].get("value", [0, 0])[1]) + except: + pass + return results + +async def get_camera_list(go2rtc_url: str) -> List[str]: + """Get camera list from go2rtc.""" + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(f"{go2rtc_url}/api/streams") + if response.status_code == 200: + return list(response.json().keys()) + except: + pass + return [] + +async def get_sabnzbd_queue(url: str, api_key: str = "") -> Dict: + """Get Sabnzbd download queue.""" + try: + async with httpx.AsyncClient(timeout=5.0) as client: + params = {"mode": "queue", "output": "json"} + if api_key: + params["apikey"] = api_key + response = await client.get(f"{url}/api", params=params) + if response.status_code == 200: + data = response.json().get("queue", {}) + return { + "speed": data.get("speed", "0 B/s"), + "size_left": data.get("sizeleft", "0 B"), + "eta": data.get("timeleft", "Unknown"), + "downloading": len(data.get("slots", [])), + "items": [ + {"name": s.get("filename", "Unknown")[:40], "progress": float(s.get("percentage", 0))} + for s in data.get("slots", [])[:3] + ] + } + except: + pass + return {"speed": "N/A", "downloading": 0, "items": []} + +def get_recent_events() -> List[StatusEvent]: + """Get recent status change events.""" + return list(recent_events) + +def get_cluster_uptime(nodes: List[NodeStatus]) -> float: + """Calculate total cluster uptime in hours.""" + total = 0 + for node in nodes: + if node.uptime_hours: + total += node.uptime_hours + return round(total, 1) diff --git a/app/templates/base.html b/app/templates/base.html new file mode 100644 index 0000000..9eca0d7 --- /dev/null +++ b/app/templates/base.html @@ -0,0 +1,145 @@ + + + + + + DeathStar Homelab + + + + + + +
+ {% block content %}{% endblock %} +
+ + + + + + + diff --git a/app/templates/cameras.html b/app/templates/cameras.html new file mode 100644 index 0000000..bc26f87 --- /dev/null +++ b/app/templates/cameras.html @@ -0,0 +1,322 @@ +{% extends "base.html" %} +{% block content %} +
+ +
+
+ + + + + +

Cameras

+ {{ cameras | length }} cameras +
+
+ +
+ + +
+ + +
+
+ + +
+ {% for camera in cameras %} +
+
+ + + +
+ + +
+ + + LIVE + +
+
+ +
+ {{ camera | replace("_", " ") }} +
+ + +
+
+
+ {% endfor %} +
+
+ + + + + + + +{% endblock %} diff --git a/app/templates/dashboard.html b/app/templates/dashboard.html new file mode 100644 index 0000000..d275fd9 --- /dev/null +++ b/app/templates/dashboard.html @@ -0,0 +1,134 @@ +{% extends "base.html" %} + +{% block content %} + +
+
+
+

DeathStar Homelab

+ + + + + +
+
+ + {{ online_count }}/{{ total_count }} online + Updated: {{ last_check }} +
+
+
+ {% include "partials/status_banner.html" %} +
+
+ + +
+
+ +

Quick Access

+
+
+ {% include "partials/favorites.html" %} +
+
+ + +
+
+ +

Proxmox Cluster: NewHope

+ {{ cluster_uptime }}h total uptime +
+
+ {% include "partials/nodes.html" %} +
+
+ + +
+ +
+
+ {% include "partials/services.html" %} +
+
+ + +
+ +
+
+ +

Backups

+
+
+
Loading...
+
+
+ + +
+
+ +

Recent Events

+
+
+
Loading...
+
+
+ + +
+
+ +

Downloads

+
+
+ {% include "partials/downloads.html" %} +
+
+ + +
+
+ +

Cameras

+
+
+ {% include "partials/cameras.html" %} +
+
+ + +
+
+ +

Docker

+ {% for name, count in docker_counts.items() %}{{ count }}{% if not loop.last %}+{% endif %}{% endfor %} containers +
+
+
Loading...
+
+
+ + +
+
+ +

Uptime

+
+
+
Loading...
+
+
+
+
+{% endblock %} diff --git a/app/templates/partials/cameras.html b/app/templates/partials/cameras.html new file mode 100644 index 0000000..69fb794 --- /dev/null +++ b/app/templates/partials/cameras.html @@ -0,0 +1,20 @@ +
+ {% for camera in cameras[:4] %} +
+ {{ camera }} + +
{{ camera }}
+
+ {% endfor %} +
+ + View all {{ cameras | length }} cameras + diff --git a/app/templates/partials/docker.html b/app/templates/partials/docker.html new file mode 100644 index 0000000..a988848 --- /dev/null +++ b/app/templates/partials/docker.html @@ -0,0 +1,20 @@ +
+ {% for host in hosts %} +
+
+ {{ host.name }} + {{ counts.get(host.name, 0) }} running +
+
+ {% for container in containers if container.host == host.name %} +
+ + {{ container.name }} +
+ {% else %} + No containers + {% endfor %} +
+
+ {% endfor %} +
diff --git a/app/templates/partials/downloads.html b/app/templates/partials/downloads.html new file mode 100644 index 0000000..ab19634 --- /dev/null +++ b/app/templates/partials/downloads.html @@ -0,0 +1,40 @@ +
+ {% if queue and queue.downloading > 0 %} +
+ {{ queue.downloading }} active + {{ queue.speed }} +
+ + {% if queue.items %} +
+ {% for item in queue.items %} +
+
+ {{ item.name }} + {{ item.progress | round(1) }}% +
+
+
+
+
+ {% endfor %} +
+ {% endif %} + + {% if queue.eta and queue.eta != 'Unknown' %} +
+ ETA: {{ queue.eta }} + {{ queue.size_left }} left +
+ {% endif %} + {% else %} +
+ + No active downloads +
+ {% endif %} + + + Open Sabnzbd + +
diff --git a/app/templates/partials/events.html b/app/templates/partials/events.html new file mode 100644 index 0000000..e0eca24 --- /dev/null +++ b/app/templates/partials/events.html @@ -0,0 +1,16 @@ +
+ {% if events %} +
+ {% for event in events | reverse %} +
+ + {{ event.service }} + {{ event.new_status }} + {{ event.timestamp.strftime('%H:%M') }} +
+ {% endfor %} +
+ {% else %} +
No recent events
+ {% endif %} +
diff --git a/app/templates/partials/favorites.html b/app/templates/partials/favorites.html new file mode 100644 index 0000000..cf036c5 --- /dev/null +++ b/app/templates/partials/favorites.html @@ -0,0 +1,13 @@ +
+ {% for service in favorites %} + {% set status = services_status.get(service.name) %} + + + {{ service.name }} + {% if status and status.response_time_ms %} + {{ status.response_time_ms }}ms + {% endif %} + + {% endfor %} +
diff --git a/app/templates/partials/nodes.html b/app/templates/partials/nodes.html new file mode 100644 index 0000000..b2c8b64 --- /dev/null +++ b/app/templates/partials/nodes.html @@ -0,0 +1,66 @@ +
+ {% for node in nodes_status %} +
+
+
+ + {{ node.name }} +
+ {% if node.uptime_hours %} + {{ (node.uptime_hours / 24) | round(1) }}d + {% endif %} +
+ + {% if node.status == 'online' %} +
+
+ CPU + {{ node.cpu_percent | default(0) | round(1) }}% +
+
+
+
+
+ RAM + {% if node.memory_used_gb %}{{ node.memory_used_gb }}{% else %}?{% endif %}/{% if node.memory_total_gb %}{{ node.memory_total_gb }}{% else %}?{% endif %}GB +
+
+
+
+
+ Disk + {{ node.disk_percent | default(0) | round(1) }}% +
+
+
+ + + + {% else %} +
Offline
+ {% endif %} +
+ {% endfor %} +
+{% if cluster_uptime %} +
Cluster total: {{ cluster_uptime }}h
+{% endif %} diff --git a/app/templates/partials/pbs.html b/app/templates/partials/pbs.html new file mode 100644 index 0000000..56f5514 --- /dev/null +++ b/app/templates/partials/pbs.html @@ -0,0 +1,31 @@ +
+ {% if pbs.status == 'online' %} +
+ Online + {% if pbs.last_backup %} + Last: {{ pbs.last_backup }} + {% endif %} +
+ {% if pbs.datastore_usage %} +
+ {% for ds in pbs.datastore_usage %} +
+
+ {{ ds.name }} + {{ ds.used_gb }}/{{ ds.total_gb }}GB +
+
+
+
+
+ {% endfor %} +
+ {% endif %} + {% else %} +
+ + PBS Offline +
+ {% endif %} + Open PBS +
diff --git a/app/templates/partials/services.html b/app/templates/partials/services.html new file mode 100644 index 0000000..0cda695 --- /dev/null +++ b/app/templates/partials/services.html @@ -0,0 +1,60 @@ +{% for category_name, category_services in services_by_category.items() %} +{% set cat_info = categories.get(category_name, {}) %} + +{% endfor %} diff --git a/app/templates/partials/status_banner.html b/app/templates/partials/status_banner.html new file mode 100644 index 0000000..2a67274 --- /dev/null +++ b/app/templates/partials/status_banner.html @@ -0,0 +1,17 @@ +{% if critical_down %} +
+
+ + + Critical services down: {{ critical_down | join(", ") }} + +
+ View details +
+{% else %} +
+ + All critical services operational + {{ online_count }}/{{ total_count }} services online +
+{% endif %} diff --git a/app/templates/partials/uptime.html b/app/templates/partials/uptime.html new file mode 100644 index 0000000..d95ca71 --- /dev/null +++ b/app/templates/partials/uptime.html @@ -0,0 +1,45 @@ +
+ {% if uptime.monitors %} +
+ {{ uptime.summary.up }}/{{ uptime.summary.total }} up + {% if uptime.summary.down > 0 %} + {{ uptime.summary.down }} down + {% endif %} +
+
+ {% for monitor in uptime.monitors[:8] %} +
+ + {{ monitor.name }} + + {% if monitor.heartbeats %} +
+ {% for hb in monitor.heartbeats[-12:] %} + {% set ping_height = ((hb.ping or 50) / 5) %} + {% if ping_height > 100 %}{% set ping_height = 100 %}{% endif %} + {% if ping_height < 20 %}{% set ping_height = 20 %}{% endif %} +
+ {% endfor %} +
+ {% endif %} + {% if monitor.ping %} + {{ monitor.ping }}ms + {% endif %} +
+ {% endfor %} +
+ {% if uptime.monitors | length > 8 %} +
+{{ uptime.monitors | length - 8 }} more
+ {% endif %} + {% else %} +
+ + No uptime data +
+ {% endif %} + + + Open Uptime Kuma + +
diff --git a/app/templates/settings.html b/app/templates/settings.html new file mode 100644 index 0000000..7853876 --- /dev/null +++ b/app/templates/settings.html @@ -0,0 +1,92 @@ + + + + + + Settings - DeathStar Homelab + + + + + +
+
+

Settings

+ Back to Dashboard +
+ + {% if request.query_params.get('saved') %} +
Settings saved successfully!
+ {% endif %} + +
+ +
+

General

+
+
+ + +
+
+ + +
+
+
+ + +
+

Display Options

+
+ + +
+
+ + +
+

Favorites

+

Select services to show in Quick Access

+
+ {% for service in all_services %} + + {% endfor %} +
+
+ +
+ Cancel + +
+
+
+ + diff --git a/config.yaml.example b/config.yaml.example new file mode 100644 index 0000000..8f6fc87 --- /dev/null +++ b/config.yaml.example @@ -0,0 +1,215 @@ +# ============================================================ +# Dashboard Configuration +# ============================================================ +# Copy this file to config.yaml and customize for your setup +# Sensitive values (API keys, secrets) go in secrets.yaml +# ============================================================ + +# Dashboard settings +dashboard: + title: "My Homelab" + refresh_interval: 30 # seconds + theme: dark # dark or light + show_response_times: true + show_icons: true + +# ============================================================ +# Proxmox Cluster Configuration +# ============================================================ +proxmox: + enabled: true + nodes: + - name: node1 + ip: 192.168.1.10 + port: 8006 + - name: node2 + ip: 192.168.1.11 + port: 8006 + # API credentials go in secrets.yaml + +# ============================================================ +# Proxmox Backup Server (PBS) +# ============================================================ +pbs: + enabled: true + url: https://192.168.1.20:8007 + # API credentials go in secrets.yaml + +# ============================================================ +# Camera Configuration (go2rtc) +# ============================================================ +cameras: + enabled: true + go2rtc_url: http://192.168.1.30:1985 + # List camera stream names as configured in go2rtc + streams: + - FrontDoor + - Backyard + - Driveway + +# ============================================================ +# Docker Hosts +# ============================================================ +docker: + enabled: true + hosts: + - name: docker1 + ip: 192.168.1.40 + port: 2375 # Docker API port (ensure it's secured!) 
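+      # The dashboard talks to the plain Docker Engine HTTP API on this host/port
+      # (GET /containers/json) and skips a container named "docker-socket-proxy",
+      # so the assumed setup is a read-only socket proxy rather than a directly
+      # exposed Docker daemon. Minimal sketch (assumes the tecnativa/docker-socket-proxy
+      # image; substitute your proxy of choice):
+      #
+      #   services:
+      #     docker-socket-proxy:
+      #       image: tecnativa/docker-socket-proxy
+      #       environment:
+      #         CONTAINERS: 1          # allow read-only container listing
+      #       volumes:
+      #         - /var/run/docker.sock:/var/run/docker.sock:ro
+      #       ports:
+      #         - "2375:2375"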
+ +# ============================================================ +# Uptime Kuma +# ============================================================ +uptime_kuma: + enabled: true + url: http://192.168.1.50:3001 + status_page: default # Status page slug + +# ============================================================ +# Categories +# ============================================================ +# Define service categories with colors +categories: + Infrastructure: + color: blue + icon: server + Media: + color: purple + icon: film + Monitoring: + color: amber + icon: chart + Apps: + color: emerald + icon: cog + Home: + color: cyan + icon: home + +# ============================================================ +# Services +# ============================================================ +# Define all services to monitor +# +# Fields: +# name: Display name +# url: URL to open when clicked (can be external domain) +# ip: Internal IP for health checks +# port: Port for health checks +# category: Must match a category defined above +# icon: Icon name (see available icons below) +# favorite: Show in favorites section (optional) +# critical: Mark as critical service (optional) +# group: Group related services together (optional) +# health_check: Custom health check config (optional) +# url: Override URL for health check +# timeout: Custom timeout in seconds +# +# Available icons: +# server, shield, globe, archive, lock, film, star, video, +# tv, music, search, download, cog, chart, heartbeat, key, +# git, workflow, book, image, brain, home, camera, message, database +# +services: + # Infrastructure + - name: Router + url: https://192.168.1.1 + ip: 192.168.1.1 + port: 443 + category: Infrastructure + icon: shield + critical: true + + - name: Reverse Proxy + url: https://192.168.1.2:81 + ip: 192.168.1.2 + port: 81 + category: Infrastructure + icon: globe + critical: true + + # Media + - name: Plex + url: https://plex.example.com + ip: 192.168.1.100 + port: 32400 + category: Media + icon: film + favorite: true + + - name: Radarr + url: https://radarr.example.com + ip: 192.168.1.101 + port: 7878 + category: Media + icon: film + group: Arr Stack + + - name: Sonarr + url: https://sonarr.example.com + ip: 192.168.1.102 + port: 8989 + category: Media + icon: tv + group: Arr Stack + + # Monitoring + - name: Grafana + url: https://grafana.example.com + ip: 192.168.1.150 + port: 3000 + category: Monitoring + icon: chart + favorite: true + + - name: Uptime Kuma + url: https://status.example.com + ip: 192.168.1.151 + port: 3001 + category: Monitoring + icon: heartbeat + + # Apps + - name: Vaultwarden + url: https://vault.example.com + ip: 192.168.1.200 + port: 80 + category: Apps + icon: key + favorite: true + health_check: + url: https://vault.example.com/ + timeout: 5 + + # Home Automation + - name: Home Assistant + url: https://ha.example.com + ip: 192.168.1.50 + port: 8123 + category: Home + icon: home + favorite: true + critical: true + +# ============================================================ +# Service Groups +# ============================================================ +# Group related services for organization +service_groups: + Arr Stack: + - Radarr + - Sonarr + - Prowlarr + - Sabnzbd + Media Players: + - Plex + - Jellyfin + +# ============================================================ +# Default favorites (can be customized in UI) +# ============================================================ +favorites: + - Plex + - Grafana + - Vaultwarden + - Home Assistant diff --git a/requirements.txt 
b/requirements.txt new file mode 100644 index 0000000..6e2e173 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +fastapi>=0.100.0 +uvicorn>=0.23.0 +httpx>=0.24.0 +jinja2>=3.1.0 +pyyaml>=6.0 diff --git a/secrets.yaml.example b/secrets.yaml.example new file mode 100644 index 0000000..6c18301 --- /dev/null +++ b/secrets.yaml.example @@ -0,0 +1,36 @@ +# ============================================================ +# Dashboard Secrets Configuration +# ============================================================ +# Copy this file to secrets.yaml and fill in your credentials +# IMPORTANT: Never commit secrets.yaml to version control! +# Add secrets.yaml to your .gitignore +# ============================================================ + +# Proxmox API credentials +# Create a token: Datacenter > Permissions > API Tokens +proxmox: + api_token: root@pam!dashboard + api_secret: your-proxmox-api-secret-here + +# Proxmox Backup Server credentials +# Create a token: Configuration > Access Control > API Token +pbs: + api_token: root@pam!dashboard + api_secret: your-pbs-api-secret-here + +# OPNsense API credentials (optional) +# System > Access > Users > [user] > API keys +opnsense: + api_key: your-opnsense-api-key + api_secret: your-opnsense-api-secret + +# Sabnzbd API key (optional) +# Config > General > API Key +sabnzbd: + api_key: your-sabnzbd-api-key + +# Additional service API keys as needed +# Add custom keys here following the pattern: +# service_name: +# api_key: your-key +# api_secret: your-secret