diff --git a/Dockerfile b/Dockerfile index ad1de54..fa11dcb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -78,7 +78,10 @@ RUN apk update --quiet && \ su-exec \ tini \ wget \ - shadow && \ + shadow \ + python3 \ + py3-flask \ + py3-supervisor && \ rm /var/cache/apk/* && \ rm -rf /etc/periodic /etc/crontabs/root && \ # Set SUID on crontab command so it can modify crontab files @@ -89,11 +92,16 @@ RUN apk update --quiet && \ (getent group | grep -q ":${DOCKER_GID}:" && addgroup docker || addgroup -g ${DOCKER_GID} docker) && \ # Create docker user and add to docker group adduser -S docker -D -G docker && \ - mkdir -p ${HOME_DIR}/jobs ${HOME_DIR}/crontabs && \ + mkdir -p ${HOME_DIR}/jobs ${HOME_DIR}/crontabs ${HOME_DIR}/data && \ chown -R docker:docker ${HOME_DIR} COPY --from=builder /usr/bin/rq/rq /usr/local/bin COPY entrypoint.sh /opt +COPY supervisord.conf /opt/crontab/ +COPY webapp/ /opt/crontab/webapp/ + +# Expose web UI port +EXPOSE 8080 ENTRYPOINT ["/sbin/tini", "--", "/opt/entrypoint.sh"] diff --git a/README.md b/README.md index e7b8983..17eda33 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,124 @@ A great project, don't get me wrong. It was just missing certain key enterprise - Run command in a container using `container`. - Ability to trigger scripts in other containers on completion cron job using `trigger`. - Ability to share settings between cron jobs using `~~shared-settings` as a key. +- **Web Dashboard UI** for monitoring and controlling cron jobs. + +## Web Dashboard + +The crontab container includes a built-in web dashboard for monitoring and managing your cron jobs. 
+ +### Features + +- 📊 **Job Monitoring**: View all scheduled jobs with their current status +- 📅 **Schedule Information**: See when jobs last ran and when they'll run next +- 📝 **Execution History**: Browse past executions with timestamps and exit codes +- 🔍 **Log Viewer**: View stdout and stderr output from job executions +- ▶️ **Manual Triggering**: Run jobs on-demand with a single click +- 📈 **Dashboard Stats**: Overview of total jobs, failures, and recent activity +- 🔄 **Auto-Refresh**: Dashboard automatically updates every 30 seconds + +### Accessing the Web UI + +The web dashboard is available on port **8080** by default. + +**Docker Run:** + +```bash +docker run -d \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + -v ./config.json:/opt/crontab/config.json:ro \ + -v crontab-data:/opt/crontab/data \ + -p 8080:8080 \ + crontab +``` + +Then open http://localhost:8080 in your browser. + +**Docker Compose:** + +```yaml +services: + crontab: + build: . + ports: + - "8080:8080" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock:ro" + - "./config.json:/opt/crontab/config.json:ro" + - "crontab-data:/opt/crontab/data" # Persistent database + environment: + - WEB_UI_PORT=8080 + - JOB_HISTORY_RETENTION_DAYS=30 + - JOB_HISTORY_RETENTION_COUNT=1000 + +volumes: + crontab-data: +``` + +### Configuration + +Configure the web UI using environment variables: + +| Variable | Default | Description | +| ----------------------------- | ------- | ----------------------------------------- | +| `WEB_UI_PORT` | `8080` | Port for the web dashboard | +| `JOB_HISTORY_RETENTION_DAYS` | `30` | Keep execution history for this many days | +| `JOB_HISTORY_RETENTION_COUNT` | `1000` | Keep at least this many recent executions | + +### Data Persistence + +Job execution history is stored in a SQLite database at `/opt/crontab/data/crontab.db`. 
To persist this data across container restarts, mount a volume: + +```bash +-v crontab-data:/opt/crontab/data +``` + +### Health Check + +The web UI includes a health check endpoint at `/api/health`: + +```bash +curl http://localhost:8080/api/health +``` + +Response: + +```json +{ + "status": "healthy", + "crond_running": true, + "database_accessible": true, + "uptime_seconds": 86400 +} +``` + +### API Endpoints + +The dashboard exposes a REST API for programmatic access: + +- `GET /api/jobs` - List all jobs +- `GET /api/executions/` - Get execution history for a job +- `POST /api/trigger/` - Manually trigger a job +- `GET /api/stats` - Get dashboard statistics +- `GET /api/health` - Health check + +### Security Considerations + +The web UI does **not** include authentication by default. For production deployments: + +1. **Reverse Proxy**: Use a reverse proxy (nginx, Traefik) with authentication +1. **Network Isolation**: Run on a private network, not exposed to the internet +1. **Firewall Rules**: Restrict access to trusted IP addresses + +Example nginx reverse proxy with basic auth: + +```nginx +location /crontab/ { + auth_basic "Crontab Dashboard"; + auth_basic_user_file /etc/nginx/.htpasswd; + proxy_pass http://crontab:8080/; +} +``` ## Config file diff --git a/docker-compose.yml b/docker-compose.yml index 3fd9dd2..e8aee80 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,6 +15,16 @@ services: # Or alternatively: stat -c '%g' /var/run/docker.sock DOCKER_GID: 999 restart: always + ports: + - "8080:8080" volumes: - "/var/run/docker.sock:/var/run/docker.sock:ro" - "${PWD}/config-samples/config.sample.mapping.json:/opt/crontab/config.json:ro" + - "crontab-data:/opt/crontab/data" + environment: + - WEB_UI_PORT=8080 + - JOB_HISTORY_RETENTION_DAYS=30 + - JOB_HISTORY_RETENTION_COUNT=1000 + +volumes: + crontab-data: diff --git a/entrypoint.sh b/entrypoint.sh index d3bba1e..5db84a5 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -190,8 +190,33 @@ function 
build_crontab() { echo "#\!/usr/bin/env bash" echo "set -e" echo "" + echo "JOB_NAME=\"${SCRIPT_NAME}\"" + echo "TIMESTAMP=\$(date -u +%Y-%m-%dT%H:%M:%SZ)" + echo "PID=\$\$" + echo "" + echo "# Log job start to database" + echo "python3 /opt/crontab/webapp/db_logger.py start \"\${JOB_NAME}\" \"\${TIMESTAMP}\" \"cron\" \"\${PID}\" 2>&1 || true" + echo "" + echo "# Capture output to temp files" + echo "STDOUT_FILE=\"/tmp/job-\${JOB_NAME}-\$\$.stdout\"" + echo "STDERR_FILE=\"/tmp/job-\${JOB_NAME}-\$\$.stderr\"" + echo "" echo "echo \"start cron job __${SCRIPT_NAME}__\"" - echo "${CRON_COMMAND}" + echo "set +e" + echo "${CRON_COMMAND} > \"\${STDOUT_FILE}\" 2> \"\${STDERR_FILE}\"" + echo "EXIT_CODE=\$?" + echo "set -e" + echo "" + echo "# Log job completion to database" + echo "END_TIMESTAMP=\$(date -u +%Y-%m-%dT%H:%M:%SZ)" + echo "python3 /opt/crontab/webapp/db_logger.py end \"\${JOB_NAME}\" \"\${END_TIMESTAMP}\" \"\${EXIT_CODE}\" \"\${STDOUT_FILE}\" \"\${STDERR_FILE}\" 2>&1 || true" + echo "" + echo "# Output to container logs" + echo "cat \"\${STDOUT_FILE}\" 2>/dev/null || true" + echo "cat \"\${STDERR_FILE}\" >&2 2>/dev/null || true" + echo "" + echo "# Clean up temp files" + echo "rm -f \"\${STDOUT_FILE}\" \"\${STDERR_FILE}\"" } > "${SCRIPT_PATH}" TRIGGER=$(echo "${KEY}" | jq -r '.trigger') @@ -245,6 +270,18 @@ function build_crontab() { printf "##### cron running #####\n" } +init_webapp() { + printf "##### initializing web app #####\n" + + # Initialize database schema + python3 /opt/crontab/webapp/init_db.py + + # Sync jobs from config to database + python3 /opt/crontab/webapp/sync_jobs.py "${CONFIG}" + + printf "##### web app initialized #####\n" +} + start_app() { normalize_config export CONFIG=${HOME_DIR}/config.working.json @@ -254,6 +291,16 @@ start_app() { fi if [ "${1}" == "crond" ]; then build_crontab + init_webapp + fi + + # Use supervisord to manage crond and Flask if we're starting crond + if [ "${1}" == "crond" ]; then + if [ "$(id -u)" = "0" ]; then + 
exec su-exec docker supervisord -c /opt/crontab/supervisord.conf + else + exec supervisord -c /opt/crontab/supervisord.conf + fi fi # Filter out invalid crond flags diff --git a/supervisord.conf b/supervisord.conf new file mode 100644 index 0000000..1d8981b --- /dev/null +++ b/supervisord.conf @@ -0,0 +1,28 @@ +[supervisord] +nodaemon=true +user=docker +logfile=/dev/null +logfile_maxbytes=0 +loglevel=info +pidfile=/tmp/supervisord.pid + +[program:crond] +command=crond -f -d 7 -c /opt/crontab/crontabs +autostart=true +autorestart=true +stdout_logfile=/dev/fd/1 +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/fd/2 +stderr_logfile_maxbytes=0 +priority=10 + +[program:flask] +command=python3 /opt/crontab/webapp/app.py +autostart=true +autorestart=true +stdout_logfile=/dev/fd/1 +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/fd/2 +stderr_logfile_maxbytes=0 +environment=FLASK_ENV="production",WEB_UI_PORT="8080" +priority=20 diff --git a/webapp/app.py b/webapp/app.py new file mode 100755 index 0000000..b3d8b4a --- /dev/null +++ b/webapp/app.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python3 +"""Flask web application for crontab dashboard.""" + +from flask import Flask, render_template, jsonify, request +import os +import subprocess +import re +from models import ( + get_all_jobs, + get_job, + get_job_executions, + get_execution_by_id, + get_dashboard_stats +) + + +app = Flask(__name__) +app.config['DATABASE'] = '/opt/crontab/data/crontab.db' + + +def validate_job_name(job_name): + """ + Validate job name to prevent path traversal attacks. 
+ + Args: + job_name: Job name to validate + + Returns: + str: Validated job name + + Raises: + ValueError: If job name is invalid + """ + if not re.match(r'^[a-zA-Z0-9_-]+$', job_name): + raise ValueError("Invalid job name format") + return job_name + + +@app.route('/') +def index(): + """Render the dashboard UI.""" + return render_template('index.html') + + +@app.route('/api/jobs') +def api_get_jobs(): + """ + Get all jobs with their current status. + + Returns: + JSON list of jobs + """ + try: + jobs = get_all_jobs() + return jsonify(jobs) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/jobs/') +def api_get_job(job_name): + """ + Get details for a specific job. + + Args: + job_name: Name of the job + + Returns: + JSON job object or 404 + """ + try: + job_name = validate_job_name(job_name) + job = get_job(job_name) + if job: + return jsonify(job) + return jsonify({"error": "Job not found"}), 404 + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/executions/') +def api_get_executions(job_name): + """ + Get execution history for a job. + + Args: + job_name: Name of the job + + Query Parameters: + limit: Maximum number of executions to return (default 50) + + Returns: + JSON list of executions + """ + try: + job_name = validate_job_name(job_name) + limit = request.args.get('limit', 50, type=int) + limit = min(max(limit, 1), 1000) # Clamp between 1 and 1000 + + executions = get_job_executions(job_name, limit) + return jsonify(executions) + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/executions/id/') +def api_get_execution(execution_id): + """ + Get details for a specific execution. 
+ + Args: + execution_id: Execution ID + + Returns: + JSON execution object or 404 + """ + try: + execution = get_execution_by_id(execution_id) + if execution: + return jsonify(execution) + return jsonify({"error": "Execution not found"}), 404 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/trigger/', methods=['POST']) +def api_trigger_job(job_name): + """ + Manually trigger a job execution. + + Args: + job_name: Name of the job to trigger + + Returns: + JSON status message + """ + # Simple rate limiting (in-memory) + if not hasattr(api_trigger_job, 'rate_limit'): + api_trigger_job.rate_limit = {} + + import time + now = time.time() + job_triggers = api_trigger_job.rate_limit.get(job_name, []) + job_triggers = [t for t in job_triggers if now - t < 60] # Last minute + + if len(job_triggers) >= 5: + return jsonify({"error": "Rate limit exceeded (max 5 triggers per minute)"}), 429 + + try: + job_name = validate_job_name(job_name) + + # Verify job exists + job = get_job(job_name) + if not job: + return jsonify({"error": "Job not found"}), 404 + + # Verify script exists + script_path = f"/opt/crontab/jobs/{job_name}.sh" + real_path = os.path.realpath(script_path) + if not real_path.startswith('/opt/crontab/jobs/'): + return jsonify({"error": "Invalid job path"}), 400 + + if not os.path.exists(script_path): + return jsonify({"error": "Job script not found"}), 404 + + # Execute job in background + subprocess.Popen( + [script_path], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + start_new_session=True + ) + + # Update rate limit + job_triggers.append(now) + api_trigger_job.rate_limit[job_name] = job_triggers + + return jsonify({ + "status": "triggered", + "job": job_name, + "timestamp": time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + }) + + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/stats') +def 
api_get_stats(): + """ + Get dashboard statistics. + + Returns: + JSON stats object + """ + try: + stats = get_dashboard_stats() + return jsonify(stats) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/health') +def api_health(): + """ + Health check endpoint. + + Returns: + JSON health status + """ + import time + try: + # Check database accessibility + stats = get_dashboard_stats() + db_ok = True + except Exception: + db_ok = False + + # Check if crond is running + try: + result = subprocess.run( + ['ps', 'aux'], + capture_output=True, + text=True, + timeout=2 + ) + crond_running = 'crond' in result.stdout + except Exception: + crond_running = False + + # Calculate uptime (approximate) + try: + with open('/proc/uptime', 'r') as f: + uptime = float(f.read().split()[0]) + except Exception: + uptime = 0 + + status = "healthy" if (db_ok and crond_running) else "unhealthy" + + return jsonify({ + "status": status, + "crond_running": crond_running, + "database_accessible": db_ok, + "uptime_seconds": int(uptime) + }), 200 if status == "healthy" else 503 + + +if __name__ == '__main__': + # Run Flask server + port = int(os.environ.get('WEB_UI_PORT', 8080)) + debug = os.environ.get('FLASK_ENV') == 'development' + + print(f"🚀 Starting web UI on port {port}") + app.run( + host='0.0.0.0', + port=port, + debug=debug, + threaded=False + ) diff --git a/webapp/cron_parser.py b/webapp/cron_parser.py new file mode 100755 index 0000000..f1447b5 --- /dev/null +++ b/webapp/cron_parser.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +"""Parse cron schedules and calculate next run times.""" + +import re +from datetime import datetime, timedelta + + +class CronParser: + """Parser for cron schedule expressions.""" + + # Shortcut mappings + SHORTCUTS = { + '@yearly': '0 0 1 1 *', + '@annually': '0 0 1 1 *', + '@monthly': '0 0 1 * *', + '@weekly': '0 0 * * 0', + '@daily': '0 0 * * *', + '@midnight': '0 0 * * *', + '@hourly': '0 * * * *', + } + + def 
parse_schedule(self, schedule_str): + """ + Parse crontab schedule and return human-readable next run time. + + Args: + schedule_str: Cron schedule string (e.g., "*/5 * * * *", "@hourly") + + Returns: + str: Human-readable description of next run time + """ + schedule_str = schedule_str.strip() + + # Handle shortcuts + if schedule_str in self.SHORTCUTS: + schedule_str = self.SHORTCUTS[schedule_str] + return self._describe_standard_cron(schedule_str) + + # Handle @every syntax + if schedule_str.startswith('@every'): + return self._parse_every(schedule_str) + + # Handle @random (return placeholder) + if schedule_str.startswith('@random'): + return "Random (varies per container start)" + + # Parse standard cron: minute hour day month weekday + return self._describe_standard_cron(schedule_str) + + def _parse_every(self, schedule_str): + """ + Parse @every syntax (e.g., @every 2m, @every 1h). + + Args: + schedule_str: Schedule string starting with @every + + Returns: + str: Description like "Every 2 minutes" or next execution time + """ + # Extract duration: @every 2m, @every 1h30m, @every 1d + match = re.search(r'@every\s+(\d+)([mhd])', schedule_str) + if match: + value, unit = int(match.group(1)), match.group(2) + + if unit == 'm': + return f"Every {value} minute{'s' if value != 1 else ''}" + elif unit == 'h': + return f"Every {value} hour{'s' if value != 1 else ''}" + elif unit == 'd': + return f"Every {value} day{'s' if value != 1 else ''}" + + return "Invalid @every syntax" + + def _describe_standard_cron(self, schedule_str): + """ + Convert standard cron syntax to human-readable description. 
+ + Args: + schedule_str: Standard cron string (e.g., "0 2 * * *") + + Returns: + str: Human-readable description + """ + parts = schedule_str.split() + if len(parts) != 5: + return f"Invalid cron syntax: {schedule_str}" + + minute, hour, day, month, weekday = parts + + # Handle common patterns + if minute == '*' and hour == '*': + return "Every minute" + + if minute.startswith('*/'): + interval = minute[2:] + return f"Every {interval} minute{'s' if int(interval) != 1 else ''}" + + if hour.startswith('*/') and minute == '0': + interval = hour[2:] + return f"Every {interval} hour{'s' if int(interval) != 1 else ''}" + + if day.startswith('*/') and minute == '0' and hour == '0': + interval = day[2:] + return f"Every {interval} day{'s' if int(interval) != 1 else ''}" + + # Specific time patterns + if minute != '*' and hour != '*' and day == '*' and month == '*' and weekday == '*': + return f"Daily at {hour.zfill(2)}:{minute.zfill(2)}" + + if minute != '*' and hour != '*' and day != '*' and month == '*' and weekday == '*': + return f"Monthly on day {day} at {hour.zfill(2)}:{minute.zfill(2)}" + + # Fallback: show cron expression + return f"Cron: {schedule_str}" + + def calculate_next_run(self, schedule_str, from_time=None): + """ + Calculate next run time for a cron schedule. 
+ + Args: + schedule_str: Cron schedule string + from_time: Reference time (default: now) + + Returns: + datetime: Next execution time (approximate for complex patterns) + """ + if from_time is None: + from_time = datetime.now() + + # Handle @every syntax + if schedule_str.startswith('@every'): + match = re.search(r'@every\s+(\d+)([mhd])', schedule_str) + if match: + value, unit = int(match.group(1)), match.group(2) + if unit == 'm': + return from_time + timedelta(minutes=value) + elif unit == 'h': + return from_time + timedelta(hours=value) + elif unit == 'd': + return from_time + timedelta(days=value) + + # Handle shortcuts + if schedule_str in self.SHORTCUTS: + schedule_str = self.SHORTCUTS[schedule_str] + + # Parse standard cron (simplified - just handle common patterns) + parts = schedule_str.split() + if len(parts) == 5: + minute, hour, day, month, weekday = parts + + # Every N minutes + if minute.startswith('*/'): + interval = int(minute[2:]) + next_run = from_time + timedelta(minutes=interval) + return next_run.replace(second=0, microsecond=0) + + # Every hour + if hour.startswith('*/') and minute.isdigit(): + interval = int(hour[2:]) + next_run = from_time + timedelta(hours=interval) + return next_run.replace(minute=int(minute), second=0, microsecond=0) + + # Specific time + if minute.isdigit() and hour.isdigit(): + target_hour = int(hour) + target_minute = int(minute) + next_run = from_time.replace(hour=target_hour, minute=target_minute, second=0, microsecond=0) + if next_run <= from_time: + next_run += timedelta(days=1) + return next_run + + # Fallback: estimate ~1 hour from now + return from_time + timedelta(hours=1) + + +if __name__ == '__main__': + # Test the parser + parser = CronParser() + + test_cases = [ + '*/5 * * * *', + '0 2 * * *', + '@hourly', + '@every 2m', + '@every 1h', + '43 6,18 * * *', + '* * * * *', + ] + + print("Cron Parser Test Results:") + print("-" * 60) + for schedule in test_cases: + description = parser.parse_schedule(schedule) 
+ print(f"{schedule:20} => {description}") diff --git a/webapp/db_logger.py b/webapp/db_logger.py new file mode 100755 index 0000000..289bbef --- /dev/null +++ b/webapp/db_logger.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +"""Database logging helper called by job scripts to record executions.""" + +import sqlite3 +import sys +from datetime import datetime + + +DB_PATH = '/opt/crontab/data/crontab.db' +MAX_PREVIEW_SIZE = 10 * 1024 # 10KB preview + + +def truncate_output(content, max_size=MAX_PREVIEW_SIZE): + """Truncate output to max size, preserving size information.""" + if len(content) > max_size: + return content[:max_size] + f"\n... (truncated, {len(content)} bytes total)" + return content + + +def log_start(job_name, start_time, triggered_by, pid): + """ + Log the start of a job execution. + + Args: + job_name: Name of the job + start_time: ISO 8601 timestamp + triggered_by: 'cron' or 'manual' + pid: Process ID + """ + try: + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + cursor.execute(''' + INSERT INTO job_executions (job_name, start_time, triggered_by) + VALUES (?, ?, ?) + ''', (job_name, start_time, triggered_by)) + + execution_id = cursor.lastrowid + + # Update job status to 'running' + cursor.execute(''' + UPDATE jobs SET status = 'running', last_run = ? WHERE name = ? + ''', (start_time, job_name)) + + conn.commit() + conn.close() + + # Print execution ID so script can use it + print(f"EXECUTION_ID={execution_id}", file=sys.stderr) + return 0 + + except Exception as e: + print(f"Error logging job start: {e}", file=sys.stderr) + return 1 + + +def log_end(job_name, end_time, exit_code, stdout_file, stderr_file): + """ + Log the completion of a job execution. 
+ + Args: + job_name: Name of the job + end_time: ISO 8601 timestamp + exit_code: Exit code from command + stdout_file: Path to stdout temp file + stderr_file: Path to stderr temp file + """ + try: + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + # Read output files + stdout_content = "" + stderr_content = "" + stdout_size = 0 + stderr_size = 0 + + try: + with open(stdout_file, 'r', encoding='utf-8', errors='replace') as f: + stdout_content = f.read() + stdout_size = len(stdout_content) + except FileNotFoundError: + pass + except Exception as e: + print(f"Warning: Could not read stdout file: {e}", file=sys.stderr) + + try: + with open(stderr_file, 'r', encoding='utf-8', errors='replace') as f: + stderr_content = f.read() + stderr_size = len(stderr_content) + except FileNotFoundError: + pass + except Exception as e: + print(f"Warning: Could not read stderr file: {e}", file=sys.stderr) + + # Truncate for preview + stdout_preview = truncate_output(stdout_content) + stderr_preview = truncate_output(stderr_content) + + # Find the most recent execution for this job that hasn't ended + cursor.execute(''' + SELECT id, start_time FROM job_executions + WHERE job_name = ? AND end_time IS NULL + ORDER BY start_time DESC LIMIT 1 + ''', (job_name,)) + + row = cursor.fetchone() + if row: + execution_id, start_time = row + + # Calculate duration + start_dt = datetime.fromisoformat(start_time.replace('Z', '+00:00')) + end_dt = datetime.fromisoformat(end_time.replace('Z', '+00:00')) + duration = (end_dt - start_dt).total_seconds() + + # Update execution record + cursor.execute(''' + UPDATE job_executions + SET end_time = ?, + duration_seconds = ?, + exit_code = ?, + stdout_preview = ?, + stderr_preview = ?, + stdout_size = ?, + stderr_size = ? + WHERE id = ? 
+ ''', (end_time, duration, exit_code, stdout_preview, stderr_preview, + stdout_size, stderr_size, execution_id)) + + # Update job status based on exit code + status = 'completed' if exit_code == 0 else 'failed' + cursor.execute(''' + UPDATE jobs SET status = ? WHERE name = ? + ''', (status, job_name)) + + conn.commit() + conn.close() + return 0 + else: + print(f"Warning: No pending execution found for {job_name}", file=sys.stderr) + conn.close() + return 1 + + except Exception as e: + print(f"Error logging job end: {e}", file=sys.stderr) + return 1 + + +def main(): + """Main entry point for db_logger script.""" + if len(sys.argv) < 3: + print("Usage: db_logger.py ...", file=sys.stderr) + print(" start ", file=sys.stderr) + print(" end ", file=sys.stderr) + return 1 + + command = sys.argv[1] + + if command == 'start': + if len(sys.argv) != 6: + print("Usage: db_logger.py start ", file=sys.stderr) + return 1 + return log_start(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) + + elif command == 'end': + if len(sys.argv) != 7: + print("Usage: db_logger.py end ", file=sys.stderr) + return 1 + return log_end(sys.argv[2], sys.argv[3], int(sys.argv[4]), sys.argv[5], sys.argv[6]) + + else: + print(f"Unknown command: {command}", file=sys.stderr) + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/webapp/init_db.py b/webapp/init_db.py new file mode 100755 index 0000000..790fa96 --- /dev/null +++ b/webapp/init_db.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +"""Initialize the SQLite database schema for crontab web UI.""" + +import sqlite3 +import os +import sys + + +DB_PATH = '/opt/crontab/data/crontab.db' + + +def init_database(): + """Initialize database schema if not exists.""" + # Create data directory if it doesn't exist + os.makedirs(os.path.dirname(DB_PATH), exist_ok=True) + + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + # Create tables (idempotent - safe to run multiple times) + cursor.executescript(''' + CREATE TABLE IF NOT 
EXISTS jobs ( + name TEXT PRIMARY KEY, + schedule TEXT NOT NULL, + command TEXT NOT NULL, + image TEXT, + container TEXT, + comment TEXT, + last_run TIMESTAMP, + next_run TEXT, + status TEXT DEFAULT 'scheduled', + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + + CREATE TABLE IF NOT EXISTS job_executions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + job_name TEXT NOT NULL, + start_time TIMESTAMP NOT NULL, + end_time TIMESTAMP, + duration_seconds REAL, + exit_code INTEGER, + stdout_preview TEXT, + stderr_preview TEXT, + stdout_size INTEGER, + stderr_size INTEGER, + triggered_by TEXT DEFAULT 'cron', + parent_job TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (job_name) REFERENCES jobs(name) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS system_events ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_type TEXT NOT NULL, + message TEXT, + metadata TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + + CREATE INDEX IF NOT EXISTS idx_executions_job_start + ON job_executions(job_name, start_time DESC); + CREATE INDEX IF NOT EXISTS idx_executions_start_time + ON job_executions(start_time DESC); + CREATE INDEX IF NOT EXISTS idx_events_type_time + ON system_events(event_type, created_at DESC); + ''') + + conn.commit() + conn.close() + print("✅ Database initialized successfully") + return 0 + + +if __name__ == '__main__': + try: + sys.exit(init_database()) + except Exception as e: + print(f"❌ Error initializing database: {e}", file=sys.stderr) + sys.exit(1) diff --git a/webapp/models.py b/webapp/models.py new file mode 100755 index 0000000..f47c74b --- /dev/null +++ b/webapp/models.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python3 +"""Database models and query functions.""" + +import sqlite3 +from typing import List, Dict, Optional +from datetime import datetime, timedelta + + +DB_PATH = '/opt/crontab/data/crontab.db' + + +def get_db(): + """Get database connection with row factory.""" + 
conn = sqlite3.connect(DB_PATH) + conn.row_factory = sqlite3.Row + return conn + + +def get_all_jobs() -> List[Dict]: + """ + Get all jobs with their status. + + Returns: + List of job dictionaries + """ + db = get_db() + cursor = db.execute(''' + SELECT * FROM jobs ORDER BY name + ''') + jobs = [dict(row) for row in cursor.fetchall()] + db.close() + return jobs + + +def get_job(job_name: str) -> Optional[Dict]: + """ + Get a single job by name. + + Args: + job_name: Name of the job + + Returns: + Job dictionary or None if not found + """ + db = get_db() + cursor = db.execute(''' + SELECT * FROM jobs WHERE name = ? + ''', (job_name,)) + row = cursor.fetchone() + db.close() + return dict(row) if row else None + + +def get_job_executions(job_name: str, limit: int = 50) -> List[Dict]: + """ + Get execution history for a job. + + Args: + job_name: Name of the job + limit: Maximum number of executions to return + + Returns: + List of execution dictionaries + """ + db = get_db() + cursor = db.execute(''' + SELECT * FROM job_executions + WHERE job_name = ? + ORDER BY start_time DESC + LIMIT ? + ''', (job_name, limit)) + executions = [dict(row) for row in cursor.fetchall()] + db.close() + return executions + + +def get_execution_by_id(execution_id: int) -> Optional[Dict]: + """ + Get a single execution by ID. + + Args: + execution_id: Execution ID + + Returns: + Execution dictionary or None if not found + """ + db = get_db() + cursor = db.execute(''' + SELECT * FROM job_executions WHERE id = ? + ''', (execution_id,)) + row = cursor.fetchone() + db.close() + return dict(row) if row else None + + +def get_dashboard_stats() -> Dict: + """ + Get dashboard statistics. 
+ + Returns: + Dictionary with stats + """ + db = get_db() + + # Total jobs + cursor = db.execute('SELECT COUNT(*) as count FROM jobs') + total_jobs = cursor.fetchone()['count'] + + # Active jobs (not failed) + cursor = db.execute(''' + SELECT COUNT(*) as count FROM jobs WHERE status != 'failed' + ''') + active_jobs = cursor.fetchone()['count'] + + # Total executions + cursor = db.execute('SELECT COUNT(*) as count FROM job_executions') + total_executions = cursor.fetchone()['count'] + + # Recent failures (last 24 hours) + yesterday = (datetime.now() - timedelta(days=1)).isoformat() + cursor = db.execute(''' + SELECT COUNT(*) as count FROM job_executions + WHERE exit_code != 0 AND start_time > ? + ''', (yesterday,)) + recent_failures = cursor.fetchone()['count'] + + # Executions in last 24 hours + cursor = db.execute(''' + SELECT COUNT(*) as count FROM job_executions + WHERE start_time > ? + ''', (yesterday,)) + last_24h_executions = cursor.fetchone()['count'] + + db.close() + + return { + 'total_jobs': total_jobs, + 'active_jobs': active_jobs, + 'total_executions': total_executions, + 'recent_failures': recent_failures, + 'last_24h_executions': last_24h_executions + } + + +def cleanup_old_executions(retention_days: int = 30, retention_count: int = 1000): + """ + Clean up old job executions based on retention policy. + + Args: + retention_days: Keep executions newer than this many days + retention_count: Keep at least this many recent executions + """ + db = get_db() + + # Delete executions that are: + # 1. Not in the most recent N executions AND + # 2. Older than retention_days + cutoff_date = (datetime.now() - timedelta(days=retention_days)).isoformat() + + db.execute(''' + DELETE FROM job_executions + WHERE id NOT IN ( + SELECT id FROM job_executions + ORDER BY start_time DESC LIMIT ? + ) AND start_time < ? 
+ ''', (retention_count, cutoff_date)) + + deleted = db.total_changes + db.commit() + db.close() + + if deleted > 0: + print(f"✅ Cleaned up {deleted} old job executions") + + return deleted + + +if __name__ == '__main__': + # Test database queries + print("Testing database models...") + print("-" * 60) + + try: + stats = get_dashboard_stats() + print(f"Dashboard Stats: {stats}") + + jobs = get_all_jobs() + print(f"\nFound {len(jobs)} jobs") + for job in jobs: + print(f" - {job['name']}: {job['schedule']}") + + except Exception as e: + print(f"Error: {e}") diff --git a/webapp/static/css/style.css b/webapp/static/css/style.css new file mode 100644 index 0000000..58e238b --- /dev/null +++ b/webapp/static/css/style.css @@ -0,0 +1,512 @@ +/* Docker Crontab Dashboard Styles */ + +:root { + --primary: #2563eb; + --primary-hover: #1d4ed8; + --success: #10b981; + --success-hover: #059669; + --warning: #f59e0b; + --danger: #ef4444; + --danger-hover: #dc2626; + --bg-light: #f9fafb; + --bg-dark: #1f2937; + --bg-card: #ffffff; + --text-primary: #111827; + --text-secondary: #6b7280; + --border: #e5e7eb; + --shadow: rgba(0, 0, 0, 0.1); +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; + background: var(--bg-light); + color: var(--text-primary); + line-height: 1.6; + min-height: 100vh; + display: flex; + flex-direction: column; +} + +.container { + max-width: 1200px; + margin: 0 auto; + padding: 0 1.5rem; + width: 100%; +} + +/* Header */ +header { + background: var(--bg-card); + padding: 1.5rem 0; + box-shadow: 0 2px 4px var(--shadow); + margin-bottom: 2rem; +} + +header h1 { + font-size: 1.875rem; + margin-bottom: 1rem; + color: var(--text-primary); +} + +#stats-bar { + display: flex; + gap: 2rem; + flex-wrap: wrap; +} + +.stat-item { + display: flex; + flex-direction: column; + gap: 0.25rem; +} + +.stat-label { + font-size: 0.875rem; + color: 
var(--text-secondary); + font-weight: 500; +} + +.stat-value { + font-size: 1.5rem; + font-weight: 700; + color: var(--primary); +} + +/* Main Content */ +main { + flex: 1; + padding-bottom: 2rem; +} + +.section-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 1.5rem; +} + +.section-header h2 { + font-size: 1.5rem; + color: var(--text-primary); +} + +.hidden { + display: none !important; +} + +/* Job Cards */ +.job-card { + background: var(--bg-card); + padding: 1.5rem; + margin: 1rem 0; + border-radius: 8px; + box-shadow: 0 1px 3px var(--shadow); + display: flex; + justify-content: space-between; + align-items: center; + border-left: 4px solid var(--border); + transition: transform 0.2s, box-shadow 0.2s; +} + +.job-card:hover { + transform: translateY(-2px); + box-shadow: 0 4px 6px var(--shadow); +} + +.job-card.status-running { + border-left-color: var(--primary); +} + +.job-card.status-completed { + border-left-color: var(--success); +} + +.job-card.status-failed { + border-left-color: var(--danger); +} + +.job-card.status-scheduled { + border-left-color: var(--text-secondary); +} + +.job-info { + flex: 1; +} + +.job-info h3 { + font-size: 1.125rem; + margin-bottom: 0.5rem; + color: var(--text-primary); +} + +.job-info p { + color: var(--text-secondary); + margin-bottom: 0.5rem; + font-size: 0.875rem; +} + +.job-info small { + color: var(--text-secondary); + font-size: 0.8rem; +} + +.job-meta { + display: flex; + gap: 1.5rem; + margin-top: 0.5rem; +} + +.job-meta-item { + display: flex; + flex-direction: column; +} + +.job-meta-item strong { + font-size: 0.75rem; + color: var(--text-secondary); + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.job-actions { + display: flex; + gap: 0.75rem; +} + +/* Execution History */ +.history-table { + background: var(--bg-card); + border-radius: 8px; + overflow: hidden; + box-shadow: 0 1px 3px var(--shadow); +} + +.history-header { + display: grid; + 
grid-template-columns: 2fr 1fr 100px 120px 150px; + gap: 1rem; + padding: 1rem 1.5rem; + background: var(--bg-light); + font-weight: 600; + font-size: 0.875rem; + color: var(--text-secondary); + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.execution-row { + display: grid; + grid-template-columns: 2fr 1fr 100px 120px 150px; + gap: 1rem; + padding: 1rem 1.5rem; + border-bottom: 1px solid var(--border); + align-items: center; + transition: background-color 0.2s; +} + +.execution-row:last-child { + border-bottom: none; +} + +.execution-row:hover { + background: var(--bg-light); +} + +.status-badge { + display: inline-block; + padding: 0.25rem 0.75rem; + border-radius: 12px; + font-size: 0.75rem; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.status-badge.success { + background: #d1fae5; + color: #065f46; +} + +.status-badge.failed { + background: #fee2e2; + color: #991b1b; +} + +/* Buttons */ +.btn { + padding: 0.5rem 1rem; + border: none; + border-radius: 6px; + cursor: pointer; + font-size: 0.875rem; + font-weight: 600; + transition: background-color 0.2s, transform 0.1s; + white-space: nowrap; +} + +.btn:hover { + transform: translateY(-1px); +} + +.btn:active { + transform: translateY(0); +} + +.btn-primary { + background: var(--primary); + color: white; +} + +.btn-primary:hover { + background: var(--primary-hover); +} + +.btn-success { + background: var(--success); + color: white; +} + +.btn-success:hover { + background: var(--success-hover); +} + +.btn-danger { + background: var(--danger); + color: white; +} + +.btn-danger:hover { + background: var(--danger-hover); +} + +.btn-secondary { + background: var(--bg-light); + color: var(--text-primary); + border: 1px solid var(--border); +} + +.btn-secondary:hover { + background: var(--border); +} + +.btn-sm { + padding: 0.375rem 0.75rem; + font-size: 0.8rem; +} + +/* Modal */ +.modal { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: 
rgba(0, 0, 0, 0.5); + display: flex; + justify-content: center; + align-items: center; + z-index: 1000; +} + +.modal-content { + background: var(--bg-card); + border-radius: 12px; + width: 90%; + max-width: 800px; + max-height: 90vh; + display: flex; + flex-direction: column; + box-shadow: 0 20px 25px -5px rgba(0, 0, 0, 0.1); +} + +.modal-header { + padding: 1.5rem; + border-bottom: 1px solid var(--border); + display: flex; + justify-content: space-between; + align-items: center; +} + +.modal-header h3 { + font-size: 1.25rem; + color: var(--text-primary); +} + +.btn-close { + background: none; + border: none; + font-size: 1.5rem; + cursor: pointer; + color: var(--text-secondary); + padding: 0; + width: 2rem; + height: 2rem; + display: flex; + align-items: center; + justify-content: center; + border-radius: 4px; +} + +.btn-close:hover { + background: var(--bg-light); + color: var(--text-primary); +} + +.modal-body { + padding: 1.5rem; + overflow-y: auto; + flex: 1; +} + +.log-info { + display: grid; + grid-template-columns: repeat(2, 1fr); + gap: 1rem; + margin-bottom: 1.5rem; + padding: 1rem; + background: var(--bg-light); + border-radius: 6px; +} + +.log-tabs { + display: flex; + gap: 0.5rem; + margin-bottom: 1rem; + border-bottom: 2px solid var(--border); +} + +.tab-btn { + padding: 0.75rem 1.5rem; + border: none; + background: none; + cursor: pointer; + font-weight: 600; + color: var(--text-secondary); + border-bottom: 2px solid transparent; + margin-bottom: -2px; + transition: color 0.2s, border-color 0.2s; +} + +.tab-btn:hover { + color: var(--text-primary); +} + +.tab-btn.active { + color: var(--primary); + border-bottom-color: var(--primary); +} + +.log-content { + position: relative; + min-height: 300px; +} + +.log-pane { + background: var(--bg-dark); + color: #d1d5db; + padding: 1rem; + border-radius: 6px; + overflow-x: auto; + font-family: 'Courier New', Courier, monospace; + font-size: 0.875rem; + line-height: 1.5; + white-space: pre-wrap; + word-wrap: 
break-word; +} + +.log-pane.hidden { + display: none; +} + +/* Loading */ +.loading { + text-align: center; + padding: 3rem; + color: var(--text-secondary); + font-size: 1.125rem; +} + +.empty-state { + text-align: center; + padding: 3rem; + color: var(--text-secondary); +} + +.empty-state h3 { + margin-bottom: 0.5rem; + color: var(--text-primary); +} + +/* Footer */ +footer { + background: var(--bg-card); + padding: 1.5rem 0; + margin-top: 2rem; + border-top: 1px solid var(--border); + text-align: center; + color: var(--text-secondary); + font-size: 0.875rem; +} + +footer a { + color: var(--primary); + text-decoration: none; +} + +footer a:hover { + text-decoration: underline; +} + +/* Responsive */ +@media (max-width: 768px) { + .job-card { + flex-direction: column; + align-items: flex-start; + gap: 1rem; + } + + .job-actions { + width: 100%; + } + + .job-actions .btn { + flex: 1; + } + + .history-header, + .execution-row { + grid-template-columns: 1fr; + gap: 0.5rem; + } + + .history-header { + display: none; + } + + .execution-row > div::before { + content: attr(data-label); + font-weight: 600; + margin-right: 0.5rem; + color: var(--text-secondary); + font-size: 0.75rem; + } + + #stats-bar { + gap: 1rem; + } + + .modal-content { + width: 95%; + max-height: 95vh; + } + + .log-info { + grid-template-columns: 1fr; + } +} diff --git a/webapp/static/js/app.js b/webapp/static/js/app.js new file mode 100644 index 0000000..91bdd54 --- /dev/null +++ b/webapp/static/js/app.js @@ -0,0 +1,398 @@ +// Docker Crontab Dashboard - Vanilla JavaScript Application + +const App = { + currentView: 'jobs', + refreshInterval: null, + refreshDelay: 30000, // 30 seconds + + /** + * Initialize the application + */ + init() { + this.setupEventListeners(); + this.loadStats(); + this.loadJobs(); + this.startAutoRefresh(); + console.log('🚀 Dashboard initialized'); + }, + + /** + * Set up event listeners + */ + setupEventListeners() { + // Back to jobs button + 
document.getElementById('back-to-jobs').addEventListener('click', () => { + this.showJobsList(); + }); + + // Refresh button + document.getElementById('refresh-btn').addEventListener('click', () => { + this.refresh(); + }); + + // Modal close button + document.getElementById('close-modal').addEventListener('click', () => { + this.closeModal(); + }); + + // Close modal on background click + document.getElementById('log-modal').addEventListener('click', (e) => { + if (e.target.id === 'log-modal') { + this.closeModal(); + } + }); + + // Tab buttons + document.querySelectorAll('.tab-btn').forEach(btn => { + btn.addEventListener('click', (e) => { + this.switchTab(e.target.dataset.tab); + }); + }); + + // Keyboard shortcuts + document.addEventListener('keydown', (e) => { + if (e.key === 'Escape') { + this.closeModal(); + } + }); + }, + + /** + * Start auto-refresh timer + */ + startAutoRefresh() { + this.refreshInterval = setInterval(() => { + this.refresh(); + }, this.refreshDelay); + }, + + /** + * Stop auto-refresh timer + */ + stopAutoRefresh() { + if (this.refreshInterval) { + clearInterval(this.refreshInterval); + this.refreshInterval = null; + } + }, + + /** + * Refresh current view + */ + async refresh() { + await this.loadStats(); + if (this.currentView === 'jobs') { + await this.loadJobs(); + } + console.log('🔄 Refreshed'); + }, + + /** + * Load dashboard statistics + */ + async loadStats() { + try { + const resp = await fetch('/api/stats'); + const stats = await resp.json(); + + document.getElementById('total-jobs').textContent = stats.total_jobs; + document.getElementById('active-jobs').textContent = stats.active_jobs; + document.getElementById('recent-failures').textContent = stats.recent_failures; + document.getElementById('last-24h-executions').textContent = stats.last_24h_executions; + } catch (error) { + console.error('Error loading stats:', error); + } + }, + + /** + * Load all jobs + */ + async loadJobs() { + const container = 
document.getElementById('jobs-container'); + container.innerHTML = '
<div class="loading">Loading jobs...</div>
'; + + try { + const resp = await fetch('/api/jobs'); + const jobs = await resp.json(); + + if (jobs.length === 0) { + container.innerHTML = ` +
+                    <div class="empty-state">
+                        <h3>No Jobs Found</h3>
+                        <p>No cron jobs are currently configured.</p>
+                    </div>
+ `; + return; + } + + container.innerHTML = jobs.map(job => this.renderJobCard(job)).join(''); + + // Attach event listeners to job cards + jobs.forEach(job => { + const historyBtn = document.getElementById(`history-${job.name}`); + const triggerBtn = document.getElementById(`trigger-${job.name}`); + + if (historyBtn) { + historyBtn.addEventListener('click', () => this.viewHistory(job.name)); + } + + if (triggerBtn) { + triggerBtn.addEventListener('click', () => this.triggerJob(job.name)); + } + }); + + } catch (error) { + console.error('Error loading jobs:', error); + container.innerHTML = ` +
+                <div class="empty-state">
+                    <h3>Error Loading Jobs</h3>
+                    <p>${error.message}</p>
+                </div>
+ `; + } + }, + + /** + * Render a job card + */ + renderJobCard(job) { + const lastRun = job.last_run ? new Date(job.last_run).toLocaleString() : 'Never'; + const status = job.status || 'scheduled'; + + return ` +
+
+

${this.escapeHtml(job.name)}

+

${this.escapeHtml(job.comment || job.command || 'No description')}

+
+
+ Schedule + ${this.escapeHtml(job.schedule)} +
+
+ Next Run + ${this.escapeHtml(job.next_run || 'Calculating...')} +
+
+ Last Run + ${lastRun} +
+
+ Status + ${status} +
+
+
+
+ + +
+
+ `; + }, + + /** + * View execution history for a job + */ + async viewHistory(jobName) { + this.currentView = 'history'; + document.getElementById('jobs-list').classList.add('hidden'); + document.getElementById('execution-history').classList.remove('hidden'); + document.getElementById('current-job-name').textContent = jobName; + + const container = document.getElementById('history-container'); + container.innerHTML = '
<div class="loading">Loading history...</div>
'; + + try { + const resp = await fetch(`/api/executions/${encodeURIComponent(jobName)}?limit=100`); + const executions = await resp.json(); + + if (executions.length === 0) { + container.innerHTML = ` +
+                    <div class="empty-state">
+                        <h3>No Execution History</h3>
+                        <p>This job hasn't been executed yet.</p>
+                    </div>
+ `; + return; + } + + container.innerHTML = ` +
+
+
Start Time
+
Duration
+
Exit Code
+
Triggered By
+
Actions
+
+ ${executions.map(ex => this.renderExecutionRow(ex)).join('')} +
+ `; + + // Attach event listeners to log buttons + executions.forEach(ex => { + const logBtn = document.getElementById(`logs-${ex.id}`); + if (logBtn) { + logBtn.addEventListener('click', () => this.viewLogs(ex)); + } + }); + + } catch (error) { + console.error('Error loading history:', error); + container.innerHTML = ` +
+                <div class="empty-state">
+                    <h3>Error Loading History</h3>
+                    <p>${error.message}</p>
+                </div>
+ `; + } + }, + + /** + * Render an execution row + */ + renderExecutionRow(execution) { + const startTime = new Date(execution.start_time).toLocaleString(); + const duration = execution.duration_seconds + ? `${execution.duration_seconds.toFixed(2)}s` + : 'In progress'; + const exitCode = execution.exit_code !== null ? execution.exit_code : '-'; + const statusClass = execution.exit_code === 0 ? 'success' : 'failed'; + + return ` +
+
${startTime}
+
${duration}
+
+ + ${exitCode} + +
+
${this.escapeHtml(execution.triggered_by || 'cron')}
+
+ +
+
+ `; + }, + + /** + * View logs for an execution + */ + viewLogs(execution) { + document.getElementById('log-job-name').textContent = execution.job_name; + document.getElementById('log-start-time').textContent = new Date(execution.start_time).toLocaleString(); + document.getElementById('log-duration').textContent = execution.duration_seconds + ? `${execution.duration_seconds.toFixed(2)}s` + : 'In progress'; + document.getElementById('log-exit-code').textContent = execution.exit_code !== null + ? execution.exit_code + : '-'; + + const stdout = execution.stdout_preview || '(empty)'; + const stderr = execution.stderr_preview || '(empty)'; + + document.getElementById('log-stdout').textContent = stdout; + document.getElementById('log-stderr').textContent = stderr; + + // Show stdout tab by default + this.switchTab('stdout'); + + // Show modal + document.getElementById('log-modal').classList.remove('hidden'); + }, + + /** + * Close log modal + */ + closeModal() { + document.getElementById('log-modal').classList.add('hidden'); + }, + + /** + * Switch log tabs + */ + switchTab(tabName) { + // Update tab buttons + document.querySelectorAll('.tab-btn').forEach(btn => { + if (btn.dataset.tab === tabName) { + btn.classList.add('active'); + } else { + btn.classList.remove('active'); + } + }); + + // Update tab panes + document.querySelectorAll('.log-pane').forEach(pane => { + if (pane.id === `log-${tabName}`) { + pane.classList.remove('hidden'); + pane.classList.add('active'); + } else { + pane.classList.add('hidden'); + pane.classList.remove('active'); + } + }); + }, + + /** + * Show jobs list + */ + showJobsList() { + this.currentView = 'jobs'; + document.getElementById('execution-history').classList.add('hidden'); + document.getElementById('jobs-list').classList.remove('hidden'); + this.loadJobs(); + }, + + /** + * Trigger a job manually + */ + async triggerJob(jobName) { + if (!confirm(`Trigger job "${jobName}" now?`)) { + return; + } + + try { + const resp = await 
fetch(`/api/trigger/${encodeURIComponent(jobName)}`, { + method: 'POST', + headers: { + 'X-Requested-With': 'XMLHttpRequest' + } + }); + + const result = await resp.json(); + + if (resp.ok) { + alert(`✅ Job "${jobName}" triggered successfully!`); + // Refresh jobs after a short delay + setTimeout(() => this.loadJobs(), 1000); + } else { + alert(`❌ Error: ${result.error}`); + } + } catch (error) { + alert(`❌ Error triggering job: ${error.message}`); + } + }, + + /** + * Escape HTML to prevent XSS + */ + escapeHtml(text) { + if (text === null || text === undefined) return ''; + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; + } +}; + +// Initialize app when DOM is ready +document.addEventListener('DOMContentLoaded', () => { + App.init(); +}); diff --git a/webapp/sync_jobs.py b/webapp/sync_jobs.py new file mode 100755 index 0000000..cdd4959 --- /dev/null +++ b/webapp/sync_jobs.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +"""Sync jobs table from config.working.json.""" + +import sqlite3 +import json +import sys +from cron_parser import CronParser + + +DB_PATH = '/opt/crontab/data/crontab.db' + + +def sync_jobs_from_config(config_path): + """ + Sync jobs table from config.working.json. 
+ + Args: + config_path: Path to config.working.json + + Returns: + int: 0 on success, 1 on error + """ + try: + with open(config_path, 'r') as f: + jobs = json.load(f) + + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + parser = CronParser() + + # Get existing job names + cursor.execute('SELECT name FROM jobs') + existing_jobs = {row[0] for row in cursor.fetchall()} + + config_jobs = set() + + # Process each job from config + for job in jobs: + name = job.get('name', 'unnamed') + config_jobs.add(name) + + # Calculate next run description + schedule = job.get('schedule', '* * * * *') + next_run = parser.parse_schedule(schedule) + + # Upsert job (insert or update) + cursor.execute(''' + INSERT INTO jobs (name, schedule, command, image, container, comment, next_run) + VALUES (?, ?, ?, ?, ?, ?, ?) + ON CONFLICT(name) DO UPDATE SET + schedule = excluded.schedule, + command = excluded.command, + image = excluded.image, + container = excluded.container, + comment = excluded.comment, + next_run = excluded.next_run, + updated_at = CURRENT_TIMESTAMP + ''', ( + name, + schedule, + job.get('command'), + job.get('image'), + job.get('container'), + job.get('comment'), + next_run + )) + + # Remove jobs that no longer exist in config + removed_jobs = existing_jobs - config_jobs + for job_name in removed_jobs: + cursor.execute('DELETE FROM jobs WHERE name = ?', (job_name,)) + + conn.commit() + conn.close() + + print(f"✅ Synced {len(config_jobs)} jobs to database") + if removed_jobs: + print(f" Removed {len(removed_jobs)} stale jobs: {', '.join(removed_jobs)}") + + return 0 + + except FileNotFoundError: + print(f"❌ Error: Config file not found: {config_path}", file=sys.stderr) + return 1 + except json.JSONDecodeError as e: + print(f"❌ Error parsing config JSON: {e}", file=sys.stderr) + return 1 + except Exception as e: + print(f"❌ Error syncing jobs: {e}", file=sys.stderr) + return 1 + + +if __name__ == '__main__': + if len(sys.argv) < 2: + print("Usage: sync_jobs.py ", 
file=sys.stderr) + sys.exit(1) + + sys.exit(sync_jobs_from_config(sys.argv[1])) diff --git a/webapp/templates/index.html b/webapp/templates/index.html new file mode 100644 index 0000000..1667110 --- /dev/null +++ b/webapp/templates/index.html @@ -0,0 +1,94 @@ + + + + + + Docker Crontab Dashboard + + + +
+
+

🕐 Docker Crontab Dashboard

+
+
+ Total Jobs + 0 +
+
+ Active + 0 +
+
+ Recent Failures + 0 +
+
+ Last 24h Executions + 0 +
+
+ Auto-Refresh + 30s +
+
+
+
+ +
+
+
+

Scheduled Jobs

+ +
+
+
Loading jobs...
+
+
+ + +
+ + + + + + + + +