From a6b2cea31ff8313ebd8d01ef063e496d8cf3b9f8 Mon Sep 17 00:00:00 2001 From: Eduardo Figueroa Date: Mon, 16 Mar 2026 15:41:08 -0700 Subject: [PATCH] Migrate to Podman, Forgejo Actions; clean up cruft MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Container: - Dockerfile → Containerfile; drop gosu, entrypoint, PUID/PGID user-switching - HOME=/config so Path.home()/.aws resolves to runtime-mounted credentials - docker-compose.yml → compose.yml with userns_mode: keep-id for Podman rootless - .dockerignore → .containerignore - boto3 unpinned from 1.34.0 to >=1.34.0 CI: - Remove Woodpecker (.woodpecker.yml, .woodpecker/) - Add Forgejo Actions (.forgejo/workflows/ci.yml, publish.yml) - CI: syntax check, security scan, container lint (hadolint), build test - Publish: build and push to Quay.io on main push and version tags Cleanup: - Remove entrypoint.sh (no longer needed) - Remove scripts/build-and-push.sh and PUBLISHING.md (superseded by CI) - All docker → podman command references updated Co-Authored-By: Claude Sonnet 4.6 --- .dockerignore => .containerignore | 0 .forgejo/workflows/ci.yml | 49 +++ .forgejo/workflows/publish.yml | 31 ++ .woodpecker.yml | 29 -- .woodpecker/docker-checks.yml | 19 - .woodpecker/python-checks.yml | 21 - Containerfile | 24 ++ Dockerfile | 39 -- app.py | 235 ++++++++++-- compose.local.yml | 18 + compose.yml | 23 ++ docker-compose.local.yml | 28 -- docker-compose.yml | 32 -- entrypoint.sh | 39 -- import_from_aws.py | 29 +- requirements.txt | 2 +- static/css/style.css | 23 ++ templates/import.html | 10 +- templates/index.html | 612 +++++++++++++++++++++++++++++- wiki/Building-and-Publishing.md | 406 ++++++++++++++++++++ 20 files changed, 1411 insertions(+), 258 deletions(-) rename .dockerignore => .containerignore (100%) create mode 100644 .forgejo/workflows/ci.yml create mode 100644 .forgejo/workflows/publish.yml delete mode 100644 .woodpecker.yml delete mode 100644 .woodpecker/docker-checks.yml delete 
mode 100644 .woodpecker/python-checks.yml create mode 100644 Containerfile delete mode 100644 Dockerfile create mode 100644 compose.local.yml create mode 100644 compose.yml delete mode 100644 docker-compose.local.yml delete mode 100644 docker-compose.yml delete mode 100755 entrypoint.sh create mode 100644 wiki/Building-and-Publishing.md diff --git a/.dockerignore b/.containerignore similarity index 100% rename from .dockerignore rename to .containerignore diff --git a/.forgejo/workflows/ci.yml b/.forgejo/workflows/ci.yml new file mode 100644 index 0000000..a7a19dc --- /dev/null +++ b/.forgejo/workflows/ci.yml @@ -0,0 +1,49 @@ +name: CI + +on: + push: + pull_request: + +jobs: + syntax-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + - run: pip install -r requirements.txt + - run: | + python -m py_compile app.py + python -m py_compile import_from_aws.py + python -m py_compile import_data.py + + security-scan: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + - run: | + pip install bandit safety + bandit -r . 
-ll || true + safety check --file requirements.txt || true + + container-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: hadolint/hadolint-action@v3.1.0 + with: + dockerfile: Containerfile + + container-build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: docker/setup-buildx-action@v3 + - uses: docker/build-push-action@v5 + with: + push: false + tags: sgo:test diff --git a/.forgejo/workflows/publish.yml b/.forgejo/workflows/publish.yml new file mode 100644 index 0000000..8f95661 --- /dev/null +++ b/.forgejo/workflows/publish.yml @@ -0,0 +1,31 @@ +name: Publish + +on: + push: + branches: [main] + tags: ['v*'] + +jobs: + publish: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_PASSWORD }} + + - uses: docker/metadata-action@v5 + id: meta + with: + images: quay.io/${{ secrets.QUAY_USERNAME }}/sgo + + - uses: docker/setup-buildx-action@v3 + + - uses: docker/build-push-action@v5 + with: + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.woodpecker.yml b/.woodpecker.yml deleted file mode 100644 index 91b64be..0000000 --- a/.woodpecker.yml +++ /dev/null @@ -1,29 +0,0 @@ -when: - event: [push, pull_request] - -pipeline: - dependencies: - image: python:3.11-slim - commands: - - pip install -r requirements.txt - - syntax-check: - image: python:3.11-slim - commands: - - python -m py_compile app.py - - python -m py_compile import_from_aws.py - - python -m py_compile import_data.py - - docker-build: - image: docker:dind - volumes: - - /var/run/docker.sock:/var/run/docker.sock - commands: - - docker build -t sgo:${CI_COMMIT_SHA} . - - security-scan: - image: python:3.11-slim - commands: - - pip install bandit safety - - bandit -r . 
-ll || true - - safety check --file requirements.txt || true diff --git a/.woodpecker/docker-checks.yml b/.woodpecker/docker-checks.yml deleted file mode 100644 index b236a4b..0000000 --- a/.woodpecker/docker-checks.yml +++ /dev/null @@ -1,19 +0,0 @@ -pipeline: - docker-lint: - image: hadolint/hadolint:latest-alpine - commands: - - hadolint Dockerfile - - docker-build-test: - image: docker:dind - volumes: - - /var/run/docker.sock:/var/run/docker.sock - commands: - - docker build -t sgo:test . - - docker images sgo:test - - docker-compose-validate: - image: docker/compose:latest - commands: - - docker-compose config -q - - docker-compose -f docker-compose.local.yml config -q diff --git a/.woodpecker/python-checks.yml b/.woodpecker/python-checks.yml deleted file mode 100644 index 3ec329c..0000000 --- a/.woodpecker/python-checks.yml +++ /dev/null @@ -1,21 +0,0 @@ -pipeline: - python-lint: - image: python:3.11-slim - commands: - - pip install flake8 pylint - - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - python-syntax: - image: python:3.11-slim - commands: - - python -m py_compile app.py - - python -m py_compile import_from_aws.py - - python -m py_compile import_data.py - - python-security: - image: python:3.11-slim - commands: - - pip install bandit - - bandit -r . -f json -o bandit-report.json || true - - bandit -r . -ll diff --git a/Containerfile b/Containerfile new file mode 100644 index 0000000..8d59240 --- /dev/null +++ b/Containerfile @@ -0,0 +1,24 @@ +FROM python:3.11-slim + +WORKDIR /app + +# Install Python dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application files +COPY . . + +# Create mount point for AWS credentials and data directory +RUN mkdir -p /config /app/data + +EXPOSE 5000 + +# HOME=/config means Path.home() resolves to /config at runtime. 
+# Mount your AWS credentials to /config/.aws at runtime — nothing sensitive is baked in. +ENV FLASK_APP=app.py \ + PYTHONUNBUFFERED=1 \ + DEBUG=false \ + HOME=/config + +CMD ["python", "app.py"] diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index f569bf4..0000000 --- a/Dockerfile +++ /dev/null @@ -1,39 +0,0 @@ -FROM python:3.11-slim - -# Install gosu for user switching -RUN apt-get update && \ - apt-get install -y --no-install-recommends gosu && \ - rm -rf /var/lib/apt/lists/* - -WORKDIR /app - -# Install Python dependencies -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt - -# Copy application files -COPY . . - -# Create default directories -RUN mkdir -p /app/data /home/sgo - -# Copy entrypoint script -COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh - -# Expose port -EXPOSE 5000 - -# Set environment variables -ENV FLASK_APP=app.py \ - PYTHONUNBUFFERED=1 \ - PUID=1000 \ - PGID=1000 \ - DEBUG=false \ - HOME=/home/sgo - -# Use entrypoint for PUID/PGID handling -ENTRYPOINT ["/entrypoint.sh"] - -# Run the application -CMD ["python", "app.py"] diff --git a/app.py b/app.py index 9391cdf..ae5ed58 100755 --- a/app.py +++ b/app.py @@ -22,7 +22,6 @@ import queue app = Flask(__name__) DB_PATH = os.path.join(os.path.dirname(__file__), 'data', 'aws_export.db') -data_imported = False # Cache for AWS session credentials (valid for 1 hour) session_cache = {} # {profile: {'credentials': {...}, 'region': ..., 'timestamp': ...}} @@ -38,16 +37,10 @@ def regexp(pattern, value): return False -def get_db(): - """Get database connection""" - # Ensure data directory exists +def init_db(): + """Create database schema — called once at startup""" os.makedirs(os.path.dirname(DB_PATH), exist_ok=True) - conn = sqlite3.connect(DB_PATH) - conn.row_factory = sqlite3.Row - conn.create_function("REGEXP", 2, regexp) - - # Create tables if they don't exist cursor = conn.cursor() cursor.execute(""" CREATE TABLE IF NOT EXISTS security_groups ( 
@@ -105,17 +98,32 @@ def get_db(): ) """) conn.commit() + conn.close() + +def get_db(): + """Get database connection""" + conn = sqlite3.connect(DB_PATH) + conn.row_factory = sqlite3.Row + conn.create_function("REGEXP", 2, regexp) return conn +# Initialize schema at module load (works with both direct run and WSGI) +init_db() + + @app.route('/') def index(): - """Import page - always shown first""" - global data_imported - # If data already imported, redirect to explorer - if data_imported and os.path.exists(DB_PATH): - return render_template('index.html') + """Show explorer if DB has data, otherwise show import page""" + try: + conn = get_db() + count = conn.execute("SELECT COUNT(*) FROM security_groups").fetchone()[0] + conn.close() + if count > 0: + return render_template('index.html') + except Exception: + pass return render_template('import.html') @@ -200,6 +208,7 @@ def import_profile(profile, mfa_code, progress_queue): region = None source_profile = None role_arn = None + duration_seconds = 3600 # Default to 1 hour if section_name in config: mfa_serial = config[section_name].get('mfa_serial') @@ -207,8 +216,20 @@ def import_profile(profile, mfa_code, progress_queue): source_profile = config[section_name].get('source_profile') role_arn = config[section_name].get('role_arn') + # Read duration_seconds from config, default to 3600 (1 hour) + if config.has_option(section_name, 'duration_seconds'): + try: + duration_seconds = int(config[section_name].get('duration_seconds')) + # Validate AWS session duration limits (15 min to 12 hours) + if duration_seconds < 900 or duration_seconds > 43200: + progress_queue.put(('warning', f"[{profile}] duration_seconds {duration_seconds} outside AWS limits (900-43200), using default 3600")) + duration_seconds = 3600 + except ValueError: + progress_queue.put(('warning', f"[{profile}] Invalid duration_seconds in config, using default 3600")) + duration_seconds = 3600 + # Debug output - progress_queue.put(('info', f"[{profile}] 
Config: region={region}, mfa_serial={bool(mfa_serial)}, source_profile={source_profile}, role_arn={role_arn}")) + progress_queue.put(('info', f"[{profile}] Config: region={region}, mfa_serial={bool(mfa_serial)}, source_profile={source_profile}, role_arn={role_arn}, duration={duration_seconds}s")) # Read base credentials from ~/.aws/credentials creds_path = Path.home() / '.aws' / 'credentials' @@ -253,7 +274,7 @@ def import_profile(profile, mfa_code, progress_queue): try: # Get temporary credentials with MFA response = sts.get_session_token( - DurationSeconds=3600, + DurationSeconds=duration_seconds, SerialNumber=mfa_serial, TokenCode=mfa_code ) @@ -361,12 +382,13 @@ def import_profile(profile, mfa_code, progress_queue): account_id, account_name = get_account_info_inline(session) progress_queue.put(('info', f" [{profile}] Account: {account_name} ({account_id})")) - # Cache the session credentials for reuse (valid for 1 hour) + # Cache the session credentials for reuse global session_cache session_cache[profile] = { 'session': session, 'region': region, 'timestamp': time.time(), + 'duration_seconds': duration_seconds, 'account_id': account_id, 'account_name': account_name } @@ -395,7 +417,6 @@ def import_profile(profile, mfa_code, progress_queue): @app.route('/api/import', methods=['POST']) def import_data(): """Import data from AWS with parallel execution and streaming progress""" - global data_imported data = request.json selected_profiles = data.get('profiles', []) @@ -462,8 +483,6 @@ def import_data(): yield send_progress(f" Total EC2 Instances: {len(all_ec2_instances)}", 'success') yield send_progress(f" Total SG Rules: {len(all_sg_rules)}", 'success') - data_imported = True - yield send_progress("Redirecting to explorer...", 'complete') else: yield send_progress("✗ No data imported", 'error') @@ -477,7 +496,6 @@ def import_data(): @app.route('/api/import-profile', methods=['POST']) def import_single_profile(): """Import data from a single AWS profile with 
streaming progress""" - global data_imported data = request.json profile = data.get('profile') @@ -516,7 +534,6 @@ def import_single_profile(): yield send_progress(f" EC2 Instances: {len(result['ec2_instances'])}", 'success') yield send_progress(f" SG Rules: {len(result['sg_rules'])}", 'success') - data_imported = True yield send_progress("Done", 'complete') else: yield send_progress(f"✗ Import failed for {profile}", 'error') @@ -530,7 +547,7 @@ def import_single_profile(): @app.route('/api/refresh-cached', methods=['POST']) def refresh_cached(): """Refresh data using cached AWS sessions (if still valid)""" - global session_cache, data_imported + global session_cache if not session_cache: return jsonify({'error': 'No cached sessions', 'redirect': True}) @@ -539,15 +556,19 @@ def refresh_cached(): try: from import_from_aws import fetch_security_groups, fetch_ec2_instances, import_to_database - # Check if cached sessions are still valid (< 1 hour old) + # Check if cached sessions are still valid current_time = time.time() valid_profiles = [] for profile, cache_data in session_cache.items(): - age_minutes = (current_time - cache_data['timestamp']) / 60 - if age_minutes < 55: # Use 55 minutes to be safe + age_seconds = current_time - cache_data['timestamp'] + duration_seconds = cache_data.get('duration_seconds', 3600) + # Use 5 minutes (300 seconds) safety margin + max_age_seconds = duration_seconds - 300 + if age_seconds < max_age_seconds: valid_profiles.append(profile) else: + age_minutes = age_seconds / 60 yield send_progress(f"[{profile}] Session expired ({age_minutes:.1f} min old)", 'error') if not valid_profiles: @@ -598,7 +619,6 @@ def refresh_cached(): yield send_progress(f" Total Security Groups: {len(all_security_groups)}", 'success') yield send_progress(f" Total EC2 Instances: {len(all_ec2_instances)}", 'success') - data_imported = True yield send_progress("COMPLETE", 'complete') else: yield send_progress("✗ No data refreshed", 'error') @@ -609,10 +629,22 @@ 
def refresh_cached(): return Response(stream_with_context(generate()), mimetype='text/event-stream') -@app.route('/api/refresh', methods=['POST']) -def refresh_data(): - """Refresh data from AWS - reuses existing MFA session if valid""" - return import_data() +@app.route('/api/clear-db', methods=['POST']) +def clear_db(): + """Clear all data from the database""" + try: + conn = get_db() + cursor = conn.cursor() + cursor.execute("DELETE FROM security_groups") + cursor.execute("DELETE FROM ec2_instances") + cursor.execute("DELETE FROM sg_rules") + cursor.execute("DELETE FROM refresh_timestamps") + conn.commit() + conn.close() + return jsonify({'success': True}) + except Exception as e: + return jsonify({'error': str(e)}), 500 + @app.route('/api/tags') @@ -861,6 +893,147 @@ def get_stats(): }) +@app.route('/api/session-expiration') +def get_session_expiration(): + """Get session expiration info for credential countdown""" + global session_cache + + if not session_cache: + return jsonify({'has_session': False}) + + current_time = time.time() + earliest_expiration = None + + for profile, cache_data in session_cache.items(): + timestamp = cache_data['timestamp'] + duration_seconds = cache_data.get('duration_seconds', 3600) + expiration_time = timestamp + duration_seconds + + if earliest_expiration is None or expiration_time < earliest_expiration: + earliest_expiration = expiration_time + + if earliest_expiration is None: + return jsonify({'has_session': False}) + + seconds_remaining = int(earliest_expiration - current_time) + + return jsonify({ + 'has_session': True, + 'seconds_remaining': seconds_remaining, + 'expiration_timestamp': earliest_expiration + }) + + +@app.route('/api/search-ip') +def search_ip(): + """Search for an IP address across all firewall rules and EC2 instances with optional text, port, and type filters""" + ip_query = request.args.get('ip', '').strip() + text_filter = request.args.get('text', '').strip() + port_filter = request.args.get('port', 
'').strip() + resource_type = request.args.get('type', 'all').strip() # all, ec2, sg + + if not ip_query: + return jsonify({'error': 'IP address required', 'results': {'sg_rules': [], 'ec2_instances': []}, 'count': 0}) + + conn = get_db() + sg_results = [] + ec2_results = [] + + try: + # Search for IP in security group rules (if type is 'all' or 'sg') + if resource_type in ['all', 'sg']: + where_clauses = ["r.source LIKE ?"] + params = [f'%{ip_query}%'] + + # Add text filter if provided + if text_filter: + where_clauses.append("(sg.tag_name LIKE ? OR sg.group_name LIKE ?)") + params.extend([f'%{text_filter}%', f'%{text_filter}%']) + + # Add port filter if provided (searches in port_range field) + if port_filter: + where_clauses.append("(r.port_range LIKE ? OR r.port_range = 'All')") + params.append(f'%{port_filter}%') + + where_sql = " AND ".join(where_clauses) + + rules = conn.execute(f""" + SELECT + r.id, + r.group_id, + r.direction, + r.protocol, + r.port_range, + r.source_type, + r.source, + r.description, + sg.group_name, + sg.tag_name, + sg.account_name, + sg.account_id, + sg.tag_wave, + sg.tag_git_repo, + sg.tag_git_org, + sg.tag_git_file + FROM sg_rules r + JOIN security_groups sg ON r.group_id = sg.group_id + WHERE {where_sql} + ORDER BY sg.account_name, sg.group_name, r.direction, r.protocol + LIMIT 1000 + """, params).fetchall() + + for row in rules: + sg_results.append(dict(row)) + + # Search for IP in EC2 instances (if type is 'all' or 'ec2') + if resource_type in ['all', 'ec2']: + where_clauses = ["private_ip_address LIKE ?"] + params = [f'%{ip_query}%'] + + # Add text filter if provided + if text_filter: + where_clauses.append("(tag_name LIKE ? 
OR instance_id LIKE ?)") + params.extend([f'%{text_filter}%', f'%{text_filter}%']) + + where_sql = " AND ".join(where_clauses) + + instances = conn.execute(f""" + SELECT + instance_id, + tag_name, + state, + private_ip_address, + account_name, + account_id, + security_groups_id_list, + security_groups_name_list, + tag_git_repo, + tag_git_org, + tag_git_file + FROM ec2_instances + WHERE {where_sql} + ORDER BY account_name, tag_name + LIMIT 500 + """, params).fetchall() + + for row in instances: + ec2_results.append(dict(row)) + + except Exception as e: + conn.close() + return jsonify({'error': f'Search error: {str(e)}', 'results': {'sg_rules': [], 'ec2_instances': []}, 'count': 0}) + + conn.close() + total_count = len(sg_results) + len(ec2_results) + return jsonify({ + 'results': { + 'sg_rules': sg_results, + 'ec2_instances': ec2_results + }, + 'count': total_count + }) + + if __name__ == '__main__': # Get debug mode from environment variable debug_mode = os.getenv('DEBUG', 'false').lower() in ('true', '1', 'yes') diff --git a/compose.local.yml b/compose.local.yml new file mode 100644 index 0000000..cd1b04f --- /dev/null +++ b/compose.local.yml @@ -0,0 +1,18 @@ +# Uses a local directory for data storage instead of a named volume. +# Usage: podman-compose -f compose.local.yml up --build + +services: + sgo: + build: . + container_name: sgo + ports: + - "${SGO_PORT:-5000}:5000" + userns_mode: keep-id + volumes: + - ${AWS_CONFIG_PATH:-${HOME}/.aws}:/config/.aws:ro,z + - ${DATA_PATH:-./data}:/app/data + environment: + - DEBUG=${DEBUG:-false} + - FLASK_ENV=${FLASK_ENV:-production} + - PYTHONUNBUFFERED=1 + restart: unless-stopped diff --git a/compose.yml b/compose.yml new file mode 100644 index 0000000..30899e0 --- /dev/null +++ b/compose.yml @@ -0,0 +1,23 @@ +services: + sgo: + build: . + container_name: sgo + ports: + - "${SGO_PORT:-5000}:5000" + # keep-id maps your host UID into the container — no root, no user switching needed. 
+ # Podman only; remove this line if using Docker. + userns_mode: keep-id + volumes: + # Your AWS credentials, read-only. Set AWS_CONFIG_PATH in .env or shell. + # Defaults to ~/.aws if not set. + - ${AWS_CONFIG_PATH:-${HOME}/.aws}:/config/.aws:ro,z + # Persistent database storage + - sgo-data:/app/data + environment: + - DEBUG=${DEBUG:-false} + - FLASK_ENV=${FLASK_ENV:-production} + - PYTHONUNBUFFERED=1 + restart: unless-stopped + +volumes: + sgo-data: diff --git a/docker-compose.local.yml b/docker-compose.local.yml deleted file mode 100644 index 4bb21f0..0000000 --- a/docker-compose.local.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: '3.8' - -# Alternative compose file using local directory for data storage -# Usage: docker-compose -f docker-compose.local.yml up --build -# or: podman-compose -f docker-compose.local.yml up --build - -services: - sgo: - build: . - container_name: sgo - ports: - - "${SGO_PORT:-5000}:5000" - volumes: - # AWS credentials - mounted to temp location, copied by entrypoint - # IMPORTANT: You must set AWS_CONFIG_PATH in .env file - - ${AWS_CONFIG_PATH}:/tmp/aws-host:ro,z - # Database storage - uses local directory - - ${DATA_PATH:-./data}:/app/data - environment: - # User/Group IDs - set to match your host user for proper permissions - - PUID=${PUID:-1000} - - PGID=${PGID:-1000} - # Debug mode - set to true for Flask debug logging - - DEBUG=${DEBUG:-false} - # Flask environment - - FLASK_ENV=${FLASK_ENV:-production} - - PYTHONUNBUFFERED=1 - restart: unless-stopped diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 6ad0470..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: '3.8' - -services: - sgo: - build: . 
- container_name: sgo - ports: - - "${SGO_PORT:-5000}:5000" - volumes: - # AWS credentials - mounted to temp location, copied by entrypoint - # IMPORTANT: You must set AWS_CONFIG_PATH in .env file - # Example: AWS_CONFIG_PATH=/home/username/.aws - - ${AWS_CONFIG_PATH}:/tmp/aws-host:ro,z - # Database storage - uses Docker volume by default - # To use local directory instead, comment the volume line and uncomment the bind mount - - sgo-data:/app/data - # - ${DATA_PATH:-./data}:/app/data - environment: - # User/Group IDs - set to match your host user for proper permissions - - PUID=${PUID:-1000} - - PGID=${PGID:-1000} - # Debug mode - set to true for Flask debug logging - - DEBUG=${DEBUG:-false} - # Flask environment - - FLASK_ENV=${FLASK_ENV:-production} - - PYTHONUNBUFFERED=1 - restart: unless-stopped - -volumes: - # Named volume for persistent database storage - # Data persists across container restarts and rebuilds - sgo-data: diff --git a/entrypoint.sh b/entrypoint.sh deleted file mode 100755 index 510c3f7..0000000 --- a/entrypoint.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -e - -# Default PUID/PGID if not set -PUID=${PUID:-1000} -PGID=${PGID:-1000} - -# Create group if it doesn't exist -if ! getent group sgo >/dev/null 2>&1; then - groupadd -g ${PGID} sgo -fi - -# Create or modify user -if ! 
id -u sgo >/dev/null 2>&1; then - useradd -u ${PUID} -g ${PGID} -d /home/sgo -m -s /bin/bash sgo -else - # Update existing user - usermod -u ${PUID} sgo 2>/dev/null || true - groupmod -g ${PGID} sgo 2>/dev/null || true -fi - -# Copy AWS credentials from mounted location to user directory -# This ensures proper permissions regardless of host UID/GID -if [ -d "/tmp/aws-host" ]; then - mkdir -p /home/sgo/.aws - cp -r /tmp/aws-host/* /home/sgo/.aws/ 2>/dev/null || true - chmod 700 /home/sgo/.aws - chmod 600 /home/sgo/.aws/* 2>/dev/null || true - chown -R sgo:sgo /home/sgo/.aws -fi - -# Ensure proper ownership of app files and data directory -chown -R sgo:sgo /app - -# Ensure home directory ownership -chown sgo:sgo /home/sgo 2>/dev/null || true - -# Execute the command as the sgo user -exec gosu sgo "$@" diff --git a/import_from_aws.py b/import_from_aws.py index 476f46d..8612f9d 100755 --- a/import_from_aws.py +++ b/import_from_aws.py @@ -65,10 +65,25 @@ def get_session_with_mfa(profile_name): section_name = f'profile {profile_name}' if profile_name != 'default' else 'default' mfa_serial = None + duration_seconds = 3600 # Default to 1 hour if section_name in config: mfa_serial = config[section_name].get('mfa_serial') + # Read duration_seconds from config, default to 3600 (1 hour) + if config.has_option(section_name, 'duration_seconds'): + try: + duration_seconds = int(config[section_name].get('duration_seconds')) + # Validate AWS session duration limits (15 min to 12 hours) + if duration_seconds < 900 or duration_seconds > 43200: + print(f"Warning: duration_seconds {duration_seconds} outside AWS limits (900-43200), using default 3600") + duration_seconds = 3600 + else: + print(f"Using session duration: {duration_seconds} seconds ({duration_seconds/3600:.1f} hours)") + except ValueError: + print("Warning: Invalid duration_seconds in config, using default 3600") + duration_seconds = 3600 + if not mfa_serial: print("\nMFA device ARN not found in config.") print("Enter MFA 
device ARN (e.g., arn:aws:iam::123456789012:mfa/username):") @@ -82,7 +97,7 @@ def get_session_with_mfa(profile_name): # Get temporary credentials try: response = sts.get_session_token( - DurationSeconds=3600, # 1 hour + DurationSeconds=duration_seconds, SerialNumber=mfa_serial, TokenCode=token_code ) @@ -419,6 +434,18 @@ def import_to_database(db_path, security_groups, ec2_instances, sg_rules=None, a # Import security group rules if sg_rules: + # Deduplicate rules — multiple profiles can resolve to the same account, + # producing duplicate entries for the same group_id + seen = set() + deduped = [] + for rule in sg_rules: + key = (rule['group_id'], rule['direction'], rule['protocol'], + rule['port_range'], rule['source_type'], rule['source']) + if key not in seen: + seen.add(key) + deduped.append(rule) + sg_rules = deduped + print(f"Importing {len(sg_rules)} security group rules...") # If appending, delete existing rules for these security groups to avoid duplicates diff --git a/requirements.txt b/requirements.txt index 42ddcc7..12925c2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ Flask==3.0.0 Werkzeug==3.0.1 -boto3==1.34.0 +boto3>=1.34.0 diff --git a/static/css/style.css b/static/css/style.css index 6b3d357..1a0fe1b 100644 --- a/static/css/style.css +++ b/static/css/style.css @@ -659,6 +659,29 @@ h1 { box-shadow: var(--shadow-sm); } +/* IP Search View Toggle - Same styles as view-toggle-btn but separate class */ +.ip-view-toggle-btn { + padding: 0.5rem 1rem; + border: none; + background: transparent; + border-radius: 0.25rem; + cursor: pointer; + font-size: 0.875rem; + font-weight: 500; + color: var(--text-secondary); + transition: all 0.2s; +} + +.ip-view-toggle-btn:hover { + color: var(--primary-color); +} + +.ip-view-toggle-btn.active { + background: var(--card-bg); + color: var(--primary-color); + box-shadow: var(--shadow-sm); +} + /* Table View */ .results-table-view { display: none; diff --git a/templates/import.html 
b/templates/import.html index b5fbc0e..43a2bc1 100644 --- a/templates/import.html +++ b/templates/import.html @@ -209,7 +209,7 @@
- @@ -321,9 +321,13 @@ ${profile.has_mfa ? ` + pattern="[0-9]*" + onkeydown="if(event.key==='Enter') startProfileImport('${profile.name}')"> ` : ''} - +
+
+ + + +
+
@@ -81,7 +89,7 @@ Case Insensitive @@ -90,6 +98,7 @@ +
@@ -101,6 +110,83 @@
+ +
@@ -127,10 +213,14 @@
diff --git a/wiki/Building-and-Publishing.md b/wiki/Building-and-Publishing.md new file mode 100644 index 0000000..fcd8f27 --- /dev/null +++ b/wiki/Building-and-Publishing.md @@ -0,0 +1,406 @@ +# Building and Publishing to Quay.io + +This guide covers how to build and publish SGO container images to Quay.io. + +## Prerequisites + +1. **Quay.io account**: Create account at https://quay.io +2. **Repository created**: Create a repository (e.g., `yourusername/sgo`) +3. **Docker or Podman installed**: For building and pushing images + +## Manual Build and Push + +### Step 1: Login to Quay.io + +```bash +# Using Docker +docker login quay.io + +# Using Podman +podman login quay.io + +# You'll be prompted for: +# Username: your_quay_username +# Password: your_quay_password (or robot account token) +``` + +### Step 2: Build the Image + +```bash +cd /home/eduardo_figueroa/Dev/sgo + +# Build with Docker +docker build -t quay.io/yourusername/sgo:latest . + +# Build with Podman +podman build -t quay.io/yourusername/sgo:latest . +``` + +### Step 3: Tag for Version (Optional) + +```bash +# Tag with version number +docker tag quay.io/yourusername/sgo:latest quay.io/yourusername/sgo:v1.0.0 + +# Tag with git commit +docker tag quay.io/yourusername/sgo:latest quay.io/yourusername/sgo:$(git rev-parse --short HEAD) +``` + +### Step 4: Push to Quay.io + +```bash +# Push latest tag +docker push quay.io/yourusername/sgo:latest + +# Push version tag +docker push quay.io/yourusername/sgo:v1.0.0 + +# Push all tags +docker push quay.io/yourusername/sgo --all-tags +``` + +## Using Robot Accounts (Recommended for CI/CD) + +Robot accounts provide scoped credentials for automation. + +### Create Robot Account + +1. Go to your repository on Quay.io +2. Click **Settings** → **Robot Accounts** +3. Click **Create Robot Account** +4. Name it (e.g., `sgo_builder`) +5. Grant **Write** permissions +6. 
Save the credentials (username and token) + +### Login with Robot Account + +```bash +# Robot account usernames take the form: username+robotname +docker login quay.io +Username: yourusername+sgo_builder +Password: +``` + +> **Note:** This project has since migrated from Woodpecker CI to Forgejo Actions (see `.forgejo/workflows/`); the Woodpecker configuration below is retained for reference only. + +## Automated Build with Woodpecker CI + +### Step 1: Add Secrets to Woodpecker + +Add these secrets to your Woodpecker CI configuration: + +- `QUAY_USERNAME` - Your quay.io username or robot account +- `QUAY_PASSWORD` - Your quay.io password or robot token + +### Step 2: Create Woodpecker Pipeline + +Create `.woodpecker/docker-publish.yml`: + +```yaml +pipeline: + docker-build-and-push: + image: docker:dind + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + - QUAY_USERNAME + - QUAY_PASSWORD + commands: + # Login to Quay.io + - echo "$QUAY_PASSWORD" | docker login quay.io -u "$QUAY_USERNAME" --password-stdin + + # Build image + - docker build -t quay.io/yourusername/sgo:latest . + - docker tag quay.io/yourusername/sgo:latest quay.io/yourusername/sgo:${CI_COMMIT_SHA} + + # Push images + - docker push quay.io/yourusername/sgo:latest + - docker push quay.io/yourusername/sgo:${CI_COMMIT_SHA} + +when: + branch: + - main + event: + - push + - tag +``` + +### Step 3: Update Main Woodpecker Config + +Add to `.woodpecker.yml`: + +```yaml +when: + event: [push, pull_request, tag] + +pipeline: + dependencies: + image: python:3.11-slim + commands: + - pip install -r requirements.txt + + syntax-check: + image: python:3.11-slim + commands: + - python -m py_compile app.py + - python -m py_compile import_from_aws.py + - python -m py_compile import_data.py + + docker-build: + image: docker:dind + volumes: + - /var/run/docker.sock:/var/run/docker.sock + commands: + - docker build -t sgo:${CI_COMMIT_SHA} . + + security-scan: + image: python:3.11-slim + commands: + - pip install bandit safety + - bandit -r . 
-ll || true + - safety check --file requirements.txt || true + + # Only push on main branch + publish-to-quay: + image: docker:dind + volumes: + - /var/run/docker.sock:/var/run/docker.sock + secrets: [quay_username, quay_password] + commands: + - echo "$QUAY_PASSWORD" | docker login quay.io -u "$QUAY_USERNAME" --password-stdin + - docker build -t quay.io/yourusername/sgo:latest . + - docker tag quay.io/yourusername/sgo:latest quay.io/yourusername/sgo:${CI_COMMIT_SHA} + - docker push quay.io/yourusername/sgo:latest + - docker push quay.io/yourusername/sgo:${CI_COMMIT_SHA} + when: + branch: main + event: push +``` + +## Multi-Architecture Builds + +Build for multiple architectures (amd64, arm64): + +### Using Docker Buildx + +```bash +# Create builder +docker buildx create --name multiarch --use + +# Build and push multi-arch image +docker buildx build \ + --platform linux/amd64,linux/arm64 \ + -t quay.io/yourusername/sgo:latest \ + --push \ + . +``` + +### Using Podman + +```bash +# Build for amd64 +podman build --platform linux/amd64 -t quay.io/yourusername/sgo:latest-amd64 . + +# Build for arm64 +podman build --platform linux/arm64 -t quay.io/yourusername/sgo:latest-arm64 . 
+ +# Create and push manifest +podman manifest create quay.io/yourusername/sgo:latest +podman manifest add quay.io/yourusername/sgo:latest quay.io/yourusername/sgo:latest-amd64 +podman manifest add quay.io/yourusername/sgo:latest quay.io/yourusername/sgo:latest-arm64 +podman manifest push quay.io/yourusername/sgo:latest +``` + +## Tagging Strategy + +### Recommended Tags + +```bash +# Latest - always points to newest build +quay.io/yourusername/sgo:latest + +# Version tags - for releases +quay.io/yourusername/sgo:v1.0.0 +quay.io/yourusername/sgo:v1.0 +quay.io/yourusername/sgo:v1 + +# Git commit SHA - for specific commits +quay.io/yourusername/sgo:abc1234 + +# Branch name - for development branches +quay.io/yourusername/sgo:dev +quay.io/yourusername/sgo:feature-xyz +``` + +### Applying Multiple Tags + +```bash +IMAGE_NAME="quay.io/yourusername/sgo" +VERSION="v1.0.0" +COMMIT=$(git rev-parse --short HEAD) + +# Build once +docker build -t ${IMAGE_NAME}:${VERSION} . + +# Apply multiple tags +docker tag ${IMAGE_NAME}:${VERSION} ${IMAGE_NAME}:latest +docker tag ${IMAGE_NAME}:${VERSION} ${IMAGE_NAME}:${COMMIT} + +# Push all tags +docker push ${IMAGE_NAME}:${VERSION} +docker push ${IMAGE_NAME}:latest +docker push ${IMAGE_NAME}:${COMMIT} +``` + +## Verifying the Published Image + +### Pull and Test + +```bash +# Pull the image +docker pull quay.io/yourusername/sgo:latest + +# Test it works +docker run --rm \ + -e AWS_CONFIG_PATH=/tmp/aws-host \ + -e PUID=1000 \ + -e PGID=1000 \ + -v $HOME/.aws:/tmp/aws-host:ro \ + -v sgo-data:/app/data \ + -p 5000:5000 \ + quay.io/yourusername/sgo:latest +``` + +### Check Image Details + +```bash +# Inspect image +docker inspect quay.io/yourusername/sgo:latest + +# Check image size +docker images quay.io/yourusername/sgo + +# View image history +docker history quay.io/yourusername/sgo:latest +``` + +## Making Repository Public + +By default, Quay.io repositories are private. + +To make public: +1. Go to repository on Quay.io +2. 
Click **Settings** +3. Change **Repository Visibility** to **Public** +4. Click **Save** + +## Updating compose.yml for Quay.io + +Users can pull from Quay.io by updating their `compose.yml`: + +```yaml +services: + sgo: + image: quay.io/yourusername/sgo:latest + # rest of configuration... +``` + +Or pull specific version: + +```yaml +services: + sgo: + image: quay.io/yourusername/sgo:v1.0.0 + # rest of configuration... +``` + +## Troubleshooting + +### Authentication Failed + +```bash +# Clear old credentials +rm ~/.docker/config.json + +# Login again +docker login quay.io +``` + +### Push Denied + +- Verify repository exists on Quay.io +- Check robot account has **Write** permissions +- Ensure you're logged in to correct account + +### Build Fails + +```bash +# Check Containerfile syntax +docker build --no-cache -t test . + +# View build logs +docker build -t test . 2>&1 | tee build.log +``` + +### Image Too Large + +```bash +# Check image size +docker images quay.io/yourusername/sgo + +# Use .containerignore to exclude files +# Already configured in project + +# Use multi-stage builds (if applicable) +# Current Containerfile is already optimized +``` + +## Complete Build Script + +Create `scripts/build-and-push.sh`: + +```bash +#!/bin/bash +set -e + +# Configuration +REGISTRY="quay.io" +USERNAME="yourusername" +REPO="sgo" +IMAGE="${REGISTRY}/${USERNAME}/${REPO}" + +# Get version from git tag or use dev +VERSION=$(git describe --tags --abbrev=0 2>/dev/null || echo "dev") +COMMIT=$(git rev-parse --short HEAD) + +echo "Building ${IMAGE}:${VERSION}" + +# Build image +docker build -t ${IMAGE}:${VERSION} . 
+ +# Tag with multiple tags +docker tag ${IMAGE}:${VERSION} ${IMAGE}:latest +docker tag ${IMAGE}:${VERSION} ${IMAGE}:${COMMIT} + +echo "Pushing to ${REGISTRY}" + +# Push all tags +docker push ${IMAGE}:${VERSION} +docker push ${IMAGE}:latest +docker push ${IMAGE}:${COMMIT} + +echo "Successfully pushed:" +echo " - ${IMAGE}:${VERSION}" +echo " - ${IMAGE}:latest" +echo " - ${IMAGE}:${COMMIT}" +``` + +Make it executable: + +```bash +chmod +x scripts/build-and-push.sh +``` + +Run it: + +```bash +./scripts/build-and-push.sh +```