diff --git a/.github/workflows/pullpreview-helm.yml b/.github/workflows/pullpreview-helm.yml
new file mode 100644
index 0000000..4f59295
--- /dev/null
+++ b/.github/workflows/pullpreview-helm.yml
@@ -0,0 +1,149 @@
+name: pullpreview_helm
+
+on:
+ schedule:
+ - cron: "15 4 * * *"
+ pull_request:
+ types: [labeled, unlabeled, synchronize, closed, reopened, opened]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ deploy_smoke_1:
+ runs-on: ubuntu-slim
+ if: >-
+ github.event_name == 'schedule' ||
+ github.event.label.name == 'pullpreview-helm' ||
+ ((github.event.action == 'opened' ||
+ github.event.action == 'reopened' ||
+ github.event.action == 'synchronize' ||
+ github.event.action == 'closed') &&
+ contains(github.event.pull_request.labels.*.name, 'pullpreview-helm'))
+ outputs:
+ live: ${{ steps.pullpreview.outputs.live }}
+ timeout-minutes: 40
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Deploy Helm smoke app (v1)
+ id: pullpreview
+ uses: "./"
+ with:
+ label: pullpreview-helm
+ admins: "@collaborators/push"
+ app_path: .
+ provider: hetzner
+ region: ash
+ image: ubuntu-24.04
+ dns: rev2.click
+ instance_type: cpx21
+ max_domain_length: 40
+ deployment_target: helm
+ chart: wordpress
+ chart_repository: https://charts.bitnami.com/bitnami
+ chart_set: service.type=ClusterIP
+ proxy_tls: '{{ release_name }}-wordpress:80'
+ ttl: 1h
+ env:
+ HCLOUD_TOKEN: "${{ secrets.HCLOUD_TOKEN }}"
+ HETZNER_CA_KEY: "${{ secrets.HETZNER_CA_KEY }}"
+
+ - name: Assert deploy v1
+ if: steps.pullpreview.outputs.live == 'true'
+ shell: bash
+ env:
+ PREVIEW_URL: ${{ steps.pullpreview.outputs.url }}
+ run: |
+ set -euo pipefail
+
+ if [[ "${PREVIEW_URL}" != https://* ]]; then
+ echo "::error::Expected https preview URL when proxy_tls is enabled, got ${PREVIEW_URL}"
+ exit 1
+ fi
+
+ status_code=""
+ body=""
+ for attempt in $(seq 1 60); do
+ status_code="$(curl -fsSIL -o /dev/null -w '%{http_code}' --max-time 20 "${PREVIEW_URL}" || true)"
+ body="$(curl -fsSL --max-time 20 "${PREVIEW_URL}" || true)"
+ if [[ "${status_code}" == "200" ]] && \
+               grep -Eqi "wp-content|wp-includes|User's Blog" <<<"${body}"; then
+ echo "Helm smoke v1 checks passed for ${PREVIEW_URL}"
+ exit 0
+ fi
+
+ echo "Attempt ${attempt}/60: waiting for Helm v1 response from ${PREVIEW_URL} (http_status=${status_code:-n/a})"
+ sleep 5
+ done
+
+ echo "::error::Unexpected Helm response from ${PREVIEW_URL}"
+ printf '%s\n' "${body}"
+ exit 1
+
+ deploy_smoke_2:
+ runs-on: ubuntu-slim
+ needs: deploy_smoke_1
+ if: needs.deploy_smoke_1.result == 'success' && needs.deploy_smoke_1.outputs.live == 'true'
+ timeout-minutes: 40
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Redeploy Helm smoke app (v2)
+ id: pullpreview
+ uses: "./"
+ with:
+ label: pullpreview-helm
+ admins: "@collaborators/push"
+ app_path: .
+ provider: hetzner
+ region: ash
+ image: ubuntu-24.04
+ dns: rev2.click
+ instance_type: cpx21
+ max_domain_length: 40
+ deployment_target: helm
+ chart: wordpress
+ chart_repository: https://charts.bitnami.com/bitnami
+ chart_set: service.type=ClusterIP
+ proxy_tls: '{{ release_name }}-wordpress:80'
+ ttl: 1h
+ env:
+ HCLOUD_TOKEN: "${{ secrets.HCLOUD_TOKEN }}"
+ HETZNER_CA_KEY: "${{ secrets.HETZNER_CA_KEY }}"
+
+ - name: Assert deploy v2
+ if: steps.pullpreview.outputs.live == 'true'
+ shell: bash
+ env:
+ PREVIEW_URL: ${{ steps.pullpreview.outputs.url }}
+ run: |
+ set -euo pipefail
+
+ if [[ "${PREVIEW_URL}" != https://* ]]; then
+ echo "::error::Expected https preview URL when proxy_tls is enabled, got ${PREVIEW_URL}"
+ exit 1
+ fi
+
+ status_code=""
+ body=""
+ for attempt in $(seq 1 60); do
+ status_code="$(curl -fsSIL -o /dev/null -w '%{http_code}' --max-time 20 "${PREVIEW_URL}" || true)"
+ body="$(curl -fsSL --max-time 20 "${PREVIEW_URL}" || true)"
+ if [[ "${status_code}" == "200" ]] && \
+               grep -Eqi "wp-content|wp-includes|User's Blog" <<<"${body}"; then
+ echo "Helm smoke v2 checks passed for ${PREVIEW_URL}"
+ exit 0
+ fi
+
+ echo "Attempt ${attempt}/60: waiting for Helm v2 response from ${PREVIEW_URL} (http_status=${status_code:-n/a})"
+ sleep 5
+ done
+
+ echo "::error::Unexpected Helm response from ${PREVIEW_URL}"
+ printf '%s\n' "${body}"
+ exit 1
diff --git a/.github/workflows/pullpreview-lightsail-helm.yml b/.github/workflows/pullpreview-lightsail-helm.yml
new file mode 100644
index 0000000..4aa0811
--- /dev/null
+++ b/.github/workflows/pullpreview-lightsail-helm.yml
@@ -0,0 +1,147 @@
+name: pullpreview_lightsail_helm
+
+on:
+ schedule:
+ - cron: "30 6 * * *"
+ pull_request:
+ types: [labeled, unlabeled, synchronize, closed, reopened, opened]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ deploy_smoke_1:
+ runs-on: ubuntu-slim
+ if: >-
+ github.event_name == 'schedule' ||
+ github.event.label.name == 'pullpreview-lightsail-helm' ||
+ ((github.event.action == 'opened' ||
+ github.event.action == 'reopened' ||
+ github.event.action == 'synchronize' ||
+ github.event.action == 'closed') &&
+ contains(github.event.pull_request.labels.*.name, 'pullpreview-lightsail-helm'))
+ outputs:
+ live: ${{ steps.pullpreview.outputs.live }}
+ timeout-minutes: 50
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Deploy Lightsail Helm smoke app (v1)
+ id: pullpreview
+ uses: "./"
+ with:
+ label: pullpreview-lightsail-helm
+ admins: "@collaborators/push"
+ app_path: .
+ provider: lightsail
+ region: us-east-1
+ dns: rev3.click
+ instance_type: medium
+ max_domain_length: 40
+ deployment_target: helm
+ chart: wordpress
+ chart_repository: https://charts.bitnami.com/bitnami
+ chart_set: service.type=ClusterIP
+ proxy_tls: '{{ release_name }}-wordpress:80'
+ ttl: 1h
+ env:
+ AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}"
+ AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}"
+
+ - name: Assert deploy v1
+ if: steps.pullpreview.outputs.live == 'true'
+ shell: bash
+ env:
+ PREVIEW_URL: ${{ steps.pullpreview.outputs.url }}
+ run: |
+ set -euo pipefail
+
+ if [[ "${PREVIEW_URL}" != https://* ]]; then
+ echo "::error::Expected https preview URL when proxy_tls is enabled, got ${PREVIEW_URL}"
+ exit 1
+ fi
+
+ status_code=""
+ body=""
+ for attempt in $(seq 1 60); do
+ status_code="$(curl -fsSIL -o /dev/null -w '%{http_code}' --max-time 20 "${PREVIEW_URL}" || true)"
+ body="$(curl -fsSL --max-time 20 "${PREVIEW_URL}" || true)"
+ if [[ "${status_code}" == "200" ]] && \
+               grep -Eqi "wp-content|wp-includes|User's Blog" <<<"${body}"; then
+ echo "Lightsail Helm smoke v1 checks passed for ${PREVIEW_URL}"
+ exit 0
+ fi
+
+ echo "Attempt ${attempt}/60: waiting for Lightsail Helm v1 response from ${PREVIEW_URL} (http_status=${status_code:-n/a})"
+ sleep 5
+ done
+
+ echo "::error::Unexpected Lightsail Helm response from ${PREVIEW_URL}"
+ printf '%s\n' "${body}"
+ exit 1
+
+ deploy_smoke_2:
+ runs-on: ubuntu-slim
+ needs: deploy_smoke_1
+ if: needs.deploy_smoke_1.result == 'success' && needs.deploy_smoke_1.outputs.live == 'true'
+ timeout-minutes: 50
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Redeploy Lightsail Helm smoke app (v2)
+ id: pullpreview
+ uses: "./"
+ with:
+ label: pullpreview-lightsail-helm
+ admins: "@collaborators/push"
+ app_path: .
+ provider: lightsail
+ region: us-east-1
+ dns: rev3.click
+ instance_type: medium
+ max_domain_length: 40
+ deployment_target: helm
+ chart: wordpress
+ chart_repository: https://charts.bitnami.com/bitnami
+ chart_set: service.type=ClusterIP
+ proxy_tls: '{{ release_name }}-wordpress:80'
+ ttl: 1h
+ env:
+ AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}"
+ AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}"
+
+ - name: Assert deploy v2
+ if: steps.pullpreview.outputs.live == 'true'
+ shell: bash
+ env:
+ PREVIEW_URL: ${{ steps.pullpreview.outputs.url }}
+ run: |
+ set -euo pipefail
+
+ if [[ "${PREVIEW_URL}" != https://* ]]; then
+ echo "::error::Expected https preview URL when proxy_tls is enabled, got ${PREVIEW_URL}"
+ exit 1
+ fi
+
+ status_code=""
+ body=""
+ for attempt in $(seq 1 60); do
+ status_code="$(curl -fsSIL -o /dev/null -w '%{http_code}' --max-time 20 "${PREVIEW_URL}" || true)"
+ body="$(curl -fsSL --max-time 20 "${PREVIEW_URL}" || true)"
+ if [[ "${status_code}" == "200" ]] && \
+               grep -Eqi "wp-content|wp-includes|User's Blog" <<<"${body}"; then
+ echo "Lightsail Helm smoke v2 checks passed for ${PREVIEW_URL}"
+ exit 0
+ fi
+
+ echo "Attempt ${attempt}/60: waiting for Lightsail Helm v2 response from ${PREVIEW_URL} (http_status=${status_code:-n/a})"
+ sleep 5
+ done
+
+ echo "::error::Unexpected Lightsail Helm response from ${PREVIEW_URL}"
+ printf '%s\n' "${body}"
+ exit 1
diff --git a/.github/workflows/pullpreview-multi-env.yml b/.github/workflows/pullpreview-multi-env.yml
index a3076a1..3eb3241 100644
--- a/.github/workflows/pullpreview-multi-env.yml
+++ b/.github/workflows/pullpreview-multi-env.yml
@@ -8,7 +8,14 @@ on:
jobs:
deploy_env1:
runs-on: ubuntu-slim
- if: github.event_name == 'schedule' || github.event.label.name == 'pullpreview-multi-env' || contains(github.event.pull_request.labels.*.name, 'pullpreview-multi-env')
+ if: >-
+ github.event_name == 'schedule' ||
+ github.event.label.name == 'pullpreview-multi-env' ||
+ ((github.event.action == 'opened' ||
+ github.event.action == 'reopened' ||
+ github.event.action == 'synchronize' ||
+ github.event.action == 'closed') &&
+ contains(github.event.pull_request.labels.*.name, 'pullpreview-multi-env'))
timeout-minutes: 30
steps:
- uses: actions/checkout@v5
@@ -26,7 +33,14 @@ jobs:
deploy_env2:
runs-on: ubuntu-slim
- if: github.event_name == 'schedule' || github.event.label.name == 'pullpreview-multi-env' || contains(github.event.pull_request.labels.*.name, 'pullpreview-multi-env')
+ if: >-
+ github.event_name == 'schedule' ||
+ github.event.label.name == 'pullpreview-multi-env' ||
+ ((github.event.action == 'opened' ||
+ github.event.action == 'reopened' ||
+ github.event.action == 'synchronize' ||
+ github.event.action == 'closed') &&
+ contains(github.event.pull_request.labels.*.name, 'pullpreview-multi-env'))
timeout-minutes: 30
steps:
- uses: actions/checkout@v5
diff --git a/.github/workflows/pullpreview-openproject.yml b/.github/workflows/pullpreview-openproject.yml
new file mode 100644
index 0000000..bf366c7
--- /dev/null
+++ b/.github/workflows/pullpreview-openproject.yml
@@ -0,0 +1,143 @@
+name: pullpreview_openproject
+
+on:
+ schedule:
+ - cron: "45 5 * * *"
+ pull_request:
+ types: [labeled, unlabeled, synchronize, closed, reopened, opened]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ deploy_smoke_1:
+ runs-on: ubuntu-slim
+ if: >-
+ github.event_name == 'schedule' ||
+ github.event.label.name == 'pullpreview-openproject' ||
+ ((github.event.action == 'opened' ||
+ github.event.action == 'reopened' ||
+ github.event.action == 'synchronize' ||
+ github.event.action == 'closed') &&
+ contains(github.event.pull_request.labels.*.name, 'pullpreview-openproject'))
+ outputs:
+ live: ${{ steps.pullpreview.outputs.live }}
+ timeout-minutes: 55
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Deploy OpenProject Helm smoke app (v1)
+ id: pullpreview
+ uses: "./"
+ with:
+ label: pullpreview-openproject
+ admins: "@collaborators/push"
+ app_path: .
+ provider: hetzner
+ region: ash
+ image: ubuntu-24.04
+ dns: rev2.click
+ instance_type: cpx21
+ max_domain_length: 40
+ deployment_target: helm
+ chart: openproject
+ chart_repository: https://charts.openproject.org
+ chart_set: 'develop=true,openproject.https=true,service.type=ClusterIP,openproject.host={{ pullpreview_public_dns }},resources.limits.cpu=2,persistence.accessModes[0]=ReadWriteOnce'
+ proxy_tls: '{{ release_name }}-openproject:8080'
+ ttl: 1h
+ env:
+ HCLOUD_TOKEN: "${{ secrets.HCLOUD_TOKEN }}"
+ HETZNER_CA_KEY: "${{ secrets.HETZNER_CA_KEY }}"
+
+ - name: Assert deploy v1
+ if: steps.pullpreview.outputs.live == 'true'
+ shell: bash
+ env:
+ PREVIEW_URL: ${{ steps.pullpreview.outputs.url }}
+ run: |
+ set -euo pipefail
+
+ if [[ "${PREVIEW_URL}" != https://* ]]; then
+ echo "::error::Expected https preview URL when proxy_tls is enabled, got ${PREVIEW_URL}"
+ exit 1
+ fi
+
+ body=""
+ for attempt in $(seq 1 90); do
+ body="$(curl -fsSL --max-time 30 "${PREVIEW_URL}" || true)"
+ if grep -Eqi 'OpenProject|Sign in' <<<"${body}"; then
+ echo "OpenProject smoke v1 checks passed for ${PREVIEW_URL}"
+ exit 0
+ fi
+
+ echo "Attempt ${attempt}/90: waiting for OpenProject v1 response from ${PREVIEW_URL}"
+ sleep 10
+ done
+
+ echo "::error::Unexpected OpenProject response from ${PREVIEW_URL}"
+ printf '%s\n' "${body}"
+ exit 1
+
+ deploy_smoke_2:
+ runs-on: ubuntu-slim
+ needs: deploy_smoke_1
+ if: needs.deploy_smoke_1.result == 'success' && needs.deploy_smoke_1.outputs.live == 'true'
+ timeout-minutes: 55
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Redeploy OpenProject Helm smoke app (v2)
+ id: pullpreview
+ uses: "./"
+ with:
+ label: pullpreview-openproject
+ admins: "@collaborators/push"
+ app_path: .
+ provider: hetzner
+ region: ash
+ image: ubuntu-24.04
+ dns: rev2.click
+ instance_type: cpx21
+ max_domain_length: 40
+ deployment_target: helm
+ chart: openproject
+ chart_repository: https://charts.openproject.org
+ chart_set: 'develop=true,openproject.https=true,service.type=ClusterIP,openproject.host={{ pullpreview_public_dns }},resources.limits.cpu=2,persistence.accessModes[0]=ReadWriteOnce'
+ proxy_tls: '{{ release_name }}-openproject:8080'
+ ttl: 1h
+ env:
+ HCLOUD_TOKEN: "${{ secrets.HCLOUD_TOKEN }}"
+ HETZNER_CA_KEY: "${{ secrets.HETZNER_CA_KEY }}"
+
+ - name: Assert deploy v2
+ if: steps.pullpreview.outputs.live == 'true'
+ shell: bash
+ env:
+ PREVIEW_URL: ${{ steps.pullpreview.outputs.url }}
+ run: |
+ set -euo pipefail
+
+ if [[ "${PREVIEW_URL}" != https://* ]]; then
+ echo "::error::Expected https preview URL when proxy_tls is enabled, got ${PREVIEW_URL}"
+ exit 1
+ fi
+
+ body=""
+ for attempt in $(seq 1 90); do
+ body="$(curl -fsSL --max-time 30 "${PREVIEW_URL}" || true)"
+ if grep -Eqi 'OpenProject|Sign in' <<<"${body}"; then
+ echo "OpenProject smoke v2 checks passed for ${PREVIEW_URL}"
+ exit 0
+ fi
+
+ echo "Attempt ${attempt}/90: waiting for OpenProject v2 response from ${PREVIEW_URL}"
+ sleep 10
+ done
+
+ echo "::error::Unexpected OpenProject response from ${PREVIEW_URL}"
+ printf '%s\n' "${body}"
+ exit 1
diff --git a/.github/workflows/pullpreview.yml b/.github/workflows/pullpreview.yml
index 49b59ea..846d6a3 100644
--- a/.github/workflows/pullpreview.yml
+++ b/.github/workflows/pullpreview.yml
@@ -5,7 +5,8 @@ on:
pull_request:
types: [labeled, unlabeled, synchronize, closed, reopened, opened]
-concurrency: ${{ github.ref }}
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
permissions:
contents: read # to fetch code (actions/checkout)
@@ -14,7 +15,14 @@ permissions:
jobs:
deploy_smoke_1:
runs-on: ubuntu-slim
- if: github.event_name == 'schedule' || github.event.label.name == 'pullpreview' || contains(github.event.pull_request.labels.*.name, 'pullpreview')
+ if: >-
+ github.event_name == 'schedule' ||
+ github.event.label.name == 'pullpreview' ||
+ ((github.event.action == 'opened' ||
+ github.event.action == 'reopened' ||
+ github.event.action == 'synchronize' ||
+ github.event.action == 'closed') &&
+ contains(github.event.pull_request.labels.*.name, 'pullpreview'))
outputs:
live: ${{ steps.pullpreview.outputs.live }}
timeout-minutes: 35
@@ -55,9 +63,9 @@ jobs:
response=""
for attempt in $(seq 1 60); do
response="$(curl -fsSL --max-time 15 "${PREVIEW_URL}" || true)"
- if printf '%s' "${response}" | grep -q 'Hello World Deploy 1' && \
- printf '%s' "${response}" | grep -q 'seed_count=1' && \
- printf '%s' "${response}" | grep -q 'seed_label=persisted'; then
+ if grep -q 'Hello World Deploy 1' <<<"${response}" && \
+ grep -q 'seed_count=1' <<<"${response}" && \
+ grep -q 'seed_label=persisted' <<<"${response}"; then
echo "Smoke v1 checks passed for ${PREVIEW_URL}"
exit 0
fi
@@ -121,9 +129,9 @@ jobs:
response=""
for attempt in $(seq 1 60); do
response="$(curl -fsSL --max-time 15 "${PREVIEW_URL}" || true)"
- if printf '%s' "${response}" | grep -q 'Hello World Deploy 2' && \
- printf '%s' "${response}" | grep -q 'seed_count=1' && \
- printf '%s' "${response}" | grep -q 'seed_label=persisted'; then
+ if grep -q 'Hello World Deploy 2' <<<"${response}" && \
+ grep -q 'seed_count=1' <<<"${response}" && \
+ grep -q 'seed_label=persisted' <<<"${response}"; then
echo "Smoke v2 checks passed for ${PREVIEW_URL}"
exit 0
fi
@@ -138,7 +146,14 @@ jobs:
deploy_smoke_hetzner:
runs-on: ubuntu-slim
- if: github.event_name == 'schedule' || github.event.label.name == 'pullpreview' || contains(github.event.pull_request.labels.*.name, 'pullpreview')
+ if: >-
+ github.event_name == 'schedule' ||
+ github.event.label.name == 'pullpreview' ||
+ ((github.event.action == 'opened' ||
+ github.event.action == 'reopened' ||
+ github.event.action == 'synchronize' ||
+ github.event.action == 'closed') &&
+ contains(github.event.pull_request.labels.*.name, 'pullpreview'))
timeout-minutes: 35
steps:
- uses: actions/checkout@v6
diff --git a/README.md b/README.md
index a478473..bb8a27b 100644
--- a/README.md
+++ b/README.md
@@ -13,8 +13,8 @@ is made to Pull Requests labelled with the `pullpreview` label.
When triggered, it will:
1. Check out the repository code
-2. Provision a preview instance (Lightsail by default, or Hetzner with `provider: hetzner`), with docker and docker-compose set up
-3. Continuously deploy the specified pull requests using your docker-compose file(s)
+2. Provision a preview instance (Lightsail by default, or Hetzner with `provider: hetzner`), with the runtime needed for the selected deployment target
+3. Continuously deploy the specified pull requests using your Docker Compose file(s) or a Helm chart on k3s
4. Report the preview instance URL in the GitHub UI
It is designed to be the **no-nonsense, cheap, and secure** alternative to
@@ -29,7 +29,7 @@ Adding the label triggers the deployment. A PR comment appears immediately with
### Step 2 — Instance is provisioned
-PullPreview creates (or restores) a preview instance and waits for SSH access.
+PullPreview creates a preview instance and waits for SSH access.
@@ -57,7 +57,8 @@ When the label is removed, the preview environment is automatically destroyed.
Preview environments that:
- work with your **existing tooling**: If your app can be started with
- docker-compose, it can be deployed to preview environments with PullPreview.
+ docker-compose or packaged as a Helm chart, it can be deployed to preview
+ environments with PullPreview.
- can be **started and destroyed easily**: You can manage preview environments
by adding or removing the `pullpreview` label on your Pull Requests.
@@ -115,15 +116,20 @@ All supported `with:` inputs from `action.yml`:
| `ports` | `80/tcp,443/tcp` | Firewall ports to expose publicly (SSH `22` is always open). |
| `cidrs` | `0.0.0.0/0` | Allowed source CIDR ranges for exposed ports. |
| `default_port` | `80` | Port used to build the preview URL output. |
+| `deployment_target` | `compose` | Deployment target: `compose` or `helm` (`helm` supports Hetzner and Lightsail). |
| `compose_files` | `docker-compose.yml` | Comma-separated Compose files passed to deploy. |
| `compose_options` | `--build` | Additional options appended to `docker compose up`. |
+| `chart` | `""` | Helm chart reference: local path (`./chart` or `../chart`), repo chart name (`wordpress`), or OCI reference (`oci://...`) (`deployment_target: helm`). |
+| `chart_repository` | `""` | Helm repository URL used when `chart` is a repo chart name such as `wordpress`; not used for local paths or OCI refs (`deployment_target: helm`). |
+| `chart_values` | `""` | Comma-separated Helm values files relative to `app_path` (`deployment_target: helm`). |
+| `chart_set` | `""` | Comma-separated Helm `--set` overrides (`deployment_target: helm`). |
| `license` | `""` | PullPreview license key. |
| `instance_type` | `small` | Provider-specific instance size (`small` for Lightsail, `cpx21` for Hetzner). |
| `region` | `` | Optional provider region/datacenter override (`AWS_REGION`/Hetzner location). If empty, provider defaults apply. |
| `image` | `ubuntu-24.04` | Instance image for Hetzner (provider-specific) and ignored for AWS. |
| `deployment_variant` | `""` | Optional short suffix to run multiple preview environments per PR (max 4 chars). |
| `provider` | `lightsail` | Cloud provider (`lightsail`, `hetzner`). |
-| `registries` | `""` | Private registry credentials, e.g. `docker://user:password@ghcr.io`. |
+| `registries` | `""` | Private registry credentials for Compose deployments, e.g. `docker://user:password@ghcr.io`. |
| `proxy_tls` | `""` | Automatic HTTPS forwarding with Caddy + Let's Encrypt (`service:port`, e.g. `web:80`). |
| `pre_script` | `""` | Path to a local shell script (relative to `app_path`) executed inline over SSH before compose deploy (should be self-contained). |
| `ttl` | `infinite` | Maximum deployment lifetime (e.g. `10h`, `5d`, `infinite`). |
@@ -131,10 +137,17 @@ All supported `with:` inputs from `action.yml`:
Notes:
- `proxy_tls` forces URL/output/comment links to HTTPS on port `443`, injects a Caddy proxy service, and suppresses firewall exposure for port `80`. **When using `proxy_tls`, it is strongly recommended to set `dns` to a [custom domain](https://github.com/pullpreview/action/wiki/Using-a-custom-domain) or one of the built-in `revN.click` alternatives** to avoid hitting shared Let's Encrypt rate limits on `my.preview.run`.
+- For `deployment_target: helm`, `proxy_tls` is required and targets the Kubernetes Service behind the PullPreview-managed Caddy gateway (`service:port`, with placeholder support such as `{{ release_name }}` and `{{ namespace }}`).
+- For `deployment_target: helm`, use either a local chart path (`./charts/my-app`), a repo chart name plus `chart_repository` (`chart: wordpress` with `chart_repository: https://charts.bitnami.com/bitnami`), or an OCI reference (`oci://...`).
+- For `deployment_target: helm`, PullPreview bootstraps k3s on the preview instance, deploys the chart as a single Helm release in a dedicated namespace, and exposes one HTTPS preview URL through a PullPreview-managed Caddy Deployment.
+- For `deployment_target: helm`, `registries` is not supported yet; use public images or chart-managed pull secrets instead.
- `admins: "@collaborators/push"` uses GitHub API collaborators with push permission (first page, up to 100 users; warning is logged if more exist).
- SSH key fetches are cached between runs in the action cache.
+- For Lightsail, configure `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. Lightsail previews support fresh deploys and same-instance redeploys.
- For Hetzner, configure credentials and defaults via action inputs and environment: `HCLOUD_TOKEN` (required), `HETZNER_CA_KEY` (required), optional `region` and `image` (`region` defaults to `nbg1`, `image` defaults to `ubuntu-24.04`). `instance_type` defaults to `cpx21` when provider is Hetzner.
- `HETZNER_CA_KEY` must be an SSH private key (RSA or Ed25519) for the instance-access CA. PullPreview signs a per-run ephemeral login key with this CA key and uses SSH certificates (`...-cert.pub`) instead of reusing a persistent private key across runs.
+- Scheduled cleanup is scoped by workflow label and repo, and sweeps all deployment variants for that label. Separate labels such as `pullpreview` and `pullpreview-helm` do not clean up each other's instances.
+- Non-default labels also get isolated instance names and preview hostnames, so separate workflows on the same PR do not reuse the wrong runtime bootstrap.
- Generate a CA key once for your repository secret:
```bash
@@ -144,6 +157,22 @@ ssh-keygen -t rsa -b 3072 -m PEM -N "" -f hetzner_ca_key
- **Let's Encrypt rate limits**: Let's Encrypt allows a maximum of [50 certificates per registered domain per week](https://letsencrypt.org/docs/rate-limits/#new-certificates-per-registered-domain). If you use `proxy_tls` and hit this limit on the default `my.preview.run` domain, switch to one of the built-in alternatives: `rev1.click`, `rev2.click`, ... `rev9.click`. Set `dns: rev1.click` in your workflow inputs. You can also use a [custom domain](https://github.com/pullpreview/action/wiki/Using-a-custom-domain).
- For local CLI runs, set `HCLOUD_TOKEN` and `HETZNER_CA_KEY` (for example via `.env`) when using `provider: hetzner` to avoid relying on action inputs.
+## Action Outputs (v6)
+
+All supported outputs from `action.yml`:
+
+| Output | Description |
+| --- | --- |
+| `live` | `true` when the current run produced or updated a live preview deployment, otherwise `false`. |
+| `url` | Public preview URL reported in PR comments and step outputs. With `proxy_tls`, this is an HTTPS URL on port `443`. |
+| `host` | Preview instance hostname or IP address. |
+| `username` | SSH username for the preview instance. |
+
+Notes:
+
+- On non-deploying events, such as unrelated PR activity without the configured label, `live` is `false` and the other outputs are omitted.
+- For `deployment_target: helm`, outputs keep the same shape as `compose`: one preview URL, one host, and one SSH username per preview instance.
+
## Example
Workflow file for pullpreview-driven deployments:
@@ -225,6 +254,71 @@ jobs:
```
+## Hetzner Helm example
+
+```yaml
+# .github/workflows/pullpreview-hetzner-helm.yml
+name: PullPreview Helm
+on:
+ pull_request:
+ types: [labeled, unlabeled, synchronize, closed, reopened, opened]
+
+jobs:
+ deploy_hetzner_helm:
+ runs-on: ubuntu-slim
+ if: github.event.label.name == 'pullpreview' || contains(github.event.pull_request.labels.*.name, 'pullpreview')
+ timeout-minutes: 35
+ steps:
+ - uses: actions/checkout@v5
+ - uses: pullpreview/action@v6
+ with:
+ provider: hetzner
+ deployment_target: helm
+ chart: wordpress
+ chart_repository: https://charts.bitnami.com/bitnami
+ proxy_tls: "{{ release_name }}-wordpress:80"
+ instance_type: cpx21
+ image: ubuntu-24.04
+ region: nbg1
+ dns: rev2.click
+ env:
+ HCLOUD_TOKEN: "${{ secrets.HCLOUD_TOKEN }}"
+ HETZNER_CA_KEY: "${{ secrets.HETZNER_CA_KEY }}"
+```
+
+## Lightsail Helm example
+
+```yaml
+# .github/workflows/pullpreview-lightsail-helm.yml
+name: PullPreview Lightsail Helm
+on:
+ pull_request:
+ types: [labeled, unlabeled, synchronize, closed, reopened, opened]
+
+jobs:
+ deploy_lightsail_helm:
+ runs-on: ubuntu-slim
+ if: github.event.label.name == 'pullpreview-lightsail-helm' || contains(github.event.pull_request.labels.*.name, 'pullpreview-lightsail-helm')
+ timeout-minutes: 45
+ steps:
+ - uses: actions/checkout@v6
+ - uses: pullpreview/action@v6
+ with:
+ label: pullpreview-lightsail-helm
+ provider: lightsail
+ region: us-east-1
+ deployment_target: helm
+ chart: wordpress
+ chart_repository: https://charts.bitnami.com/bitnami
+ chart_set: service.type=ClusterIP
+ proxy_tls: "{{ release_name }}-wordpress:80"
+ instance_type: medium
+ dns: rev3.click
+ env:
+ AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}"
+ AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}"
+```
+
## CLI usage (installed binary)
Pull the released CLI binary from GitHub Releases, install it in your PATH, then use:
diff --git a/action.yml b/action.yml
index 2d24ce7..56884cc 100644
--- a/action.yml
+++ b/action.yml
@@ -1,5 +1,5 @@
name: "Preview Environments for GitHub"
-description: "Ephemeral preview deployments of your app for every pull request. Just using Docker Compose."
+description: "Ephemeral preview deployments of your app for every pull request."
author: "Cyril Rohr"
branding:
icon: anchor
@@ -45,6 +45,26 @@ inputs:
description: "Additional options to pass to docker-compose up, comma-separated"
required: false
default: "--build"
+ deployment_target:
+ description: "Deployment target to use: compose or helm (helm supports Hetzner and Lightsail)"
+ required: false
+ default: "compose"
+ chart:
+ description: "Helm chart reference: local path, repo chart name, or OCI reference (helm target only)"
+ required: false
+ default: ""
+ chart_repository:
+ description: "Helm repository URL used when chart is a repo chart name (helm target only)"
+ required: false
+ default: ""
+ chart_values:
+ description: "Helm values files relative to app_path, comma-separated (helm target only)"
+ required: false
+ default: ""
+ chart_set:
+ description: "Additional Helm --set overrides, comma-separated (helm target only)"
+ required: false
+ default: ""
license:
description: "PullPreview license"
required: false
@@ -70,7 +90,7 @@ inputs:
required: false
default: "lightsail"
registries:
- description: "Names of private registries to authenticate against. E.g. docker://username:password@ghcr.io"
+ description: "Names of private registries to authenticate against for compose deployments. E.g. docker://username:password@ghcr.io"
required: false
default: ""
proxy_tls:
@@ -155,8 +175,13 @@ runs:
"${binary}" github-sync "${app_path}" \
--admins "${{ inputs.admins }}" \
--cidrs "${{ inputs.cidrs }}" \
+ --deployment-target "${{ inputs.deployment_target }}" \
--compose-files "${{ inputs.compose_files }}" \
--compose-options "${{ inputs.compose_options }}" \
+ --chart "${{ inputs.chart }}" \
+ --chart-repository "${{ inputs.chart_repository }}" \
+ --chart-values "${{ inputs.chart_values }}" \
+ --chart-set "${{ inputs.chart_set }}" \
--dns "${{ inputs.dns }}" \
--label "${{ inputs.label }}" \
--ports "${{ inputs.ports }}" \
diff --git a/cmd/pullpreview/main.go b/cmd/pullpreview/main.go
index 34241c3..57c5221 100644
--- a/cmd/pullpreview/main.go
+++ b/cmd/pullpreview/main.go
@@ -95,6 +95,7 @@ func runDown(ctx context.Context, args []string, logger *pullpreview.Logger) {
fs := flag.NewFlagSet("down", flag.ExitOnError)
verbose := fs.Bool("verbose", false, "Enable verbose mode")
name := fs.String("name", "", "Name of the environment to destroy")
+ providerName := fs.String("provider", "", "Cloud provider to use")
fs.Parse(args)
if *verbose {
logger.SetLevel(pullpreview.LevelDebug)
@@ -103,7 +104,7 @@ func runDown(ctx context.Context, args []string, logger *pullpreview.Logger) {
fmt.Println("Usage: pullpreview down --name ")
os.Exit(1)
}
- provider := mustProvider(ctx, logger, pullpreview.CommonOptions{})
+ provider := mustProvider(ctx, logger, pullpreview.CommonOptions{ProviderName: *providerName})
if err := pullpreview.RunDown(pullpreview.DownOptions{Name: *name}, provider, logger); err != nil {
fmt.Println("Error:", err)
os.Exit(1)
@@ -151,6 +152,7 @@ func runList(ctx context.Context, args []string, logger *pullpreview.Logger) {
verbose := fs.Bool("verbose", false, "Enable verbose mode")
org := fs.String("org", "", "Restrict to given organization name")
repo := fs.String("repo", "", "Restrict to given repository name")
+ providerName := fs.String("provider", "", "Cloud provider to use")
leadingTarget, parseArgs := splitLeadingPositional(args)
fs.Parse(parseArgs)
if *verbose {
@@ -169,7 +171,7 @@ func runList(ctx context.Context, args []string, logger *pullpreview.Logger) {
*repo = parts[1]
}
}
- provider := mustProvider(ctx, logger, pullpreview.CommonOptions{})
+ provider := mustProvider(ctx, logger, pullpreview.CommonOptions{ProviderName: *providerName})
if err := pullpreview.RunList(pullpreview.ListOptions{Org: *org, Repo: *repo}, provider, logger); err != nil {
fmt.Println("Error:", err)
os.Exit(1)
@@ -177,25 +179,32 @@ func runList(ctx context.Context, args []string, logger *pullpreview.Logger) {
}
type commonFlagValues struct {
- region string
- image string
- admins string
- cidrs string
- registries string
- ports string
- composeFiles string
- composeOptions string
- tags multiValue
- options pullpreview.CommonOptions
+ provider string
+ region string
+ image string
+ admins string
+ cidrs string
+ registries string
+ ports string
+ composeFiles string
+ composeOptions string
+ chart string
+ chartRepository string
+ chartValues string
+ chartSet string
+ tags multiValue
+ options pullpreview.CommonOptions
}
func registerCommonFlags(fs *flag.FlagSet) *commonFlagValues {
values := &commonFlagValues{}
+ fs.StringVar(&values.provider, "provider", "", "Cloud provider to use")
fs.StringVar(&values.region, "region", "", "Provider region to use")
fs.StringVar(&values.image, "image", "", "Provider image to use")
fs.StringVar(&values.admins, "admins", "", "Logins of GitHub users that will have their SSH key installed on the instance")
fs.StringVar(&values.cidrs, "cidrs", "0.0.0.0/0", "CIDRs allowed to connect to the instance")
fs.StringVar(&values.registries, "registries", "", "URIs of docker registries to authenticate against")
+ fs.StringVar((*string)(&values.options.DeploymentTarget), "deployment-target", string(pullpreview.DeploymentTargetCompose), "Deployment target to use: compose or helm")
fs.StringVar(&values.options.ProxyTLS, "proxy-tls", "", "Enable automatic HTTPS proxying with Let's Encrypt (format: service:port, e.g. web:80)")
fs.StringVar(&values.options.DNS, "dns", "my.preview.run", "DNS suffix to use")
fs.StringVar(&values.ports, "ports", "80/tcp,443/tcp", "Ports to open for external access")
@@ -204,21 +213,31 @@ func registerCommonFlags(fs *flag.FlagSet) *commonFlagValues {
fs.Var(&values.tags, "tags", "Tags to add to the instance (key:value), comma-separated")
fs.StringVar(&values.composeFiles, "compose-files", "docker-compose.yml", "Compose files to use")
fs.StringVar(&values.composeOptions, "compose-options", "--build", "Additional options to pass to docker-compose up")
- fs.StringVar(&values.options.PreScript, "pre-script", "", "Path to a bash script to run on the instance before docker compose")
+ fs.StringVar(&values.chart, "chart", "", "Helm chart path, name, or OCI reference")
+ fs.StringVar(&values.chartRepository, "chart-repository", "", "Helm repository URL to use with --chart")
+ fs.StringVar(&values.chartValues, "chart-values", "", "Comma-separated Helm values files relative to app_path")
+ fs.StringVar(&values.chartSet, "chart-set", "", "Comma-separated Helm --set overrides")
+ fs.StringVar(&values.options.PreScript, "pre-script", "", "Path to a bash script to run on the instance before deployment")
return values
}
func (c *commonFlagValues) ToOptions(ctx context.Context) pullpreview.CommonOptions {
opts := c.options
+ opts.ProviderName = strings.TrimSpace(c.provider)
opts.Region = strings.TrimSpace(c.region)
opts.Image = strings.TrimSpace(c.image)
opts.Context = ctx
+ opts.DeploymentTarget = pullpreview.NormalizeDeploymentTarget(string(c.options.DeploymentTarget))
opts.Admins = splitCommaList(c.admins)
opts.CIDRs = splitCommaList(c.cidrs)
opts.Registries = splitCommaList(c.registries)
opts.Ports = splitCommaList(c.ports)
opts.ComposeFiles = splitCommaList(c.composeFiles)
opts.ComposeOptions = splitCommaList(c.composeOptions)
+ opts.Chart = strings.TrimSpace(c.chart)
+ opts.ChartRepository = strings.TrimSpace(c.chartRepository)
+ opts.ChartValues = splitCommaList(c.chartValues)
+ opts.ChartSet = splitCommaList(c.chartSet)
opts.Tags = parseTags(c.tags)
return opts
}
@@ -274,7 +293,10 @@ func splitLeadingPositional(args []string) (string, []string) {
}
func mustProvider(ctx context.Context, logger *pullpreview.Logger, common pullpreview.CommonOptions) pullpreview.Provider {
- providerName := strings.TrimSpace(os.Getenv("PULLPREVIEW_PROVIDER"))
+ providerName := strings.TrimSpace(common.ProviderName)
+ if providerName == "" {
+ providerName = strings.TrimSpace(os.Getenv("PULLPREVIEW_PROVIDER"))
+ }
env := buildProviderEnv(common)
provider, _, err := providers.NewProvider(ctx, providerName, env, logger)
if err != nil {
diff --git a/cmd/pullpreview/main_test.go b/cmd/pullpreview/main_test.go
index c4ad354..28877b8 100644
--- a/cmd/pullpreview/main_test.go
+++ b/cmd/pullpreview/main_test.go
@@ -1,6 +1,12 @@
package main
-import "testing"
+import (
+ "context"
+ "flag"
+ "testing"
+
+ "github.com/pullpreview/action/internal/pullpreview"
+)
func TestDefaultUpNameFromLocalPath(t *testing.T) {
got := defaultUpName("path/to/example-app")
@@ -51,3 +57,73 @@ func TestSplitLeadingPositionalWhenFlagsFirst(t *testing.T) {
t.Fatalf("unexpected remaining args: %#v", rest)
}
}
+
+func TestRegisterCommonFlagsParsesHelmOptions(t *testing.T) {
+ fs := flag.NewFlagSet("up", flag.ContinueOnError)
+ values := registerCommonFlags(fs)
+ if err := fs.Parse([]string{
+ "--provider", "hetzner",
+ "--deployment-target", "helm",
+ "--chart", "wordpress",
+ "--chart-repository", "https://charts.bitnami.com/bitnami",
+ "--chart-values", "values.yaml,values.preview.yaml",
+ "--chart-set", "image.tag=123,ingress.host={{ release_name }}.preview.run",
+ }); err != nil {
+ t.Fatalf("Parse() error: %v", err)
+ }
+
+ opts := values.ToOptions(context.Background())
+ if opts.ProviderName != "hetzner" {
+ t.Fatalf("expected provider name hetzner, got %q", opts.ProviderName)
+ }
+ if opts.DeploymentTarget != pullpreview.DeploymentTargetHelm {
+ t.Fatalf("expected helm deployment target, got %q", opts.DeploymentTarget)
+ }
+ if opts.Chart != "wordpress" {
+ t.Fatalf("unexpected chart: %q", opts.Chart)
+ }
+ if opts.ChartRepository != "https://charts.bitnami.com/bitnami" {
+ t.Fatalf("unexpected chart repository: %q", opts.ChartRepository)
+ }
+ if len(opts.ChartValues) != 2 || opts.ChartValues[0] != "values.yaml" || opts.ChartValues[1] != "values.preview.yaml" {
+ t.Fatalf("unexpected chart values: %#v", opts.ChartValues)
+ }
+ if len(opts.ChartSet) != 2 {
+ t.Fatalf("unexpected chart set values: %#v", opts.ChartSet)
+ }
+}
+
+func TestRegisterCommonFlagsDefaultsToCompose(t *testing.T) {
+ fs := flag.NewFlagSet("up", flag.ContinueOnError)
+ values := registerCommonFlags(fs)
+ if err := fs.Parse(nil); err != nil {
+ t.Fatalf("Parse() error: %v", err)
+ }
+
+ opts := values.ToOptions(context.Background())
+ if opts.DeploymentTarget != pullpreview.DeploymentTargetCompose {
+ t.Fatalf("expected compose deployment target by default, got %q", opts.DeploymentTarget)
+ }
+ if len(opts.ComposeFiles) != 1 || opts.ComposeFiles[0] != "docker-compose.yml" {
+ t.Fatalf("unexpected compose files: %#v", opts.ComposeFiles)
+ }
+ if len(opts.ComposeOptions) != 1 || opts.ComposeOptions[0] != "--build" {
+ t.Fatalf("unexpected compose options: %#v", opts.ComposeOptions)
+ }
+ if len(opts.ChartValues) != 0 || len(opts.ChartSet) != 0 {
+ t.Fatalf("expected empty helm options by default, got values=%#v set=%#v", opts.ChartValues, opts.ChartSet)
+ }
+}
+
+func TestRegisterCommonFlagsNormalizesDeploymentTarget(t *testing.T) {
+ fs := flag.NewFlagSet("up", flag.ContinueOnError)
+ values := registerCommonFlags(fs)
+ if err := fs.Parse([]string{"--deployment-target", "HeLm"}); err != nil {
+ t.Fatalf("Parse() error: %v", err)
+ }
+
+ opts := values.ToOptions(context.Background())
+ if opts.DeploymentTarget != pullpreview.DeploymentTargetHelm {
+ t.Fatalf("expected normalized helm target, got %q", opts.DeploymentTarget)
+ }
+}
diff --git a/dist/pullpreview-linux-amd64 b/dist/pullpreview-linux-amd64
index b80af2b..a178330 100755
Binary files a/dist/pullpreview-linux-amd64 and b/dist/pullpreview-linux-amd64 differ
diff --git a/internal/providers/hetzner/hetzner.go b/internal/providers/hetzner/hetzner.go
index a51c361..e38fe3e 100644
--- a/internal/providers/hetzner/hetzner.go
+++ b/internal/providers/hetzner/hetzner.go
@@ -292,88 +292,29 @@ func (p *Provider) DisplayName() string {
return "Hetzner Cloud"
}
-func (p *Provider) SupportsSnapshots() bool {
- return false
-}
-
-func (p *Provider) SupportsRestore() bool {
- return false
-}
-
func (p *Provider) SupportsFirewall() bool {
return true
}
-func (p *Provider) BuildUserData(options pullpreview.UserDataOptions) (string, error) {
- lines := []string{
- "#!/usr/bin/env bash",
- "set -xe ; set -o pipefail",
- }
- homeDir := pullpreview.HomeDirForUser(options.Username)
- lines = append(lines, fmt.Sprintf("mkdir -p %s/.ssh", homeDir))
- if options.Username != "root" {
- lines = append(lines, "if [ -f /root/.ssh/authorized_keys ]; then")
- lines = append(lines, fmt.Sprintf(" cp /root/.ssh/authorized_keys %s/.ssh/authorized_keys", homeDir))
- lines = append(lines, "fi")
- }
- if len(options.SSHPublicKeys) > 0 {
- lines = append(lines, fmt.Sprintf("echo '%s' >> %s/.ssh/authorized_keys", strings.Join(options.SSHPublicKeys, "\n"), homeDir))
- }
- if options.Username != "root" || len(options.SSHPublicKeys) > 0 {
- lines = append(lines,
- fmt.Sprintf("chown -R %s:%s %s/.ssh", options.Username, options.Username, homeDir),
- fmt.Sprintf("chmod 0700 %s/.ssh && chmod 0600 %s/.ssh/authorized_keys", homeDir, homeDir),
- )
- }
- lines = append(lines,
- fmt.Sprintf("mkdir -p %s && chown -R %s:%s %s", options.AppPath, options.Username, options.Username, options.AppPath),
- "mkdir -p /etc/profile.d",
- fmt.Sprintf("echo 'cd %s' > /etc/profile.d/pullpreview.sh", options.AppPath),
- fmt.Sprintf("IMAGE_NAME=%q", p.image),
- "if command -v apt-get >/dev/null 2>&1; then",
- " mkdir -p /etc/apt/keyrings",
- " install -m 0755 -d /etc/apt/keyrings",
- " apt-get update",
- " apt-get install -y ca-certificates curl gnupg lsb-release",
- " if echo \"$IMAGE_NAME\" | grep -iq ubuntu; then",
- " DISTRO=ubuntu",
- " else",
- " DISTRO=debian",
- " fi",
- " curl -fsSL https://download.docker.com/linux/$DISTRO/gpg -o /etc/apt/keyrings/docker.asc",
- " chmod a+r /etc/apt/keyrings/docker.asc",
- " echo \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/$DISTRO $(lsb_release -cs) stable\" > /etc/apt/sources.list.d/docker.list",
- " apt-get update",
- " apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin",
- " systemctl restart docker",
- "elif command -v dnf >/dev/null 2>&1; then",
- " dnf -y install dnf-plugins-core",
- " dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo",
- " dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin",
- " systemctl restart docker",
- "elif command -v yum >/dev/null 2>&1; then",
- " yum -y install yum-utils",
- " yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo",
- " yum -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin",
- " systemctl restart docker",
- "else",
- " echo \"unsupported OS family; expected apt, dnf, or yum\"",
- " exit 1",
- "fi",
- "mkdir -p /etc/pullpreview && touch /etc/pullpreview/ready",
- fmt.Sprintf("chown -R %s:%s /etc/pullpreview", options.Username, options.Username),
- )
- if strings.TrimSpace(p.caPublicKey) != "" {
- lines = append(lines,
- "mkdir -p /etc/ssh/sshd_config.d",
- fmt.Sprintf("cat <<'EOF' > /etc/ssh/pullpreview-user-ca.pub\n%s\nEOF", p.caPublicKey),
- "cat <<'EOF' > /etc/ssh/sshd_config.d/pullpreview.conf",
- "TrustedUserCAKeys /etc/ssh/pullpreview-user-ca.pub",
- "EOF",
- "systemctl restart ssh || systemctl restart sshd || true",
- )
+func (p *Provider) SupportsDeploymentTarget(target pullpreview.DeploymentTarget) bool {
+ switch pullpreview.NormalizeDeploymentTarget(string(target)) {
+ case pullpreview.DeploymentTargetCompose, pullpreview.DeploymentTargetHelm:
+ return true
+ default:
+ return false
}
- return strings.Join(lines, "\n"), nil
+}
+
+func (p *Provider) BuildUserData(options pullpreview.UserDataOptions) (string, error) {
+ return pullpreview.BuildBootstrapScript(pullpreview.BootstrapOptions{
+ AppPath: options.AppPath,
+ Username: options.Username,
+ SSHPublicKeys: options.SSHPublicKeys,
+ DeploymentTarget: options.DeploymentTarget,
+ ImageName: p.image,
+ TrustedUserCAKey: p.caPublicKey,
+ PropagateRootSSH: true,
+ })
}
func (p *Provider) Launch(name string, opts pullpreview.LaunchOptions) (pullpreview.AccessDetails, error) {
@@ -385,6 +326,15 @@ func (p *Provider) Launch(name string, opts pullpreview.LaunchOptions) (pullprev
if existing == nil {
return p.createServer(name, opts)
}
+ if reason, mismatch := pullpreview.DeploymentIdentityMismatch(labelsOrEmpty(existing.Labels), opts.Tags); mismatch {
+ if p.logger != nil {
+ p.logger.Warnf("Existing Hetzner instance %q has incompatible deployment identity (%s); recreating instance", name, reason)
+ }
+ if err := p.destroyInstanceAndCache(existing, name); err != nil {
+ return pullpreview.AccessDetails{}, err
+ }
+ continue
+ }
if err := p.ensureServerRunning(existing); err != nil {
return pullpreview.AccessDetails{}, err
}
@@ -495,12 +445,15 @@ func (p *Provider) createServer(name string, opts pullpreview.LaunchOptions) (pu
if err := p.validateSSHAccessWithRetry(server, privateKey, "", defaultHetznerSSHRetries); err != nil {
return pullpreview.AccessDetails{}, p.cleanupFailedCreate(name, sshKey, server, err)
}
- if err := p.deleteCloudSSHKeyIfExists(sshKey); err != nil && p.logger != nil {
- p.logger.Warnf("Unable to delete temporary Hetzner SSH key %s: %v", keyName, err)
- }
certPrivateKey, cert, err := p.generateSignedAccessCredentials()
if err != nil {
- return pullpreview.AccessDetails{}, p.cleanupFailedCreate(name, nil, server, err)
+ return pullpreview.AccessDetails{}, p.cleanupFailedCreate(name, sshKey, server, err)
+ }
+ if err := p.validateSSHAccessWithRetry(server, certPrivateKey, cert, defaultHetznerSSHRetries); err != nil {
+ return pullpreview.AccessDetails{}, p.cleanupFailedCreate(name, sshKey, server, err)
+ }
+ if err := p.deleteCloudSSHKeyIfExists(sshKey); err != nil && p.logger != nil {
+ p.logger.Warnf("Unable to delete temporary Hetzner SSH key %s: %v", keyName, err)
}
if p.logger != nil {
p.logger.Infof("Created Hetzner server %s with SSH key %s", server.Name, keyName)
diff --git a/internal/providers/hetzner/hetzner_test.go b/internal/providers/hetzner/hetzner_test.go
index 39a8f06..5bd6b06 100644
--- a/internal/providers/hetzner/hetzner_test.go
+++ b/internal/providers/hetzner/hetzner_test.go
@@ -148,6 +148,24 @@ func TestBuildUserDataBranchesAndPaths(t *testing.T) {
if strings.Contains(debianScript, "authorized_keys") {
t.Fatalf("did not expect authorized_keys setup without keys: %s", debianScript)
}
+
+ helmScript, err := p.BuildUserData(pullpreview.UserDataOptions{
+ AppPath: "/app",
+ Username: "root",
+ DeploymentTarget: pullpreview.DeploymentTargetHelm,
+ })
+ if err != nil {
+ t.Fatalf("BuildUserData() for helm error: %v", err)
+ }
+ if !strings.Contains(helmScript, "INSTALL_K3S_EXEC='server --disable traefik --write-kubeconfig-mode 0644'") {
+ t.Fatalf("expected k3s install command in helm script: %s", helmScript)
+ }
+ if !strings.Contains(helmScript, "get-helm-3") {
+ t.Fatalf("expected helm installer in helm script: %s", helmScript)
+ }
+ if strings.Contains(helmScript, "docker-compose-plugin") {
+ t.Fatalf("did not expect docker compose install in helm script: %s", helmScript)
+ }
}
func TestValidateSSHPrivateKeyFormat(t *testing.T) {
@@ -497,9 +515,13 @@ func TestHetznerLaunchLifecycleRecreateWhenCacheMissing(t *testing.T) {
provider.client = client
originalRunSSHCommand := runSSHCommand
defer func() { runSSHCommand = originalRunSSHCommand }()
+ certChecks := 0
runSSHCommand = func(_ context.Context, _ string, certFile string, _ string, _ string) ([]byte, error) {
if strings.TrimSpace(certFile) != "" {
- return nil, fmt.Errorf("ssh unavailable")
+ certChecks++
+ if certChecks <= defaultHetznerSSHRetries {
+ return nil, fmt.Errorf("ssh unavailable")
+ }
}
return []byte("ok"), nil
}
@@ -559,6 +581,52 @@ func TestHetznerLaunchLifecycleRecreateWhenPublicIPMissing(t *testing.T) {
}
}
+func TestHetznerLaunchLifecycleRecreatesWhenDeploymentIdentityMismatches(t *testing.T) {
+ provider := mustNewProviderWithContext(t, Config{
+ APIToken: "token",
+ Location: defaultHetznerLocation,
+ Image: defaultHetznerImage,
+ SSHUsername: defaultHetznerSSHUser,
+ SSHKeysCacheDir: t.TempDir(),
+ })
+ instance := "gh-1-pr-1"
+ existing := makeTestServer(instance, "198.51.100.1", hcloud.ServerStatusRunning, nil)
+ existing.Labels = map[string]string{
+ "pullpreview_label": "pullpreview-helm",
+ "pullpreview_target": "helm",
+ "pullpreview_runtime": "k3s",
+ }
+ created := makeTestServer(instance, "203.0.113.10", hcloud.ServerStatusRunning, nil)
+ client := &fakeHcloudClient{
+ serverListResponses: [][]*hcloud.Server{{existing}, nil},
+ sshKeyCreateResult: mustTestSSHKey(12),
+ serverCreateResult: hcloud.ServerCreateResult{Server: created},
+ }
+ provider.client = client
+ originalRunSSHCommand := runSSHCommand
+ defer func() { runSSHCommand = originalRunSSHCommand }()
+ runSSHCommand = func(context.Context, string, string, string, string) ([]byte, error) {
+ return []byte("ok"), nil
+ }
+
+ _, err := provider.Launch(instance, pullpreview.LaunchOptions{
+ Tags: map[string]string{
+ "pullpreview_label": "pullpreview",
+ "pullpreview_target": "compose",
+ "pullpreview_runtime": "docker",
+ },
+ })
+ if err != nil {
+ t.Fatalf("Launch() error: %v", err)
+ }
+ if client.serverDeleteCalls != 1 {
+ t.Fatalf("expected one delete call for identity mismatch, got %d", client.serverDeleteCalls)
+ }
+ if client.serverCreateCalls != 1 {
+ t.Fatalf("expected one create call after identity mismatch, got %d", client.serverCreateCalls)
+ }
+}
+
func TestHetznerCreateLifecycleRecreateWhenSSHPrecheckFails(t *testing.T) {
cacheDir := t.TempDir()
provider := mustNewProviderWithContext(t, Config{
diff --git a/internal/providers/lightsail/lightsail.go b/internal/providers/lightsail/lightsail.go
index 40c11f4..92ea0dd 100644
--- a/internal/providers/lightsail/lightsail.go
+++ b/internal/providers/lightsail/lightsail.go
@@ -3,7 +3,6 @@ package lightsail
import (
"context"
"errors"
- "sort"
"strings"
"time"
@@ -25,12 +24,29 @@ var sizeMap = map[string]string{
}
type Provider struct {
- client *ls.Client
+ client lightsailClient
ctx context.Context
region string
logger *pullpreview.Logger
}
+type lightsailClient interface {
+ GetInstanceState(context.Context, *ls.GetInstanceStateInput, ...func(*ls.Options)) (*ls.GetInstanceStateOutput, error)
+ DeleteInstance(context.Context, *ls.DeleteInstanceInput, ...func(*ls.Options)) (*ls.DeleteInstanceOutput, error)
+ CreateInstances(context.Context, *ls.CreateInstancesInput, ...func(*ls.Options)) (*ls.CreateInstancesOutput, error)
+ PutInstancePublicPorts(context.Context, *ls.PutInstancePublicPortsInput, ...func(*ls.Options)) (*ls.PutInstancePublicPortsOutput, error)
+ GetInstanceAccessDetails(context.Context, *ls.GetInstanceAccessDetailsInput, ...func(*ls.Options)) (*ls.GetInstanceAccessDetailsOutput, error)
+ GetInstance(context.Context, *ls.GetInstanceInput, ...func(*ls.Options)) (*ls.GetInstanceOutput, error)
+ GetInstances(context.Context, *ls.GetInstancesInput, ...func(*ls.Options)) (*ls.GetInstancesOutput, error)
+ GetRegions(context.Context, *ls.GetRegionsInput, ...func(*ls.Options)) (*ls.GetRegionsOutput, error)
+ GetBlueprints(context.Context, *ls.GetBlueprintsInput, ...func(*ls.Options)) (*ls.GetBlueprintsOutput, error)
+ GetBundles(context.Context, *ls.GetBundlesInput, ...func(*ls.Options)) (*ls.GetBundlesOutput, error)
+}
+
+type lightsailClientAdapter struct {
+ client *ls.Client
+}
+
func New(ctx context.Context, region string, logger *pullpreview.Logger) (*Provider, error) {
ctx = pullpreview.EnsureContext(ctx)
if region == "" {
@@ -41,13 +57,53 @@ func New(ctx context.Context, region string, logger *pullpreview.Logger) (*Provi
return nil, err
}
return &Provider{
- client: ls.NewFromConfig(cfg),
+ client: lightsailClientAdapter{client: ls.NewFromConfig(cfg)},
ctx: ctx,
region: region,
logger: logger,
}, nil
}
+func (a lightsailClientAdapter) GetInstanceState(ctx context.Context, input *ls.GetInstanceStateInput, optFns ...func(*ls.Options)) (*ls.GetInstanceStateOutput, error) {
+ return a.client.GetInstanceState(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) DeleteInstance(ctx context.Context, input *ls.DeleteInstanceInput, optFns ...func(*ls.Options)) (*ls.DeleteInstanceOutput, error) {
+ return a.client.DeleteInstance(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) CreateInstances(ctx context.Context, input *ls.CreateInstancesInput, optFns ...func(*ls.Options)) (*ls.CreateInstancesOutput, error) {
+ return a.client.CreateInstances(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) PutInstancePublicPorts(ctx context.Context, input *ls.PutInstancePublicPortsInput, optFns ...func(*ls.Options)) (*ls.PutInstancePublicPortsOutput, error) {
+ return a.client.PutInstancePublicPorts(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) GetInstanceAccessDetails(ctx context.Context, input *ls.GetInstanceAccessDetailsInput, optFns ...func(*ls.Options)) (*ls.GetInstanceAccessDetailsOutput, error) {
+ return a.client.GetInstanceAccessDetails(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) GetInstance(ctx context.Context, input *ls.GetInstanceInput, optFns ...func(*ls.Options)) (*ls.GetInstanceOutput, error) {
+ return a.client.GetInstance(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) GetInstances(ctx context.Context, input *ls.GetInstancesInput, optFns ...func(*ls.Options)) (*ls.GetInstancesOutput, error) {
+ return a.client.GetInstances(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) GetRegions(ctx context.Context, input *ls.GetRegionsInput, optFns ...func(*ls.Options)) (*ls.GetRegionsOutput, error) {
+ return a.client.GetRegions(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) GetBlueprints(ctx context.Context, input *ls.GetBlueprintsInput, optFns ...func(*ls.Options)) (*ls.GetBlueprintsOutput, error) {
+ return a.client.GetBlueprints(ctx, input, optFns...)
+}
+
+func (a lightsailClientAdapter) GetBundles(ctx context.Context, input *ls.GetBundlesInput, optFns ...func(*ls.Options)) (*ls.GetBundlesOutput, error) {
+ return a.client.GetBundles(ctx, input, optFns...)
+}
+
func (p *Provider) Running(name string) (bool, error) {
resp, err := p.client.GetInstanceState(p.ctx, &ls.GetInstanceStateInput{InstanceName: aws.String(name)})
if err != nil {
@@ -65,27 +121,52 @@ func (p *Provider) Terminate(name string) error {
if err != nil {
return err
}
- if len(resp.Operations) == 0 {
- return nil
- }
- if resp.Operations[0].ErrorCode != nil {
+ if len(resp.Operations) > 0 && resp.Operations[0].ErrorCode != nil {
return errors.New(*resp.Operations[0].ErrorCode)
}
- return nil
+ return p.waitUntilDeleted(name)
}
func (p *Provider) Launch(name string, opts pullpreview.LaunchOptions) (pullpreview.AccessDetails, error) {
- running, err := p.Running(name)
- if err != nil {
- return pullpreview.AccessDetails{}, err
- }
- if !running {
- if err := p.launchOrRestore(name, opts); err != nil {
+ for {
+ existing, err := p.instanceByName(name)
+ if err != nil {
return pullpreview.AccessDetails{}, err
}
- if err := p.waitUntilRunning(name); err != nil {
+ if existing == nil {
+ if err := p.launchInstance(name, opts); err != nil {
+ return pullpreview.AccessDetails{}, err
+ }
+ if err := p.waitUntilRunning(name); err != nil {
+ return pullpreview.AccessDetails{}, err
+ }
+ break
+ }
+ if reason, mismatch := pullpreview.DeploymentIdentityMismatch(tagsToMap(existing.Tags), opts.Tags); mismatch {
+ if p.logger != nil {
+ p.logger.Warnf("Existing Lightsail instance %q has incompatible deployment identity (%s); recreating instance", name, reason)
+ }
+ if err := p.Terminate(name); err != nil {
+ return pullpreview.AccessDetails{}, err
+ }
+ if err := p.waitUntilDeleted(name); err != nil {
+ return pullpreview.AccessDetails{}, err
+ }
+ continue
+ }
+ running, err := p.Running(name)
+ if err != nil {
return pullpreview.AccessDetails{}, err
}
+ if !running {
+ if err := p.launchInstance(name, opts); err != nil {
+ return pullpreview.AccessDetails{}, err
+ }
+ if err := p.waitUntilRunning(name); err != nil {
+ return pullpreview.AccessDetails{}, err
+ }
+ }
+ break
}
if err := p.setupFirewall(name, opts.CIDRs, opts.Ports); err != nil {
return pullpreview.AccessDetails{}, err
@@ -93,7 +174,7 @@ func (p *Provider) Launch(name string, opts pullpreview.LaunchOptions) (pullprev
return p.fetchAccessDetails(name)
}
-func (p *Provider) launchOrRestore(name string, opts pullpreview.LaunchOptions) error {
+func (p *Provider) launchInstance(name string, opts pullpreview.LaunchOptions) error {
bundleID, err := p.bundleID(opts.Size)
if err != nil {
return err
@@ -110,21 +191,8 @@ func (p *Provider) launchOrRestore(name string, opts pullpreview.LaunchOptions)
UserData: aws.String(opts.UserData),
BlueprintId: aws.String(p.blueprintID()),
}
-
- snapshot := p.latestSnapshot(name)
- if snapshot != nil {
- if p.logger != nil {
- p.logger.Infof("Found snapshot to restore from: %s", aws.ToString(snapshot.Name))
- }
- _, err := p.client.CreateInstancesFromSnapshot(p.ctx, &ls.CreateInstancesFromSnapshotInput{
- InstanceNames: []string{name},
- AvailabilityZone: aws.String(zones[0]),
- BundleId: aws.String(bundleID),
- Tags: params.Tags,
- UserData: aws.String(opts.UserData),
- InstanceSnapshotName: snapshot.Name,
- })
- return err
+ if p.logger != nil {
+ p.logger.Infof("Creating fresh Lightsail instance name=%s", name)
}
_, err = p.client.CreateInstances(p.ctx, params)
@@ -156,6 +224,17 @@ func (p *Provider) waitUntilRunning(name string) error {
return nil
}
+func (p *Provider) waitUntilDeleted(name string) error {
+ ok := pullpreview.WaitUntilContext(p.ctx, 30, 5*time.Second, func() bool {
+ inst, err := p.instanceByName(name)
+ return err == nil && inst == nil
+ })
+ if !ok {
+ return errors.New("timeout while waiting for instance deletion")
+ }
+ return nil
+}
+
func (p *Provider) setupFirewall(name string, cidrs, ports []string) error {
portInfos := []types.PortInfo{}
for _, portDef := range ports {
@@ -230,27 +309,19 @@ func (p *Provider) fetchAccessDetails(name string) (pullpreview.AccessDetails, e
}, nil
}
-func (p *Provider) latestSnapshot(name string) *types.InstanceSnapshot {
- resp, err := p.client.GetInstanceSnapshots(p.ctx, &ls.GetInstanceSnapshotsInput{})
+func (p *Provider) instanceByName(name string) (*types.Instance, error) {
+ resp, err := p.client.GetInstance(p.ctx, &ls.GetInstanceInput{InstanceName: aws.String(name)})
if err != nil {
- return nil
- }
- snapshots := resp.InstanceSnapshots
- sort.Slice(snapshots, func(i, j int) bool {
- if snapshots[i].CreatedAt == nil {
- return false
- }
- if snapshots[j].CreatedAt == nil {
- return true
- }
- return snapshots[i].CreatedAt.After(*snapshots[j].CreatedAt)
- })
- for _, snap := range snapshots {
- if snap.State == types.InstanceSnapshotStateAvailable && aws.ToString(snap.FromInstanceName) == name {
- return &snap
+ var nf *types.NotFoundException
+ if errors.As(err, &nf) {
+ return nil, nil
}
+ return nil, err
}
- return nil
+ if resp.Instance == nil {
+ return nil, nil
+ }
+ return resp.Instance, nil
}
func (p *Provider) ListInstances(tags map[string]string) ([]pullpreview.InstanceSummary, error) {
diff --git a/internal/providers/lightsail/lightsail_test.go b/internal/providers/lightsail/lightsail_test.go
index 594f115..fd16032 100644
--- a/internal/providers/lightsail/lightsail_test.go
+++ b/internal/providers/lightsail/lightsail_test.go
@@ -1,11 +1,159 @@
package lightsail
import (
+ "context"
+ "strings"
"testing"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ ls "github.com/aws/aws-sdk-go-v2/service/lightsail"
"github.com/aws/aws-sdk-go-v2/service/lightsail/types"
+ "github.com/pullpreview/action/internal/pullpreview"
)
+type fakeLightsailClient struct {
+ instanceStateOutput *ls.GetInstanceStateOutput
+ instanceStateByName map[string]*ls.GetInstanceStateOutput
+ deleteInstanceCalls int
+ createInstancesCalls int
+ createInstancesInput *ls.CreateInstancesInput
+ putInstancePublicPortsCalls int
+ getInstanceAccessDetailsOutput *ls.GetInstanceAccessDetailsOutput
+ getInstanceOutput *ls.GetInstanceOutput
+ getInstanceByName map[string]*types.Instance
+ getInstancesOutput *ls.GetInstancesOutput
+ getRegionsOutput *ls.GetRegionsOutput
+ getBlueprintsOutput *ls.GetBlueprintsOutput
+ getBundlesOutput *ls.GetBundlesOutput
+}
+
+func (f *fakeLightsailClient) GetInstanceState(ctx context.Context, input *ls.GetInstanceStateInput, optFns ...func(*ls.Options)) (*ls.GetInstanceStateOutput, error) {
+ if f.instanceStateByName != nil {
+ if out, ok := f.instanceStateByName[aws.ToString(input.InstanceName)]; ok {
+ return out, nil
+ }
+ }
+ if f.instanceStateOutput != nil {
+ return f.instanceStateOutput, nil
+ }
+ return &ls.GetInstanceStateOutput{State: &types.InstanceState{Name: aws.String("running")}}, nil
+}
+
+func (f *fakeLightsailClient) DeleteInstance(ctx context.Context, input *ls.DeleteInstanceInput, optFns ...func(*ls.Options)) (*ls.DeleteInstanceOutput, error) {
+ f.deleteInstanceCalls++
+ if f.getInstanceByName != nil {
+ delete(f.getInstanceByName, aws.ToString(input.InstanceName))
+ }
+ return &ls.DeleteInstanceOutput{
+ Operations: []types.Operation{{}},
+ }, nil
+}
+
+func (f *fakeLightsailClient) CreateInstances(ctx context.Context, input *ls.CreateInstancesInput, optFns ...func(*ls.Options)) (*ls.CreateInstancesOutput, error) {
+ f.createInstancesCalls++
+ f.createInstancesInput = input
+ name := input.InstanceNames[0]
+ if f.getInstanceByName == nil {
+ f.getInstanceByName = map[string]*types.Instance{}
+ }
+ f.getInstanceByName[name] = &types.Instance{
+ Name: aws.String(name),
+ Tags: input.Tags,
+ }
+ if f.instanceStateByName == nil {
+ f.instanceStateByName = map[string]*ls.GetInstanceStateOutput{}
+ }
+ f.instanceStateByName[name] = &ls.GetInstanceStateOutput{State: &types.InstanceState{Name: aws.String("running")}}
+ return &ls.CreateInstancesOutput{}, nil
+}
+
+func (f *fakeLightsailClient) PutInstancePublicPorts(ctx context.Context, input *ls.PutInstancePublicPortsInput, optFns ...func(*ls.Options)) (*ls.PutInstancePublicPortsOutput, error) {
+ f.putInstancePublicPortsCalls++
+ return &ls.PutInstancePublicPortsOutput{}, nil
+}
+
+func (f *fakeLightsailClient) GetInstanceAccessDetails(ctx context.Context, input *ls.GetInstanceAccessDetailsInput, optFns ...func(*ls.Options)) (*ls.GetInstanceAccessDetailsOutput, error) {
+ if f.getInstanceAccessDetailsOutput != nil {
+ return f.getInstanceAccessDetailsOutput, nil
+ }
+ return &ls.GetInstanceAccessDetailsOutput{
+ AccessDetails: &types.InstanceAccessDetails{
+ Username: aws.String("ec2-user"),
+ IpAddress: aws.String("1.2.3.4"),
+ },
+ }, nil
+}
+
+func (f *fakeLightsailClient) GetInstance(ctx context.Context, input *ls.GetInstanceInput, optFns ...func(*ls.Options)) (*ls.GetInstanceOutput, error) {
+ name := aws.ToString(input.InstanceName)
+ if f.getInstanceByName != nil {
+ if inst, ok := f.getInstanceByName[name]; ok {
+ return &ls.GetInstanceOutput{Instance: inst}, nil
+ }
+ return nil, &types.NotFoundException{}
+ }
+ if f.getInstanceOutput != nil {
+ return f.getInstanceOutput, nil
+ }
+ return nil, &types.NotFoundException{}
+}
+
+func (f *fakeLightsailClient) GetInstances(ctx context.Context, input *ls.GetInstancesInput, optFns ...func(*ls.Options)) (*ls.GetInstancesOutput, error) {
+ if f.getInstancesOutput != nil {
+ return f.getInstancesOutput, nil
+ }
+ return &ls.GetInstancesOutput{}, nil
+}
+
+func (f *fakeLightsailClient) GetRegions(ctx context.Context, input *ls.GetRegionsInput, optFns ...func(*ls.Options)) (*ls.GetRegionsOutput, error) {
+ if f.getRegionsOutput != nil {
+ return f.getRegionsOutput, nil
+ }
+ return &ls.GetRegionsOutput{
+ Regions: []types.Region{
+ {
+ Name: types.RegionName(DefaultRegion),
+ AvailabilityZones: []types.AvailabilityZone{
+ {ZoneName: aws.String(DefaultRegion + "a")},
+ },
+ },
+ },
+ }, nil
+}
+
+func (f *fakeLightsailClient) GetBlueprints(ctx context.Context, input *ls.GetBlueprintsInput, optFns ...func(*ls.Options)) (*ls.GetBlueprintsOutput, error) {
+ if f.getBlueprintsOutput != nil {
+ return f.getBlueprintsOutput, nil
+ }
+ return &ls.GetBlueprintsOutput{
+ Blueprints: []types.Blueprint{
+ {
+ BlueprintId: aws.String("amazon-linux-2023"),
+ Group: aws.String("amazon_linux_2023"),
+ IsActive: aws.Bool(true),
+ Platform: types.InstancePlatformLinuxUnix,
+ Type: types.BlueprintTypeOs,
+ },
+ },
+ }, nil
+}
+
+func (f *fakeLightsailClient) GetBundles(ctx context.Context, input *ls.GetBundlesInput, optFns ...func(*ls.Options)) (*ls.GetBundlesOutput, error) {
+ if f.getBundlesOutput != nil {
+ return f.getBundlesOutput, nil
+ }
+ return &ls.GetBundlesOutput{
+ Bundles: []types.Bundle{
+ {
+ BundleId: aws.String("small"),
+ InstanceType: aws.String("small"),
+ CpuCount: aws.Int32(2),
+ RamSizeInGb: aws.Float32(2),
+ },
+ },
+ }, nil
+}
+
func TestMergeTags(t *testing.T) {
merged := mergeTags(
map[string]string{"stack": "pullpreview", "repo": "action"},
@@ -71,4 +219,103 @@ func TestUsername(t *testing.T) {
}
}
+func TestSupportsDeploymentTarget(t *testing.T) {
+ p := &Provider{}
+ if !p.SupportsDeploymentTarget(pullpreview.DeploymentTargetCompose) {
+ t.Fatalf("expected compose support")
+ }
+ if !p.SupportsDeploymentTarget(pullpreview.DeploymentTargetHelm) {
+ t.Fatalf("expected helm support")
+ }
+}
+
// TestBuildUserDataForHelmUsesSharedBootstrap checks that the helm-target
// user data contains the k3s/helm installer fragments plus the Lightsail
// host-tuning lines (swapfile, tmp.mount masking).
func TestBuildUserDataForHelmUsesSharedBootstrap(t *testing.T) {
	p := &Provider{}
	script, err := p.BuildUserData(pullpreview.UserDataOptions{
		AppPath:          "/app",
		Username:         "ec2-user",
		DeploymentTarget: pullpreview.DeploymentTargetHelm,
	})
	if err != nil {
		t.Fatalf("BuildUserData() error: %v", err)
	}
	for _, fragment := range []string{
		"--write-kubeconfig-mode 0644",
		"get-helm-3",
		"test -s /swapfile",
		"systemctl mask tmp.mount",
	} {
		if !strings.Contains(script, fragment) {
			t.Fatalf("expected script to contain %q, script:\n%s", fragment, script)
		}
	}
}
+
// TestLaunchInstanceCreatesFreshInstanceForHelm verifies that launching with
// a helm target issues exactly one CreateInstances call against the fake.
func TestLaunchInstanceCreatesFreshInstanceForHelm(t *testing.T) {
	client := &fakeLightsailClient{}
	p := &Provider{client: client, ctx: context.Background(), region: DefaultRegion}

	err := p.launchInstance("demo", pullpreview.LaunchOptions{
		Tags: map[string]string{"pullpreview_target": "helm"},
	})
	if err != nil {
		t.Fatalf("launchInstance() error: %v", err)
	}
	if client.createInstancesCalls != 1 {
		t.Fatalf("expected create instance call, got %d", client.createInstancesCalls)
	}
}
+
// TestLaunchInstanceCreatesFreshInstanceForCompose mirrors the helm case:
// a compose-target launch issues exactly one CreateInstances call.
func TestLaunchInstanceCreatesFreshInstanceForCompose(t *testing.T) {
	client := &fakeLightsailClient{}
	p := &Provider{client: client, ctx: context.Background(), region: DefaultRegion}

	err := p.launchInstance("demo", pullpreview.LaunchOptions{
		Tags: map[string]string{"pullpreview_target": "compose"},
	})
	if err != nil {
		t.Fatalf("launchInstance() error: %v", err)
	}
	if client.createInstancesCalls != 1 {
		t.Fatalf("expected create instance call, got %d", client.createInstancesCalls)
	}
}
+
// TestLaunchRecreatesMismatchedDeploymentIdentity seeds the fake with an
// existing instance tagged helm/k3s, then launches with compose/docker tags
// and asserts the provider deletes, recreates, and re-opens the firewall.
func TestLaunchRecreatesMismatchedDeploymentIdentity(t *testing.T) {
	name := "gh-1-pr-1"
	client := &fakeLightsailClient{
		getInstanceByName: map[string]*types.Instance{
			name: {
				Name: aws.String(name),
				Tags: []types.Tag{
					{Key: strPtr("pullpreview_label"), Value: strPtr("pullpreview-helm")},
					{Key: strPtr("pullpreview_target"), Value: strPtr("helm")},
					{Key: strPtr("pullpreview_runtime"), Value: strPtr("k3s")},
				},
			},
		},
	}
	p := &Provider{client: client, ctx: context.Background(), region: DefaultRegion}

	// Requested identity disagrees with the stored tags on all three axes.
	_, err := p.Launch(name, pullpreview.LaunchOptions{
		Tags: map[string]string{
			"pullpreview_label":   "pullpreview",
			"pullpreview_target":  "compose",
			"pullpreview_runtime": "docker",
		},
	})
	if err != nil {
		t.Fatalf("Launch() error: %v", err)
	}
	if client.deleteInstanceCalls != 1 {
		t.Fatalf("expected mismatched instance delete, got %d", client.deleteInstanceCalls)
	}
	if client.createInstancesCalls != 1 {
		t.Fatalf("expected recreate after delete, got %d", client.createInstancesCalls)
	}
	if client.putInstancePublicPortsCalls != 1 {
		t.Fatalf("expected firewall setup after recreate, got %d", client.putInstancePublicPortsCalls)
	}
}
+
func strPtr(v string) *string { return &v }
diff --git a/internal/providers/lightsail/provider.go b/internal/providers/lightsail/provider.go
index 460e28d..9b16eb5 100644
--- a/internal/providers/lightsail/provider.go
+++ b/internal/providers/lightsail/provider.go
@@ -66,25 +66,36 @@ func (p *Provider) DisplayName() string {
return "AWS Lightsail"
}
-func (p *Provider) SupportsSnapshots() bool {
- return true
-}
-
-func (p *Provider) SupportsRestore() bool {
+func (p *Provider) SupportsFirewall() bool {
return true
}
-func (p *Provider) SupportsFirewall() bool {
- return true
+func (p *Provider) SupportsDeploymentTarget(target pullpreview.DeploymentTarget) bool {
+ switch pullpreview.NormalizeDeploymentTarget(string(target)) {
+ case pullpreview.DeploymentTargetCompose, pullpreview.DeploymentTargetHelm:
+ return true
+ default:
+ return false
+ }
}
func (p *Provider) BuildUserData(options pullpreview.UserDataOptions) (string, error) {
- script := pullpreview.UserData{
- AppPath: options.AppPath,
- SSHPublicKeys: options.SSHPublicKeys,
- Username: options.Username,
- }
- return script.Script(), nil
+ return pullpreview.BuildBootstrapScript(pullpreview.BootstrapOptions{
+ AppPath: options.AppPath,
+ Username: options.Username,
+ SSHPublicKeys: options.SSHPublicKeys,
+ DeploymentTarget: options.DeploymentTarget,
+ ImageName: "amazon-linux-2023",
+ HostTuning: []string{
+ "test -s /swapfile || ( fallocate -l 2G /swapfile && chmod 600 /swapfile && mkswap /swapfile && swapon /swapfile && echo '/swapfile none swap sw 0 0' | tee -a /etc/fstab )",
+ "systemctl disable --now tmp.mount",
+ "systemctl mask tmp.mount",
+ "sysctl vm.swappiness=10 && sysctl vm.vfs_cache_pressure=50",
+ "echo 'vm.swappiness=10' | tee -a /etc/sysctl.conf",
+ "echo 'vm.vfs_cache_pressure=50' | tee -a /etc/sysctl.conf",
+ },
+ PropagateRootSSH: true,
+ })
}
func init() {
diff --git a/internal/pullpreview/bootstrap.go b/internal/pullpreview/bootstrap.go
new file mode 100644
index 0000000..8ab0b42
--- /dev/null
+++ b/internal/pullpreview/bootstrap.go
@@ -0,0 +1,192 @@
+package pullpreview
+
+import (
+ "fmt"
+ "strings"
+)
+
// BootstrapOptions controls how the shared first-boot (user-data) script is
// rendered for a preview host.
type BootstrapOptions struct {
	AppPath          string           // remote app directory; defaults to remoteAppPath when blank
	Username         string           // remote login user; defaults to "root" when blank
	SSHPublicKeys    []string         // extra keys appended to authorized_keys
	DeploymentTarget DeploymentTarget // compose (default) or helm runtime selection
	ImageName        string           // provider image name, exported as IMAGE_NAME in the script
	HostTuning       []string         // provider-specific shell lines run before package prep
	TrustedUserCAKey string           // optional SSH user CA public key trusted via sshd_config.d
	PropagateRootSSH bool             // copy /root/.ssh/authorized_keys to the user when not root
}
+
// UserData is the legacy compose-only bootstrap input; its Script method
// delegates to BuildBootstrapScript with the compose target.
type UserData struct {
	AppPath       string
	SSHPublicKeys []string
	Username      string
}
+
+func (u UserData) Script() string {
+ script, _ := BuildBootstrapScript(BootstrapOptions{
+ AppPath: u.AppPath,
+ Username: u.Username,
+ SSHPublicKeys: u.SSHPublicKeys,
+ DeploymentTarget: DeploymentTargetCompose,
+ PropagateRootSSH: true,
+ })
+ return script
+}
+
// BuildBootstrapScript renders the shared first-boot shell script for a
// preview host: SSH key setup, app directory creation, provider host tuning,
// package prep, and either the docker-compose or k3s/helm runtime. The only
// error path is an invalid deployment target (blank defaults to compose).
func BuildBootstrapScript(opts BootstrapOptions) (string, error) {
	target := NormalizeDeploymentTarget(string(opts.DeploymentTarget))
	if target == "" {
		target = DeploymentTargetCompose
	}
	if err := target.Validate(); err != nil {
		return "", err
	}

	// Blank username/app path fall back to root and the shared remote path.
	username := strings.TrimSpace(opts.Username)
	if username == "" {
		username = "root"
	}
	appPath := strings.TrimSpace(opts.AppPath)
	if appPath == "" {
		appPath = remoteAppPath
	}
	homeDir := HomeDirForUser(username)

	lines := []string{
		"#!/usr/bin/env bash",
		"set -xe ; set -o pipefail",
		fmt.Sprintf("mkdir -p %s/.ssh", homeDir),
	}
	// Cloud images often seed only /root/.ssh; optionally mirror it to the
	// login user so the same key pair works for both accounts.
	if opts.PropagateRootSSH && username != "root" {
		lines = append(lines,
			"if [ -f /root/.ssh/authorized_keys ]; then",
			fmt.Sprintf("  cp /root/.ssh/authorized_keys %s/.ssh/authorized_keys", homeDir),
			"fi",
		)
	}
	if len(opts.SSHPublicKeys) > 0 {
		// NOTE(review): keys are embedded inside single quotes; a key that
		// itself contains a literal ' would break the echo — confirm keys are
		// validated upstream.
		lines = append(lines, fmt.Sprintf("echo '%s' >> %s/.ssh/authorized_keys", strings.Join(opts.SSHPublicKeys, "\n"), homeDir))
	}
	if username != "root" || len(opts.SSHPublicKeys) > 0 {
		lines = append(lines,
			fmt.Sprintf("chown -R %s:%s %s/.ssh", username, username, homeDir),
			fmt.Sprintf("chmod 0700 %s/.ssh && chmod 0600 %s/.ssh/authorized_keys", homeDir, homeDir),
		)
	}
	lines = append(lines,
		fmt.Sprintf("mkdir -p %s && chown -R %s:%s %s", appPath, username, username, appPath),
		"mkdir -p /etc/profile.d",
		fmt.Sprintf("echo 'cd %s' > /etc/profile.d/pullpreview.sh", appPath),
	)
	// Provider-supplied tuning runs before any package installation.
	lines = append(lines, opts.HostTuning...)
	lines = append(lines, sharedBootstrapPackagePrep(strings.TrimSpace(opts.ImageName))...)
	switch target {
	case DeploymentTargetHelm:
		lines = append(lines, sharedHelmRuntime(homeDir, username)...)
	default:
		lines = append(lines, sharedComposeRuntime(username)...)
	}
	// Optionally trust an SSH user CA so short-lived certificates can log in.
	if strings.TrimSpace(opts.TrustedUserCAKey) != "" {
		lines = append(lines,
			"mkdir -p /etc/ssh/sshd_config.d",
			fmt.Sprintf("cat <<'EOF' > /etc/ssh/pullpreview-user-ca.pub\n%s\nEOF", strings.TrimSpace(opts.TrustedUserCAKey)),
			"cat <<'EOF' > /etc/ssh/sshd_config.d/pullpreview.conf",
			"TrustedUserCAKeys /etc/ssh/pullpreview-user-ca.pub",
			"EOF",
			"systemctl restart ssh || systemctl restart sshd || true",
		)
	}
	// /etc/pullpreview/ready is the marker deploy tooling waits for.
	lines = append(lines,
		"mkdir -p /etc/pullpreview && touch /etc/pullpreview/ready",
		fmt.Sprintf("chown -R %s:%s /etc/pullpreview", username, username),
	)
	return strings.Join(lines, "\n"), nil
}
+
// sharedBootstrapPackagePrep returns shell lines that export IMAGE_NAME and
// install base packages (CA certs, curl, repo tooling) for apt, dnf, or yum
// systems; any other OS family aborts the bootstrap with exit 1.
func sharedBootstrapPackagePrep(imageName string) []string {
	return []string{
		fmt.Sprintf("IMAGE_NAME=%q", imageName),
		"if command -v apt-get >/dev/null 2>&1; then",
		" mkdir -p /etc/apt/keyrings",
		" install -m 0755 -d /etc/apt/keyrings",
		" apt-get update",
		" apt-get install -y ca-certificates curl gnupg lsb-release",
		"elif command -v dnf >/dev/null 2>&1; then",
		" dnf -y install ca-certificates dnf-plugins-core",
		" if ! command -v curl >/dev/null 2>&1; then",
		" dnf -y install curl-minimal || dnf -y install curl",
		" fi",
		"elif command -v yum >/dev/null 2>&1; then",
		" yum -y install ca-certificates yum-utils",
		" if ! command -v curl >/dev/null 2>&1; then",
		" yum -y install curl",
		" fi",
		"else",
		" echo \"unsupported OS family; expected apt, dnf, or yum\"",
		" exit 1",
		"fi",
	}
}
+
// sharedComposeRuntime returns shell lines installing Docker + the compose
// plugin: Docker's apt repo on Debian/Ubuntu, the distro "docker" package on
// Amazon Linux, and Docker's CE repo elsewhere; a compose CLI plugin is
// fetched from GitHub if the package did not provide one. A non-root user is
// added to the docker group, and a daily prune cron is installed.
func sharedComposeRuntime(username string) []string {
	lines := []string{
		"if command -v apt-get >/dev/null 2>&1; then",
		" if echo \"$IMAGE_NAME\" | grep -iq ubuntu; then",
		" DISTRO=ubuntu",
		" else",
		" DISTRO=debian",
		" fi",
		" curl -fsSL https://download.docker.com/linux/$DISTRO/gpg -o /etc/apt/keyrings/docker.asc",
		" chmod a+r /etc/apt/keyrings/docker.asc",
		" echo \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/$DISTRO $(lsb_release -cs) stable\" > /etc/apt/sources.list.d/docker.list",
		" apt-get update",
		" apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin",
		"elif command -v dnf >/dev/null 2>&1; then",
		" if echo \"$IMAGE_NAME\" | grep -Eiq 'amazon[- ]linux'; then",
		" yum -y install docker",
		" else",
		" dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo",
		" dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin",
		" fi",
		"elif command -v yum >/dev/null 2>&1; then",
		" if echo \"$IMAGE_NAME\" | grep -Eiq 'amazon[- ]linux'; then",
		" yum -y install docker",
		" else",
		" yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo",
		" yum -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin",
		" fi",
		"fi",
		"if ! docker compose version >/dev/null 2>&1; then",
		" case \"$(uname -m)\" in",
		" x86_64|amd64) compose_arch=x86_64 ;;",
		" aarch64|arm64) compose_arch=aarch64 ;;",
		" *) echo \"unsupported compose architecture $(uname -m)\"; exit 1 ;;",
		" esac",
		" mkdir -p /usr/local/lib/docker/cli-plugins",
		" curl -fsSL \"https://github.com/docker/compose/releases/latest/download/docker-compose-linux-${compose_arch}\" -o /usr/local/lib/docker/cli-plugins/docker-compose",
		" chmod +x /usr/local/lib/docker/cli-plugins/docker-compose",
		"fi",
	}
	if strings.TrimSpace(username) != "" && username != "root" {
		lines = append(lines, fmt.Sprintf("usermod -aG docker %s", username))
	}
	lines = append(lines,
		"docker compose version",
		"systemctl enable --now docker || systemctl restart docker",
		"echo 'docker system prune -f && docker image prune -a --filter=\"until=96h\" --force' > /etc/cron.daily/docker-prune && chmod a+x /etc/cron.daily/docker-prune",
	)
	return lines
}
+
// sharedHelmRuntime returns shell lines installing k3s (traefik disabled,
// world-readable kubeconfig) and Helm 3, copying the kubeconfig into the
// user's ~/.kube, and blocking until the node reports Ready.
func sharedHelmRuntime(homeDir, username string) []string {
	return []string{
		"curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC='server --disable traefik --write-kubeconfig-mode 0644' sh -",
		"curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash",
		fmt.Sprintf("mkdir -p %s/.kube", homeDir),
		fmt.Sprintf("cp /etc/rancher/k3s/k3s.yaml %s/.kube/config", homeDir),
		fmt.Sprintf("chown -R %s:%s %s/.kube", username, username, homeDir),
		"export KUBECONFIG=/etc/rancher/k3s/k3s.yaml",
		"until kubectl get nodes >/dev/null 2>&1; do sleep 5; done",
		// Wait for the Ready condition to be True on every node.
		"until kubectl get nodes -o jsonpath='{range .items[*]}{range .status.conditions[?(@.type==\"Ready\")]}{.status}{\"\\n\"}{end}{end}' | grep -q True; do sleep 5; done",
	}
}
diff --git a/internal/pullpreview/bootstrap_test.go b/internal/pullpreview/bootstrap_test.go
new file mode 100644
index 0000000..9d9c560
--- /dev/null
+++ b/internal/pullpreview/bootstrap_test.go
@@ -0,0 +1,141 @@
+package pullpreview
+
+import (
+ "strings"
+ "testing"
+)
+
// TestBuildBootstrapScriptComposeIncludesSharedRuntime asserts the compose
// bootstrap contains the shared SSH/app-dir/docker fragments and does not
// pull in the k3s installer.
func TestBuildBootstrapScriptComposeIncludesSharedRuntime(t *testing.T) {
	script, err := BuildBootstrapScript(BootstrapOptions{
		AppPath:          "/app",
		Username:         "ec2-user",
		SSHPublicKeys:    []string{"ssh-ed25519 AAA", "ssh-rsa BBB"},
		DeploymentTarget: DeploymentTargetCompose,
		ImageName:        "ubuntu-24.04",
		PropagateRootSSH: true,
	})
	if err != nil {
		t.Fatalf("BuildBootstrapScript() error: %v", err)
	}

	checks := []string{
		"#!/usr/bin/env bash",
		"cp /root/.ssh/authorized_keys /home/ec2-user/.ssh/authorized_keys",
		"echo 'ssh-ed25519 AAA\nssh-rsa BBB' >> /home/ec2-user/.ssh/authorized_keys",
		"mkdir -p /app && chown -R ec2-user:ec2-user /app",
		"IMAGE_NAME=\"ubuntu-24.04\"",
		"if command -v apt-get >/dev/null 2>&1; then",
		"elif command -v dnf >/dev/null 2>&1; then",
		"elif command -v yum >/dev/null 2>&1; then",
		"docker-compose-plugin",
		"usermod -aG docker ec2-user",
		"docker system prune -f",
	}
	for _, fragment := range checks {
		if !strings.Contains(script, fragment) {
			t.Fatalf("expected compose bootstrap to contain %q, script:\n%s", fragment, script)
		}
	}
	if strings.Contains(script, "INSTALL_K3S_EXEC='server --disable traefik") {
		t.Fatalf("did not expect k3s install in compose bootstrap:\n%s", script)
	}
}
+
// TestBuildBootstrapScriptComposeOnAmazonLinuxUsesNativeDockerPackage asserts
// the Amazon Linux path installs the distro docker package (not Docker CE)
// and fetches the compose plugin from GitHub as a fallback.
func TestBuildBootstrapScriptComposeOnAmazonLinuxUsesNativeDockerPackage(t *testing.T) {
	script, err := BuildBootstrapScript(BootstrapOptions{
		AppPath:          "/app",
		Username:         "ec2-user",
		DeploymentTarget: DeploymentTargetCompose,
		ImageName:        "amazon-linux-2023",
		PropagateRootSSH: true,
	})
	if err != nil {
		t.Fatalf("BuildBootstrapScript() error: %v", err)
	}

	checks := []string{
		"dnf -y install ca-certificates dnf-plugins-core",
		"if ! command -v curl >/dev/null 2>&1; then",
		"dnf -y install curl-minimal || dnf -y install curl",
		"if echo \"$IMAGE_NAME\" | grep -Eiq 'amazon[- ]linux'; then",
		"yum -y install docker",
		"https://github.com/docker/compose/releases/latest/download/docker-compose-linux-${compose_arch}",
		"docker compose version",
	}
	for _, fragment := range checks {
		if !strings.Contains(script, fragment) {
			t.Fatalf("expected amazon linux compose bootstrap to contain %q, script:\n%s", fragment, script)
		}
	}
}
+
// TestBuildBootstrapScriptHelmIncludesReadableKubeconfig asserts the helm
// bootstrap installs k3s/helm, copies the kubeconfig to the user's ~/.kube,
// and does not install docker compose.
func TestBuildBootstrapScriptHelmIncludesReadableKubeconfig(t *testing.T) {
	script, err := BuildBootstrapScript(BootstrapOptions{
		AppPath:          "/app",
		Username:         "ec2-user",
		DeploymentTarget: DeploymentTargetHelm,
		ImageName:        "amazon-linux-2023",
		PropagateRootSSH: true,
	})
	if err != nil {
		t.Fatalf("BuildBootstrapScript() error: %v", err)
	}

	checks := []string{
		"--write-kubeconfig-mode 0644",
		"get-helm-3",
		"mkdir -p /home/ec2-user/.kube",
		"cp /etc/rancher/k3s/k3s.yaml /home/ec2-user/.kube/config",
		"chown -R ec2-user:ec2-user /home/ec2-user/.kube",
	}
	for _, fragment := range checks {
		if !strings.Contains(script, fragment) {
			t.Fatalf("expected helm bootstrap to contain %q, script:\n%s", fragment, script)
		}
	}
	if strings.Contains(script, "docker-compose-plugin") {
		t.Fatalf("did not expect docker compose install in helm bootstrap:\n%s", script)
	}
}
+
// TestBuildBootstrapScriptSupportsProviderHooks asserts HostTuning lines and
// the TrustedUserCAKey sshd config are rendered, and that provider hooks do
// not drag in the helm installer for a compose target.
func TestBuildBootstrapScriptSupportsProviderHooks(t *testing.T) {
	script, err := BuildBootstrapScript(BootstrapOptions{
		AppPath:          "/app",
		Username:         "root",
		DeploymentTarget: DeploymentTargetCompose,
		ImageName:        "ubuntu-24.04",
		HostTuning: []string{
			"echo tuning-one",
			"echo tuning-two",
		},
		TrustedUserCAKey: "ssh-ed25519 TEST",
		PropagateRootSSH: true,
	})
	if err != nil {
		t.Fatalf("BuildBootstrapScript() error: %v", err)
	}

	for _, fragment := range []string{"echo tuning-one", "echo tuning-two", "TrustedUserCAKeys /etc/ssh/pullpreview-user-ca.pub"} {
		if !strings.Contains(script, fragment) {
			t.Fatalf("expected bootstrap hook fragment %q, script:\n%s", fragment, script)
		}
	}
	if strings.Contains(script, "get-helm-3") {
		t.Fatalf("did not expect helm installer in compose bootstrap with hooks, script:\n%s", script)
	}
}
+
// TestUserDataScriptUsesSharedComposeBootstrap checks the legacy UserData
// wrapper still renders the shared compose bootstrap (keys + docker compose).
func TestUserDataScriptUsesSharedComposeBootstrap(t *testing.T) {
	script := UserData{
		AppPath:       "/app",
		SSHPublicKeys: []string{"ssh-ed25519 ROOT"},
		Username:      "root",
	}.Script()

	if !strings.Contains(script, "echo 'ssh-ed25519 ROOT' >> /root/.ssh/authorized_keys") {
		t.Fatalf("expected shared user data to populate authorized_keys, script:\n%s", script)
	}
	if !strings.Contains(script, "docker-compose-plugin") {
		t.Fatalf("expected shared compose bootstrap in user data, script:\n%s", script)
	}
}
diff --git a/internal/pullpreview/deploy_context.go b/internal/pullpreview/deploy_context.go
index 940a02b..2677b56 100644
--- a/internal/pullpreview/deploy_context.go
+++ b/internal/pullpreview/deploy_context.go
@@ -67,6 +67,9 @@ func (i *Instance) writeRemoteEnvFile() (map[string]string, error) {
fmt.Sprintf("PULLPREVIEW_PUBLIC_IP=%s", envValues["PULLPREVIEW_PUBLIC_IP"]),
fmt.Sprintf("PULLPREVIEW_URL=%s", envValues["PULLPREVIEW_URL"]),
fmt.Sprintf("PULLPREVIEW_FIRST_RUN=%s", envValues["PULLPREVIEW_FIRST_RUN"]),
+ fmt.Sprintf("PULLPREVIEW_DEPLOYMENT_TARGET=%s", envValues["PULLPREVIEW_DEPLOYMENT_TARGET"]),
+ fmt.Sprintf("PULLPREVIEW_NAMESPACE=%s", envValues["PULLPREVIEW_NAMESPACE"]),
+ fmt.Sprintf("PULLPREVIEW_RELEASE_NAME=%s", envValues["PULLPREVIEW_RELEASE_NAME"]),
fmt.Sprintf("COMPOSE_FILE=%s", envValues["COMPOSE_FILE"]),
"",
}, "\n")
@@ -74,10 +77,10 @@ func (i *Instance) writeRemoteEnvFile() (map[string]string, error) {
return nil, err
}
user := i.Username()
- command := fmt.Sprintf(
- "sudo mkdir -p /etc/pullpreview && sudo mv /tmp/pullpreview_env %s && sudo chown %s:%s %s && sudo chmod 0644 %s",
- remoteEnvPath, user, user, remoteEnvPath, remoteEnvPath,
- )
+ command := fmt.Sprintf(
+ "sudo mkdir -p /etc/pullpreview && sudo mv /tmp/pullpreview_env %s && sudo chown %s:%s %s && sudo chmod 0644 %s",
+ remoteEnvPath, user, user, remoteEnvPath, remoteEnvPath,
+ )
if err := i.SSH(command, nil); err != nil {
return nil, err
}
@@ -137,12 +140,19 @@ func (i *Instance) composeConfigForRemoteContext(appPath string, pullpreviewEnv
}
func (i *Instance) pullpreviewEnvValues(firstRun string) map[string]string {
+ composeFile := ""
+ if i.DeploymentTarget == DeploymentTargetCompose {
+ composeFile = strings.Join(i.ComposeFiles, ":")
+ }
return map[string]string{
- "PULLPREVIEW_PUBLIC_DNS": i.PublicDNS(),
- "PULLPREVIEW_PUBLIC_IP": i.PublicIP(),
- "PULLPREVIEW_URL": i.URL(),
- "PULLPREVIEW_FIRST_RUN": firstRun,
- "COMPOSE_FILE": strings.Join(i.ComposeFiles, ":"),
+ "PULLPREVIEW_PUBLIC_DNS": i.PublicDNS(),
+ "PULLPREVIEW_PUBLIC_IP": i.PublicIP(),
+ "PULLPREVIEW_URL": i.URL(),
+ "PULLPREVIEW_FIRST_RUN": firstRun,
+ "PULLPREVIEW_DEPLOYMENT_TARGET": string(i.DeploymentTarget),
+ "PULLPREVIEW_NAMESPACE": i.HelmNamespace(),
+ "PULLPREVIEW_RELEASE_NAME": helmReleaseName,
+ "COMPOSE_FILE": composeFile,
}
}
@@ -268,6 +278,25 @@ func (i *Instance) syncRemoteBindMountSources(syncPlan []bindMountSync) error {
return nil
}
// syncRemotePath copies one local file or directory to remoteSource on the
// instance: it stats the local path, ensures the remote target directories
// exist, then rsyncs the entry via the shared bind-mount machinery.
func (i *Instance) syncRemotePath(localSource, remoteSource string) error {
	info, err := os.Stat(localSource)
	if err != nil {
		return err
	}
	entry := bindMountSync{
		LocalSource:  localSource,
		RemoteSource: remoteSource,
		IsDir:        info.IsDir(),
	}
	if i.Logger != nil {
		i.Logger.Infof("Syncing local path to remote host local=%s remote=%s", localSource, remoteSource)
	}
	if err := i.ensureRemoteBindMountTargets([]bindMountSync{entry}); err != nil {
		return err
	}
	return i.rsyncBindMount(entry)
}
+
func (i *Instance) ensureRemoteBindMountTargets(syncPlan []bindMountSync) error {
user := i.Username()
remoteDirs := map[string]struct{}{
@@ -289,13 +318,13 @@ func (i *Instance) ensureRemoteBindMountTargets(syncPlan []bindMountSync) error
for _, dir := range ordered {
quoted = append(quoted, shellQuote(dir))
}
- command := fmt.Sprintf(
- "sudo mkdir -p %s && sudo chown %s:%s %s",
- strings.Join(quoted, " "),
- user,
- user,
- strings.Join(quoted, " "),
- )
+ command := fmt.Sprintf(
+ "sudo mkdir -p %s && sudo chown %s:%s %s",
+ strings.Join(quoted, " "),
+ user,
+ user,
+ strings.Join(quoted, " "),
+ )
return i.SSH(command, nil)
}
@@ -366,11 +395,13 @@ func (i *Instance) inlinePreScript(appPath string) (string, error) {
fmt.Sprintf("source %s", remoteEnvPath),
"set +a",
}
- for _, registry := range ParseRegistryCredentials(i.Registries, i.Logger) {
- lines = append(lines,
- fmt.Sprintf("echo \"Logging into %s...\"", registry.Host),
- fmt.Sprintf("echo \"%s\" | docker login \"%s\" -u \"%s\" --password-stdin", registry.Password, registry.Host, registry.Username),
- )
+ if i.DeploymentTarget == DeploymentTargetCompose {
+ for _, registry := range ParseRegistryCredentials(i.Registries, i.Logger) {
+ lines = append(lines,
+ fmt.Sprintf("echo \"Logging into %s...\"", registry.Host),
+ fmt.Sprintf("echo \"%s\" | docker login \"%s\" -u \"%s\" --password-stdin", registry.Password, registry.Host, registry.Username),
+ )
+ }
}
if strings.TrimSpace(i.PreScript) == "" {
return strings.Join(lines, "\n") + "\n", nil
diff --git a/internal/pullpreview/deploy_context_test.go b/internal/pullpreview/deploy_context_test.go
index 29d7215..b2e387d 100644
--- a/internal/pullpreview/deploy_context_test.go
+++ b/internal/pullpreview/deploy_context_test.go
@@ -244,6 +244,23 @@ func TestInlinePreScriptLoadsLocalScriptContent(t *testing.T) {
}
}
// TestInlinePreScriptSkipsRegistryLoginForHelm verifies that registry
// credentials do not produce docker login lines when the deployment target
// is helm (there is no docker daemon to log into).
func TestInlinePreScriptSkipsRegistryLoginForHelm(t *testing.T) {
	appPath := t.TempDir()
	inst := NewInstance("demo", CommonOptions{
		DeploymentTarget: DeploymentTargetHelm,
		Registries:       []string{"docker://alice:secret@ghcr.io"},
	}, outputTestProvider{}, nil)
	inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "ec2-user"}

	inline, err := inst.inlinePreScript(appPath)
	if err != nil {
		t.Fatalf("inlinePreScript() error: %v", err)
	}
	if strings.Contains(inline, "docker login") {
		t.Fatalf("did not expect docker login in helm pre-script, got %q", inline)
	}
}
+
func TestParseDockerPSOutputJSONLines(t *testing.T) {
raw := strings.Join([]string{
`{"Names":"app-web-1","Status":"Exited (1) 5 seconds ago","Labels":"com.docker.compose.project=app,com.docker.compose.service=web"}`,
diff --git a/internal/pullpreview/deploy_helm.go b/internal/pullpreview/deploy_helm.go
new file mode 100644
index 0000000..bbb34d2
--- /dev/null
+++ b/internal/pullpreview/deploy_helm.go
@@ -0,0 +1,462 @@
+package pullpreview
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
const (
	// helmReleaseName is the fixed Helm release name used for every preview.
	helmReleaseName = "app"
	// helmFailureReportOutputSize caps the diagnostic tail echoed to stderr.
	helmFailureReportOutputSize = 12000
)
+
// helmChartSource describes where a chart comes from and what must be synced
// to the remote host before `helm upgrade` can reference it.
type helmChartSource struct {
	ChartRef     string // value passed to helm (repo ref, OCI ref, or remote path)
	LocalChart   string // local chart path when the chart lives on this machine
	RepoURL      string // chart repository URL when installing from a repo
	RequiresSync bool   // the local chart must be rsynced to the host
	SyncAppTree  bool   // the whole app tree is synced (chart lives inside it)
}
+
// HelmNamespace returns the per-instance Kubernetes namespace: "pp-<name>"
// sanitized by kubernetesName (lowercase alphanumerics and dashes, <= 63).
func (i *Instance) HelmNamespace() string {
	return kubernetesName("pp-" + i.Name)
}
+
+func kubernetesName(value string) string {
+ var b strings.Builder
+ for _, r := range strings.ToLower(strings.TrimSpace(value)) {
+ switch {
+ case r >= 'a' && r <= 'z':
+ b.WriteRune(r)
+ case r >= '0' && r <= '9':
+ b.WriteRune(r)
+ default:
+ b.WriteByte('-')
+ }
+ }
+ name := strings.Trim(b.String(), "-")
+ name = strings.Join(strings.FieldsFunc(name, func(r rune) bool { return r == '-' }), "-")
+ if name == "" {
+ name = "app"
+ }
+ if len(name) <= 63 {
+ return name
+ }
+ sum := fmt.Sprintf("%x", sha1.Sum([]byte(name)))
+ return strings.Trim(name[:54], "-") + "-" + sum[:8]
+}
+
// deploymentPlaceholderReplacer builds a replacer that expands the supported
// {{ ... }} placeholders (URL, public DNS/IP, namespace, release name) in
// both spaced and unspaced forms.
func (i *Instance) deploymentPlaceholderReplacer() *strings.Replacer {
	return strings.NewReplacer(
		"{{ pullpreview_url }}", i.URL(),
		"{{pullpreview_url}}", i.URL(),
		"{{ pullpreview_public_dns }}", i.PublicDNS(),
		"{{pullpreview_public_dns}}", i.PublicDNS(),
		"{{ pullpreview_public_ip }}", i.PublicIP(),
		"{{pullpreview_public_ip}}", i.PublicIP(),
		"{{ namespace }}", i.HelmNamespace(),
		"{{namespace}}", i.HelmNamespace(),
		"{{ release_name }}", helmReleaseName,
		"{{release_name}}", helmReleaseName,
	)
}
+
// expandDeploymentValue expands the deployment placeholders in value.
func (i *Instance) expandDeploymentValue(value string) string {
	return i.deploymentPlaceholderReplacer().Replace(value)
}
+
// DeployWithHelm runs a full helm-target deployment: write the remote env
// file, resolve the chart source and value args, sync whatever those need to
// the host, run the pre-script, then perform the helm upgrade. On a failed
// deployment it emits a diagnostics report before returning the error.
func (i *Instance) DeployWithHelm(appPath string) error {
	if _, err := i.writeRemoteEnvFile(); err != nil {
		return err
	}

	chartSource, err := i.resolveHelmChartSource(appPath)
	if err != nil {
		return err
	}
	valueArgs, syncForValues, err := i.helmValueArgs(appPath)
	if err != nil {
		return err
	}

	// Sync the whole app tree when the chart or any values file lives in it.
	if chartSource.SyncAppTree || syncForValues {
		if err := i.syncRemoteAppTree(appPath); err != nil {
			return err
		}
	}
	// A chart outside the app tree is synced individually to its remote path.
	if chartSource.RequiresSync && chartSource.LocalChart != "" && !chartSource.SyncAppTree {
		if err := i.syncRemotePath(chartSource.LocalChart, chartSource.ChartRef); err != nil {
			return err
		}
	}
	if err := i.runRemotePreScript(appPath); err != nil {
		return err
	}
	if err := i.runHelmDeployment(chartSource, valueArgs); err != nil {
		i.emitHelmFailureReport()
		return err
	}
	return nil
}
+
// resolveHelmChartSource classifies i.Chart into one of three sources:
// an OCI reference (used as-is; incompatible with chart_repository), a chart
// in a Helm repository (installed via the "pullpreview" repo alias), or a
// local chart path — inside the app tree (whole tree synced) or outside it
// (synced to a content-addressed path under the remote app dir).
func (i *Instance) resolveHelmChartSource(appPath string) (helmChartSource, error) {
	chart := strings.TrimSpace(i.Chart)
	if strings.HasPrefix(chart, "oci://") {
		if strings.TrimSpace(i.ChartRepository) != "" {
			return helmChartSource{}, fmt.Errorf("chart_repository is not supported with OCI chart references")
		}
		return helmChartSource{ChartRef: chart}, nil
	}
	if repoURL := strings.TrimSpace(i.ChartRepository); repoURL != "" {
		return helmChartSource{
			ChartRef: fmt.Sprintf("pullpreview/%s", strings.TrimLeft(chart, "/")),
			RepoURL:  repoURL,
		}, nil
	}

	// No repo: the chart is a local path, relative paths anchored at appPath.
	absAppPath, err := filepath.Abs(appPath)
	if err != nil {
		return helmChartSource{}, err
	}
	localChart := chart
	if !filepath.IsAbs(localChart) {
		localChart = filepath.Join(absAppPath, localChart)
	}
	localChart = filepath.Clean(localChart)
	if _, err := os.Stat(localChart); err != nil {
		return helmChartSource{}, fmt.Errorf("unable to access chart %s: %w", chart, err)
	}
	if pathWithinRoot(absAppPath, localChart) {
		remoteChart, err := remoteBindSource(localChart, absAppPath, remoteAppPath)
		if err != nil {
			return helmChartSource{}, fmt.Errorf("chart %s: %w", chart, err)
		}
		return helmChartSource{
			ChartRef:     remoteChart,
			LocalChart:   localChart,
			RequiresSync: true,
			SyncAppTree:  true,
		}, nil
	}
	return helmChartSource{
		ChartRef:     externalHelmChartPath(localChart),
		LocalChart:   localChart,
		RequiresSync: true,
	}, nil
}
+
// pathWithinRoot reports whether candidate is root itself or lives inside
// root, using the lexical relative path (no symlink resolution).
func pathWithinRoot(root, candidate string) bool {
	rel, err := filepath.Rel(filepath.Clean(root), filepath.Clean(candidate))
	if err != nil {
		return false
	}
	if rel == "." {
		return true
	}
	escapes := rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator))
	return !escapes
}
+
// externalHelmChartPath maps a chart outside the app tree to a stable remote
// location under <remoteAppPath>/.pullpreview/charts, keyed by 12 hex chars
// of the SHA-1 of the cleaned local path plus a sanitized basename.
func externalHelmChartPath(localChart string) string {
	sum := fmt.Sprintf("%x", sha1.Sum([]byte(filepath.Clean(localChart))))
	name := sanitizeRemotePathComponent(filepath.Base(localChart))
	return remoteAppPath + "/.pullpreview/charts/" + sum[:12] + "/" + name
}
+
// sanitizeRemotePathComponent makes value safe as a single remote path
// segment: whitespace is trimmed, any character outside [A-Za-z0-9._-] is
// replaced with '-', and leading/trailing '-' and '.' are stripped. If
// nothing survives, "chart" is returned.
func sanitizeRemotePathComponent(value string) string {
	safe := strings.Map(func(r rune) rune {
		switch {
		case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r >= '0' && r <= '9':
			return r
		case r == '.' || r == '-' || r == '_':
			return r
		default:
			return '-'
		}
	}, strings.TrimSpace(value))
	cleaned := strings.Trim(safe, "-.")
	if cleaned == "" {
		return "chart"
	}
	return cleaned
}
+
// helmValueArgs builds the --values/--set arguments for helm. Values files
// are resolved relative to appPath, must exist locally, and are rewritten to
// their remote paths (the returned bool reports whether the app tree must be
// synced so those files exist remotely). --set entries get placeholder
// expansion but no sync requirement.
func (i *Instance) helmValueArgs(appPath string) ([]string, bool, error) {
	if len(i.ChartValues) == 0 && len(i.ChartSet) == 0 {
		return nil, false, nil
	}

	absAppPath, err := filepath.Abs(appPath)
	if err != nil {
		return nil, false, err
	}

	args := []string{}
	requiresSync := false
	for _, raw := range i.ChartValues {
		valuePath := strings.TrimSpace(raw)
		if valuePath == "" {
			continue
		}
		if !filepath.IsAbs(valuePath) {
			valuePath = filepath.Join(absAppPath, valuePath)
		}
		valuePath = filepath.Clean(valuePath)
		if _, err := os.Stat(valuePath); err != nil {
			return nil, false, fmt.Errorf("unable to access chart values file %s: %w", raw, err)
		}
		remotePath, err := remoteBindSource(valuePath, absAppPath, remoteAppPath)
		if err != nil {
			return nil, false, fmt.Errorf("chart values %s: %w", raw, err)
		}
		args = append(args, "--values", remotePath)
		requiresSync = true
	}
	for _, raw := range i.ChartSet {
		value := strings.TrimSpace(raw)
		if value == "" {
			continue
		}
		args = append(args, "--set", i.expandDeploymentValue(value))
	}
	return args, requiresSync, nil
}
+
// syncRemoteAppTree mirrors the local app directory to remoteAppPath via
// rsync --delete over SSH, using temporary key (and optional certificate)
// files for authentication. appPath must be a directory.
func (i *Instance) syncRemoteAppTree(appPath string) error {
	absAppPath, err := filepath.Abs(appPath)
	if err != nil {
		return err
	}
	info, err := os.Stat(absAppPath)
	if err != nil {
		return err
	}
	if !info.IsDir() {
		return fmt.Errorf("app_path %s must be a directory for deployment_target=helm", appPath)
	}
	if i.Logger != nil {
		i.Logger.Infof("Syncing app directory to remote host local=%s remote=%s", absAppPath, remoteAppPath)
	}
	if err := i.ensureRemoteAppRoot(); err != nil {
		return err
	}

	// Key material is written to temp files for the ssh transport and
	// removed again once rsync finishes.
	keyFile, certFile, err := i.writeTempKeys()
	if err != nil {
		return err
	}
	defer func() {
		_ = os.Remove(keyFile)
		if certFile != "" {
			_ = os.Remove(certFile)
		}
	}()

	sshArgs := []string{
		"ssh",
		"-o", "ServerAliveInterval=15",
		"-o", "IdentitiesOnly=yes",
		"-i", keyFile,
	}
	if certFile != "" {
		sshArgs = append(sshArgs, "-o", "CertificateFile="+certFile)
	}
	sshArgs = append(sshArgs, i.SSHOptions()...)

	// --delete keeps the remote tree an exact mirror; ownership/permission
	// flags avoid fighting the remote user's umask.
	cmd := exec.CommandContext(i.Context, "rsync",
		"-az",
		"--delete",
		"--links",
		"--omit-dir-times",
		"--no-perms",
		"--no-owner",
		"--no-group",
		"--exclude=.git/",
		"-e", strings.Join(sshArgs, " "),
		ensureTrailingSlash(absAppPath),
		fmt.Sprintf("%s:%s/", i.SSHAddress(), remoteAppPath),
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := i.Runner.Run(cmd); err != nil {
		return fmt.Errorf("rsync %s -> %s failed: %w", absAppPath, remoteAppPath, err)
	}
	return nil
}
+
// ensureRemoteAppRoot creates remoteAppPath on the host and chowns it to the
// SSH user so rsync can write without sudo.
func (i *Instance) ensureRemoteAppRoot() error {
	user := i.Username()
	command := fmt.Sprintf(
		"sudo mkdir -p %s && sudo chown %s:%s %s",
		shellQuote(remoteAppPath),
		user,
		user,
		shellQuote(remoteAppPath),
	)
	return i.SSH(command, nil)
}
+
// runHelmDeployment executes the remote deployment script: ensure the
// namespace, optionally register the chart repo and build dependencies, run
// `helm upgrade --install --wait --atomic`, then apply the Caddy reverse
// proxy and wait for its rollout. The whole script runs in one SSH session.
func (i *Instance) runHelmDeployment(source helmChartSource, valueArgs []string) error {
	namespace := i.HelmNamespace()
	target, err := parseProxyTLSTarget(i.expandDeploymentValue(i.ProxyTLS))
	if err != nil {
		return err
	}
	// Bare service names are qualified into the preview namespace so the
	// host-network Caddy pod can resolve them via cluster DNS.
	upstreamHost := target.Service
	if !strings.Contains(upstreamHost, ".") {
		upstreamHost = fmt.Sprintf("%s.%s.svc.cluster.local", upstreamHost, namespace)
	}

	lines := []string{
		"set -euo pipefail",
		"set -a",
		fmt.Sprintf("source %s", remoteEnvPath),
		"set +a",
		"export KUBECONFIG=/etc/rancher/k3s/k3s.yaml",
		fmt.Sprintf("kubectl create namespace %s --dry-run=client -o yaml | kubectl apply -f - >/dev/null", shellQuote(namespace)),
	}
	if source.RepoURL != "" {
		lines = append(lines,
			fmt.Sprintf("helm repo add pullpreview %s --force-update >/dev/null", shellQuote(source.RepoURL)),
			"helm repo update pullpreview >/dev/null",
		)
	}
	if source.LocalChart != "" {
		lines = append(lines, fmt.Sprintf("helm dependency build %s >/dev/null", shellQuote(source.ChartRef)))
	}

	// NOTE(review): --create-namespace looks redundant with the explicit
	// kubectl create above — confirm before removing either.
	helmArgs := []string{
		"helm", "upgrade", "--install", helmReleaseName, source.ChartRef,
		"--namespace", namespace,
		"--create-namespace",
		"--wait",
		"--atomic",
	}
	helmArgs = append(helmArgs, valueArgs...)
	lines = append(lines, shellJoin(helmArgs...))

	manifest := i.renderHelmCaddyManifest(namespace, upstreamHost, target.Port)
	lines = append(lines,
		"cat <<'EOF' >/tmp/pullpreview-caddy.yaml",
		manifest,
		"EOF",
		"kubectl apply -f /tmp/pullpreview-caddy.yaml >/dev/null",
		fmt.Sprintf("kubectl rollout status deployment/pullpreview-caddy -n %s --timeout=10m", shellQuote(namespace)),
	)

	if i.Logger != nil {
		i.Logger.Infof("Deploying Helm release=%s namespace=%s chart=%s", helmReleaseName, namespace, source.ChartRef)
	}
	return i.SSH("bash -se", bytes.NewBufferString(strings.Join(lines, "\n")+"\n"))
}
+
+func shellJoin(args ...string) string {
+ quoted := make([]string, 0, len(args))
+ for _, arg := range args {
+ quoted = append(quoted, shellQuote(arg))
+ }
+ return strings.Join(quoted, " ")
+}
+
// renderHelmCaddyManifest renders a ConfigMap + single-replica Deployment
// running Caddy on the host network (ports 80/443) that reverse-proxies the
// public DNS name to the given in-cluster upstream. Certificate data is kept
// on hostPath volumes so TLS material survives pod restarts.
func (i *Instance) renderHelmCaddyManifest(namespace, upstreamHost string, upstreamPort int) string {
	return fmt.Sprintf(`apiVersion: v1
kind: ConfigMap
metadata:
  name: pullpreview-caddy-config
  namespace: %s
data:
  Caddyfile: |
    %s {
      reverse_proxy %s:%d
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pullpreview-caddy
  namespace: %s
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: pullpreview-caddy
  template:
    metadata:
      labels:
        app: pullpreview-caddy
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: caddy
          image: caddy:2-alpine
          command:
            - caddy
          args:
            - run
            - --config
            - /etc/caddy/Caddyfile
            - --adapter
            - caddyfile
          ports:
            - containerPort: 80
              hostPort: 80
              name: http
            - containerPort: 443
              hostPort: 443
              name: https
          volumeMounts:
            - name: config
              mountPath: /etc/caddy/Caddyfile
              subPath: Caddyfile
            - name: data
              mountPath: /data
            - name: runtime
              mountPath: /config
      volumes:
        - name: config
          configMap:
            name: pullpreview-caddy-config
        - name: data
          hostPath:
            path: /var/lib/pullpreview/caddy-data
            type: DirectoryOrCreate
        - name: runtime
          hostPath:
            path: /var/lib/pullpreview/caddy-config
            type: DirectoryOrCreate
`, namespace, i.PublicDNS(), upstreamHost, upstreamPort, namespace)
}
+
+func (i *Instance) emitHelmFailureReport() {
+ namespace := i.HelmNamespace()
+ script := strings.Join([]string{
+ "set +e",
+ "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml",
+ fmt.Sprintf("echo '---- helm status (%s/%s) ----'", namespace, helmReleaseName),
+ fmt.Sprintf("helm status %s -n %s", shellQuote(helmReleaseName), shellQuote(namespace)),
+ fmt.Sprintf("echo '---- kubectl get pods,svc,events (%s) ----'", namespace),
+ fmt.Sprintf("kubectl get pods,svc -n %s -o wide", shellQuote(namespace)),
+ fmt.Sprintf("kubectl get events -n %s --sort-by=.lastTimestamp | tail -n 50", shellQuote(namespace)),
+ fmt.Sprintf("echo '---- failing workload describe (%s) ----'", namespace),
+ fmt.Sprintf("for pod in $(kubectl get pods -n %s --no-headers 2>/dev/null | awk '$2 !~ /^([0-9]+)\\/\\1$/ {print $1}'); do kubectl describe pod -n %s \"$pod\"; kubectl logs -n %s \"$pod\" --all-containers --tail=200; done", shellQuote(namespace), shellQuote(namespace), shellQuote(namespace)),
+ }, "\n") + "\n"
+ output, err := i.SSHOutput("bash -se", bytes.NewBufferString(script))
+ if strings.TrimSpace(output) != "" {
+ if len(output) > helmFailureReportOutputSize {
+ output = output[len(output)-helmFailureReportOutputSize:]
+ }
+ fmt.Fprintln(os.Stderr, output)
+ }
+ if err != nil && i.Logger != nil {
+ i.Logger.Warnf("Unable to capture Helm diagnostics: %v", err)
+ }
+}
diff --git a/internal/pullpreview/deploy_helm_test.go b/internal/pullpreview/deploy_helm_test.go
new file mode 100644
index 0000000..f5bfac9
--- /dev/null
+++ b/internal/pullpreview/deploy_helm_test.go
@@ -0,0 +1,449 @@
+package pullpreview
+
+import (
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// Name reports the provider identifier used in tags and validation paths.
+func (f fakeProvider) Name() string {
+	return "hetzner"
+}
+
+// DisplayName reports the human-readable provider name.
+func (f fakeProvider) DisplayName() string {
+	return "Hetzner Cloud"
+}
+
+// SupportsDeploymentTarget accepts both compose and helm so tests can
+// exercise either deployment path.
+func (f fakeProvider) SupportsDeploymentTarget(target DeploymentTarget) bool {
+	switch NormalizeDeploymentTarget(string(target)) {
+	case DeploymentTargetCompose, DeploymentTargetHelm:
+		return true
+	default:
+		return false
+	}
+}
+
+// fakeLightsailProvider is a no-op provider stub that mimics AWS Lightsail.
+type fakeLightsailProvider struct{}
+
+func (f fakeLightsailProvider) Launch(name string, opts LaunchOptions) (AccessDetails, error) {
+	return AccessDetails{}, nil
+}
+
+func (f fakeLightsailProvider) Terminate(name string) error { return nil }
+
+func (f fakeLightsailProvider) Running(name string) (bool, error) { return false, nil }
+
+func (f fakeLightsailProvider) ListInstances(tags map[string]string) ([]InstanceSummary, error) {
+	return nil, nil
+}
+
+func (f fakeLightsailProvider) Username() string { return "ec2-user" }
+
+func (f fakeLightsailProvider) Name() string {
+	return "lightsail"
+}
+
+func (f fakeLightsailProvider) DisplayName() string {
+	return "AWS Lightsail"
+}
+
+// SupportsDeploymentTarget mirrors fakeProvider: both targets are supported.
+func (f fakeLightsailProvider) SupportsDeploymentTarget(target DeploymentTarget) bool {
+	switch NormalizeDeploymentTarget(string(target)) {
+	case DeploymentTargetCompose, DeploymentTargetHelm:
+		return true
+	default:
+		return false
+	}
+}
+
+// scriptCaptureRunner records every exec.Cmd it is asked to run, capturing
+// the argv and the full stdin payload so tests can assert on generated
+// SSH scripts without executing anything.
+type scriptCaptureRunner struct {
+	args   [][]string
+	inputs []string
+}
+
+// Run records cmd's arguments and drains cmd.Stdin (the script body); it
+// never executes the command.
+func (r *scriptCaptureRunner) Run(cmd *exec.Cmd) error {
+	r.args = append(r.args, append([]string{}, cmd.Args...))
+	if cmd.Stdin != nil {
+		body, err := io.ReadAll(cmd.Stdin)
+		if err != nil {
+			return err
+		}
+		r.inputs = append(r.inputs, string(body))
+	} else {
+		r.inputs = append(r.inputs, "")
+	}
+	return nil
+}
+
+// TestValidateDeploymentConfigForHelm verifies a fully-specified helm config
+// (chart, repository, proxy_tls) passes validation.
+func TestValidateDeploymentConfigForHelm(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ChartRepository:  "https://charts.bitnami.com/bitnami",
+		ProxyTLS:         "{{ release_name }}-wordpress:80",
+	}, fakeProvider{}, nil)
+	inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "root"}
+
+	if err := inst.ValidateDeploymentConfig(); err != nil {
+		t.Fatalf("ValidateDeploymentConfig() error: %v", err)
+	}
+}
+
+// TestValidateDeploymentConfigRejectsHelmWithoutProxyTLS verifies helm
+// deployments require proxy_tls to be set.
+func TestValidateDeploymentConfigRejectsHelmWithoutProxyTLS(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ChartRepository:  "https://charts.bitnami.com/bitnami",
+	}, fakeProvider{}, nil)
+
+	if err := inst.ValidateDeploymentConfig(); err == nil || !strings.Contains(err.Error(), "proxy_tls") {
+		t.Fatalf("expected proxy_tls validation error, got %v", err)
+	}
+}
+
+// TestValidateDeploymentConfigRejectsComposeWithHelmOptions verifies helm-only
+// options (chart) are rejected under the compose target.
+func TestValidateDeploymentConfigRejectsComposeWithHelmOptions(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetCompose,
+		Chart:            "wordpress",
+	}, fakeProvider{}, nil)
+
+	if err := inst.ValidateDeploymentConfig(); err == nil || !strings.Contains(err.Error(), "require deployment_target=helm") {
+		t.Fatalf("expected compose/helm validation error, got %v", err)
+	}
+}
+
+// TestValidateDeploymentConfigAcceptsHelmForLightsailProvider verifies helm is
+// not restricted to the hetzner provider.
+func TestValidateDeploymentConfigAcceptsHelmForLightsailProvider(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ProxyTLS:         "app-wordpress:80",
+	}, fakeLightsailProvider{}, nil)
+
+	if err := inst.ValidateDeploymentConfig(); err != nil {
+		t.Fatalf("expected lightsail helm validation to pass, got %v", err)
+	}
+}
+
+// TestValidateDeploymentConfigRejectsHelmSpecificComposeOverrides verifies
+// compose_files and compose_options are each rejected under the helm target.
+func TestValidateDeploymentConfigRejectsHelmSpecificComposeOverrides(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ProxyTLS:         "app-wordpress:80",
+		ComposeFiles:     []string{"docker-compose.preview.yml"},
+	}, fakeProvider{}, nil)
+
+	if err := inst.ValidateDeploymentConfig(); err == nil || !strings.Contains(err.Error(), "compose_files") {
+		t.Fatalf("expected compose_files validation error, got %v", err)
+	}
+
+	inst = NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ProxyTLS:         "app-wordpress:80",
+		ComposeOptions:   []string{"--no-build"},
+	}, fakeProvider{}, nil)
+
+	if err := inst.ValidateDeploymentConfig(); err == nil || !strings.Contains(err.Error(), "compose_options") {
+		t.Fatalf("expected compose_options validation error, got %v", err)
+	}
+}
+
+// TestValidateDeploymentConfigRejectsHelmRegistries verifies docker registry
+// credentials are rejected under the helm target.
+func TestValidateDeploymentConfigRejectsHelmRegistries(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ProxyTLS:         "app-wordpress:80",
+		Registries:       []string{"docker://alice:secret@ghcr.io"},
+	}, fakeProvider{}, nil)
+
+	if err := inst.ValidateDeploymentConfig(); err == nil || !strings.Contains(err.Error(), "registries") {
+		t.Fatalf("expected registries validation error, got %v", err)
+	}
+}
+
+// TestExpandDeploymentValue verifies placeholder expansion of
+// pullpreview_public_dns, namespace and release_name in option values.
+func TestExpandDeploymentValue(t *testing.T) {
+	inst := NewInstance("Demo App", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ChartRepository:  "https://charts.bitnami.com/bitnami",
+		ProxyTLS:         "{{ release_name }}-wordpress:80",
+		DNS:              "rev2.click",
+	}, fakeProvider{}, nil)
+	inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "root"}
+
+	got := inst.expandDeploymentValue("https://{{ pullpreview_public_dns }}/{{ namespace }}/{{ release_name }}")
+	if got != "https://Demo-App-ip-1-2-3-4.rev2.click/pp-demo-app/app" {
+		t.Fatalf("unexpected expanded value: %q", got)
+	}
+}
+
+// TestResolveHelmChartSourceForLocalChart verifies an in-tree chart resolves
+// to a /app path and requires syncing the full app tree.
+func TestResolveHelmChartSourceForLocalChart(t *testing.T) {
+	appPath := t.TempDir()
+	chartPath := filepath.Join(appPath, "charts", "demo")
+	if err := os.MkdirAll(chartPath, 0755); err != nil {
+		t.Fatalf("mkdir chart path: %v", err)
+	}
+	if err := os.WriteFile(filepath.Join(chartPath, "Chart.yaml"), []byte("apiVersion: v2\nname: demo\nversion: 0.1.0\n"), 0644); err != nil {
+		t.Fatalf("write chart: %v", err)
+	}
+
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "charts/demo",
+		ProxyTLS:         "demo:80",
+	}, fakeProvider{}, nil)
+	inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "root"}
+
+	source, err := inst.resolveHelmChartSource(appPath)
+	if err != nil {
+		t.Fatalf("resolveHelmChartSource() error: %v", err)
+	}
+	if source.ChartRef != "/app/charts/demo" {
+		t.Fatalf("unexpected chart ref: %q", source.ChartRef)
+	}
+	if !source.RequiresSync {
+		t.Fatalf("expected local chart to require sync")
+	}
+	if !source.SyncAppTree {
+		t.Fatalf("expected in-tree chart to sync app tree")
+	}
+}
+
+// TestResolveHelmChartSourceForLocalChartOutsideAppPath verifies a chart
+// outside the app tree is staged under /app/.pullpreview/charts/ and does not
+// force a full app-tree sync.
+func TestResolveHelmChartSourceForLocalChartOutsideAppPath(t *testing.T) {
+	root := t.TempDir()
+	appPath := filepath.Join(root, "app")
+	chartPath := filepath.Join(root, "chart")
+	if err := os.MkdirAll(appPath, 0755); err != nil {
+		t.Fatalf("mkdir app path: %v", err)
+	}
+	if err := os.MkdirAll(chartPath, 0755); err != nil {
+		t.Fatalf("mkdir chart path: %v", err)
+	}
+	if err := os.WriteFile(filepath.Join(chartPath, "Chart.yaml"), []byte("apiVersion: v2\nname: demo\nversion: 0.1.0\n"), 0644); err != nil {
+		t.Fatalf("write chart: %v", err)
+	}
+
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "../chart",
+		ProxyTLS:         "demo:80",
+	}, fakeProvider{}, nil)
+
+	source, err := inst.resolveHelmChartSource(appPath)
+	if err != nil {
+		t.Fatalf("resolveHelmChartSource() error: %v", err)
+	}
+	if source.ChartRef == "" || !strings.HasPrefix(source.ChartRef, "/app/.pullpreview/charts/") {
+		t.Fatalf("unexpected external chart ref: %q", source.ChartRef)
+	}
+	if !source.RequiresSync {
+		t.Fatalf("expected external chart to require sync")
+	}
+	if source.SyncAppTree {
+		t.Fatalf("did not expect external chart to require full app tree sync")
+	}
+}
+
+// TestResolveHelmChartSourceForRepositoryChart verifies a repo chart resolves
+// to the pullpreview/ repo alias and needs no file sync.
+func TestResolveHelmChartSourceForRepositoryChart(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ChartRepository:  "https://charts.bitnami.com/bitnami",
+		ProxyTLS:         "app-wordpress:80",
+	}, fakeProvider{}, nil)
+
+	source, err := inst.resolveHelmChartSource(t.TempDir())
+	if err != nil {
+		t.Fatalf("resolveHelmChartSource() error: %v", err)
+	}
+	if source.ChartRef != "pullpreview/wordpress" {
+		t.Fatalf("unexpected chart ref: %q", source.ChartRef)
+	}
+	if source.RepoURL != "https://charts.bitnami.com/bitnami" {
+		t.Fatalf("unexpected repo url: %q", source.RepoURL)
+	}
+	if source.RequiresSync {
+		t.Fatalf("expected repo chart to avoid sync")
+	}
+}
+
+// TestResolveHelmChartSourceForOCIChart verifies an oci:// chart ref is
+// passed through untouched with no repo URL and no sync.
+func TestResolveHelmChartSourceForOCIChart(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "oci://registry-1.docker.io/bitnamicharts/wordpress",
+		ProxyTLS:         "app-wordpress:80",
+	}, fakeProvider{}, nil)
+
+	source, err := inst.resolveHelmChartSource(t.TempDir())
+	if err != nil {
+		t.Fatalf("resolveHelmChartSource() error: %v", err)
+	}
+	if source.ChartRef != "oci://registry-1.docker.io/bitnamicharts/wordpress" {
+		t.Fatalf("unexpected chart ref: %q", source.ChartRef)
+	}
+	if source.RepoURL != "" || source.RequiresSync {
+		t.Fatalf("unexpected OCI chart source: %#v", source)
+	}
+}
+
+// TestHelmValueArgsExpandsPlaceholdersAndSyncsValuesFiles verifies that
+// chart_values files map to --values /app/... flags, chart_set entries map to
+// --set flags with placeholders expanded, and local values files mark the
+// deployment as requiring a sync.
+func TestHelmValueArgsExpandsPlaceholdersAndSyncsValuesFiles(t *testing.T) {
+	appPath := t.TempDir()
+	for _, path := range []string{
+		filepath.Join(appPath, "values.yaml"),
+		filepath.Join(appPath, "overrides", "preview.yaml"),
+	} {
+		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+			t.Fatalf("mkdir values dir: %v", err)
+		}
+		if err := os.WriteFile(path, []byte("key: value\n"), 0644); err != nil {
+			t.Fatalf("write values file: %v", err)
+		}
+	}
+
+	inst := NewInstance("Demo App", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ChartRepository:  "https://charts.bitnami.com/bitnami",
+		ChartValues:      []string{"values.yaml", "overrides/preview.yaml"},
+		ChartSet: []string{
+			"service.type=ClusterIP",
+			"ingress.hostname={{ pullpreview_public_dns }}",
+			"url={{ pullpreview_url }}",
+		},
+		ProxyTLS: "{{ release_name }}-wordpress:80",
+		DNS:      "rev2.click",
+	}, fakeProvider{}, nil)
+	inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "root"}
+
+	args, requiresSync, err := inst.helmValueArgs(appPath)
+	if err != nil {
+		t.Fatalf("helmValueArgs() error: %v", err)
+	}
+	// Flag order must follow input order: values first, then set overrides.
+	want := []string{
+		"--values", "/app/values.yaml",
+		"--values", "/app/overrides/preview.yaml",
+		"--set", "service.type=ClusterIP",
+		"--set", "ingress.hostname=Demo-App-ip-1-2-3-4.rev2.click",
+		"--set", "url=https://Demo-App-ip-1-2-3-4.rev2.click:443",
+	}
+	if len(args) != len(want) {
+		t.Fatalf("unexpected helm args length: got=%#v want=%#v", args, want)
+	}
+	for idx := range want {
+		if args[idx] != want[idx] {
+			t.Fatalf("unexpected helm arg %d: got=%q want=%q all=%#v", idx, args[idx], want[idx], args)
+		}
+	}
+	if !requiresSync {
+		t.Fatalf("expected local values files to require sync")
+	}
+}
+
+// TestRunHelmDeploymentBuildsExpectedScriptForRepoChart verifies the single
+// SSH invocation and the remote script generated for a repository chart:
+// namespace creation, repo add/update, the quoted helm upgrade command, and
+// the Caddy proxy manifest/rollout steps.
+func TestRunHelmDeploymentBuildsExpectedScriptForRepoChart(t *testing.T) {
+	inst := NewInstance("Demo App", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ChartRepository:  "https://charts.bitnami.com/bitnami",
+		ProxyTLS:         "{{ release_name }}-wordpress:80",
+		DNS:              "rev2.click",
+	}, fakeProvider{}, nil)
+	inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "root", PrivateKey: "PRIVATE", CertKey: "CERT"}
+	runner := &scriptCaptureRunner{}
+	inst.Runner = runner
+
+	err := inst.runHelmDeployment(helmChartSource{
+		ChartRef: "pullpreview/wordpress",
+		RepoURL:  "https://charts.bitnami.com/bitnami",
+	}, []string{
+		"--values", "/app/values.yaml",
+		"--set", "service.type=ClusterIP",
+		"--set", "ingress.hostname=Demo-App-ip-1-2-3-4.rev2.click",
+	})
+	if err != nil {
+		t.Fatalf("runHelmDeployment() error: %v", err)
+	}
+	if len(runner.args) != 1 || len(runner.inputs) != 1 {
+		t.Fatalf("expected one ssh invocation, got args=%d inputs=%d", len(runner.args), len(runner.inputs))
+	}
+
+	sshArgs := strings.Join(runner.args[0], " ")
+	if !strings.Contains(sshArgs, "CertificateFile=") || !strings.Contains(sshArgs, "root@1.2.3.4") || !strings.Contains(sshArgs, "bash -se") {
+		t.Fatalf("unexpected ssh args: %s", sshArgs)
+	}
+
+	script := runner.inputs[0]
+	checks := []string{
+		"source /etc/pullpreview/env",
+		"export KUBECONFIG=/etc/rancher/k3s/k3s.yaml",
+		"kubectl create namespace 'pp-demo-app' --dry-run=client -o yaml | kubectl apply -f - >/dev/null",
+		"helm repo add pullpreview 'https://charts.bitnami.com/bitnami' --force-update >/dev/null",
+		"helm repo update pullpreview >/dev/null",
+		"'helm' 'upgrade' '--install' 'app' 'pullpreview/wordpress' '--namespace' 'pp-demo-app' '--create-namespace' '--wait' '--atomic' '--values' '/app/values.yaml' '--set' 'service.type=ClusterIP' '--set' 'ingress.hostname=Demo-App-ip-1-2-3-4.rev2.click'",
+		"cat <<'EOF' >/tmp/pullpreview-caddy.yaml",
+		"Demo-App-ip-1-2-3-4.rev2.click {",
+		"reverse_proxy app-wordpress.pp-demo-app.svc.cluster.local:80",
+		"kubectl rollout status deployment/pullpreview-caddy -n 'pp-demo-app' --timeout=10m",
+	}
+	for _, check := range checks {
+		if !strings.Contains(script, check) {
+			t.Fatalf("expected script to contain %q, script:\n%s", check, script)
+		}
+	}
+}
+
+// TestRunHelmDeploymentBuildsDependencyStepForLocalChart verifies that local
+// charts trigger a `helm dependency build` step and skip the repo-add step.
+func TestRunHelmDeploymentBuildsDependencyStepForLocalChart(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "charts/demo",
+		ProxyTLS:         "app-wordpress:80",
+	}, fakeProvider{}, nil)
+	inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "root", PrivateKey: "PRIVATE"}
+	runner := &scriptCaptureRunner{}
+	inst.Runner = runner
+
+	err := inst.runHelmDeployment(helmChartSource{
+		ChartRef:   "/app/charts/demo",
+		LocalChart: "/tmp/demo",
+	}, nil)
+	if err != nil {
+		t.Fatalf("runHelmDeployment() error: %v", err)
+	}
+	if len(runner.inputs) != 1 {
+		t.Fatalf("expected one ssh script, got %d", len(runner.inputs))
+	}
+	script := runner.inputs[0]
+	if !strings.Contains(script, "helm dependency build '/app/charts/demo' >/dev/null") {
+		t.Fatalf("expected helm dependency build for local chart, script:\n%s", script)
+	}
+	if strings.Contains(script, "helm repo add pullpreview") {
+		t.Fatalf("did not expect repo add for local chart, script:\n%s", script)
+	}
+}
+
+// TestRenderHelmCaddyManifest verifies the generated Caddy proxy manifest
+// contains the deployment name, the caddy command, the public DNS vhost and
+// the reverse_proxy upstream.
+func TestRenderHelmCaddyManifest(t *testing.T) {
+	inst := NewInstance("demo", CommonOptions{
+		DeploymentTarget: DeploymentTargetHelm,
+		Chart:            "wordpress",
+		ChartRepository:  "https://charts.bitnami.com/bitnami",
+		ProxyTLS:         "app-wordpress:80",
+		DNS:              "rev2.click",
+	}, fakeProvider{}, nil)
+	inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "root"}
+
+	manifest := inst.renderHelmCaddyManifest(inst.HelmNamespace(), "app-wordpress.pp-demo.svc.cluster.local", 80)
+	if !strings.Contains(manifest, "name: pullpreview-caddy") {
+		t.Fatalf("expected caddy deployment in manifest: %s", manifest)
+	}
+	if !strings.Contains(manifest, "command:\n        - caddy") {
+		t.Fatalf("expected caddy command in manifest: %s", manifest)
+	}
+	if !strings.Contains(manifest, "demo-ip-1-2-3-4.rev2.click") {
+		t.Fatalf("expected public DNS in manifest: %s", manifest)
+	}
+	if !strings.Contains(manifest, "reverse_proxy app-wordpress.pp-demo.svc.cluster.local:80") {
+		t.Fatalf("expected reverse proxy upstream in manifest: %s", manifest)
+	}
+}
diff --git a/internal/pullpreview/deployment_identity.go b/internal/pullpreview/deployment_identity.go
new file mode 100644
index 0000000..20630f7
--- /dev/null
+++ b/internal/pullpreview/deployment_identity.go
@@ -0,0 +1,68 @@
+package pullpreview
+
+import (
+ "crypto/sha1"
+ "fmt"
+ "strings"
+)
+
+// defaultPullPreviewLabel is the label value treated as the legacy default;
+// it yields no scope key so historical instance names stay stable.
+const defaultPullPreviewLabel = "pullpreview"
+
+// canonicalLabelValue lower-cases and normalizes a GitHub label so different
+// spellings ("PullPreview Helm" vs "pullpreview-helm") compare equal.
+func canonicalLabelValue(label string) string {
+	return strings.ToLower(NormalizeName(strings.TrimSpace(label)))
+}
+
+// labelScopeKey derives a short, stable per-label discriminator ("l" + first
+// 6 hex chars of SHA-1 of the canonical label). The default label maps to ""
+// for backward compatibility with pre-scoped instance names. SHA-1 is used
+// only as a naming hash here, not for any security purpose.
+func labelScopeKey(label string) string {
+	canonical := canonicalLabelValue(label)
+	if canonical == "" || canonical == defaultPullPreviewLabel {
+		return ""
+	}
+	sum := fmt.Sprintf("%x", sha1.Sum([]byte(canonical)))
+	return "l" + sum[:6]
+}
+
+// deploymentRuntime maps a deployment target to the runtime tag recorded on
+// the instance: k3s for helm deployments, docker for everything else.
+func deploymentRuntime(target DeploymentTarget) string {
+	switch NormalizeDeploymentTarget(string(target)) {
+	case DeploymentTargetHelm:
+		return "k3s"
+	default:
+		return "docker"
+	}
+}
+
+// DeploymentIdentityMismatch compares an existing instance's identity tags
+// against the desired configuration and reports the first mismatch found.
+// Missing tags on the existing instance (legacy instances) are compatible
+// only with the default label / compose target / docker runtime.
+// Returns a human-readable reason and true when the identities conflict.
+func DeploymentIdentityMismatch(existing, desired map[string]string) (string, bool) {
+	labelDesired := canonicalLabelValue(desired["pullpreview_label"])
+	labelExisting := canonicalLabelValue(existing["pullpreview_label"])
+	if labelDesired != "" {
+		if labelExisting == "" {
+			// Legacy instance: untagged label is only compatible with the default.
+			if labelDesired != defaultPullPreviewLabel {
+				return fmt.Sprintf("pullpreview_label missing (wanted %q)", labelDesired), true
+			}
+		} else if labelExisting != labelDesired {
+			return fmt.Sprintf("pullpreview_label mismatch (existing=%q wanted=%q)", labelExisting, labelDesired), true
+		}
+	}
+
+	// NOTE(review): map iteration order is nondeterministic, so when both
+	// target and runtime mismatch, which reason is reported may vary between
+	// runs. The boolean result is unaffected.
+	for key, defaultValue := range map[string]string{
+		"pullpreview_target":  string(DeploymentTargetCompose),
+		"pullpreview_runtime": deploymentRuntime(DeploymentTargetCompose),
+	} {
+		want := strings.TrimSpace(strings.ToLower(desired[key]))
+		if want == "" {
+			continue
+		}
+		got := strings.TrimSpace(strings.ToLower(existing[key]))
+		if got == "" {
+			// Untagged existing instance is only compatible with the default value.
+			if want != defaultValue {
+				return fmt.Sprintf("%s missing (wanted %q)", key, want), true
+			}
+			continue
+		}
+		if got != want {
+			return fmt.Sprintf("%s mismatch (existing=%q wanted=%q)", key, got, want), true
+		}
+	}
+
+	return "", false
+}
+
diff --git a/internal/pullpreview/deployment_identity_test.go b/internal/pullpreview/deployment_identity_test.go
new file mode 100644
index 0000000..d90cd15
--- /dev/null
+++ b/internal/pullpreview/deployment_identity_test.go
@@ -0,0 +1,70 @@
+package pullpreview
+
+import "testing"
+
+// TestLabelScopeKey verifies the default label yields no scope key and that
+// differently-spelled equivalent labels canonicalize to the same key.
+func TestLabelScopeKey(t *testing.T) {
+	if got := labelScopeKey("pullpreview"); got != "" {
+		t.Fatalf("expected default label to have empty scope, got %q", got)
+	}
+	if got := labelScopeKey("PullPreview Helm"); got == "" {
+		t.Fatalf("expected non-default label to have scope")
+	}
+	if gotA, gotB := labelScopeKey("pullpreview-helm"), labelScopeKey("PullPreview Helm"); gotA != gotB {
+		t.Fatalf("expected canonical label scope key, got %q vs %q", gotA, gotB)
+	}
+}
+
+// TestDeploymentIdentityMismatch covers the four identity cases: exact match,
+// full mismatch, legacy (untagged) instance vs default compose identity, and
+// legacy instance vs helm identity.
+func TestDeploymentIdentityMismatch(t *testing.T) {
+	if reason, mismatch := DeploymentIdentityMismatch(
+		map[string]string{
+			"pullpreview_label":   "pullpreview",
+			"pullpreview_target":  "compose",
+			"pullpreview_runtime": "docker",
+		},
+		map[string]string{
+			"pullpreview_label":   "pullpreview",
+			"pullpreview_target":  "compose",
+			"pullpreview_runtime": "docker",
+		},
+	); mismatch {
+		t.Fatalf("expected matching identity, got mismatch %q", reason)
+	}
+
+	if reason, mismatch := DeploymentIdentityMismatch(
+		map[string]string{
+			"pullpreview_label":   "pullpreview-helm",
+			"pullpreview_target":  "helm",
+			"pullpreview_runtime": "k3s",
+		},
+		map[string]string{
+			"pullpreview_label":   "pullpreview",
+			"pullpreview_target":  "compose",
+			"pullpreview_runtime": "docker",
+		},
+	); !mismatch || reason == "" {
+		t.Fatalf("expected mismatch for different label/target/runtime")
+	}
+
+	if reason, mismatch := DeploymentIdentityMismatch(
+		map[string]string{},
+		map[string]string{
+			"pullpreview_label":   "pullpreview",
+			"pullpreview_target":  "compose",
+			"pullpreview_runtime": "docker",
+		},
+	); mismatch {
+		t.Fatalf("expected legacy default compose identity to remain compatible, got %q", reason)
+	}
+
+	if reason, mismatch := DeploymentIdentityMismatch(
+		map[string]string{},
+		map[string]string{
+			"pullpreview_label":   "pullpreview",
+			"pullpreview_target":  "helm",
+			"pullpreview_runtime": "k3s",
+		},
+	); !mismatch || reason == "" {
+		t.Fatalf("expected legacy instance mismatch for helm identity")
+	}
+}
+
diff --git a/internal/pullpreview/deployment_target.go b/internal/pullpreview/deployment_target.go
new file mode 100644
index 0000000..8219b1b
--- /dev/null
+++ b/internal/pullpreview/deployment_target.go
@@ -0,0 +1,26 @@
+package pullpreview
+
+import (
+ "fmt"
+ "strings"
+)
+
+// NormalizeDeploymentTarget canonicalizes a user-supplied deployment target:
+// surrounding whitespace is stripped, the value is lower-cased, and an empty
+// value defaults to the compose target. Unknown values are returned in
+// normalized form so Validate can report them verbatim.
+//
+// The previous switch recomputed the trimmed/lowered value in up to three
+// places; every arm reduced to the same normalization, so this collapses to a
+// single computation with identical behavior.
+func NormalizeDeploymentTarget(value string) DeploymentTarget {
+	normalized := strings.ToLower(strings.TrimSpace(value))
+	if normalized == "" {
+		return DeploymentTargetCompose
+	}
+	return DeploymentTarget(normalized)
+}
+
+// Validate returns an error unless the target normalizes to one of the
+// supported deployment targets (compose or helm).
+func (t DeploymentTarget) Validate() error {
+	switch NormalizeDeploymentTarget(string(t)) {
+	case DeploymentTargetCompose, DeploymentTargetHelm:
+		return nil
+	default:
+		return fmt.Errorf("unsupported deployment target %q", t)
+	}
+}
diff --git a/internal/pullpreview/github_sync.go b/internal/pullpreview/github_sync.go
index fe468e0..0608539 100644
--- a/internal/pullpreview/github_sync.go
+++ b/internal/pullpreview/github_sync.go
@@ -199,7 +199,10 @@ func clearDanglingDeployments(repo string, opts GithubSyncOptions, provider Prov
activeInstanceNames := []string{}
removedInstanceNames := []string{}
for _, inst := range instances {
- if !instanceMatchesCleanupVariant(inst, opts.DeploymentVariant) {
+ if !instanceMatchesCleanupLabel(inst, opts.Label) {
+ continue
+ }
+ if !instanceMatchesCleanupTarget(inst, opts.Common.DeploymentTarget) {
continue
}
ref, ok := cleanupInstanceReference(inst)
@@ -326,6 +329,35 @@ func instanceMatchesCleanupVariant(inst InstanceSummary, expectedVariant string)
return ok && strings.EqualFold(parsed.Variant, expectedVariant)
}
+// canonicalCleanupLabel canonicalizes a label for cleanup comparisons; a thin
+// alias over canonicalLabelValue kept for call-site readability.
+func canonicalCleanupLabel(label string) string {
+	return canonicalLabelValue(label)
+}
+
+// instanceMatchesCleanupLabel reports whether an instance belongs to the
+// configured label scope. Instances without a pullpreview_label tag are
+// legacy instances and only match the default label.
+func instanceMatchesCleanupLabel(inst InstanceSummary, expectedLabel string) bool {
+	expected := canonicalCleanupLabel(expectedLabel)
+	if expected == "" {
+		return true
+	}
+	actual := canonicalCleanupLabel(firstTagValue(inst.Tags, "pullpreview_label"))
+	if actual == "" {
+		return expected == defaultPullPreviewLabel
+	}
+	return actual == expected
+}
+
+// instanceMatchesCleanupTarget reports whether an instance was provisioned
+// for the expected deployment target. Untagged instances predate target
+// tagging and are treated as compose deployments.
+func instanceMatchesCleanupTarget(inst InstanceSummary, expectedTarget DeploymentTarget) bool {
+	expected := NormalizeDeploymentTarget(string(expectedTarget))
+	// NOTE(review): NormalizeDeploymentTarget maps empty input to compose, so
+	// this guard looks unreachable — confirm and consider dropping it.
+	if expected == "" {
+		expected = DeploymentTargetCompose
+	}
+	actualRaw := strings.TrimSpace(firstTagValue(inst.Tags, "pullpreview_target"))
+	if actualRaw == "" {
+		return expected == DeploymentTargetCompose
+	}
+	actual := NormalizeDeploymentTarget(actualRaw)
+	return actual == expected
+}
+
type parsedInstanceName struct {
Variant string
PRNumber string
@@ -431,7 +463,7 @@ func (g *GithubSync) Sync() error {
action := g.guessAction()
if action == actionIgnored {
if g.logger != nil {
- g.logger.Infof("Ignoring event %s", action)
+ g.logger.Infof("Ignoring event %s", g.ignoredEventDetails())
}
return nil
}
@@ -456,13 +488,27 @@ func (g *GithubSync) Sync() error {
case actionPRDown, actionBranchDown:
instance := NewInstance(g.instanceName(), g.opts.Common, g.provider, g.logger)
_ = g.updateGitHubStatus(statusDestroying, "")
+ namesToDestroy := []string{}
running, _ := instance.Running()
if running {
- if g.runDown != nil {
- _ = g.runDown(DownOptions{Name: instance.Name}, g.provider, g.logger)
+ namesToDestroy = append(namesToDestroy, instance.Name)
+ }
+ if extraNames, err := g.matchingScopeInstanceNames(); err != nil {
+ if g.logger != nil {
+ g.logger.Warnf("Unable to list matching instances for cleanup: %v", err)
+ }
+ } else {
+ namesToDestroy = append(namesToDestroy, extraNames...)
+ }
+ namesToDestroy = uniqueStrings(namesToDestroy)
+ if len(namesToDestroy) == 0 {
+ if g.logger != nil {
+ g.logger.Warnf("No matching instances found for cleanup. Continuing...")
+ }
+ } else if g.runDown != nil {
+ for _, name := range namesToDestroy {
+ _ = g.runDown(DownOptions{Name: name}, g.provider, g.logger)
}
- } else if g.logger != nil {
- g.logger.Warnf("Instance %s already down. Continuing...", instance.Name)
}
if g.prClosed() {
if g.logger != nil {
@@ -984,6 +1030,46 @@ func (g *GithubSync) prHasLabel(searchedLabel string) bool {
return false
}
+// prLabels returns the non-empty label names attached to the event's pull
+// request, or nil when the event carries no PR.
+func (g *GithubSync) prLabels() []string {
+	if g.pr() == nil {
+		return nil
+	}
+	labels := make([]string, 0, len(g.pr().Labels))
+	for _, label := range g.pr().Labels {
+		if label.Name == "" {
+			continue
+		}
+		labels = append(labels, label.Name)
+	}
+	return labels
+}
+
+// ignoredEventDetails builds a single-line diagnostic string explaining why
+// an event was ignored: the event action, PR number, event/configured labels,
+// the PR's labels, and a best-effort reason.
+func (g *GithubSync) ignoredEventDetails() string {
+	parts := []string{fmt.Sprintf("action=%q", g.event.Action)}
+	if prNumber := g.prNumber(); prNumber != 0 {
+		parts = append(parts, fmt.Sprintf("pr=%d", prNumber))
+	}
+	if g.event.Label != nil && g.event.Label.Name != "" {
+		parts = append(parts, fmt.Sprintf("event_label=%q", g.event.Label.Name))
+	}
+	if g.opts.Label != "" {
+		parts = append(parts, fmt.Sprintf("configured_label=%q", g.opts.Label))
+	}
+	if labels := g.prLabels(); len(labels) > 0 {
+		parts = append(parts, fmt.Sprintf("pr_labels=%q", strings.Join(labels, ",")))
+	}
+
+	reason := "no matching action for event"
+	switch {
+	case g.pullRequest() && (g.event.Action == "labeled" || g.event.Action == "unlabeled") && g.event.Label != nil && !strings.EqualFold(g.event.Label.Name, g.opts.Label):
+		reason = "event label does not match configured label"
+	// NOTE(review): prHasLabel("") passes an empty search label — presumably
+	// prHasLabel falls back to the configured label for ""; confirm against
+	// its implementation, otherwise this should be g.prHasLabel(g.opts.Label).
+	case (g.push() || g.prSynchronize()) && !g.prHasLabel(""):
+		reason = "configured label is not present on the PR"
+	}
+	parts = append(parts, fmt.Sprintf("reason=%q", reason))
+	return strings.Join(parts, " ")
+}
+
func (g *GithubSync) prNumber() int {
if g.pullRequest() {
return g.event.PullRequest.Number
@@ -1046,8 +1132,15 @@ func (g *GithubSync) validateDeploymentVariant() error {
return nil
}
+// instanceScopeKey returns the label-derived scope token used to namespace
+// instance names and subdomains; empty for the default label.
+func (g *GithubSync) instanceScopeKey() string {
+	return labelScopeKey(g.opts.Label)
+}
+
func (g *GithubSync) instanceName() string {
parts := []string{"gh", fmt.Sprintf("%d", g.repoID())}
+ if scope := g.instanceScopeKey(); scope != "" {
+ parts = append(parts, scope)
+ }
if g.deploymentVariant() != "" {
parts = append(parts, g.deploymentVariant())
}
@@ -1061,6 +1154,9 @@ func (g *GithubSync) instanceName() string {
func (g *GithubSync) instanceSubdomain() string {
components := []string{}
+ if scope := g.instanceScopeKey(); scope != "" {
+ components = append(components, scope)
+ }
if g.deploymentVariant() != "" {
components = append(components, g.deploymentVariant())
}
@@ -1076,14 +1172,24 @@ func (g *GithubSync) instanceSubdomain() string {
}
func (g *GithubSync) defaultInstanceTags() map[string]string {
+ target := NormalizeDeploymentTarget(string(g.opts.Common.DeploymentTarget))
+ if target == "" {
+ target = DeploymentTargetCompose
+ }
tags := map[string]string{
- "repo_name": g.repoName(),
- "repo_id": fmt.Sprintf("%d", g.repoID()),
- "org_name": g.orgName(),
- "org_id": fmt.Sprintf("%d", g.orgID()),
- "version": Version,
- "pullpreview_repo": g.repo(),
- "pullpreview_kind": "branch",
+ "repo_name": g.repoName(),
+ "repo_id": fmt.Sprintf("%d", g.repoID()),
+ "org_name": g.orgName(),
+ "org_id": fmt.Sprintf("%d", g.orgID()),
+ "version": Version,
+ "pullpreview_repo": g.repo(),
+ "pullpreview_kind": "branch",
+ "pullpreview_label": canonicalCleanupLabel(g.opts.Label),
+ "pullpreview_target": string(target),
+ "pullpreview_runtime": deploymentRuntime(target),
+ }
+ if scope := g.instanceScopeKey(); scope != "" {
+ tags["pullpreview_scope"] = scope
}
if branch := g.branch(); branch != "" {
tags["pullpreview_branch"] = branch
@@ -1107,6 +1213,52 @@ func (g *GithubSync) buildInstance() *Instance {
return instance
}
+// currentCleanupRef identifies the current deployment for cleanup matching:
+// by PR number when the event has one, otherwise by (normalized) branch name.
+func (g *GithubSync) currentCleanupRef() cleanupInstanceRef {
+	if g.prNumber() != 0 {
+		return cleanupInstanceRef{PRNumber: fmt.Sprintf("%d", g.prNumber())}
+	}
+	branch := g.branch()
+	return cleanupInstanceRef{Branch: branch, BranchNormalized: NormalizeName(branch)}
+}
+
+// matchingScopeInstanceNames lists provider instances for this repo that
+// match the configured label, variant and deployment target AND reference the
+// same PR/branch as the current event. Used to find extra instances to tear
+// down (e.g. renamed instances from older naming schemes).
+func (g *GithubSync) matchingScopeInstanceNames() ([]string, error) {
+	instances, err := g.provider.ListInstances(repoCleanupTags(g.repo()))
+	if err != nil {
+		return nil, err
+	}
+	ref := g.currentCleanupRef()
+	names := []string{}
+	for _, inst := range instances {
+		if !instanceMatchesCleanupLabel(inst, g.opts.Label) {
+			continue
+		}
+		if !instanceMatchesCleanupVariant(inst, g.opts.DeploymentVariant) {
+			continue
+		}
+		if !instanceMatchesCleanupTarget(inst, g.opts.Common.DeploymentTarget) {
+			continue
+		}
+		instRef, ok := cleanupInstanceReference(inst)
+		if !ok {
+			continue
+		}
+		// Keep only instances pointing at the same PR (preferred) or branch.
+		switch {
+		case ref.PRNumber != "":
+			if instRef.PRNumber != ref.PRNumber {
+				continue
+			}
+		case ref.BranchNormalized != "":
+			if instRef.BranchNormalized != ref.BranchNormalized {
+				continue
+			}
+		default:
+			// Neither a PR nor a branch to match against: skip defensively.
+			continue
+		}
+		names = append(names, inst.Name)
+	}
+	return uniqueStrings(names), nil
+}
+
func mergeStringMap(base, extra map[string]string) map[string]string {
result := map[string]string{}
for k, v := range base {
@@ -1120,19 +1272,24 @@ func mergeStringMap(base, extra map[string]string) map[string]string {
func instanceToCommon(inst *Instance) CommonOptions {
return CommonOptions{
- Admins: inst.Admins,
- AdminPublicKeys: inst.AdminPublicKeys,
- Context: inst.Context,
- CIDRs: inst.CIDRs,
- Registries: inst.Registries,
- ProxyTLS: inst.ProxyTLS,
- DNS: inst.DNS,
- Ports: inst.Ports,
- InstanceType: inst.Size,
- DefaultPort: inst.DefaultPort,
- Tags: inst.Tags,
- ComposeFiles: inst.ComposeFiles,
- ComposeOptions: inst.ComposeOptions,
- PreScript: inst.PreScript,
+ Admins: inst.Admins,
+ AdminPublicKeys: inst.AdminPublicKeys,
+ Context: inst.Context,
+ DeploymentTarget: inst.DeploymentTarget,
+ CIDRs: inst.CIDRs,
+ Registries: inst.Registries,
+ ProxyTLS: inst.ProxyTLS,
+ DNS: inst.DNS,
+ Ports: inst.Ports,
+ InstanceType: inst.Size,
+ DefaultPort: inst.DefaultPort,
+ Tags: inst.Tags,
+ ComposeFiles: inst.ComposeFiles,
+ ComposeOptions: inst.ComposeOptions,
+ Chart: inst.Chart,
+ ChartRepository: inst.ChartRepository,
+ ChartValues: inst.ChartValues,
+ ChartSet: inst.ChartSet,
+ PreScript: inst.PreScript,
}
}
diff --git a/internal/pullpreview/github_sync_test.go b/internal/pullpreview/github_sync_test.go
index e355029..bf1ad18 100644
--- a/internal/pullpreview/github_sync_test.go
+++ b/internal/pullpreview/github_sync_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "fmt"
"log"
"os"
"path/filepath"
@@ -248,6 +249,47 @@ func TestSyncLabeledFixtureRunsUp(t *testing.T) {
}
}
+// TestSyncLogsIgnoredLabeledEventDetailsForDifferentConfiguredLabel verifies
+// that a labeled event for a different label than the configured one is
+// ignored (runUp not called) and that the log line carries the event label,
+// the configured label and the ignore reason.
+func TestSyncLogsIgnoredLabeledEventDetailsForDifferentConfiguredLabel(t *testing.T) {
+	t.Setenv("PULLPREVIEW_TEST", "1")
+	event := loadFixtureEvent(t, "github_event_labeled.json")
+	event.Label = &GitHubLabel{Name: "pullpreview-helm"}
+	event.PullRequest.Labels = []GitHubLabel{
+		{Name: "pullpreview"},
+		{Name: "pullpreview-helm"},
+	}
+
+	client := &fakeGitHub{latestSHA: event.PullRequest.Head.SHA}
+	sync := newSync(event, GithubSyncOptions{Label: "pullpreview", Common: CommonOptions{}}, client, fakeProvider{running: true})
+	var logs bytes.Buffer
+	logger := NewLogger(LevelInfo)
+	logger.base = log.New(&logs, "", 0)
+	sync.logger = logger
+
+	upCalled := false
+	sync.runUp = func(opts UpOptions, provider Provider, logger *Logger) (*Instance, error) {
+		upCalled = true
+		return nil, nil
+	}
+
+	if err := sync.Sync(); err != nil {
+		t.Fatalf("Sync() returned error: %v", err)
+	}
+	if upCalled {
+		t.Fatalf("expected runUp not to be called")
+	}
+
+	logOutput := logs.String()
+	if !strings.Contains(logOutput, `event_label="pullpreview-helm"`) {
+		t.Fatalf("expected event label details in logs: %s", logOutput)
+	}
+	if !strings.Contains(logOutput, `configured_label="pullpreview"`) {
+		t.Fatalf("expected configured label details in logs: %s", logOutput)
+	}
+	if !strings.Contains(logOutput, `reason="event label does not match configured label"`) {
+		t.Fatalf("expected ignored-event reason in logs: %s", logOutput)
+	}
+}
+
func TestSyncLabeledProxyTLSUsesHTTPSURLInComment(t *testing.T) {
t.Setenv("PULLPREVIEW_TEST", "1")
t.Setenv("GITHUB_SERVER_URL", "https://github.com")
@@ -558,10 +600,10 @@ func TestClearDanglingDeploymentsDestroysInstancesNotLinkedToActivePR(t *testing
}
provider := &scheduledCleanupProvider{
instances: []InstanceSummary{
- {Name: "gh-1-pr-10", Tags: map[string]string{"pr_number": "10"}},
- {Name: "gh-1-pr-11", Tags: map[string]string{"pr_number": "11"}},
- {Name: "gh-1-branch-main", Tags: map[string]string{"pullpreview_branch": "main"}},
- {Name: "gh-1-branch-feature-x", Tags: map[string]string{}}, // legacy branch instance without branch tag
+ {Name: "gh-1-pr-10", Tags: map[string]string{"pr_number": "10", "pullpreview_label": "pullpreview-custom"}},
+ {Name: "gh-1-pr-11", Tags: map[string]string{"pr_number": "11", "pullpreview_label": "pullpreview-custom"}},
+ {Name: "gh-1-branch-main", Tags: map[string]string{"pullpreview_branch": "main", "pullpreview_label": "pullpreview-custom"}},
+ {Name: "gh-1-branch-feature-x", Tags: map[string]string{"pullpreview_label": "pullpreview-custom"}},
},
}
destroyed := []string{}
@@ -605,14 +647,29 @@ func TestClearDanglingDeploymentsDestroysInstancesNotLinkedToActivePR(t *testing
}
}
-func TestClearDanglingDeploymentsScopesCleanupByDeploymentVariant(t *testing.T) {
- client := &fakeGitHub{}
+func TestClearDanglingDeploymentsCleansAllVariantsForMatchingLabel(t *testing.T) {
+ client := &fakeGitHub{
+ issues: []*gh.Issue{
+ {
+ Number: gh.Int(10),
+ State: gh.String("open"),
+ PullRequestLinks: &gh.PullRequestLinks{},
+ },
+ {
+ Number: gh.Int(20),
+ State: gh.String("closed"),
+ PullRequestLinks: &gh.PullRequestLinks{},
+ },
+ },
+ }
provider := &scheduledCleanupProvider{
instances: []InstanceSummary{
{Name: "gh-1-env1-pr-10", Tags: map[string]string{"pr_number": "10", "pullpreview_variant": "env1"}},
+ {Name: "gh-1-env2-pr-10", Tags: map[string]string{"pr_number": "10", "pullpreview_variant": "env2"}},
+ {Name: "gh-1-env1-pr-20", Tags: map[string]string{"pr_number": "20", "pullpreview_variant": "env1"}},
{Name: "gh-1-env2-pr-20", Tags: map[string]string{"pr_number": "20", "pullpreview_variant": "env2"}},
{Name: "gh-1-env1-pr-30", Tags: map[string]string{}}, // legacy env1 instance without variant tag
- {Name: "gh-1-env2-pr-40", Tags: map[string]string{}}, // legacy env2 instance without variant tag
+ {Name: "gh-1-env2-pr-30", Tags: map[string]string{}}, // legacy env2 instance without variant tag
},
}
destroyed := []string{}
@@ -632,13 +689,13 @@ func TestClearDanglingDeploymentsScopesCleanupByDeploymentVariant(t *testing.T)
}
sort.Strings(destroyed)
- wantDestroyed := []string{"gh-1-env1-pr-10", "gh-1-env1-pr-30"}
+ wantDestroyed := []string{"gh-1-env1-pr-20", "gh-1-env1-pr-30", "gh-1-env2-pr-20", "gh-1-env2-pr-30"}
if strings.Join(destroyed, ",") != strings.Join(wantDestroyed, ",") {
- t.Fatalf("unexpected destroyed instances for env1 cleanup: got=%v want=%v", destroyed, wantDestroyed)
+ t.Fatalf("unexpected destroyed instances for label-wide cleanup: got=%v want=%v", destroyed, wantDestroyed)
}
}
-func TestClearDanglingDeploymentsWithoutVariantSkipsVariantInstances(t *testing.T) {
+func TestClearDanglingDeploymentsWithoutVariantCleansAllVariants(t *testing.T) {
client := &fakeGitHub{}
provider := &scheduledCleanupProvider{
instances: []InstanceSummary{
@@ -663,9 +720,40 @@ func TestClearDanglingDeploymentsWithoutVariantSkipsVariantInstances(t *testing.
}
sort.Strings(destroyed)
- wantDestroyed := []string{"gh-1-pr-10"}
+ wantDestroyed := []string{"gh-1-env1-pr-20", "gh-1-env2-pr-30", "gh-1-pr-10"}
+ if strings.Join(destroyed, ",") != strings.Join(wantDestroyed, ",") {
+ t.Fatalf("unexpected destroyed instances for label-wide cleanup without variant: got=%v want=%v", destroyed, wantDestroyed)
+ }
+}
+
+func TestClearDanglingDeploymentsSkipsInstancesFromDifferentLabel(t *testing.T) {
+ client := &fakeGitHub{}
+ provider := &scheduledCleanupProvider{
+ instances: []InstanceSummary{
+ {Name: "gh-1-pr-10", Tags: map[string]string{"pr_number": "10", "pullpreview_label": "pullpreview"}},
+ {Name: "gh-1-pr-11", Tags: map[string]string{"pr_number": "11", "pullpreview_label": "pullpreview-helm"}},
+ {Name: "gh-1-pr-12", Tags: map[string]string{"pr_number": "12"}}, // legacy instance without label tag
+ },
+ }
+ destroyed := []string{}
+ originalRunDown := runDownFunc
+ defer func() { runDownFunc = originalRunDown }()
+ runDownFunc = func(opts DownOptions, provider Provider, logger *Logger) error {
+ destroyed = append(destroyed, opts.Name)
+ return nil
+ }
+
+ err := clearDanglingDeployments("org/repo", GithubSyncOptions{
+ Label: "pullpreview",
+ }, provider, client, nil)
+ if err != nil {
+ t.Fatalf("clearDanglingDeployments() error: %v", err)
+ }
+
+ sort.Strings(destroyed)
+ wantDestroyed := []string{"gh-1-pr-10", "gh-1-pr-12"}
if strings.Join(destroyed, ",") != strings.Join(wantDestroyed, ",") {
- t.Fatalf("unexpected destroyed instances for default cleanup: got=%v want=%v", destroyed, wantDestroyed)
+ t.Fatalf("unexpected destroyed instances for label-scoped cleanup: got=%v want=%v", destroyed, wantDestroyed)
}
}
@@ -803,6 +891,188 @@ func TestInstanceMatchesCleanupVariantPrecedence(t *testing.T) {
}
}
+func TestInstanceMatchesCleanupLabel(t *testing.T) {
+ if !instanceMatchesCleanupLabel(InstanceSummary{
+ Name: "gh-1-pr-10",
+ Tags: map[string]string{"pullpreview_label": "pullpreview-helm"},
+ }, "PullPreview Helm") {
+ t.Fatalf("expected canonical label match")
+ }
+
+ if instanceMatchesCleanupLabel(InstanceSummary{
+ Name: "gh-1-pr-10",
+ Tags: map[string]string{"pullpreview_label": "pullpreview"},
+ }, "pullpreview-helm") {
+ t.Fatalf("expected mismatched label tag to be skipped")
+ }
+
+ if instanceMatchesCleanupLabel(InstanceSummary{
+ Name: "gh-1-pr-10",
+ Tags: map[string]string{},
+ }, "pullpreview-helm") {
+ t.Fatalf("expected missing label tag to be rejected for scoped labels")
+ }
+
+ if !instanceMatchesCleanupLabel(InstanceSummary{
+ Name: "gh-1-pr-10",
+ Tags: map[string]string{},
+ }, "pullpreview") {
+ t.Fatalf("expected legacy instance without label tag to remain eligible for default label")
+ }
+}
+
+func TestInstanceMatchesCleanupTarget(t *testing.T) {
+ if !instanceMatchesCleanupTarget(InstanceSummary{
+ Name: "gh-1-pr-10",
+ Tags: map[string]string{"pullpreview_target": "helm"},
+ }, DeploymentTargetHelm) {
+ t.Fatalf("expected explicit target tag to match")
+ }
+
+ if instanceMatchesCleanupTarget(InstanceSummary{
+ Name: "gh-1-pr-10",
+ Tags: map[string]string{},
+ }, DeploymentTargetHelm) {
+ t.Fatalf("expected missing target tag to be rejected for helm cleanup")
+ }
+
+ if !instanceMatchesCleanupTarget(InstanceSummary{
+ Name: "gh-1-pr-10",
+ Tags: map[string]string{},
+ }, DeploymentTargetCompose) {
+ t.Fatalf("expected missing target tag to remain eligible for compose cleanup")
+ }
+}
+
+func TestDefaultInstanceTagsIncludeCanonicalLabel(t *testing.T) {
+ event := loadFixtureEvent(t, "github_event_labeled.json")
+ sync := newSync(event, GithubSyncOptions{
+ Label: "PullPreview Helm",
+ Common: CommonOptions{DeploymentTarget: DeploymentTargetHelm},
+ }, &fakeGitHub{}, fakeProvider{running: true})
+
+ tags := sync.defaultInstanceTags()
+ if tags["pullpreview_label"] != "pullpreview-helm" {
+ t.Fatalf("unexpected canonical label tag: %#v", tags)
+ }
+ if tags["pullpreview_target"] != "helm" || tags["pullpreview_runtime"] != "k3s" {
+ t.Fatalf("expected target/runtime tags, got %#v", tags)
+ }
+ if tags["pullpreview_scope"] == "" {
+ t.Fatalf("expected non-default label scope tag, got %#v", tags)
+ }
+}
+
+func TestInstanceNameUsesScopeForNonDefaultLabel(t *testing.T) {
+ event := loadFixtureEvent(t, "github_event_labeled.json")
+
+ defaultSync := newSync(event, GithubSyncOptions{
+ Label: "pullpreview",
+ Common: CommonOptions{},
+ }, &fakeGitHub{}, fakeProvider{running: true})
+ expectedDefault := NormalizeName(fmt.Sprintf("gh-%d-pr-%d", defaultSync.repoID(), defaultSync.prNumber()))
+ if got := defaultSync.instanceName(); got != expectedDefault {
+ t.Fatalf("unexpected default label instance name: %q", got)
+ }
+
+ helmSync := newSync(event, GithubSyncOptions{
+ Label: "pullpreview-helm",
+ Common: CommonOptions{DeploymentTarget: DeploymentTargetHelm},
+ }, &fakeGitHub{}, fakeProvider{running: true})
+ scope := labelScopeKey("pullpreview-helm")
+ if scope == "" {
+ t.Fatalf("expected non-empty scope for non-default label")
+ }
+ if !strings.Contains(helmSync.instanceName(), scope) {
+ t.Fatalf("expected instance name %q to include scope %q", helmSync.instanceName(), scope)
+ }
+ if !strings.Contains(helmSync.instanceSubdomain(), scope) {
+ t.Fatalf("expected subdomain %q to include scope %q", helmSync.instanceSubdomain(), scope)
+ }
+}
+
+func TestMatchingScopeInstanceNamesIncludesLegacyInstanceForScopedLabel(t *testing.T) {
+ event := loadFixtureEvent(t, "github_event_unlabeled.json")
+ event.Label = &GitHubLabel{Name: "pullpreview-helm"}
+ if event.PullRequest != nil {
+ event.PullRequest.Labels = []GitHubLabel{}
+ }
+ provider := &scheduledCleanupProvider{
+ instances: []InstanceSummary{
+ {
+ Name: NormalizeName(fmt.Sprintf("gh-%d-pr-%d", event.Repository.ID, event.PullRequest.Number)),
+ Tags: map[string]string{
+ "pr_number": fmt.Sprintf("%d", event.PullRequest.Number),
+ "pullpreview_label": "pullpreview-helm",
+ "pullpreview_target": "helm",
+ },
+ },
+ },
+ }
+ client := &fakeGitHub{}
+ sync := newSync(event, GithubSyncOptions{
+ Label: "pullpreview-helm",
+ Common: CommonOptions{DeploymentTarget: DeploymentTargetHelm},
+ }, client, provider)
+
+ inst := provider.instances[0]
+ ref, ok := cleanupInstanceReference(inst)
+ if !ok {
+ t.Fatalf("expected cleanup reference for instance %#v", inst)
+ }
+ if ref.PRNumber != fmt.Sprintf("%d", event.PullRequest.Number) {
+ t.Fatalf("unexpected cleanup ref %#v for event PR %d", ref, event.PullRequest.Number)
+ }
+ if !instanceMatchesCleanupLabel(inst, sync.opts.Label) {
+ t.Fatalf("expected label filter to match instance %#v", inst)
+ }
+ if !instanceMatchesCleanupVariant(inst, sync.opts.DeploymentVariant) {
+ t.Fatalf("expected variant filter to match instance %#v", inst)
+ }
+ if !instanceMatchesCleanupTarget(inst, sync.opts.Common.DeploymentTarget) {
+ t.Fatalf("expected target filter to match instance %#v", inst)
+ }
+
+ names, err := sync.matchingScopeInstanceNames()
+ if err != nil {
+ t.Fatalf("matchingScopeInstanceNames() error: %v", err)
+ }
+ expectedName := NormalizeName(fmt.Sprintf("gh-%d-pr-%d", event.Repository.ID, event.PullRequest.Number))
+ if len(names) != 1 || names[0] != expectedName {
+ t.Fatalf("expected legacy instance match, got %v", names)
+ }
+}
+
+func TestMatchingScopeInstanceNamesSkipsUntaggedLegacyInstanceForScopedLabel(t *testing.T) {
+ event := loadFixtureEvent(t, "github_event_unlabeled.json")
+ event.Label = &GitHubLabel{Name: "pullpreview-helm"}
+ if event.PullRequest != nil {
+ event.PullRequest.Labels = []GitHubLabel{}
+ }
+ provider := &scheduledCleanupProvider{
+ instances: []InstanceSummary{
+ {
+ Name: NormalizeName(fmt.Sprintf("gh-%d-pr-%d", event.Repository.ID, event.PullRequest.Number)),
+ Tags: map[string]string{
+ "pr_number": fmt.Sprintf("%d", event.PullRequest.Number),
+ },
+ },
+ },
+ }
+ sync := newSync(event, GithubSyncOptions{
+ Label: "pullpreview-helm",
+ Common: CommonOptions{DeploymentTarget: DeploymentTargetHelm},
+ }, &fakeGitHub{}, provider)
+
+ names, err := sync.matchingScopeInstanceNames()
+ if err != nil {
+ t.Fatalf("matchingScopeInstanceNames() error: %v", err)
+ }
+ if len(names) != 0 {
+ t.Fatalf("expected untagged legacy instance to be skipped, got %v", names)
+ }
+}
+
func writeFixtureToTempEventFile(t *testing.T, event GitHubEvent) string {
t.Helper()
path := filepath.Join(t.TempDir(), "event.json")
diff --git a/internal/pullpreview/instance.go b/internal/pullpreview/instance.go
index 27470c2..0549815 100644
--- a/internal/pullpreview/instance.go
+++ b/internal/pullpreview/instance.go
@@ -14,7 +14,25 @@ import (
"time"
)
-const remoteAppPath = "/app"
+const (
+ remoteAppPath = "/app"
+ instanceSSHReadyInterval = 5 * time.Second
+ instanceSSHReadyWaitWindow = 5 * time.Minute
+ sshReadyDiagnosticCommand = `if test -f /etc/pullpreview/ready; then
+ echo ready-marker-present
+ exit 0
+fi
+echo ready-marker-missing
+if command -v cloud-init >/dev/null 2>&1; then
+ echo "-- cloud-init status --"
+ sudo -n cloud-init status --long 2>/dev/null || cloud-init status --long 2>/dev/null || sudo -n cloud-init status 2>/dev/null || cloud-init status 2>/dev/null || true
+fi
+if sudo -n test -f /var/log/cloud-init-output.log 2>/dev/null || test -f /var/log/cloud-init-output.log; then
+ echo "-- cloud-init-output tail --"
+ sudo -n tail -n 40 /var/log/cloud-init-output.log 2>/dev/null || tail -n 40 /var/log/cloud-init-output.log 2>/dev/null || true
+fi
+exit 1`
+)
type Runner interface {
Run(cmd *exec.Cmd) error
@@ -28,31 +46,47 @@ func (r SystemRunner) Run(cmd *exec.Cmd) error {
return cmd.Run()
}
+var runSSHCombinedOutput = func(cmd *exec.Cmd) ([]byte, error) {
+ return cmd.CombinedOutput()
+}
+
+var waitUntilInstanceSSHReady = func(ctx context.Context, probe func() bool) bool {
+ return WaitUntilContext(ctx, pollAttemptsForWindow(instanceSSHReadyWaitWindow, instanceSSHReadyInterval), instanceSSHReadyInterval, probe)
+}
+
+var errInstanceSSHUnavailable = errors.New("can't connect to instance over SSH")
+
type Instance struct {
- Name string
- Subdomain string
- Admins []string
- AdminPublicKeys []string
- Context context.Context
- CIDRs []string
- ComposeFiles []string
- ComposeOptions []string
- DefaultPort string
- DNS string
- Ports []string
- ProxyTLS string
- Provider Provider
- Registries []string
- Size string
- Tags map[string]string
- PreScript string
- Access AccessDetails
- Logger *Logger
- Runner Runner
+ Name string
+ Subdomain string
+ Admins []string
+ AdminPublicKeys []string
+ Context context.Context
+ DeploymentTarget DeploymentTarget
+ CIDRs []string
+ ComposeFiles []string
+ ComposeOptions []string
+ Chart string
+ ChartRepository string
+ ChartValues []string
+ ChartSet []string
+ DefaultPort string
+ DNS string
+ Ports []string
+ ProxyTLS string
+ Provider Provider
+ Registries []string
+ Size string
+ Tags map[string]string
+ PreScript string
+ Access AccessDetails
+ Logger *Logger
+ Runner Runner
}
func NewInstance(name string, opts CommonOptions, provider Provider, logger *Logger) *Instance {
normalized := NormalizeName(name)
+ target := NormalizeDeploymentTarget(string(opts.DeploymentTarget))
defaultPort := defaultString(opts.DefaultPort, "80")
proxyTLS := strings.TrimSpace(opts.ProxyTLS)
if proxyTLS != "" {
@@ -62,25 +96,30 @@ func NewInstance(name string, opts CommonOptions, provider Provider, logger *Log
defaultPort = "443"
}
return &Instance{
- Name: normalized,
- Subdomain: NormalizeName(name),
- Admins: opts.Admins,
- AdminPublicKeys: opts.AdminPublicKeys,
- Context: ensureContext(opts.Context),
- CIDRs: defaultSlice(opts.CIDRs, []string{"0.0.0.0/0"}),
- ComposeFiles: defaultSlice(opts.ComposeFiles, []string{"docker-compose.yml"}),
- ComposeOptions: defaultSlice(opts.ComposeOptions, []string{"--build"}),
- DefaultPort: defaultPort,
- DNS: defaultString(opts.DNS, "my.preview.run"),
- Ports: opts.Ports,
- ProxyTLS: proxyTLS,
- Provider: provider,
- Registries: opts.Registries,
- Size: opts.InstanceType,
- Tags: defaultMap(opts.Tags),
- PreScript: opts.PreScript,
- Logger: logger,
- Runner: SystemRunner{},
+ Name: normalized,
+ Subdomain: NormalizeName(name),
+ Admins: opts.Admins,
+ AdminPublicKeys: opts.AdminPublicKeys,
+ Context: ensureContext(opts.Context),
+ DeploymentTarget: target,
+ CIDRs: defaultSlice(opts.CIDRs, []string{"0.0.0.0/0"}),
+ ComposeFiles: defaultSlice(opts.ComposeFiles, []string{"docker-compose.yml"}),
+ ComposeOptions: defaultSlice(opts.ComposeOptions, []string{"--build"}),
+ Chart: strings.TrimSpace(opts.Chart),
+ ChartRepository: strings.TrimSpace(opts.ChartRepository),
+ ChartValues: opts.ChartValues,
+ ChartSet: opts.ChartSet,
+ DefaultPort: defaultPort,
+ DNS: defaultString(opts.DNS, "my.preview.run"),
+ Ports: opts.Ports,
+ ProxyTLS: proxyTLS,
+ Provider: provider,
+ Registries: opts.Registries,
+ Size: opts.InstanceType,
+ Tags: defaultMap(opts.Tags),
+ PreScript: opts.PreScript,
+ Logger: logger,
+ Runner: SystemRunner{},
}
}
@@ -112,9 +151,86 @@ func defaultMap(value map[string]string) map[string]string {
return value
}
+func providerName(provider Provider) string {
+ if metadata, ok := provider.(ProviderMetadata); ok {
+ return strings.ToLower(strings.TrimSpace(metadata.Name()))
+ }
+ return ""
+}
+
+func providerSupportsDeploymentTarget(provider Provider, target DeploymentTarget) bool {
+ if supported, ok := provider.(SupportsDeploymentTarget); ok {
+ return supported.SupportsDeploymentTarget(target)
+ }
+ return NormalizeDeploymentTarget(string(target)) == DeploymentTargetCompose
+}
+
+func sameStringSlice(got, want []string) bool {
+ if len(got) != len(want) {
+ return false
+ }
+ for idx := range got {
+ if strings.TrimSpace(got[idx]) != strings.TrimSpace(want[idx]) {
+ return false
+ }
+ }
+ return true
+}
+
+func (i *Instance) ValidateDeploymentConfig() error {
+ if err := i.DeploymentTarget.Validate(); err != nil {
+ return err
+ }
+
+ switch i.DeploymentTarget {
+ case DeploymentTargetCompose:
+ if strings.TrimSpace(i.Chart) != "" || strings.TrimSpace(i.ChartRepository) != "" || len(i.ChartValues) > 0 || len(i.ChartSet) > 0 {
+ return fmt.Errorf("chart, chart_repository, chart_values, and chart_set require deployment_target=helm")
+ }
+ case DeploymentTargetHelm:
+ if !providerSupportsDeploymentTarget(i.Provider, DeploymentTargetHelm) {
+ return fmt.Errorf("deployment_target=helm is unsupported for provider=%s", providerName(i.Provider))
+ }
+ if strings.TrimSpace(i.Chart) == "" {
+ return fmt.Errorf("deployment_target=helm requires chart")
+ }
+ if strings.TrimSpace(i.ProxyTLS) == "" {
+ return fmt.Errorf("deployment_target=helm requires proxy_tls")
+ }
+ if len(uniqueStrings(i.Registries)) > 0 {
+ return fmt.Errorf("registries is unsupported with deployment_target=helm")
+ }
+ if len(i.ComposeFiles) > 0 && !sameStringSlice(i.ComposeFiles, []string{"docker-compose.yml"}) {
+ return fmt.Errorf("compose_files is unsupported with deployment_target=helm")
+ }
+ if len(i.ComposeOptions) > 0 && !sameStringSlice(i.ComposeOptions, []string{"--build"}) {
+ return fmt.Errorf("compose_options is unsupported with deployment_target=helm")
+ }
+ default:
+ return fmt.Errorf("unsupported deployment target %q", i.DeploymentTarget)
+ }
+ return nil
+}
+
func (i *Instance) LaunchAndWait() error {
+ for attempt := 0; attempt < 2; attempt++ {
+ err := i.launchAndWait()
+ if !errors.Is(err, errInstanceSSHUnavailable) || attempt == 1 {
+ return err
+ }
+ if i.Logger != nil {
+ i.Logger.Warnf("Instance never reached ready state; terminating and retrying once name=%s", i.Name)
+ }
+ if err := i.Terminate(); err != nil {
+ return fmt.Errorf("instance not ready and terminate failed: %w", err)
+ }
+ }
+ return nil
+}
+
+func (i *Instance) launchAndWait() error {
if i.Logger != nil {
- i.Logger.Infof("Creating or restoring instance name=%s size=%s", i.Name, i.Size)
+ i.Logger.Infof("Creating instance name=%s size=%s", i.Name, i.Size)
}
userData := UserData{
@@ -124,9 +240,10 @@ func (i *Instance) LaunchAndWait() error {
}.Script()
if provider, ok := i.Provider.(UserDataProvider); ok {
generatedUserData, err := provider.BuildUserData(UserDataOptions{
- AppPath: remoteAppPath,
- SSHPublicKeys: i.SSHPublicKeys(),
- Username: i.Username(),
+ AppPath: remoteAppPath,
+ DeploymentTarget: i.DeploymentTarget,
+ SSHPublicKeys: i.SSHPublicKeys(),
+ Username: i.Username(),
})
if err != nil {
return err
@@ -152,7 +269,7 @@ func (i *Instance) LaunchAndWait() error {
i.Username(),
)
}
- if ok := WaitUntilContext(i.Context, 30, 5*time.Second, func() bool {
+ if ok := waitUntilInstanceSSHReady(i.Context, func() bool {
if i.Logger != nil {
i.Logger.Infof(
"Waiting for SSH username=%s ip=%s ssh=\"ssh %s\"",
@@ -163,7 +280,12 @@ func (i *Instance) LaunchAndWait() error {
}
return i.SSHReady()
}); !ok {
- return errors.New("can't connect to instance over SSH")
+ if i.Logger != nil {
+ if diagErr := i.SSHReadyDiagnostic(); diagErr != nil {
+ i.Logger.Warnf("SSH readiness diagnostics: %v", diagErr)
+ }
+ }
+ return errInstanceSSHUnavailable
}
if i.Logger != nil {
i.Logger.Infof("Instance ssh access OK")
@@ -183,6 +305,18 @@ func (i *Instance) SSHReady() bool {
return i.SSH("test -f /etc/pullpreview/ready", nil) == nil
}
+func (i *Instance) SSHReadyDiagnostic() error {
+ output, err := i.SSHOutput(sshReadyDiagnosticCommand, nil)
+ if err == nil {
+ return nil
+ }
+ output = strings.TrimSpace(output)
+ if output == "" {
+ return err
+ }
+ return fmt.Errorf("%w: %s", err, output)
+}
+
func (i *Instance) PublicIP() string {
return i.Access.IPAddress
}
@@ -210,7 +344,7 @@ func (i *Instance) PortsWithDefaults() []string {
proxyTLSEnabled := strings.TrimSpace(i.ProxyTLS) != ""
ports := []string{}
for _, port := range i.Ports {
- if proxyTLSEnabled && firewallRuleTargetsPort(port, 80) {
+ if proxyTLSEnabled && i.DeploymentTarget != DeploymentTargetHelm && firewallRuleTargetsPort(port, 80) {
continue
}
ports = append(ports, port)
@@ -317,6 +451,24 @@ func (i *Instance) appendRemoteFile(input io.Reader, target, mode string) error
return i.SSH(command, input)
}
+func (i *Instance) sshArgs(keyFile, certFile string) []string {
+ args := []string{}
+ if i.Logger != nil && i.Logger.level <= LevelDebug {
+ args = append(args, "-v")
+ }
+ args = append(args,
+ "-o", "ServerAliveInterval=15",
+ "-o", "IdentitiesOnly=yes",
+ "-i", keyFile,
+ )
+ if strings.TrimSpace(certFile) != "" {
+ args = append(args, "-o", "CertificateFile="+certFile)
+ }
+ args = append(args, i.SSHOptions()...)
+ args = append(args, i.SSHAddress())
+ return args
+}
+
func (i *Instance) SSH(command string, input io.Reader) error {
keyFile, certFile, err := i.writeTempKeys()
if err != nil {
@@ -329,27 +481,35 @@ func (i *Instance) SSH(command string, input io.Reader) error {
}
}()
- args := []string{}
- if i.Logger != nil && i.Logger.level <= LevelDebug {
- args = append(args, "-v")
- }
- args = append(args,
- "-o", "ServerAliveInterval=15",
- "-o", "IdentitiesOnly=yes",
- "-i", keyFile,
- )
- args = append(args, i.SSHOptions()...)
- args = append(args, i.SSHAddress())
+ args := i.sshArgs(keyFile, certFile)
args = append(args, command)
cmd := exec.CommandContext(i.Context, "ssh", args...)
cmd.Stdin = input
- if input == nil {
- cmd.Stdin = os.Stdin
- }
return i.Runner.Run(cmd)
}
+func (i *Instance) SSHOutput(command string, input io.Reader) (string, error) {
+ keyFile, certFile, err := i.writeTempKeys()
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ _ = os.Remove(keyFile)
+ if certFile != "" {
+ _ = os.Remove(certFile)
+ }
+ }()
+
+ args := i.sshArgs(keyFile, certFile)
+ args = append(args, command)
+
+ cmd := exec.CommandContext(i.Context, "ssh", args...)
+ cmd.Stdin = input
+ output, err := runSSHCombinedOutput(cmd)
+ return string(output), err
+}
+
func (i *Instance) SSHAddress() string {
username := i.Username()
if username == "" {
@@ -360,6 +520,8 @@ func (i *Instance) SSHAddress() string {
func (i *Instance) SSHOptions() []string {
return []string{
+ "-o", "BatchMode=yes",
+ "-o", "IdentityAgent=none",
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "LogLevel=ERROR",
@@ -401,7 +563,14 @@ func (i *Instance) EnsureRemoteAuthorizedKeysOwner() error {
}
func (i *Instance) DeployApp(appPath string) error {
- return i.DeployWithDockerContext(appPath)
+ switch i.DeploymentTarget {
+ case DeploymentTargetHelm:
+ return i.DeployWithHelm(appPath)
+ case DeploymentTargetCompose:
+ return i.DeployWithDockerContext(appPath)
+ default:
+ return fmt.Errorf("unsupported deployment target %q", i.DeploymentTarget)
+ }
}
func (i *Instance) SetupScripts() error {
diff --git a/internal/pullpreview/instance_test.go b/internal/pullpreview/instance_test.go
index 608c31e..1cae75c 100644
--- a/internal/pullpreview/instance_test.go
+++ b/internal/pullpreview/instance_test.go
@@ -1,6 +1,8 @@
package pullpreview
import (
+ "context"
+ "errors"
"os"
"os/exec"
"path/filepath"
@@ -17,6 +19,33 @@ func (r *captureRunner) Run(cmd *exec.Cmd) error {
return nil
}
+type launchSpyProvider struct {
+ launchOpts []LaunchOptions
+ terminateCalls int
+}
+
+func (p *launchSpyProvider) Launch(name string, opts LaunchOptions) (AccessDetails, error) {
+ p.launchOpts = append(p.launchOpts, opts)
+ return AccessDetails{IPAddress: "1.2.3.4", Username: "ec2-user", PrivateKey: "PRIVATE"}, nil
+}
+
+func (p *launchSpyProvider) Terminate(name string) error {
+ p.terminateCalls++
+ return nil
+}
+
+func (p *launchSpyProvider) Running(name string) (bool, error) {
+ return false, nil
+}
+
+func (p *launchSpyProvider) ListInstances(tags map[string]string) ([]InstanceSummary, error) {
+ return nil, nil
+}
+
+func (p *launchSpyProvider) Username() string {
+ return "ec2-user"
+}
+
func TestPortsWithDefaultsDeduplicatesValues(t *testing.T) {
inst := NewInstance("example", CommonOptions{
Ports: []string{"443/tcp", "22", "443/tcp"},
@@ -82,6 +111,32 @@ func TestProxyTLSForcesHTTPSDefaults(t *testing.T) {
}
}
+func TestProxyTLSKeepsPort80ForHelm(t *testing.T) {
+ inst := NewInstance("my-app", CommonOptions{
+ DeploymentTarget: DeploymentTargetHelm,
+ DNS: "my.preview.run",
+ DefaultPort: "8080",
+ Ports: []string{"80/tcp", "443/tcp"},
+ ProxyTLS: "app-wordpress:80",
+ }, fakeProvider{}, nil)
+
+ ports := inst.PortsWithDefaults()
+ expected := map[string]bool{
+ "80/tcp": true,
+ "443/tcp": true,
+ "443": true,
+ "22": true,
+ }
+ if len(ports) != len(expected) {
+ t.Fatalf("unexpected ports list: %#v", ports)
+ }
+ for _, port := range ports {
+ if !expected[port] {
+ t.Fatalf("unexpected port %q in %#v", port, ports)
+ }
+ }
+}
+
func TestFirewallRuleTargetsPort(t *testing.T) {
cases := []struct {
rule string
@@ -165,6 +220,9 @@ func TestSSHBuildsCommandWithExpectedArguments(t *testing.T) {
if !strings.Contains(args, "ec2-user@1.2.3.4") || !strings.Contains(args, "echo ok") {
t.Fatalf("unexpected ssh command args: %s", args)
}
+ if !strings.Contains(args, "BatchMode=yes") || !strings.Contains(args, "IdentityAgent=none") {
+ t.Fatalf("expected non-interactive ssh options, got: %s", args)
+ }
}
func TestSetupSSHAccessAppendsAuthorizedKeys(t *testing.T) {
@@ -185,3 +243,65 @@ func TestSetupSSHAccessAppendsAuthorizedKeys(t *testing.T) {
t.Fatalf("expected SetupSSHAccess to append authorized_keys, command: %s", command)
}
}
+
+func TestSSHReadyDiagnosticIncludesRemoteDetails(t *testing.T) {
+ inst := NewInstance("my-app", CommonOptions{}, fakeProvider{}, nil)
+ inst.Access = AccessDetails{IPAddress: "1.2.3.4", Username: "ec2-user", PrivateKey: "PRIVATE"}
+
+ original := runSSHCombinedOutput
+ defer func() { runSSHCombinedOutput = original }()
+
+ runSSHCombinedOutput = func(cmd *exec.Cmd) ([]byte, error) {
+ args := strings.Join(cmd.Args, " ")
+ if !strings.Contains(args, "ready-marker-missing") {
+ t.Fatalf("expected SSH readiness diagnostic command, got %s", args)
+ }
+ return []byte("ready-marker-missing\n-- cloud-init status --\nstatus: error"), errors.New("exit status 1")
+ }
+
+ err := inst.SSHReadyDiagnostic()
+ if err == nil {
+ t.Fatalf("expected SSHReadyDiagnostic() error")
+ }
+ if !strings.Contains(err.Error(), "ready-marker-missing") {
+ t.Fatalf("expected ready marker context in error, got %v", err)
+ }
+ if !strings.Contains(err.Error(), "status: error") {
+ t.Fatalf("expected cloud-init details in error, got %v", err)
+ }
+}
+
+func TestLaunchAndWaitRetriesOnceAfterSSHTimeout(t *testing.T) {
+ provider := &launchSpyProvider{}
+ inst := NewInstance("my-app", CommonOptions{}, provider, nil)
+
+ originalWait := waitUntilInstanceSSHReady
+ defer func() { waitUntilInstanceSSHReady = originalWait }()
+
+ waitCalls := 0
+ waitUntilInstanceSSHReady = func(ctx context.Context, probe func() bool) bool {
+ waitCalls++
+ return false
+ }
+
+ originalSSH := runSSHCombinedOutput
+ defer func() { runSSHCombinedOutput = originalSSH }()
+
+ runSSHCombinedOutput = func(cmd *exec.Cmd) ([]byte, error) {
+ return []byte("ready-marker-missing"), errors.New("exit status 1")
+ }
+
+ err := inst.LaunchAndWait()
+ if !errors.Is(err, errInstanceSSHUnavailable) {
+ t.Fatalf("LaunchAndWait() error = %v, want %v", err, errInstanceSSHUnavailable)
+ }
+ if len(provider.launchOpts) != 2 {
+ t.Fatalf("expected two launch attempts, got %d", len(provider.launchOpts))
+ }
+ if waitCalls != 2 {
+ t.Fatalf("expected two SSH wait cycles, got %d", waitCalls)
+ }
+ if provider.terminateCalls != 1 {
+ t.Fatalf("expected one terminate on SSH timeout, got %d", provider.terminateCalls)
+ }
+}
diff --git a/internal/pullpreview/types.go b/internal/pullpreview/types.go
index 7c79f45..6194008 100644
--- a/internal/pullpreview/types.go
+++ b/internal/pullpreview/types.go
@@ -5,6 +5,13 @@ import (
"time"
)
+type DeploymentTarget string
+
+const (
+ DeploymentTargetCompose DeploymentTarget = "compose"
+ DeploymentTargetHelm DeploymentTarget = "helm"
+)
+
type Provider interface {
Launch(name string, opts LaunchOptions) (AccessDetails, error)
Terminate(name string) error
@@ -18,18 +25,14 @@ type ProviderMetadata interface {
DisplayName() string
}
-type SupportsSnapshots interface {
- SupportsSnapshots() bool
-}
-
-type SupportsRestore interface {
- SupportsRestore() bool
-}
-
type SupportsFirewall interface {
SupportsFirewall() bool
}
+type SupportsDeploymentTarget interface {
+ SupportsDeploymentTarget(target DeploymentTarget) bool
+}
+
type UserDataProvider interface {
BuildUserData(options UserDataOptions) (string, error)
}
@@ -42,9 +45,10 @@ type AccessDetails struct {
}
type UserDataOptions struct {
- AppPath string
- SSHPublicKeys []string
- Username string
+ AppPath string
+ DeploymentTarget DeploymentTarget
+ SSHPublicKeys []string
+ Username string
}
type LaunchOptions struct {
@@ -66,24 +70,30 @@ type InstanceSummary struct {
}
type CommonOptions struct {
- Region string
- Image string
- Admins []string
- AdminPublicKeys []string
- Context context.Context
- CIDRs []string
- Registries []string
- ProxyTLS string
- DNS string
- Ports []string
- InstanceType string
- DefaultPort string
- Tags map[string]string
- ComposeFiles []string
- ComposeOptions []string
- PreScript string
- Preflight bool
- EnableLock bool
+ ProviderName string
+ Region string
+ Image string
+ Admins []string
+ AdminPublicKeys []string
+ Context context.Context
+ CIDRs []string
+ Registries []string
+ ProxyTLS string
+ DNS string
+ Ports []string
+ InstanceType string
+ DefaultPort string
+ Tags map[string]string
+ DeploymentTarget DeploymentTarget
+ ComposeFiles []string
+ ComposeOptions []string
+ Chart string
+ ChartRepository string
+ ChartValues []string
+ ChartSet []string
+ PreScript string
+ Preflight bool
+ EnableLock bool
}
type DownOptions struct {
diff --git a/internal/pullpreview/up.go b/internal/pullpreview/up.go
index 23acd6d..6db8d7b 100644
--- a/internal/pullpreview/up.go
+++ b/internal/pullpreview/up.go
@@ -15,6 +15,9 @@ func RunUp(opts UpOptions, provider Provider, logger *Logger) (*Instance, error)
if opts.Subdomain != "" {
instance.WithSubdomain(opts.Subdomain)
}
+ if err := instance.ValidateDeploymentConfig(); err != nil {
+ return nil, err
+ }
appPath := opts.AppPath
clonePath, cloneCleanup, err := instance.CloneIfURL(appPath)
@@ -71,7 +74,11 @@ func RunUp(opts UpOptions, provider Provider, logger *Logger) (*Instance, error)
fmt.Printf(" %s\n\n", instance.URL())
fmt.Println(instructions)
fmt.Println("Then to view the logs:")
- fmt.Println(" docker-compose logs --tail 1000 -f")
+ if instance.DeploymentTarget == DeploymentTargetHelm {
+ fmt.Printf(" ssh %s sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl logs -n %s deploy/pullpreview-caddy -f\n", instance.SSHAddress(), instance.HelmNamespace())
+ } else {
+ fmt.Println(" docker-compose logs --tail 1000 -f")
+ }
fmt.Println()
return instance, nil
diff --git a/internal/pullpreview/user_data.go b/internal/pullpreview/user_data.go
deleted file mode 100644
index 3869f35..0000000
--- a/internal/pullpreview/user_data.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package pullpreview
-
-import (
- "strings"
-)
-
-type UserData struct {
- AppPath string
- SSHPublicKeys []string
- Username string
-}
-
-func (u UserData) Script() string {
- homeDir := HomeDirForUser(u.Username)
- lines := []string{
- "#!/bin/bash",
- "set -xe ; set -o pipefail",
- }
- if len(u.SSHPublicKeys) > 0 {
- lines = append(lines, "echo '"+strings.Join(u.SSHPublicKeys, "\n")+"' > "+homeDir+"/.ssh/authorized_keys")
- }
- lines = append(lines,
- "mkdir -p "+u.AppPath+" && chown -R "+u.Username+":"+u.Username+" "+u.AppPath,
- "echo 'cd "+u.AppPath+"' > /etc/profile.d/pullpreview.sh",
- "test -s /swapfile || ( fallocate -l 2G /swapfile && chmod 600 /swapfile && mkswap /swapfile && swapon /swapfile && echo '/swapfile none swap sw 0 0' | tee -a /etc/fstab )",
- "systemctl disable --now tmp.mount",
- "systemctl mask tmp.mount",
- "sysctl vm.swappiness=10 && sysctl vm.vfs_cache_pressure=50",
- "echo 'vm.swappiness=10' | tee -a /etc/sysctl.conf",
- "echo 'vm.vfs_cache_pressure=50' | tee -a /etc/sysctl.conf",
- "yum install -y docker",
- "curl -L \"https://github.com/docker/compose/releases/download/v2.18.1/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose",
- "chmod +x /usr/local/bin/docker-compose",
- "usermod -aG docker "+u.Username,
- "systemctl restart docker",
- "echo 'docker system prune -f && docker image prune -a --filter=\"until=96h\" --force' > /etc/cron.daily/docker-prune && chmod a+x /etc/cron.daily/docker-prune",
- "mkdir -p /etc/pullpreview && touch /etc/pullpreview/ready && chown -R "+u.Username+":"+u.Username+" /etc/pullpreview",
- )
- return strings.Join(lines, "\n")
-}
diff --git a/internal/pullpreview/user_data_test.go b/internal/pullpreview/user_data_test.go
deleted file mode 100644
index 29d7d70..0000000
--- a/internal/pullpreview/user_data_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package pullpreview
-
-import (
- "strings"
- "testing"
-)
-
-func TestUserDataScriptIncludesExpectedCommands(t *testing.T) {
- script := UserData{
- AppPath: "/app",
- Username: "ec2-user",
- SSHPublicKeys: []string{"ssh-ed25519 AAA", "ssh-rsa BBB"},
- }.Script()
-
- checks := []string{
- "#!/bin/bash",
- "echo 'ssh-ed25519 AAA\nssh-rsa BBB' > /home/ec2-user/.ssh/authorized_keys",
- "mkdir -p /app && chown -R ec2-user:ec2-user /app",
- "yum install -y docker",
- "mkdir -p /etc/pullpreview && touch /etc/pullpreview/ready && chown -R ec2-user:ec2-user /etc/pullpreview",
- }
- for _, fragment := range checks {
- if !strings.Contains(script, fragment) {
- t.Fatalf("expected script to contain %q, script:\n%s", fragment, script)
- }
- }
-}
-
-func TestUserDataScriptWithoutSSHKeys(t *testing.T) {
- script := UserData{
- AppPath: "/app",
- Username: "ec2-user",
- }.Script()
- if strings.Contains(script, "authorized_keys") {
- t.Fatalf("did not expect authorized_keys setup without keys, script:\n%s", script)
- }
-}
-
-func TestUserDataScriptRootUserAndAppPaths(t *testing.T) {
- script := UserData{
- AppPath: "/app",
- Username: "root",
- }.Script()
- if !strings.Contains(script, "mkdir -p /app && chown -R root:root /app") {
- t.Fatalf("expected root app path in script:\n%s", script)
- }
- if strings.Contains(script, "authorized_keys") {
- t.Fatalf("did not expect authorized_keys setup without keys, script:\n%s", script)
- }
-}
-
-func TestUserDataScriptRootUserAndSSHKeys(t *testing.T) {
- script := UserData{
- AppPath: "/app",
- Username: "root",
- SSHPublicKeys: []string{"ssh-ed25519 ROOT"},
- }.Script()
- if !strings.Contains(script, "echo 'ssh-ed25519 ROOT' > /root/.ssh/authorized_keys") {
- t.Fatalf("expected root authorized_keys setup in script:\n%s", script)
- }
-}
diff --git a/internal/pullpreview/utils.go b/internal/pullpreview/utils.go
index 1e85dbe..5e80c05 100644
--- a/internal/pullpreview/utils.go
+++ b/internal/pullpreview/utils.go
@@ -35,6 +35,20 @@ func WaitUntilContext(ctx context.Context, maxRetries int, interval time.Duratio
}
}
+func pollAttemptsForWindow(window, interval time.Duration) int {
+ if window <= 0 || interval <= 0 {
+ return 1
+ }
+
+ // WaitUntilContext checks once before sleeping, so add one attempt to cover
+ // the full window when the probe fails quickly.
+ attempts := int(window / interval)
+ if window%interval != 0 {
+ attempts++
+ }
+ return attempts + 1
+}
+
func EnsureContext(ctx context.Context) context.Context {
if ctx == nil {
return context.Background()
diff --git a/internal/pullpreview/utils_test.go b/internal/pullpreview/utils_test.go
new file mode 100644
index 0000000..06b78c8
--- /dev/null
+++ b/internal/pullpreview/utils_test.go
@@ -0,0 +1,48 @@
+package pullpreview
+
+import (
+ "testing"
+ "time"
+)
+
+func TestPollAttemptsForWindow(t *testing.T) {
+ tests := []struct {
+ name string
+ window time.Duration
+ interval time.Duration
+ want int
+ }{
+ {
+ name: "full five minute window at five second cadence",
+ window: 5 * time.Minute,
+ interval: 5 * time.Second,
+ want: 61,
+ },
+ {
+ name: "rounds up partial interval windows",
+ window: 301 * time.Second,
+ interval: 5 * time.Second,
+ want: 62,
+ },
+ {
+ name: "non-positive window falls back to one attempt",
+ window: 0,
+ interval: 5 * time.Second,
+ want: 1,
+ },
+ {
+ name: "non-positive interval falls back to one attempt",
+ window: 5 * time.Minute,
+ interval: 0,
+ want: 1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := pollAttemptsForWindow(tt.window, tt.interval); got != tt.want {
+ t.Fatalf("pollAttemptsForWindow(%s, %s) = %d, want %d", tt.window, tt.interval, got, tt.want)
+ }
+ })
+ }
+}