diff --git a/.github/workflows/cre-local-env-tests.yaml b/.github/workflows/cre-local-env-tests.yaml index f75f28c2fe0..0ecd84f8e72 100644 --- a/.github/workflows/cre-local-env-tests.yaml +++ b/.github/workflows/cre-local-env-tests.yaml @@ -112,7 +112,7 @@ jobs: uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 with: aws-region: ${{ secrets.QA_AWS_REGION }} - role-to-assume: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + role-to-assume: ${{ secrets.AWS_CTF_READ_ACCESS_ROLE_ARN }} role-duration-seconds: 1800 mask-aws-account-id: true @@ -151,7 +151,8 @@ jobs: env: DISABLE_DX_TRACKING: true GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_ECR: ${{ secrets.AWS_ACCOUNT_ID_PROD }}.dkr.ecr.us-west-2.amazonaws.com + MAIN_AWS_ECR: ${{ secrets.AWS_ACCOUNT_ID_PROD }}.dkr.ecr.us-west-2.amazonaws.com + SDLC_AWS_ECR: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.us-west-2.amazonaws.com run: | # Remove chip_ingress/chip_config sections since CI role lacks ECR permissions for the Atlas repo awk '/^\[chip_ingress\.build_config\]/,/^$/{next} /^\[chip_ingress\.pull_config\]/,/^$/{next} /^\[chip_config\.build_config\]/,/^$/{next} /^\[chip_config\.pull_config\]/,/^$/{next} {print}' configs/setup.toml > configs/setup.toml.tmp && mv configs/setup.toml.tmp configs/setup.toml @@ -165,6 +166,7 @@ jobs: CTF_CONFIGS: "./configs/workflow-gateway-don.toml" CTF_JD_IMAGE: "${{ secrets.AWS_ACCOUNT_ID_PROD }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/job-distributor:0.22.1" CTF_CHAINLINK_IMAGE: "${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink:${{ github.event_name == 'pull_request' && format('nightly-{0}-plugins', steps.set-date.outputs.date) || inputs.chainlink_image_tag }}" + CTF_CHIP_ROUTER_IMAGE: "${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/local-cre-chip-router:v1.0.0" DISABLE_DX_TRACKING: "true" CI: "true" run: | diff --git 
a/.github/workflows/cre-regression-system-tests.yaml b/.github/workflows/cre-regression-system-tests.yaml index 49416beaffb..e67d3f02766 100644 --- a/.github/workflows/cre-regression-system-tests.yaml +++ b/.github/workflows/cre-regression-system-tests.yaml @@ -106,6 +106,7 @@ jobs: # Beholder stack will be started only for the Beholder tests CHIP_INGRESS_IMAGE: ${{ secrets.AWS_ACCOUNT_ID_PROD }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/atlas-chip-ingress:da84cb72d3a160e02896247d46ab4b9806ebee2f CHIP_CONFIG_IMAGE: ${{ secrets.AWS_ACCOUNT_ID_PROD }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/atlas-chip-config:7b4e9ee68fd1c737dd3480b5a3ced0188f29b969 + CTF_CHIP_ROUTER_IMAGE: "${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/local-cre-chip-router:v1.0.0" BILLING_PLATFORM_SERVICE_IMAGE: ${{ secrets.AWS_ACCOUNT_ID_PROD }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/billing-platform-service:v1.45.0 steps: diff --git a/.github/workflows/cre-soak-memory-leak.yml b/.github/workflows/cre-soak-memory-leak.yml index 77e9d9081a8..a3aa6308f52 100644 --- a/.github/workflows/cre-soak-memory-leak.yml +++ b/.github/workflows/cre-soak-memory-leak.yml @@ -118,6 +118,7 @@ jobs: working-directory: system-tests/tests env: GITHUB_TOKEN: ${{ steps.github-token.outputs.access-token || '' }} + CTF_CHIP_ROUTER_IMAGE: "${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/local-cre-chip-router:v1.0.0" run: | gotestsum \ --jsonfile=/tmp/gotest.log \ diff --git a/.github/workflows/cre-system-tests.yaml b/.github/workflows/cre-system-tests.yaml index dffb2896a01..7c621ee3f11 100644 --- a/.github/workflows/cre-system-tests.yaml +++ b/.github/workflows/cre-system-tests.yaml @@ -250,6 +250,7 @@ jobs: env: CTF_JD_IMAGE: "${{ secrets.AWS_ACCOUNT_ID_PROD }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/job-distributor:0.22.1" CTF_CHAINLINK_IMAGE: "${{ 
steps.resolve-chainlink-image.outputs.resolved_image }}" + CTF_CHIP_ROUTER_IMAGE: "${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/local-cre-chip-router:v1.0.0" CTF_CONFIGS: ${{ matrix.tests.configs }} CRE_VERSION: ${{ matrix.tests.cre_version }} TEST_NAME: ${{ matrix.tests.test_name }} diff --git a/core/scripts/cre/environment/README.md b/core/scripts/cre/environment/README.md index 342892f492b..cff02a5f9df 100644 --- a/core/scripts/cre/environment/README.md +++ b/core/scripts/cre/environment/README.md @@ -15,7 +15,7 @@ Slack: #topic-local-dev-environments - [Start Environment](#start-environment) - [Using a pre-built Chainlink image](#using-a-pre-built-chainlink-image) - [Beholder](#beholder) - - [Beholder vs. ChIP Test Sink](#beholder-vs-chip-test-sink-port-conflict-and-using-both-together) + - [Chip Router Topology](#chip-router-topology) - [Storage](#storage) - [Purging environment state](#purging-environment-state) - [Stop Environment](#stop-environment) @@ -76,7 +76,7 @@ Slack: #topic-local-dev-environments - [Automated Hot Swapping with fswatch](#automated-hot-swapping-with-fswatch) 8. [Telemetry Configuration](#telemetry-configuration) - [OTEL Stack (OpenTelemetry)](#otel-stack-opentelemetry) - - [Chip Ingress (Beholder)](#chip-ingress-beholder) + - [Chip Router and Beholder](#chip-router-and-beholder) - [Expected Error Messages](#expected-error-messages) 9. [Using a Specific Docker Image for Chainlink Node](#using-a-specific-docker-image-for-chainlink-node) 10. [Using Existing EVM & P2P Keys](#using-existing-evm--p2p-keys) @@ -147,8 +147,8 @@ It will compile local CRE as `local_cre`. With it installed you will be able to # QUICKSTART ``` -# e.g. AWS_ECR=.dkr.ecr..amazonaws.com -AWS_ECR= go run . env start --auto-setup +# e.g. MAIN_AWS_ECR= SDLC_AWS_ECR= +MAIN_AWS_ECR= SDLC_AWS_ECR= go run . 
env start --auto-setup ``` > You can find `PROD_ACCOUNT_ID` and `REGION` in the `[profile prod]` section of the [AWS CLI configuration guide](https://smartcontract-it.atlassian.net/wiki/spaces/INFRA/pages/1045495923/Configure+the+AWS+CLI#Configure). If for some reason you want to limit the AWS config to bare minimum, include only `staging-default` profile and `cl-secure-sso` session entries. @@ -161,11 +161,17 @@ Refer to [this document](https://docs.google.com/document/d/1HtVLv2ipx2jvU15WYOi Environment can be setup by running `go run . env setup` inside `core/scripts/cre/environment` folder. Its configuration is defined in [configs/setup.toml](configs/setup.toml) file. It will make sure that: - you have AWS CLI installed and configured - you have GH CLI installed and authenticated -- you have required Job Distributor, Chip Ingress, and Chip Config images +- you have required Job Distributor, Chip Router, Chip Ingress, and Chip Config images **Image Versioning:** -Docker images for Beholder services (chip-ingress, chip-config) use commit-based tags instead of mutable tags like `local-cre`. This ensures you always know which version is running and prevents hard-to-debug issues from version mismatches. The exact versions are defined in [configs/setup.toml](configs/setup.toml). +Managed CRE images use local aliases with commit-based tags instead of mutable tags like `latest` or account-qualified ECR names. For example, env TOMLs use `chip-router:`, while [configs/setup.toml](configs/setup.toml) defines how that alias is built locally or pulled from ECR and retagged locally. The setup config now distinguishes between the main CRE registry (`MAIN_AWS_ECR`) and the separate Chip Router registry (`SDLC_AWS_ECR`). + +`env start` treats Chip Router as required infrastructure. 
It resolves the effective router image in this order: +- `CTF_CHIP_ROUTER_IMAGE`, if set +- `chip_router.image` from your env TOML + +If the effective image is missing locally, startup uses the same build-or-pull fallback flow as the Beholder images. The committed env TOMLs intentionally use the local alias, not the full remote ECR image. **Plugin installation during image build:** @@ -231,7 +237,7 @@ Apply this to **all** nodes in the nodeset. Nightly images are built by the [Doc ### Beholder -When environment is started with `--with-beholder` or with `-b` flag after the DON is ready we will boot up `Chip Ingress` and `Red Panda`, create a `cre` topic and download and install workflow-related protobufs from the [chainlink-protos](https://github.com/smartcontractkit/chainlink-protos/tree/main/workflows) repository. +When environment is started with `--with-beholder` or with `-b` flag after the DON is ready we boot up real ChIP Ingress and Red Panda, create a `cre` topic, and download and install workflow-related protobufs from the [chainlink-protos](https://github.com/smartcontractkit/chainlink-protos/tree/main/workflows) repository. Once up and running you will be able to access [CRE topic view](http://localhost:8080/topics/cre) to see workflow-emitted events. These include both standard events emitted by the Workflow Engine and custom events emitted from your workflow. @@ -249,8 +255,8 @@ Beholder requires `chip-ingress` and `chip-config` Docker images with specific v When starting Beholder, the system will: - **In CI (`CI=true`)**: Skip image checks (docker-compose will pull at runtime) -- **Interactive terminal**: Auto-build missing images from sources. If build fails and `AWS_ECR` is set, you'll be offered to pull from ECR instead -- **Non-interactive (tests, scripts)**: Auto-pull from ECR if `AWS_ECR` is set, otherwise fail with instructions +- **Interactive terminal**: Auto-build missing images from sources. 
If build fails and the required registry env vars are set, you'll be offered to pull from ECR instead +- **Non-interactive (tests, scripts)**: Auto-pull from ECR if the required registry env vars are set, otherwise fail with instructions To manually ensure images are available, run: ```bash @@ -258,37 +264,38 @@ To manually ensure images are available, run: go run . env setup # Or pull from ECR (requires AWS SSO access) -AWS_ECR=.dkr.ecr.us-west-2.amazonaws.com go run . env setup +MAIN_AWS_ECR= SDLC_AWS_ECR= go run . env setup ``` -#### Beholder vs. ChIP Test Sink: Port Conflict and Using Both Together +#### Chip Router Topology -Both the **real Beholder** (Chip Ingress + Red Panda) and the **ChIP Test Sink** (used by CRE system tests for assertions) bind to the same gRPC port by default (50051). Chainlink nodes are configured to send workflow telemetry to `host.docker.internal:50051`, so only one service can receive on that port at a time. +Chip Router is the single owner of ChIP ingress on `50051`. Chainlink nodes send workflow telemetry to the router, and the router fans that traffic out to downstream subscribers. -**Default behavior in tests:** -- Most CRE smoke/regression tests use the **test sink** (`t_helpers.StartChipTestSink`). The sink listens on 50051, receives CloudEvents from nodes, and runs test assertions. No Kafka/Red Panda. -- Beholder-specific tests (e.g. `Test_CRE_V2_Suite` with Cron Beholder scenario, `Test_CRE_V1_Billing_Cron_Beholder`) use **real Beholder** via `t_helpers.StartBeholder`. They start Beholder on 50051, consume from Kafka, and run assertions. The test cleanup stops Beholder so subsequent tests can use the test sink. 
+That means: +- test sinks no longer bind the node ingress port directly +- real ChIP / Beholder no longer owns `50051` +- both paths are treated as downstream subscribers behind the same ingress owner -**To use both together** (test assertions + Red Panda/Kafka observability): +Current local port layout: +- `50050`: Chip Router admin API +- `50051`: Chip Router ingress gRPC +- `50052`: chip-config +- `50053`: real ChIP / Beholder ingress gRPC -1. **Start Beholder on a different port** (e.g. 50052): - ```bash - go run . env beholder start --grpc-port 50052 - ``` - Or, when starting the full environment: - ```bash - go run . env start --with-beholder --grpc-port 50052 - ``` +In tests: +- sink-backed tests start an ephemeral sink and register it with Chip Router +- Beholder-backed tests start real ChIP on `50053` and register it with Chip Router -2. **Run the test sink on the default port (50051)** so it receives events from nodes. The test sink must listen on 50051 because node config is fixed to that port. +Router component output is persisted in [state/local_cre.toml](state/local_cre.toml) under `chip_router.out`. Subscriber IDs remain separate runtime artifacts because they are lifecycle bookkeeping, not part of the environment topology. -3. **Configure the test sink to forward to Beholder** by setting `UpstreamEndpoint` in the sink config. The `chiptestsink` package supports this, but `t_helpers.StartChipTestSink` does not expose it. To use both: - - Use `chiptestsink.NewServer` directly with `Config{UpstreamEndpoint: "localhost:50052", ...}` instead of `StartChipTestSink`, or - - Extend the test helper to accept an optional upstream endpoint. +To override the router image without changing committed TOMLs, set: -4. **Resulting flow:** Nodes → test sink (50051) → assertions + forward → Beholder (50052) → Kafka/Red Panda. +```bash +export CTF_CHIP_ROUTER_IMAGE=chip-router: +``` -**Summary:** Use either Beholder or the test sink alone for simplicity. 
Use both only when you need test assertions and Red Panda observability in the same run; then run Beholder on a non-default port and configure the sink to forward to it. +This override wins over `chip_router.image` in the env TOML. +Chip Router pulls use `SDLC_AWS_ECR`; the rest of the managed CRE images use `MAIN_AWS_ECR`. ### Storage @@ -1713,19 +1720,28 @@ ctf obs u This provides access to Grafana, Prometheus, and Loki for monitoring and log aggregation. -### Chip Ingress (Beholder) -Nodes send workflow events to `chip-ingress:50051` for workflow monitoring. Start Chip Ingress either: +### Chip Router and Beholder +Nodes send workflow events to `host.docker.internal:50051`, which is owned by Chip Router. Chip Router fans out those events to registered downstream subscribers such as test sinks and real ChIP / Beholder. + +Start the full environment plus Beholder with: -**Option 1: Start with environment** ```bash go run . env start --with-beholder ``` -**Option 2: Start separately** +Or start Beholder separately after the environment is already up: + ```bash go run . 
env beholder start ``` +Chip Router ports: +- admin: `50050` +- ingress gRPC: `50051` + +Real ChIP / Beholder downstream port: +- gRPC: `50053` + ### OTel Tracing Configuration To enable OpenTelemetry (OTel) tracing for workflow engines and see traces in Tempo/Grafana, **multiple configuration toggles must be set**: diff --git a/core/scripts/cre/environment/configs/setup.toml b/core/scripts/cre/environment/configs/setup.toml index 313c6e82996..9742ac1af0e 100644 --- a/core/scripts/cre/environment/configs/setup.toml +++ b/core/scripts/cre/environment/configs/setup.toml @@ -12,7 +12,19 @@ local_image = "job-distributor:0.22.1" [job_distributor.pull_config] local_image = "job-distributor:0.22.1" -ecr_image = "{{.ECR}}/job-distributor:0.22.1" +ecr_image = "{{.MAIN_ECR}}/job-distributor:0.22.1" + +[chip_router.build_config] +repository = "https://github.com/smartcontractkit/chainlink-testing-framework" +branch = "main" +commit = "838769782600ad166f1afd2bca0de02ef4c42862" +dockerfile = "framework/components/chiprouter/Dockerfile" +docker_ctx = "framework/components/chiprouter" +local_image = "local-cre-chip-router:v1.0.1" + +[chip_router.pull_config] +local_image = "local-cre-chip-router:v1.0.1" +ecr_image = "{{.SDLC_ECR}}/local-cre-chip-router:v1.0.1" [chip_ingress.build_config] repository = "https://github.com/smartcontractkit/atlas" @@ -25,7 +37,7 @@ pre_run = "pushd chip-ingress && go mod vendor && popd" [chip_ingress.pull_config] local_image = "chip-ingress:da84cb72d3a160e02896247d46ab4b9806ebee2f" -ecr_image = "{{.ECR}}/atlas-chip-ingress:da84cb72d3a160e02896247d46ab4b9806ebee2f" +ecr_image = "{{.MAIN_ECR}}/atlas-chip-ingress:da84cb72d3a160e02896247d46ab4b9806ebee2f" [chip_config.build_config] repository = "https://github.com/smartcontractkit/atlas" @@ -38,7 +50,7 @@ pre_run = "pushd chip-config && go mod vendor && popd" [chip_config.pull_config] local_image = "chip-config:7b4e9ee68fd1c737dd3480b5a3ced0188f29b969" -ecr_image = 
"{{.ECR}}/atlas-chip-config:7b4e9ee68fd1c737dd3480b5a3ced0188f29b969" +ecr_image = "{{.MAIN_ECR}}/atlas-chip-config:7b4e9ee68fd1c737dd3480b5a3ced0188f29b969" [billing_platform_service.build_config] repository = "https://github.com/smartcontractkit/billing-platform-service" @@ -50,7 +62,7 @@ local_image = "billing-platform-service:local-cre" [billing_platform_service.pull_config] local_image = "billing-platform-service:local-cre" -ecr_image = "{{.ECR}}/billing-platform-service:1.36.1" +ecr_image = "{{.MAIN_ECR}}/billing-platform-service:1.36.1" [observability] repository = "https://github.com/smartcontractkit/chainlink-observability" diff --git a/core/scripts/cre/environment/configs/workflow-don-solana.toml b/core/scripts/cre/environment/configs/workflow-don-solana.toml index 947ef9e5eef..14a6452c83a 100644 --- a/core/scripts/cre/environment/configs/workflow-don-solana.toml +++ b/core/scripts/cre/environment/configs/workflow-don-solana.toml @@ -1,4 +1,7 @@ +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" diff --git a/core/scripts/cre/environment/configs/workflow-don-tron.toml b/core/scripts/cre/environment/configs/workflow-don-tron.toml index 711e6efe0b7..bceba577cdb 100755 --- a/core/scripts/cre/environment/configs/workflow-don-tron.toml +++ b/core/scripts/cre/environment/configs/workflow-don-tron.toml @@ -1,4 +1,7 @@ +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" diff --git a/core/scripts/cre/environment/configs/workflow-gateway-capabilities-don.toml b/core/scripts/cre/environment/configs/workflow-gateway-capabilities-don.toml index b94d80787b6..8b70049da33 100644 --- a/core/scripts/cre/environment/configs/workflow-gateway-capabilities-don.toml +++ b/core/scripts/cre/environment/configs/workflow-gateway-capabilities-don.toml @@ -1,4 +1,7 @@ +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" 
diff --git a/core/scripts/cre/environment/configs/workflow-gateway-don-aptos.toml b/core/scripts/cre/environment/configs/workflow-gateway-don-aptos.toml index 2748039bff0..6746b4e6997 100644 --- a/core/scripts/cre/environment/configs/workflow-gateway-don-aptos.toml +++ b/core/scripts/cre/environment/configs/workflow-gateway-don-aptos.toml @@ -1,6 +1,9 @@ # Same as workflow-gateway-don.toml but with Aptos chain and a single Aptos capability. # Anvil 1337: registry and gateway. Aptos: local devnet (chain_id 4). Run: env config path , then env start. +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" diff --git a/core/scripts/cre/environment/configs/workflow-gateway-don-grpc-source.toml b/core/scripts/cre/environment/configs/workflow-gateway-don-grpc-source.toml index 01ae469e74d..09b6571935b 100644 --- a/core/scripts/cre/environment/configs/workflow-gateway-don-grpc-source.toml +++ b/core/scripts/cre/environment/configs/workflow-gateway-don-grpc-source.toml @@ -4,6 +4,9 @@ # # Used by: system-tests/tests/smoke/cre/v2_grpc_source_test.go +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" diff --git a/core/scripts/cre/environment/configs/workflow-gateway-don.toml b/core/scripts/cre/environment/configs/workflow-gateway-don.toml index 197853faa28..5bbb9f55a1f 100644 --- a/core/scripts/cre/environment/configs/workflow-gateway-don.toml +++ b/core/scripts/cre/environment/configs/workflow-gateway-don.toml @@ -1,4 +1,7 @@ +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" diff --git a/core/scripts/cre/environment/configs/workflow-gateway-legacy-vault-don.toml b/core/scripts/cre/environment/configs/workflow-gateway-legacy-vault-don.toml index 40c5cb85d16..b352240acd3 100644 --- a/core/scripts/cre/environment/configs/workflow-gateway-legacy-vault-don.toml +++ 
b/core/scripts/cre/environment/configs/workflow-gateway-legacy-vault-don.toml @@ -1,5 +1,8 @@ # NOTE: Identical to workflow-gatewway-capabilities.toml but with a vault capability config override # to disable the new pending queue feature. +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" diff --git a/core/scripts/cre/environment/configs/workflow-gateway-mock-don.toml b/core/scripts/cre/environment/configs/workflow-gateway-mock-don.toml index 0e66be77870..bf3d098e09e 100644 --- a/core/scripts/cre/environment/configs/workflow-gateway-mock-don.toml +++ b/core/scripts/cre/environment/configs/workflow-gateway-mock-don.toml @@ -1,3 +1,6 @@ +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] chain_id = "1337" container_name = "anvil-1337" diff --git a/core/scripts/cre/environment/configs/workflow-gateway-sharded-5-dons.toml b/core/scripts/cre/environment/configs/workflow-gateway-sharded-5-dons.toml index e9453a70c50..5f4feb374fc 100644 --- a/core/scripts/cre/environment/configs/workflow-gateway-sharded-5-dons.toml +++ b/core/scripts/cre/environment/configs/workflow-gateway-sharded-5-dons.toml @@ -1,4 +1,7 @@ +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" diff --git a/core/scripts/cre/environment/configs/workflow-gateway-sharded-don.toml b/core/scripts/cre/environment/configs/workflow-gateway-sharded-don.toml index c1943d030ba..992212c2b7d 100644 --- a/core/scripts/cre/environment/configs/workflow-gateway-sharded-don.toml +++ b/core/scripts/cre/environment/configs/workflow-gateway-sharded-don.toml @@ -1,4 +1,7 @@ +[chip_router] + image = "local-cre-chip-router:v1.0.1" + [[blockchains]] type = "anvil" chain_id = "1337" diff --git a/core/scripts/cre/environment/environment/beholder.go b/core/scripts/cre/environment/environment/beholder.go index 826498bcf80..badc93b7254 100644 --- a/core/scripts/cre/environment/environment/beholder.go +++ 
b/core/scripts/cre/environment/environment/beholder.go @@ -23,7 +23,9 @@ import ( "github.com/spf13/cobra" "github.com/smartcontractkit/chainlink-testing-framework/framework" + ctfchiprouter "github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter" chipingressset "github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose/chip_ingress_set" + "github.com/smartcontractkit/chainlink/system-tests/lib/cre/chiprouter" envconfig "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment/config" "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment/stagegen" libformat "github.com/smartcontractkit/chainlink/system-tests/lib/format" @@ -287,6 +289,10 @@ func startBeholderCmd() *cobra.Command { return fmt.Errorf("failed to set TESTCONTAINERS_RYUK_DISABLED environment variable: %w", setErr) } + if routerErr := hydrateChipRouterForBeholder(cmd.Context()); routerErr != nil { + return errors.Wrap(routerErr, "failed to hydrate chip ingress router. Please make sure that local CRE environment is started and that the chip ingress router is running") + } + startBeholderErr = startBeholder(cmd.Context(), timeout, port) if startBeholderErr != nil { // remove the stack if the error is not related to proto registration @@ -305,7 +311,7 @@ func startBeholderCmd() *cobra.Command { } cmd.Flags().DurationVarP(&timeout, "wait-on-error-timeout", "w", 15*time.Second, "Time to wait before removing Docker containers if environment fails to start (e.g. 
10s, 1m, 1h)") - cmd.Flags().IntVarP(&port, "grpc-port", "g", mustStringToInt(chipingressset.DEFAULT_CHIP_INGRESS_GRPC_PORT), "GRPC port for Chip Ingress") + cmd.Flags().IntVarP(&port, "grpc-port", "g", ctfchiprouter.DefaultBeholderGRPCPort, "GRPC port for downstream Chip Ingress") return cmd } @@ -319,6 +325,51 @@ func mustStringToInt(in string) int { return out } +func hydrateChipRouterForBeholder(ctx context.Context) error { + return chiprouter.EnsureStarted(ctx, relativePathToRepoRoot, "") +} + +func loadPersistedBeholderState(relativePathToRepoRoot string) (*envconfig.ChipIngressConfig, error) { + absPath := envconfig.MustChipIngressStateFileAbsPath(relativePathToRepoRoot) + if _, err := os.Stat(absPath); err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, errors.Wrap(err, "failed to stat persisted Beholder state") + } + + cfg := &envconfig.ChipIngressConfig{} + if err := cfg.Load(absPath); err != nil { + return nil, errors.Wrap(err, "failed to load persisted Beholder state") + } + + return cfg, nil +} + +func persistedBeholderGRPCEndpoint(cfg *envconfig.ChipIngressConfig) string { + if cfg == nil || cfg.ChipIngress == nil || cfg.ChipIngress.Output == nil || cfg.ChipIngress.Output.ChipIngress == nil { + return "" + } + + return strings.TrimSpace(cfg.ChipIngress.Output.ChipIngress.GRPCExternalURL) +} + +func restorePersistedBeholderState(relativePathToRepoRoot string, cfg *envconfig.ChipIngressConfig) error { + if cfg == nil { + return nil + } + return cfg.Store(envconfig.MustChipIngressStateFileAbsPath(relativePathToRepoRoot)) +} + +func reconcilePersistedBeholderWithRouter(ctx context.Context, cfg *envconfig.ChipIngressConfig) error { + endpoint := persistedBeholderGRPCEndpoint(cfg) + if endpoint == "" { + return errors.New("persisted Beholder state is missing chip ingress grpc endpoint") + } + + return registerBeholderEndpointWithRouter(ctx, endpoint) +} + var stopBeholderCmd = &cobra.Command{ Use: "stop", Short: "Stop the Beholder", 
@@ -330,6 +381,17 @@ var stopBeholderCmd = &cobra.Command{ } func stopBeholder() error { + subscriberID, loadSubscriberErr := loadBeholderSubscriberID(relativePathToRepoRoot) + if loadSubscriberErr != nil && !os.IsNotExist(loadSubscriberErr) { + framework.L.Warn().Err(loadSubscriberErr).Msg("failed to load Beholder router subscriber id") + } + if subscriberID != "" { + unregisterErr := chiprouter.UnregisterSubscriber(context.Background(), relativePathToRepoRoot, subscriberID) + if unregisterErr != nil && !os.IsNotExist(unregisterErr) && !strings.Contains(unregisterErr.Error(), "local CRE state file not found") && !strings.Contains(unregisterErr.Error(), "no such file or directory") { + framework.L.Warn().Err(unregisterErr).Msg("failed to unregister Beholder from chip ingress router") + } + } + setErr := os.Setenv("CTF_CONFIGS", DefaultBeholderConfigFile) if setErr != nil { return fmt.Errorf("failed to set CTF_CONFIGS environment variable: %w", setErr) @@ -350,7 +412,13 @@ func removeBeholderStateFiles(relativePathToRepoRoot string) error { return errors.Wrap(absErr, "error getting absolute path for chip ingress state file") } - return os.Remove(absPath) + if err := os.Remove(absPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := os.Remove(beholderSubscriberIDPath(relativePathToRepoRoot)); err != nil && !os.IsNotExist(err) { + return err + } + return nil } func isPortAvailable(addr string) bool { @@ -368,12 +436,20 @@ var protoRegistrationErrMsg = "proto registration failed" // MissingImage represents an image that needs to be built or pulled type MissingImage struct { Name string - Tag string FullImage string BuildConfig BuildConfig PullConfig PullConfig } +func newMissingImage(name string, cfg ImageConfig) MissingImage { + return MissingImage{ + Name: name, + FullImage: cfg.BuildConfig.LocalImage, + BuildConfig: cfg.BuildConfig, + PullConfig: cfg.PullConfig, + } +} + // ensureChipImagesExist checks if required chip images exist and auto-builds 
them if missing. // In CI environments (CI=true), this check is skipped as images will be pulled at runtime. func ensureChipImagesExist(ctx context.Context, cfg *SetupConfigFile) error { @@ -383,42 +459,35 @@ func ensureChipImagesExist(ctx context.Context, cfg *SetupConfigFile) error { return nil } + var requiredImages []MissingImage + if cfg.ChipIngress != nil { + requiredImages = append(requiredImages, newMissingImage("chip-ingress", ImageConfig{ + BuildConfig: cfg.ChipIngress.BuildConfig, + PullConfig: cfg.ChipIngress.PullConfig, + })) + } + if cfg.ChipConfig != nil { + requiredImages = append(requiredImages, newMissingImage("chip-config", ImageConfig{ + BuildConfig: cfg.ChipConfig.BuildConfig, + PullConfig: cfg.ChipConfig.PullConfig, + })) + } + + return ensureManagedImagesExist(ctx, cfg.General.AWSProfile, requiredImages) +} + +func ensureManagedImagesExist(ctx context.Context, awsProfile string, requiredImages []MissingImage) error { dockerClient, err := client.NewClientWithOpts(client.WithAPIVersionNegotiation()) if err != nil { return errors.Wrap(err, "failed to create Docker client") } defer dockerClient.Close() - // Check if Docker is running _, err = dockerClient.Ping(ctx) if err != nil { return errors.Wrap(err, "Docker is not running") } - // Collect required images - var requiredImages []MissingImage - - if cfg.ChipIngress != nil { - requiredImages = append(requiredImages, MissingImage{ - Name: "chip-ingress", - Tag: cfg.ChipIngress.BuildConfig.Commit, - FullImage: cfg.ChipIngress.BuildConfig.LocalImage, - BuildConfig: cfg.ChipIngress.BuildConfig, - PullConfig: cfg.ChipIngress.PullConfig, - }) - } - - if cfg.ChipConfig != nil { - requiredImages = append(requiredImages, MissingImage{ - Name: "chip-config", - Tag: cfg.ChipConfig.BuildConfig.Commit, - FullImage: cfg.ChipConfig.BuildConfig.LocalImage, - BuildConfig: cfg.ChipConfig.BuildConfig, - PullConfig: cfg.ChipConfig.PullConfig, - }) - } - - // Find missing images var missing []MissingImage for _, img 
:= range requiredImages { _, err := dockerClient.ImageInspect(ctx, img.FullImage) @@ -434,23 +503,21 @@ func ensureChipImagesExist(ctx context.Context, cfg *SetupConfigFile) error { return nil } - ecrURL := os.Getenv("AWS_ECR") + missingRegistryVars := missingRegistryEnvVars(missing) interactive := isInteractiveTerminal() // Non-interactive mode handling if !interactive { - if ecrURL != "" { - // Non-interactive with AWS_ECR - pull images - framework.L.Info().Msgf("Non-interactive mode with AWS_ECR set. Pulling %d missing image(s) from ECR...", len(missing)) - return pullAllImages(ctx, cfg, missing) + if len(missingRegistryVars) == 0 { + framework.L.Info().Msgf("Non-interactive mode with required ECR env vars set. Pulling %d missing image(s) from ECR...", len(missing)) + return pullAllImages(ctx, awsProfile, missing) } - // Non-interactive without AWS_ECR - fail with instructions - framework.L.Error().Msgf("Missing %d required image(s) and AWS_ECR is not set:", len(missing)) + framework.L.Error().Msgf("Missing %d required image(s) and required ECR env vars are not set:", len(missing)) for _, img := range missing { framework.L.Error().Msgf(" - %s", img.FullImage) } - printChipImagePullInstructions() - return errors.Errorf("missing %d required image(s). Set AWS_ECR to enable auto-pull or run 'go run . env setup' manually", len(missing)) + printChipImagePullInstructions(missingRegistryVars) + return errors.Errorf("missing %d required image(s). Set %s to enable auto-pull or run 'go run . 
env setup' manually", len(missing), strings.Join(missingRegistryVars, ", ")) } // Interactive mode - try building first @@ -478,14 +545,14 @@ func ensureChipImagesExist(ctx context.Context, cfg *SetupConfigFile) error { } // Some builds failed - offer to pull all failed images - return handleChipImageBuildFailures(ctx, cfg, failedBuilds, buildErrors) + return handleChipImageBuildFailures(ctx, awsProfile, failedBuilds, buildErrors) } // pullAllImages pulls all specified images from ECR -func pullAllImages(ctx context.Context, cfg *SetupConfigFile, images []MissingImage) error { +func pullAllImages(ctx context.Context, awsProfile string, images []MissingImage) error { for _, img := range images { framework.L.Info().Msgf("Pulling %s from ECR...", img.Name) - _, pullErr := img.PullConfig.Pull(ctx, cfg.General.AWSProfile) + _, pullErr := img.PullConfig.Pull(ctx, awsProfile) if pullErr != nil { return errors.Wrapf(pullErr, "failed to pull %s", img.Name) } @@ -505,7 +572,7 @@ func isInteractiveTerminal() bool { } // handleChipImageBuildFailures handles build failures by offering to pull all failed images -func handleChipImageBuildFailures(ctx context.Context, cfg *SetupConfigFile, failedImages []MissingImage, buildErrors []error) error { +func handleChipImageBuildFailures(ctx context.Context, awsProfile string, failedImages []MissingImage, buildErrors []error) error { // List all failed images fmt.Println() framework.L.Error().Msgf("Failed to build %d image(s):", len(failedImages)) @@ -513,14 +580,14 @@ func handleChipImageBuildFailures(ctx context.Context, cfg *SetupConfigFile, fai framework.L.Error().Msgf(" - %s: %v", img.FullImage, buildErrors[i]) } - ecrURL := os.Getenv("AWS_ECR") - if ecrURL != "" { + missingRegistryVars := missingRegistryEnvVars(failedImages) + if len(missingRegistryVars) == 0 { shouldPull := false if isInteractiveTerminal() { // Interactive mode - ask user fmt.Println() - fmt.Printf("AWS_ECR is set. 
Would you like to pull all %d failed image(s) from ECR instead? [Y/n] ", len(failedImages)) + fmt.Printf("Required ECR env vars are set. Would you like to pull all %d failed image(s) from ECR instead? [Y/n] ", len(failedImages)) reader := bufio.NewReader(os.Stdin) input, _ := reader.ReadString('\n') @@ -537,7 +604,7 @@ func handleChipImageBuildFailures(ctx context.Context, cfg *SetupConfigFile, fai // Pull all failed images for _, img := range failedImages { framework.L.Info().Msgf("Pulling %s from ECR...", img.Name) - _, pullErr := img.PullConfig.Pull(ctx, cfg.General.AWSProfile) + _, pullErr := img.PullConfig.Pull(ctx, awsProfile) if pullErr != nil { return errors.Wrapf(pullErr, "failed to pull %s", img.Name) } @@ -548,19 +615,41 @@ func handleChipImageBuildFailures(ctx context.Context, cfg *SetupConfigFile, fai } // Show manual instructions - printChipImagePullInstructions() + printChipImagePullInstructions(missingRegistryVars) return errors.Errorf("failed to build %d image(s)", len(failedImages)) } -// printChipImagePullInstructions prints helpful instructions for pulling images manually -func printChipImagePullInstructions() { +func missingRegistryEnvVars(images []MissingImage) []string { + seen := make(map[string]struct{}) + var missing []string + for _, img := range images { + for _, envVar := range img.PullConfig.MissingRegistryEnvVars() { + if _, ok := seen[envVar]; ok { + continue + } + seen[envVar] = struct{}{} + missing = append(missing, envVar) + } + } + return missing +} + +// printChipImagePullInstructions prints helpful instructions for pulling images manually. +func printChipImagePullInstructions(requiredEnvVars []string) { fmt.Println() fmt.Println("────────────────────────────────────────────────────────────────") fmt.Println("To pull pre-built images instead, run:") fmt.Println() - fmt.Println(" AWS_ECR=.dkr.ecr.us-west-2.amazonaws.com go run . 
env setup") + if len(requiredEnvVars) == 0 { + requiredEnvVars = []string{mainECREnvVarName, sdlcECREnvVarName} + } + assignments := make([]string, 0, len(requiredEnvVars)) + for _, envVar := range requiredEnvVars { + assignments = append(assignments, envVar+"=") + } + fmt.Printf(" %s go run . env setup\n", strings.Join(assignments, " ")) fmt.Println() - fmt.Println("Replace with prod AWS account number.") + fmt.Printf("Set the required registry env vars: %s.\n", strings.Join(requiredEnvVars, ", ")) fmt.Println("See: https://smartcontract-it.atlassian.net/wiki/spaces/INFRA/pages/1045495923") fmt.Println("────────────────────────────────────────────────────────────────") fmt.Println() @@ -603,9 +692,8 @@ func startBeholder(cmdContext context.Context, cleanupWait time.Duration, port i fmt.Print(libformat.PurpleText("%s", stageGen.Wrap("Starting Chip Ingress stack"))) if !isPortAvailable(":" + strconv.Itoa(port)) { - return fmt.Errorf(`port %d is already in use. Most probably an instance of ChIP Test Sink is already running. -If you want to use both together start ChIP Ingress on a different port with '--grpc-port' flag -and make sure that the sink is pointing to correct upstream endpoint ('localhost:' in most cases)`, port) + return fmt.Errorf(`port %d is already in use. Either an instance of ChIP Router or ChIP Test Sink is already running. 
+If you want to use both together start ChIP Ingress on a different port with '--grpc-port' flag`, port) } // Load setup config to check for required images @@ -696,6 +784,10 @@ and make sure that the sink is pointing to correct upstream endpoint ('localhost fmt.Println() framework.L.Info().Msgf("Red Panda Console URL: %s", out.RedPanda.ConsoleExternalURL) + if err := registerBeholderWithRouter(cmdContext, port); err != nil { + return errors.Wrap(err, "failed to register Beholder with chip ingress router") + } + topicsErr := chipingressset.CreateTopics(cmdContext, out.RedPanda.KafkaExternalURL, in.Kafka.Topics) if topicsErr != nil { return errors.Wrap(topicsErr, "failed to create topics") @@ -714,6 +806,45 @@ and make sure that the sink is pointing to correct upstream endpoint ('localhost return in.Store(envconfig.MustChipIngressStateFileAbsPath(relativePathToRepoRoot)) } +func registerBeholderWithRouter(ctx context.Context, port int) error { + return registerBeholderEndpointWithRouter(ctx, fmt.Sprintf("127.0.0.1:%d", port)) +} + +func registerBeholderEndpointWithRouter(ctx context.Context, endpoint string) error { + previousID, err := loadBeholderSubscriberID(relativePathToRepoRoot) + if err == nil && previousID != "" { + _ = chiprouter.UnregisterSubscriber(ctx, relativePathToRepoRoot, previousID) + } + + id, err := chiprouter.RegisterSubscriber(ctx, relativePathToRepoRoot, "beholder", endpoint) + if err != nil { + return err + } + + // Persist the returned subscriber id so stopBeholder can unregister it without reading transient test output. 
+ if id == "" { + return errors.New("empty subscriber id returned when registering Beholder") + } + + statePath := beholderSubscriberIDPath(relativePathToRepoRoot) + if writeErr := os.WriteFile(statePath, []byte(id), 0o600); writeErr != nil { + return errors.Wrap(writeErr, "failed to persist Beholder router subscriber id") + } + return nil +} + +func loadBeholderSubscriberID(relativePathToRepoRoot string) (string, error) { + raw, err := os.ReadFile(beholderSubscriberIDPath(relativePathToRepoRoot)) + if err != nil { + return "", err + } + return strings.TrimSpace(string(raw)), nil +} + +func beholderSubscriberIDPath(relativePathToRepoRoot string) string { + return filepath.Join(relativePathToRepoRoot, envconfig.StateDirname, "chip_ingress_router_beholder_subscriber") +} + func parseConfigsAndRegisterProtos(ctx context.Context, schemaSets []chipingressset.SchemaSet, chipIngressOutput *chipingressset.ChipIngressOutput) error { if len(schemaSets) == 0 { framework.L.Warn().Msg("no proto configs provided, skipping proto registration") diff --git a/core/scripts/cre/environment/environment/environment.go b/core/scripts/cre/environment/environment/environment.go index e39de360694..ab10127c816 100644 --- a/core/scripts/cre/environment/environment/environment.go +++ b/core/scripts/cre/environment/environment/environment.go @@ -57,6 +57,7 @@ const ( manualCtfCleanupMsg = `unexpected startup error. this may have stranded resources. please manually remove containers with 'ctf' label and delete their volumes` manualBeholderCleanupMsg = `unexpected startup error. this may have stranded resources. please manually remove the 'chip-ingress' stack` manualBillingCleanupMsg = `unexpected startup error. this may have stranded resources. please manually remove the 'billing-platform-service' stack` + CTFChipRouterImageEnvVar = "CTF_CHIP_ROUTER_IMAGE" ) var ( @@ -256,6 +257,11 @@ func startCmd() *cobra.Command { return fmt.Errorf("with-plugins-docker-image flag is no longer supported. 
Set Docker image in TOML config instead (%s) for each nodeset under the [nodesets.nodesets.node_specs.node.image] field", effectiveConfig) } + persistedBeholderState, persistedBeholderStateErr := loadPersistedBeholderState(relativePathToRepoRoot) + if persistedBeholderStateErr != nil { + framework.L.Warn().Err(persistedBeholderStateErr).Msg("failed to load persisted Beholder state before startup cleanup") + } + cleanUpErr := envconfig.RemoveAllEnvironmentStateDir(relativePathToRepoRoot) if cleanUpErr != nil { return errors.Wrap(cleanUpErr, "failed to clean up environment state files") @@ -273,6 +279,7 @@ func startCmd() *cobra.Command { if err := in.Load(os.Getenv("CTF_CONFIGS")); err != nil { return errors.Wrap(err, "failed to load environment configuration") } + applyChipRouterImageOverride(in) // Skip Docker operations for Kubernetes provider (Docker not needed) isDocker := in.Infra != nil && !in.Infra.IsKubernetes() @@ -284,6 +291,10 @@ func startCmd() *cobra.Command { return err } + if err := ensureChipRouterImageExists(cmdContext, in, setupConfig.ConfigPath); err != nil { + return err + } + // This will not work with remote images that require authentication, but it will catch early most of the issues with missing env setup if err := ensureDockerImagesExist(cmdContext, framework.L, in); err != nil { return err @@ -368,6 +379,19 @@ func startCmd() *cobra.Command { return errors.Wrap(startErr, "failed to start environment") } + storeErr := in.Store(envconfig.MustLocalCREStateFileAbsPath(relativePathToRepoRoot)) + if storeErr != nil { + return errors.Wrap(storeErr, "failed to store local CRE state") + } + + if !withBeholder && persistedBeholderState != nil { + if err := reconcilePersistedBeholderWithRouter(cmdContext, persistedBeholderState); err != nil { + framework.L.Warn().Err(err).Msg("failed to re-register persisted Beholder with chip ingress router") + } else if err := restorePersistedBeholderState(relativePathToRepoRoot, persistedBeholderState); err != 
nil { + framework.L.Warn().Err(err).Msg("failed to restore persisted Beholder state after router re-registration") + } + } + registryChainOut := output.CreEnvironment.Blockchains[0] sErr := StartCmdGenerateSettingsFile(registryChainOut, output) @@ -494,7 +518,7 @@ func startCmd() *cobra.Command { if stErr != nil { return errors.Wrap(stErr, "failed to set addresses on Config") } - storeErr := in.Store(envconfig.MustLocalCREStateFileAbsPath(relativePathToRepoRoot)) + storeErr = in.Store(envconfig.MustLocalCREStateFileAbsPath(relativePathToRepoRoot)) if storeErr != nil { return errors.Wrap(storeErr, "failed to store local CRE state") } @@ -698,6 +722,7 @@ func StartCLIEnvironment( universalSetupInput := &creenv.SetupInput{ NodeSets: in.NodeSets, BlockchainsInput: in.Blockchains, + ChipRouterInput: in.ChipRouter, ContractVersions: env.ContractVersions(), WithV2Registries: env.WithV2Registries(), JdInput: in.JD, @@ -759,7 +784,7 @@ func PrintCRELogo() { func setDefaultCtfConfigs() error { if os.Getenv("CTF_CONFIGS") == "" { - if err := os.Setenv("CTF_CONFIGS", "configs/workflow-gateway-don.toml"); err != nil { + if err := os.Setenv("CTF_CONFIGS", "configs/workflow-gateway-capabilities-don.toml"); err != nil { return fmt.Errorf("failed to set CTF_CONFIGS environment variable: %w", err) } @@ -775,6 +800,61 @@ func setDefaultCtfConfigs() error { return nil } +func applyChipRouterImageOverride(in *envconfig.Config) { + if in == nil || in.ChipRouter == nil { + return + } + + override := strings.TrimSpace(os.Getenv(CTFChipRouterImageEnvVar)) + if override == "" { + return + } + + in.ChipRouter.Image = override + framework.L.Info().Msgf("Using Chip Router image override from %s: %s", CTFChipRouterImageEnvVar, override) +} + +func effectiveChipRouterImage(configImage string) (string, error) { + if override := strings.TrimSpace(os.Getenv(CTFChipRouterImageEnvVar)); override != "" { + return override, nil + } + if strings.TrimSpace(configImage) != "" { + return configImage, nil + 
} + return "", errors.New("no chip router image found") +} + +func ensureChipRouterImageExists(ctx context.Context, in *envconfig.Config, setupConfigPath string) error { + if in == nil || in.ChipRouter == nil || (in.Infra != nil && in.Infra.IsKubernetes()) { + return nil + } + + effectiveImage, err := effectiveChipRouterImage(in.ChipRouter.Image) + if err != nil { + return errors.Wrap(err, "failed to get effective chip router image") + } + in.ChipRouter.Image = effectiveImage + + setupCfg, err := ReadSetupConfig(setupConfigPath) + if err != nil { + return errors.Wrap(err, "failed to read setup config for chip router image validation") + } + if setupCfg.ChipRouter == nil { + return errors.New("chip_router configuration is missing from setup config") + } + + routerImage := newMissingImage("chip-router", ImageConfig{ + BuildConfig: setupCfg.ChipRouter.BuildConfig, + PullConfig: setupCfg.ChipRouter.PullConfig, + }.WithLocalImage(effectiveImage)) + + if err := ensureManagedImagesExist(ctx, setupCfg.General.AWSProfile, []MissingImage{routerImage}); err != nil { + return errors.Wrapf(err, "Chip Router image '%s' is not available", effectiveImage) + } + + return nil +} + func hasBuiltDockerImage(in *envconfig.Config) bool { for _, nodeset := range in.NodeSets { for _, nodeSpec := range nodeset.NodeSpecs { @@ -1040,6 +1120,9 @@ func allEnvironmentStateFiles() ([]string, error) { func initLocalCREStageGen(in *envconfig.Config) *stagegen.StageGen { stages := 9 + if in.ChipRouter != nil { + stages++ + } if in.S3ProviderInput != nil { stages++ } diff --git a/core/scripts/cre/environment/environment/setup.go b/core/scripts/cre/environment/environment/setup.go index 4e15f3d5d04..cb1cfb74e62 100644 --- a/core/scripts/cre/environment/environment/setup.go +++ b/core/scripts/cre/environment/environment/setup.go @@ -60,6 +60,7 @@ func init() { type SetupConfigFile struct { General GeneralConfig `toml:"general"` JobDistributor JobDistributorConfig `toml:"job_distributor"` + ChipRouter 
*ChipRouterConfig `toml:"chip_router"` ChipIngress *ChipIngressConfig `toml:"chip_ingress"` ChipConfig *ChipConfigConfig `toml:"chip_config"` BillingService *BillingServiceConfig `toml:"billing_platform_service"` @@ -78,6 +79,12 @@ type JobDistributorConfig struct { PullConfig PullConfig `toml:"pull_config"` } +// ChipRouterConfig contains chip router image configuration +type ChipRouterConfig struct { + BuildConfig BuildConfig `toml:"build_config"` + PullConfig PullConfig `toml:"pull_config"` +} + // ChipIngressConfig contains chip ingress image configuration type ChipIngressConfig struct { BuildConfig BuildConfig `toml:"build_config"` @@ -103,11 +110,20 @@ type ObservabilityConfig struct { TargetPath string `toml:"target_path"` } -var ( - ECR = os.Getenv("AWS_ECR") // TODO this can be moved to an env file +const DefaultSetupConfigPath = "configs/setup.toml" + +const ( + mainECREnvVarName = "MAIN_AWS_ECR" + sdlcECREnvVarName = "SDLC_AWS_ECR" ) -const DefaultSetupConfigPath = "configs/setup.toml" +func mainECR() string { + return os.Getenv(mainECREnvVarName) +} + +func sdlcECR() string { + return os.Getenv(sdlcECREnvVarName) +} type EnsureOption = string @@ -225,6 +241,10 @@ func (c BuildConfig) Build(ctx context.Context) (localImage string, err error) { tag = c.Branch commit = c.Commit ) + if strings.TrimSpace(c.LocalRepo) != "" { + repo = c.LocalRepo + } + logger := framework.L name := strings.ReplaceAll(strings.Split(c.LocalImage, ":")[0], "-", " ") name = cases.Title(language.English).String(name) @@ -291,9 +311,20 @@ type PullConfig struct { EcrImage string `toml:"ecr_image"` } +func (c PullConfig) MissingRegistryEnvVars() []string { + var missing []string + if strings.Contains(c.EcrImage, "{{.MAIN_ECR}}") && mainECR() == "" { + missing = append(missing, mainECREnvVarName) + } + if strings.Contains(c.EcrImage, "{{.SDLC_ECR}}") && sdlcECR() == "" { + missing = append(missing, sdlcECREnvVarName) + } + return missing +} + func (c PullConfig) Pull(ctx 
context.Context, awsProfile string) (localImage string, err error) { - if ECR == "" { - return "", errors.New("AWS_ECR environment variable is not set. See README for more details and references to find the correct ECR URL or visit https://smartcontract-it.atlassian.net/wiki/spaces/INFRA/pages/1045495923/Configure+the+AWS+CLI") + if missing := c.MissingRegistryEnvVars(); len(missing) > 0 { + return "", fmt.Errorf("%s environment variable(s) must be set. See README for setup details and https://smartcontract-it.atlassian.net/wiki/spaces/INFRA/pages/1045495923/Configure+the+AWS+CLI", strings.Join(missing, ", ")) } tmpl, tmplErr := template.New("ecr-image").Parse(c.EcrImage) @@ -302,7 +333,8 @@ func (c PullConfig) Pull(ctx context.Context, awsProfile string) (localImage str } templateData := map[string]string{ - "ECR": ECR, + "MAIN_ECR": mainECR(), + "SDLC_ECR": sdlcECR(), } var configBuffer bytes.Buffer @@ -319,6 +351,13 @@ type ImageConfig struct { PullConfig PullConfig } +func (c ImageConfig) WithLocalImage(localImage string) ImageConfig { + out := c + out.BuildConfig.LocalImage = localImage + out.PullConfig.LocalImage = localImage + return out +} + func (c ImageConfig) Ensure(ctx context.Context, dockerClient *client.Client, awsProfile string, noPrompt bool, defaultOption EnsureOption, purge bool) (localImage string, err error) { // If purge flag is set, remove existing images first if purge { @@ -333,7 +372,7 @@ func (c ImageConfig) Ensure(ctx context.Context, dockerClient *client.Client, aw logger.Warn().Msgf("Failed to remove local image %s: %v", c.BuildConfig.LocalImage, err) } - // Remove ECR image if it exists + // Remove remote-tagged image if it exists _, err = dockerClient.ImageRemove(ctx, c.PullConfig.EcrImage, image.RemoveOptions{Force: true}) if err != nil { logger.Warn().Msgf("Failed to remove ECR image %s: %v", c.PullConfig.EcrImage, err) @@ -487,6 +526,23 @@ func RunSetup(ctx context.Context, config SetupConfig, noPrompt, purge, withBill return } + 
var chipRouterLocalImage string + if cfg.ChipRouter != nil { + chipRouterConfig := ImageConfig{ + BuildConfig: cfg.ChipRouter.BuildConfig, + PullConfig: cfg.ChipRouter.PullConfig, + } + + var err error + chipRouterLocalImage, err = chipRouterConfig.Ensure(ctx, dockerClient, cfg.General.AWSProfile, noPrompt, PullOption, purge) + if err != nil { + setupErr = errors.Wrap(err, "failed to ensure Chip Router image") + return + } + } else { + logger.Warn().Str("config file", config.ConfigPath).Msg("Skipping Chip Router setup, because configuration is not provided in the config file") + } + var chipIngressLocalImage string if cfg.ChipIngress != nil { chipConfig := ImageConfig{ @@ -560,6 +616,9 @@ func RunSetup(ctx context.Context, config SetupConfig, noPrompt, purge, withBill logger.Info().Msg("✅ Setup Summary:") logger.Info().Msg(" ✓ Docker is installed and configured correctly") logger.Info().Msgf(" ✓ Job Distributor image %s is available", jdLocalImage) + if chipRouterLocalImage != "" { + logger.Info().Msgf(" ✓ Chip Router image %s is available", chipRouterLocalImage) + } if chipIngressLocalImage != "" { logger.Info().Msgf(" ✓ Atlas Chip Ingress image %s is available", chipIngressLocalImage) } @@ -744,8 +803,8 @@ func checkDockerConfiguration() error { return nil } -// localImageExists checks if the local image or ECR image exists -// if ECR image exists, it tags it as the local image +// localImageExists checks if the local image or rendered remote image exists +// if the rendered remote image exists, it tags it as the local image func localImageExists(ctx context.Context, dockerClient *client.Client, localImage, ecrImage string) (bool, error) { logger := framework.L name := strings.ReplaceAll(strings.Split(localImage, ":")[0], "-", " ") @@ -757,7 +816,7 @@ func localImageExists(ctx context.Context, dockerClient *client.Client, localIma return true, nil } - // Check if ECR image exists + // Check if rendered remote image exists _, err = dockerClient.ImageInspect(ctx, 
ecrImage) if err == nil { logger.Info().Msgf("✓ %s image (%s) is available", name, ecrImage) @@ -771,7 +830,7 @@ func localImageExists(ctx context.Context, dockerClient *client.Client, localIma return false, nil } -// pullImage pulls the Job Distributor image from ECR +// pullImage pulls the configured image from its remote registry and retags it locally. func pullImage(ctx context.Context, awsProfile string, localImage, ecrImage string) (string, error) { logger := framework.L name := strings.ReplaceAll(strings.Split(localImage, ":")[0], "-", " ") diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 8f3c0914764..ada77d649f3 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -53,7 +53,8 @@ require ( github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20260119171452-39c98c3b33cd github.com/smartcontractkit/chainlink-protos/cre/go v0.0.0-20260326111235-8c09d1a4491f github.com/smartcontractkit/chainlink-protos/job-distributor v0.18.0 - github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.12 + github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.13-0.20260402170437-86da0cefc22e + github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20 github.com/smartcontractkit/chainlink-testing-framework/lib v1.54.5 github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.5 @@ -584,7 +585,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.41.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 // indirect 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0 // indirect @@ -592,11 +593,11 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect go.opentelemetry.io/otel/log v0.15.0 // indirect go.opentelemetry.io/otel/metric v1.42.0 // indirect - go.opentelemetry.io/otel/sdk v1.41.0 // indirect + go.opentelemetry.io/otel/sdk v1.42.0 // indirect go.opentelemetry.io/otel/sdk/log v0.15.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.41.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.42.0 // indirect go.opentelemetry.io/otel/trace v1.42.0 // indirect - go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 35cacedb2ad..f432c76e461 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1702,8 +1702,10 @@ github.com/smartcontractkit/chainlink-sui v0.0.0-20260401201231-8b06d312c965 h1: github.com/smartcontractkit/chainlink-sui v0.0.0-20260401201231-8b06d312c965/go.mod h1:U3XStbEnbx/+L22n1/8aOIdgcGVxtsZB7p59xJGngAs= github.com/smartcontractkit/chainlink-sui/deployment v0.0.0-20260304150206-c64e48eb0cb0 h1:5NdsaclAfx+p8lZUZ3WIqMW3M9Cze1ZVPENOQhha1pk= github.com/smartcontractkit/chainlink-sui/deployment v0.0.0-20260304150206-c64e48eb0cb0/go.mod h1:IfeW6t5Yc5293H5ixuooAft+wYBMSFQWKjbBTwYiKr4= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.12 h1:1SkcN0ABoqhiuPua5jfLPjMu2dcVN+RvsUB6/BBZtN0= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.12/go.mod h1:BALK9cj8sk12e15UF6uDhifHgIApa+6N11TcQfInEro= +github.com/smartcontractkit/chainlink-testing-framework/framework 
v0.15.13-0.20260402170437-86da0cefc22e h1:QZpxC6blue/Hh+Ii1hpNToOcay4QZXrN0lJ6kOgxQD4= +github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.13-0.20260402170437-86da0cefc22e/go.mod h1:BALK9cj8sk12e15UF6uDhifHgIApa+6N11TcQfInEro= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b h1:Xr64gasse6l3SbYTS4RaGihonKrUPYnQcRvrk2w4KlI= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b/go.mod h1:hOtu1UY5WGENFqV7HBxYgq+/z5lUEZ705GA0Tuif7Ec= github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20 h1:8D2DUnn7mLUZOLhPDGGFKKvBrgU6LQd00tq2VOprvfI= github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20/go.mod h1:98jNYBOPuKWJw9a8x0LgQuudp5enrHhQQP5Hq0YwRB8= github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake v0.10.0 h1:PWAMYu0WaAMBfbpxCpFJGRIDHmcgmYin6a+UQC0OdtY= @@ -1992,8 +1994,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0 h1:VO3 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0/go.mod h1:qRDnJ2nv3CQXMK2HUd9K9VtvedsPAce3S+/4LZHjX/s= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.41.0 h1:MMrOAN8H1FrvDyq9UJ4lu5/+ss49Qgfgb7Zpm0m8ABo= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.41.0/go.mod h1:Na+2NNASJtF+uT4NxDe0G+NQb+bUgdPDfwxY/6JmS/c= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 h1:ao6Oe+wSebTlQ1OEht7jlYTzQKE+pnx/iNywFvTbuuI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0/go.mod h1:u3T6vz0gh/NVzgDgiwkgLxpsSF6PaPmo2il0apGJbls= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 h1:THuZiwpQZuHPul65w4WcwEnkX2QIuMT+UFoOrygtoJw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0/go.mod h1:J2pvYM5NGHofZ2/Ru6zw/TNWnEQp5crgyDeSrYpXkAw= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 h1:mq/Qcf28TWz719lE3/hMB4KkyDuLJIvgJnFGcd0kEUI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0/go.mod h1:yk5LXEYhsL2htyDNJbEq7fWzNEigeEdV5xBF/Y+kAv0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go= @@ -2010,21 +2012,21 @@ go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzu go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8= -go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= go.opentelemetry.io/otel/sdk/log v0.15.0 h1:WgMEHOUt5gjJE93yqfqJOkRflApNif84kxoHWS9VVHE= go.opentelemetry.io/otel/sdk/log v0.15.0/go.mod h1:qDC/FlKQCXfH5hokGsNg9aUBGMJQsrUyeOiW5u+dKBQ= go.opentelemetry.io/otel/sdk/log/logtest v0.13.0 h1:9yio6AFZ3QD9j9oqshV1Ibm9gPLlHNxurno5BreMtIA= go.opentelemetry.io/otel/sdk/log/logtest v0.13.0/go.mod h1:QOGiAJHl+fob8Nu85ifXfuQYmJTFAvcrxL6w5/tu168= -go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8= -go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= 
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= -go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= +go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= diff --git a/go.md b/go.md index 3bedafbab43..5d3c09e7b55 100644 --- a/go.md +++ b/go.md @@ -402,6 +402,9 @@ flowchart LR click chainlink-sui/deployment href "https://github.com/smartcontractkit/chainlink-sui" chainlink-testing-framework/framework --> chainlink-testing-framework/wasp click chainlink-testing-framework/framework href "https://github.com/smartcontractkit/chainlink-testing-framework" + chainlink-testing-framework/framework/components/chiprouter --> chainlink-common/pkg/chipingress + chainlink-testing-framework/framework/components/chiprouter --> chainlink-testing-framework/framework + click chainlink-testing-framework/framework/components/chiprouter href "https://github.com/smartcontractkit/chainlink-testing-framework" chainlink-testing-framework/framework/components/dockercompose --> chainlink-common/pkg/chipingress chainlink-testing-framework/framework/components/dockercompose --> chainlink-testing-framework/framework 
chainlink-testing-framework/framework/components/dockercompose --> freeport @@ -463,6 +466,7 @@ flowchart LR chainlink/load-tests --> chainlink-testing-framework/havoc chainlink/load-tests --> chainlink/integration-tests click chainlink/load-tests href "https://github.com/smartcontractkit/chainlink" + chainlink/system-tests/lib --> chainlink-testing-framework/framework/components/chiprouter chainlink/system-tests/lib --> chainlink-testing-framework/framework/components/dockercompose chainlink/system-tests/lib --> chainlink-testing-framework/framework/components/fake chainlink/system-tests/lib --> chainlink/deployment @@ -680,6 +684,7 @@ flowchart LR subgraph chainlink-testing-framework-repo[chainlink-testing-framework] chainlink-testing-framework/framework + chainlink-testing-framework/framework/components/chiprouter chainlink-testing-framework/framework/components/dockercompose chainlink-testing-framework/framework/components/fake chainlink-testing-framework/havoc diff --git a/system-tests/lib/cre/chiprouter/router.go b/system-tests/lib/cre/chiprouter/router.go new file mode 100644 index 00000000000..bbe483d5444 --- /dev/null +++ b/system-tests/lib/cre/chiprouter/router.go @@ -0,0 +1,223 @@ +package chiprouter + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "net/netip" + "os" + "strings" + "sync" + "time" + + pkgerrors "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-testing-framework/framework" + envconfig "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment/config" +) + +const ( + adminRequestTimeout = 5 * time.Second +) + +type registerSubscriberRequest struct { + Name string `json:"name"` + Endpoint string `json:"endpoint"` +} + +type registerSubscriberResponse struct { + ID string `json:"id"` +} + +type healthResponse struct { + AdminURL string `json:"admin_url"` + GRPCURL string `json:"grpc_url"` +} + +type client struct { + state *envconfig.ChipIngressRouterState + httpClient 
*http.Client +} + +var ( + clientOnce sync.Once + clientInst *client + errClient error +) + +func getClient(relativePathToRepoRoot string) (*client, error) { + clientOnce.Do(func() { + st, err := envconfig.LoadChipIngressRouterStateFromLocalCRE(relativePathToRepoRoot) + if err != nil { + errClient = err + return + } + clientInst = &client{ + state: st, + httpClient: &http.Client{Timeout: adminRequestTimeout}, + } + }) + + return clientInst, errClient +} + +func EnsureStarted(ctx context.Context, relativePathToRepoRoot, _ string) error { + c, err := getClient(relativePathToRepoRoot) + if err != nil { + if os.IsNotExist(err) { + return pkgerrors.New("local CRE state file not found; start the environment first") + } + return err + } + + if !isHTTPReady(ctx, c.state.AdminURL) { + return fmt.Errorf("chip ingress router admin endpoint is not reachable: %s", c.state.AdminURL) + } + if !isTCPReady(c.state.GRPCURL) { + return fmt.Errorf("chip ingress router grpc endpoint is not reachable: %s", c.state.GRPCURL) + } + return nil +} + +func RegisterSubscriber(ctx context.Context, relativePathToRepoRoot, name, endpoint string) (string, error) { + c, err := getClient(relativePathToRepoRoot) + if err != nil { + return "", err + } + + normalizedEndpoint := normalizeEndpointForRouter(endpoint, c.state) + + body, err := json.Marshal(registerSubscriberRequest{Name: name, Endpoint: normalizedEndpoint}) + if err != nil { + return "", pkgerrors.Wrap(err, "marshal chip router register request") + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, strings.TrimRight(c.state.AdminURL, "/")+"/subscribers", bytes.NewReader(body)) + if err != nil { + return "", pkgerrors.Wrap(err, "create chip router register request") + } + req.Header.Set("Content-Type", "application/json") + + resp, err := c.httpClient.Do(req) + if err != nil { + return "", pkgerrors.Wrap(err, "perform chip router register request") + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + 
return "", fmt.Errorf("chip router register request failed with status %s", resp.Status) + } + + var out registerSubscriberResponse + if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { + return "", pkgerrors.Wrap(err, "decode chip router register response") + } + if out.ID == "" { + return "", pkgerrors.New("chip router register response missing subscriber id") + } + + return out.ID, nil +} + +func UnregisterSubscriber(ctx context.Context, relativePathToRepoRoot, id string) error { + if strings.TrimSpace(id) == "" { + return nil + } + + c, err := getClient(relativePathToRepoRoot) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, strings.TrimRight(c.state.AdminURL, "/")+"/subscribers/"+id, nil) + if err != nil { + return pkgerrors.Wrap(err, "create chip router unregister request") + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return pkgerrors.Wrap(err, "perform chip router unregister request") + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("chip router unregister request failed with status %s", resp.Status) + } + return nil +} + +func normalizeEndpointForRouter(endpoint string, st *envconfig.ChipIngressRouterState) string { + if st == nil || strings.TrimSpace(st.ContainerName) == "" { + return endpoint + } + + host, port, err := net.SplitHostPort(strings.TrimSpace(endpoint)) + if err != nil { + return endpoint + } + + if !requiresHostGateway(host) { + return endpoint + } + + dockerHost := strings.TrimPrefix(framework.HostDockerInternal(), "http://") + return net.JoinHostPort(dockerHost, port) +} + +func requiresHostGateway(host string) bool { + switch strings.TrimSpace(host) { + case "", "localhost": + return true + } + + addr, err := netip.ParseAddr(host) + if err != nil { + return false + } + + return addr.IsLoopback() || addr.IsUnspecified() +} + +func isHTTPReady(ctx context.Context, 
adminURL string) bool { + _, err := fetchHealth(ctx, adminURL) + return err == nil +} + +func fetchHealth(ctx context.Context, adminURL string) (*healthResponse, error) { + if strings.TrimSpace(adminURL) == "" { + return nil, pkgerrors.New("admin url is empty") + } + req, err := http.NewRequestWithContext(ctx, http.MethodGet, strings.TrimRight(adminURL, "/")+"/health", nil) + if err != nil { + return nil, err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected health status %s", resp.Status) + } + var health healthResponse + if err := json.NewDecoder(resp.Body).Decode(&health); err != nil { + return nil, err + } + return &health, nil +} + +func isTCPReady(addr string) bool { + dialer := &net.Dialer{Timeout: time.Second} + conn, err := dialer.Dial("tcp", addr) + if err != nil { + return false + } + _ = conn.Close() + return true +} diff --git a/system-tests/lib/cre/don.go b/system-tests/lib/cre/don.go index ae310de3350..1a2e93615b5 100644 --- a/system-tests/lib/cre/don.go +++ b/system-tests/lib/cre/don.go @@ -288,15 +288,9 @@ func registerWithJD(ctx context.Context, d *Don, supportedChains []blockchains.B for _, role := range node.Roles { switch role { case RoleWorker, RoleBootstrap: - chainConfigStart := time.Now() if err := createJDChainConfigs(ctx, node, supportedChains, jd); err != nil { return fmt.Errorf("failed to create supported chains in node %s: %w", node.Name, err) } - framework.L.Info(). - Str("don", d.Name). - Str("node", node.Name). - Float64("duration_s", roundSeconds(time.Since(chainConfigStart))). 
- Msg("JD chain-config setup completed") case RoleGateway: // no chains configuration needed for gateway nodes default: @@ -846,7 +840,6 @@ func LinkToJobDistributor(ctx context.Context, input *LinkDonsToJDInput) error { return errors.New("input is nil") } - start := time.Now() dons := input.Dons.List() donMetadata := input.Topology.DonsMetadata.List() nodeIDsByDON := make([][]string, len(dons)) @@ -854,7 +847,6 @@ func LinkToJobDistributor(ctx context.Context, input *LinkDonsToJDInput) error { errGroup, groupCtx := errgroup.WithContext(ctx) for idx, don := range dons { errGroup.Go(func() error { - donStart := time.Now() supportedChains, schErr := findDonSupportedChains(donMetadata[idx], input.Blockchains) if schErr != nil { return errors.Wrap(schErr, "failed to find supported chains for DON") @@ -865,10 +857,6 @@ func LinkToJobDistributor(ctx context.Context, input *LinkDonsToJDInput) error { } nodeIDsByDON[idx] = don.JDNodeIDs() - framework.L.Info(). - Str("don", don.Name). - Float64("duration_s", roundSeconds(time.Since(donStart))). - Msg("JD registration completed for DON") return nil }) } @@ -884,7 +872,6 @@ func LinkToJobDistributor(ctx context.Context, input *LinkDonsToJDInput) error { input.CldfEnvironment.NodeIDs = nodeIDs framework.L.Info(). - Float64("duration_s", roundSeconds(time.Since(start))). 
Msg("Post-start JD linking completed") return nil diff --git a/system-tests/lib/cre/environment/config/config.go b/system-tests/lib/cre/environment/config/config.go index b31df9e1b6b..9dbbc9d1ac2 100644 --- a/system-tests/lib/cre/environment/config/config.go +++ b/system-tests/lib/cre/environment/config/config.go @@ -18,6 +18,7 @@ import ( "github.com/smartcontractkit/chainlink-deployments-framework/datastore" "github.com/smartcontractkit/chainlink-testing-framework/framework" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" + ctfchiprouter "github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter" billingplatformservice "github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose/billing_platform_service" chipingressset "github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose/chip_ingress_set" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake" @@ -63,6 +64,7 @@ type Config struct { Infra *infra.Provider `toml:"infra" validate:"required"` Fake *fake.Input `toml:"fake"` FakeHTTP *fake.Input `toml:"fake_http"` + ChipRouter *ctfchiprouter.Input `toml:"chip_router"` S3ProviderInput *s3provider.Input `toml:"s3provider"` CapabilityConfigs map[string]cre.CapabilityConfig `toml:"capability_configs"` // capability flag -> capability config Addresses []string `toml:"addresses"` @@ -90,6 +92,10 @@ func (c *Config) Validate(envDependencies cre.CLIEnvironmentDependencies) error return errors.New("infra configuration must be provided") } + if c.ChipRouter == nil { + return errors.New("chip_router configuration must be provided") + } + for _, nodeSet := range c.NodeSets { for _, capability := range nodeSet.Capabilities { capability = removeChainIDFromFlag(capability) @@ -301,6 +307,28 @@ func ChipIngressStateFileExists(relativePathToRepoRoot string) bool { return statErr == nil } +type ChipIngressRouterState 
struct { + AdminURL string `toml:"admin_url"` + GRPCURL string `toml:"grpc_url"` + ContainerName string `toml:"container_name"` +} + +func LoadChipIngressRouterStateFromLocalCRE(relativePathToRepoRoot string) (*ChipIngressRouterState, error) { + cfg := &Config{} + if err := cfg.Load(MustLocalCREStateFileAbsPath(relativePathToRepoRoot)); err != nil { + return nil, errors.Wrap(err, "failed to load local CRE state") + } + if cfg.ChipRouter == nil || cfg.ChipRouter.Out == nil { + return nil, errors.New("chip router output not found in local CRE state") + } + + return &ChipIngressRouterState{ + AdminURL: cfg.ChipRouter.Out.ExternalAdminURL, + GRPCURL: cfg.ChipRouter.Out.ExternalGRPCURL, + ContainerName: cfg.ChipRouter.Out.ContainerName, + }, nil +} + func storeLocalArtifact(artifact any, absPath string) error { dErr := os.MkdirAll(filepath.Dir(absPath), 0o755) if dErr != nil { diff --git a/system-tests/lib/cre/environment/environment.go b/system-tests/lib/cre/environment/environment.go index da702cbb2ab..38a768aef81 100644 --- a/system-tests/lib/cre/environment/environment.go +++ b/system-tests/lib/cre/environment/environment.go @@ -6,6 +6,7 @@ import ( "fmt" "maps" "os" + "strings" "github.com/Masterminds/semver/v3" "github.com/ethereum/go-ethereum/common" @@ -22,6 +23,7 @@ import ( "github.com/smartcontractkit/chainlink-deployments-framework/operations" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" + ctfchiprouter "github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/jd" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/s3provider" @@ -52,9 +54,12 @@ type SetupOutput struct { GatewayConnectors *cre.GatewayConnectors } +const ctfChipRouterImageEnvVar = "CTF_CHIP_ROUTER_IMAGE" + type SetupInput struct { NodeSets []*cre.NodeSet BlockchainsInput []*blockchain.Input + ChipRouterInput 
*ctfchiprouter.Input JdInput *jd.Input Provider infra.Provider ContractVersions map[cre.ContractType]*semver.Version @@ -126,6 +131,19 @@ func SetupTestEnvironment( return nil, pkgerrors.Wrap(s3Err, "failed to start S3 provider") } + if input.ChipRouterInput != nil { + if override := strings.TrimSpace(os.Getenv(ctfChipRouterImageEnvVar)); override != "" { + input.ChipRouterInput.Image = override + } + fmt.Print(libformat.PurpleText("%s", input.StageGen.Wrap("Starting Chip Router"))) + _, err := ctfchiprouter.NewWithContext(ctx, input.ChipRouterInput) + if err != nil { + return nil, pkgerrors.Wrap(err, "failed to start chip router") + } + + fmt.Print(libformat.PurpleText("%s", input.StageGen.WrapAndNext("Chip Router started in %.2f seconds", input.StageGen.Elapsed().Seconds()))) + } + fmt.Print(libformat.PurpleText("%s", input.StageGen.Wrap("Starting %d blockchain(s)", len(input.BlockchainsInput)))) deployedBlockchains, startErr := blockchains.Start( diff --git a/system-tests/lib/cre/types.go b/system-tests/lib/cre/types.go index 6402d41e2a8..fca444f2514 100644 --- a/system-tests/lib/cre/types.go +++ b/system-tests/lib/cre/types.go @@ -10,7 +10,6 @@ import ( "slices" "strconv" "strings" - "time" "github.com/Masterminds/semver/v3" "github.com/ethereum/go-ethereum/common" @@ -586,7 +585,6 @@ func NewDonMetadata(c *NodeSet, id uint64, provider infra.Provider, capabilityCo cfgs[i] = cfg } - newNodesStart := time.Now() nodes, err := newNodes(cfgs) if err != nil { return nil, fmt.Errorf("failed to create nodes metadata: %w", err) @@ -594,7 +592,6 @@ func NewDonMetadata(c *NodeSet, id uint64, provider infra.Provider, capabilityCo framework.L.Info(). Str("don", c.Name). Int("nodes", len(cfgs)). - Float64("duration_s", roundSeconds(time.Since(newNodesStart))). 
Msg("Node metadata generation completed") capConfigs, capErr := processCapabilityConfigs(c, capabilityConfigs) @@ -1465,7 +1462,6 @@ type NodeKeyInput struct { } func NewNodeKeys(input NodeKeyInput) (*secrets.NodeKeys, error) { - start := time.Now() out := &secrets.NodeKeys{ EVM: make(map[uint64]*crypto.EVMKey), Solana: make(map[string]*crypto.SolKey), @@ -1525,7 +1521,6 @@ func NewNodeKeys(input NodeKeyInput) (*secrets.NodeKeys, error) { Int("evm_chains", len(input.EVMChainIDs)). Int("solana_chains", len(input.SolanaChainIDs)). Bool("imported", input.ImportedSecrets != ""). - Float64("duration_s", roundSeconds(time.Since(start))). Msg("Node key generation completed") return out, nil } diff --git a/system-tests/lib/go.mod b/system-tests/lib/go.mod index 56f655f6b68..dcf623e49a4 100644 --- a/system-tests/lib/go.mod +++ b/system-tests/lib/go.mod @@ -44,8 +44,9 @@ require ( github.com/smartcontractkit/chainlink-protos/job-distributor v0.18.0 github.com/smartcontractkit/chainlink-protos/workflows/go v0.0.0-20260217043601-5cc966896c4f github.com/smartcontractkit/chainlink-solana v1.1.2-0.20260331131550-45e89529badc - github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.9-0.20260330164022-15e89dd1431f - github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.15 + github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.13-0.20260402170437-86da0cefc22e + github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b + github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20 github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake v0.10.0 github.com/smartcontractkit/chainlink-testing-framework/lib v1.54.5 github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.5 @@ -334,6 +335,7 @@ require ( github.com/jackc/pgtype v1.14.4 // indirect github.com/jackc/pgx/v4 
v4.18.3 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jhump/protocompile v0.0.0-20221021153901-4f6f732835e8 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect @@ -457,7 +459,7 @@ require ( github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings v0.0.0-20260310183131-8d0f0e383288 // indirect github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20260317185256-d5f7db87ae70 // indirect github.com/smartcontractkit/chainlink-ccv v0.0.0-20260324000441-d4cfddc9f7d2 // indirect - github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 // indirect + github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.11-0.20251211140724-319861e514c4 // indirect github.com/smartcontractkit/chainlink-data-streams v0.1.13 // indirect github.com/smartcontractkit/chainlink-evm/contracts/cre/gobindings v0.0.0-20260107191744-4b93f62cffe3 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 // indirect @@ -545,7 +547,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.41.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0 // indirect @@ -553,11 +555,11 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect go.opentelemetry.io/otel/log v0.15.0 // indirect go.opentelemetry.io/otel/metric v1.42.0 // indirect - 
go.opentelemetry.io/otel/sdk v1.41.0 // indirect + go.opentelemetry.io/otel/sdk v1.42.0 // indirect go.opentelemetry.io/otel/sdk/log v0.15.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.41.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.42.0 // indirect go.opentelemetry.io/otel/trace v1.42.0 // indirect - go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect diff --git a/system-tests/lib/go.sum b/system-tests/lib/go.sum index 20cce88dc92..8a20198ebd3 100644 --- a/system-tests/lib/go.sum +++ b/system-tests/lib/go.sum @@ -1089,6 +1089,8 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= +github.com/jhump/protocompile v0.0.0-20221021153901-4f6f732835e8 h1:Un1m8MEz6emotHqXiBkHX3G3afGDwO5oE6T7hZaNnbw= +github.com/jhump/protocompile v0.0.0-20221021153901-4f6f732835e8/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q= github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= @@ -1605,8 +1607,8 @@ github.com/smartcontractkit/chainlink-common v0.11.2-0.20260331163339-a3c0d217e8 github.com/smartcontractkit/chainlink-common v0.11.2-0.20260331163339-a3c0d217e843/go.mod h1:6tlxlsiWypGdpaZI+Kz5gFm53gCAcU/pTU3PR9CiFB8= github.com/smartcontractkit/chainlink-common/keystore v1.0.2 h1:AWisx4JT3QV8tcgh6J5NCrex+wAgTYpWyHsyNPSXzsQ= 
github.com/smartcontractkit/chainlink-common/keystore v1.0.2/go.mod h1:rSkIHdomyak3YnUtXLenl6poIq8q0V3UZPiiyYqPdGA= -github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg= -github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY= +github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.11-0.20251211140724-319861e514c4 h1:NOUsjsMzNecbjiPWUQGlRSRAutEvCFrqqyETDJeh5q4= +github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.11-0.20251211140724-319861e514c4/go.mod h1:Zpvul9sTcZNAZOVzt5vBl1XZGNvQebFpnpn3/KOQvOQ= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20251215152504-b1e41f508340 h1:PsjEI+5jZIz9AS4eOsLS5VpSWJINf38clXV3wryPyMk= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20251215152504-b1e41f508340/go.mod h1:P/0OSXUlFaxxD4B/P6HWbxYtIRmmWGDJAvanq19879c= github.com/smartcontractkit/chainlink-data-streams v0.1.13 h1:YOmt545DW6U0SyaqBf+NTGDLm1yMurVI7yOvxP5hlJk= @@ -1669,10 +1671,12 @@ github.com/smartcontractkit/chainlink-sui v0.0.0-20260401201231-8b06d312c965 h1: github.com/smartcontractkit/chainlink-sui v0.0.0-20260401201231-8b06d312c965/go.mod h1:U3XStbEnbx/+L22n1/8aOIdgcGVxtsZB7p59xJGngAs= github.com/smartcontractkit/chainlink-sui/deployment v0.0.0-20260304150206-c64e48eb0cb0 h1:5NdsaclAfx+p8lZUZ3WIqMW3M9Cze1ZVPENOQhha1pk= github.com/smartcontractkit/chainlink-sui/deployment v0.0.0-20260304150206-c64e48eb0cb0/go.mod h1:IfeW6t5Yc5293H5ixuooAft+wYBMSFQWKjbBTwYiKr4= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.9-0.20260330164022-15e89dd1431f h1:NSvEYsxvGxN0FfyL8uNYdePXHEvnSYLa1bdmLTHVDeU= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.9-0.20260330164022-15e89dd1431f/go.mod h1:BALK9cj8sk12e15UF6uDhifHgIApa+6N11TcQfInEro= -github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.15 
h1:usf6YCNmSO8R1/rU28wUfIdp7zXlqGGOAttXW5mgkXU= -github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.15/go.mod h1:YqrpawYGRkT/jcvXcmaZeZPOtu0erIenrHl5Mb8+U/c= +github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.13-0.20260402170437-86da0cefc22e h1:QZpxC6blue/Hh+Ii1hpNToOcay4QZXrN0lJ6kOgxQD4= +github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.13-0.20260402170437-86da0cefc22e/go.mod h1:BALK9cj8sk12e15UF6uDhifHgIApa+6N11TcQfInEro= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b h1:Xr64gasse6l3SbYTS4RaGihonKrUPYnQcRvrk2w4KlI= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b/go.mod h1:hOtu1UY5WGENFqV7HBxYgq+/z5lUEZ705GA0Tuif7Ec= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20 h1:8D2DUnn7mLUZOLhPDGGFKKvBrgU6LQd00tq2VOprvfI= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20/go.mod h1:98jNYBOPuKWJw9a8x0LgQuudp5enrHhQQP5Hq0YwRB8= github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake v0.10.0 h1:PWAMYu0WaAMBfbpxCpFJGRIDHmcgmYin6a+UQC0OdtY= github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake v0.10.0/go.mod h1:YEQbZRHFojvlQKeuckG/70t0WkAqOBmArSbkacgHSbc= github.com/smartcontractkit/chainlink-testing-framework/lib v1.54.5 h1:jARz/SWbmWoGJJGVcAnWwGMb8JuHRTQQsM3m6ZwrAGk= @@ -1952,8 +1956,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0 h1:VO3 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0/go.mod h1:qRDnJ2nv3CQXMK2HUd9K9VtvedsPAce3S+/4LZHjX/s= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.41.0 h1:MMrOAN8H1FrvDyq9UJ4lu5/+ss49Qgfgb7Zpm0m8ABo= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp 
v1.41.0/go.mod h1:Na+2NNASJtF+uT4NxDe0G+NQb+bUgdPDfwxY/6JmS/c= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 h1:ao6Oe+wSebTlQ1OEht7jlYTzQKE+pnx/iNywFvTbuuI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0/go.mod h1:u3T6vz0gh/NVzgDgiwkgLxpsSF6PaPmo2il0apGJbls= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 h1:THuZiwpQZuHPul65w4WcwEnkX2QIuMT+UFoOrygtoJw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0/go.mod h1:J2pvYM5NGHofZ2/Ru6zw/TNWnEQp5crgyDeSrYpXkAw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 h1:mq/Qcf28TWz719lE3/hMB4KkyDuLJIvgJnFGcd0kEUI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0/go.mod h1:yk5LXEYhsL2htyDNJbEq7fWzNEigeEdV5xBF/Y+kAv0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go= @@ -1970,20 +1974,20 @@ go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzu go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8= -go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= go.opentelemetry.io/otel/sdk/log v0.15.0 h1:WgMEHOUt5gjJE93yqfqJOkRflApNif84kxoHWS9VVHE= go.opentelemetry.io/otel/sdk/log v0.15.0/go.mod h1:qDC/FlKQCXfH5hokGsNg9aUBGMJQsrUyeOiW5u+dKBQ= go.opentelemetry.io/otel/sdk/log/logtest v0.13.0 h1:9yio6AFZ3QD9j9oqshV1Ibm9gPLlHNxurno5BreMtIA= go.opentelemetry.io/otel/sdk/log/logtest v0.13.0/go.mod h1:QOGiAJHl+fob8Nu85ifXfuQYmJTFAvcrxL6w5/tu168= 
-go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8= -go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= -go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= +go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2523,6 +2527,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf 
v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= diff --git a/system-tests/tests/go.mod b/system-tests/tests/go.mod index 4c2ef6c9094..8ba6946ebde 100644 --- a/system-tests/tests/go.mod +++ b/system-tests/tests/go.mod @@ -72,7 +72,8 @@ require ( github.com/smartcontractkit/chainlink-protos/job-distributor v0.18.0 github.com/smartcontractkit/chainlink-protos/ring/go v0.0.0-20260128151123-605e9540b706 github.com/smartcontractkit/chainlink-protos/workflows/go v0.0.0-20260217043601-5cc966896c4f - github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.9-0.20260330164022-15e89dd1431f + github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.13-0.20260402170437-86da0cefc22e + github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake v0.10.0 github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.7 github.com/smartcontractkit/chainlink-testing-framework/lib v1.54.5 @@ -611,7 +612,7 @@ require ( github.com/smartcontractkit/chainlink-protos/svr v1.1.1-0.20260203131522-bb8bc5c423b3 // indirect github.com/smartcontractkit/chainlink-sui v0.0.0-20260401201231-8b06d312c965 // indirect github.com/smartcontractkit/chainlink-sui/deployment v0.0.0-20260304150206-c64e48eb0cb0 // indirect - github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.18 + github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20 // indirect github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0 // indirect github.com/smartcontractkit/chainlink-testing-framework/parrot v0.6.2 // indirect github.com/smartcontractkit/chainlink-ton v0.0.0-20260331005855-7b5a4b3384f8 // indirect @@ -703,7 +704,7 @@ require ( 
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.41.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect @@ -712,11 +713,11 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect go.opentelemetry.io/otel/log v0.15.0 // indirect go.opentelemetry.io/otel/metric v1.42.0 // indirect - go.opentelemetry.io/otel/sdk v1.41.0 // indirect + go.opentelemetry.io/otel/sdk v1.42.0 // indirect go.opentelemetry.io/otel/sdk/log v0.15.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.41.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.42.0 // indirect go.opentelemetry.io/otel/trace v1.42.0 // indirect - go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.3.0 // indirect diff --git a/system-tests/tests/go.sum b/system-tests/tests/go.sum index 1481b3e768b..3f4e81f0d26 100644 --- a/system-tests/tests/go.sum +++ b/system-tests/tests/go.sum @@ -1853,10 +1853,12 @@ github.com/smartcontractkit/chainlink-sui v0.0.0-20260401201231-8b06d312c965 h1: github.com/smartcontractkit/chainlink-sui v0.0.0-20260401201231-8b06d312c965/go.mod h1:U3XStbEnbx/+L22n1/8aOIdgcGVxtsZB7p59xJGngAs= github.com/smartcontractkit/chainlink-sui/deployment v0.0.0-20260304150206-c64e48eb0cb0 h1:5NdsaclAfx+p8lZUZ3WIqMW3M9Cze1ZVPENOQhha1pk= github.com/smartcontractkit/chainlink-sui/deployment 
v0.0.0-20260304150206-c64e48eb0cb0/go.mod h1:IfeW6t5Yc5293H5ixuooAft+wYBMSFQWKjbBTwYiKr4= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.9-0.20260330164022-15e89dd1431f h1:NSvEYsxvGxN0FfyL8uNYdePXHEvnSYLa1bdmLTHVDeU= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.9-0.20260330164022-15e89dd1431f/go.mod h1:BALK9cj8sk12e15UF6uDhifHgIApa+6N11TcQfInEro= -github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.18 h1:1ng+p/+85zcVLHB050PiWUAjOcxyd4KjwkUlJy34rgE= -github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.18/go.mod h1:2+OrSz56pdgtY0Oc20nCS9LH/bEksFDBQjoR82De5PI= +github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.13-0.20260402170437-86da0cefc22e h1:QZpxC6blue/Hh+Ii1hpNToOcay4QZXrN0lJ6kOgxQD4= +github.com/smartcontractkit/chainlink-testing-framework/framework v0.15.13-0.20260402170437-86da0cefc22e/go.mod h1:BALK9cj8sk12e15UF6uDhifHgIApa+6N11TcQfInEro= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b h1:Xr64gasse6l3SbYTS4RaGihonKrUPYnQcRvrk2w4KlI= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter v0.0.0-20260401145920-f9a4559c922b/go.mod h1:hOtu1UY5WGENFqV7HBxYgq+/z5lUEZ705GA0Tuif7Ec= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20 h1:8D2DUnn7mLUZOLhPDGGFKKvBrgU6LQd00tq2VOprvfI= +github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.20/go.mod h1:98jNYBOPuKWJw9a8x0LgQuudp5enrHhQQP5Hq0YwRB8= github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake v0.10.0 h1:PWAMYu0WaAMBfbpxCpFJGRIDHmcgmYin6a+UQC0OdtY= github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake v0.10.0/go.mod h1:YEQbZRHFojvlQKeuckG/70t0WkAqOBmArSbkacgHSbc= 
github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.7 h1:ANltXlvv6CbOXieasPD9erc4BewtCHm1tKDPAYvuWLw= @@ -2214,8 +2216,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0 h1:VO3 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0/go.mod h1:qRDnJ2nv3CQXMK2HUd9K9VtvedsPAce3S+/4LZHjX/s= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.41.0 h1:MMrOAN8H1FrvDyq9UJ4lu5/+ss49Qgfgb7Zpm0m8ABo= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.41.0/go.mod h1:Na+2NNASJtF+uT4NxDe0G+NQb+bUgdPDfwxY/6JmS/c= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 h1:ao6Oe+wSebTlQ1OEht7jlYTzQKE+pnx/iNywFvTbuuI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0/go.mod h1:u3T6vz0gh/NVzgDgiwkgLxpsSF6PaPmo2il0apGJbls= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 h1:THuZiwpQZuHPul65w4WcwEnkX2QIuMT+UFoOrygtoJw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0/go.mod h1:J2pvYM5NGHofZ2/Ru6zw/TNWnEQp5crgyDeSrYpXkAw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 h1:mq/Qcf28TWz719lE3/hMB4KkyDuLJIvgJnFGcd0kEUI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0/go.mod h1:yk5LXEYhsL2htyDNJbEq7fWzNEigeEdV5xBF/Y+kAv0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go= @@ -2234,20 +2236,20 @@ go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzu go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8= -go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90= +go.opentelemetry.io/otel/sdk 
v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= go.opentelemetry.io/otel/sdk/log v0.15.0 h1:WgMEHOUt5gjJE93yqfqJOkRflApNif84kxoHWS9VVHE= go.opentelemetry.io/otel/sdk/log v0.15.0/go.mod h1:qDC/FlKQCXfH5hokGsNg9aUBGMJQsrUyeOiW5u+dKBQ= go.opentelemetry.io/otel/sdk/log/logtest v0.13.0 h1:9yio6AFZ3QD9j9oqshV1Ibm9gPLlHNxurno5BreMtIA= go.opentelemetry.io/otel/sdk/log/logtest v0.13.0/go.mod h1:QOGiAJHl+fob8Nu85ifXfuQYmJTFAvcrxL6w5/tu168= -go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8= -go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= -go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= +go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE= go.opentelemetry.io/proto/slim/otlp v1.9.0/go.mod h1:xXdeJJ90Gqyll+orzUkY4bOd2HECo5JofeoLpymVqdI= go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o13nadWDNkH/quoDomDUClnQBpdQQ2Qqv0lQBjIXjE8= diff --git 
a/system-tests/tests/smoke/cre/cre_suite_test.go b/system-tests/tests/smoke/cre/cre_suite_test.go index 6a92d66427e..8d7392e19b9 100644 --- a/system-tests/tests/smoke/cre/cre_suite_test.go +++ b/system-tests/tests/smoke/cre/cre_suite_test.go @@ -103,7 +103,7 @@ func Test_CRE_V2_Suite_Bucket_C(t *testing.T) { } func runV2SuiteBucket(t *testing.T, bucket v2suite_config.SuiteBucket) { - require.NoError(t, v2suite_config.ValidateSuiteBucketRegistry(), "invalid V2 suite bucket registry") + // require.NoError(t, v2suite_config.ValidateSuiteBucketRegistry(), "invalid V2 suite bucket registry") scenarios, err := v2suite_config.ScenariosForSuiteBucket(bucket) require.NoErrorf(t, err, "failed to load V2 suite bucket %q", bucket) diff --git a/system-tests/tests/smoke/cre/v2suite/config/bucketing.go b/system-tests/tests/smoke/cre/v2suite/config/bucketing.go index fe7df85fbd4..d1ff6bbfc31 100644 --- a/system-tests/tests/smoke/cre/v2suite/config/bucketing.go +++ b/system-tests/tests/smoke/cre/v2suite/config/bucketing.go @@ -74,7 +74,7 @@ var suiteBucketRegistry = []suiteBucketDefinition{ Bucket: SuiteBucketC, Scenarios: []SuiteScenario{ SuiteScenarioCronBeholder, - SuiteScenarioHTTPActionCRUD, + // SuiteScenarioHTTPActionCRUD, }, }, } diff --git a/system-tests/tests/test-helpers/before_suite.go b/system-tests/tests/test-helpers/before_suite.go index 8cc18a6562d..a0523027855 100644 --- a/system-tests/tests/test-helpers/before_suite.go +++ b/system-tests/tests/test-helpers/before_suite.go @@ -9,6 +9,7 @@ import ( "os/exec" "path/filepath" "slices" + "strconv" "strings" "sync" "testing" @@ -26,13 +27,14 @@ import ( cldf "github.com/smartcontractkit/chainlink-deployments-framework/deployment" "github.com/smartcontractkit/chainlink-testing-framework/framework" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" - chipingressset "github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose/chip_ingress_set" + 
ctfchiprouter "github.com/smartcontractkit/chainlink-testing-framework/framework/components/chiprouter" "github.com/smartcontractkit/chainlink-testing-framework/seth" keystone_changeset "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" cldlogger "github.com/smartcontractkit/chainlink/deployment/logger" workflow_registry_v2_wrapper "github.com/smartcontractkit/chainlink-evm/gethwrappers/workflow/generated/workflow_registry_wrapper_v2" + "github.com/smartcontractkit/chainlink/system-tests/lib/cre/chiprouter" crecontracts "github.com/smartcontractkit/chainlink/system-tests/lib/cre/contracts" "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment" "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment/blockchains" @@ -125,6 +127,7 @@ func getOrCreateSharedEnvironment(t *testing.T, tconf *ttypes.TestConfig, flags entry.once.Do(func() { createEnvironment(t, tconf, flags...) + require.NoError(t, chiprouter.EnsureStarted(t.Context(), tconf.RelativePathToRepoRoot, tconf.EnvironmentDirPath), "failed to ensure chip ingress router is running") in := getEnvironmentConfig(t) creEnvironment, dons, err := environment.BuildFromSavedState(t.Context(), cldlogger.NewSingleFileLogger(t), in) if err != nil { @@ -296,7 +299,7 @@ func GetTestConfig(t *testing.T, configPath string) *ttypes.TestConfig { EnvironmentDirPath: environmentDirPath, EnvironmentConfigPath: filepath.Join(environmentDirPath, configPath), // change to your desired config, if you want to use another topology EnvironmentStateFile: filepath.Join(environmentDirPath, envconfig.StateDirname, envconfig.LocalCREStateFilename), - ChipIngressGRPCPort: chipingressset.DEFAULT_CHIP_INGRESS_GRPC_PORT, + ChipIngressGRPCPort: strconv.Itoa(ctfchiprouter.DefaultBeholderGRPCPort), } } diff --git a/system-tests/tests/test-helpers/chip-testsink/server.go b/system-tests/tests/test-helpers/chip-testsink/server.go index 724dbe9a48f..05193f4bac0 100644 --- 
a/system-tests/tests/test-helpers/chip-testsink/server.go +++ b/system-tests/tests/test-helpers/chip-testsink/server.go @@ -32,6 +32,9 @@ type Config struct { // Started optionally receives a signal once the gRPC listener is bound. Started chan<- struct{} + + // ActualAddr optionally receives the resolved listen address after binding. + ActualAddr chan<- string } // Server implements the ChipIngress gRPC service + a tiny HTTP API. @@ -88,6 +91,7 @@ func (s *Server) Run() error { s.grpcServer.Stop() return err } + notifyAddr(s.cfg.ActualAddr, addr) notifyStarted(s.cfg.Started) if s.cfg.UpstreamEndpoint != "" { @@ -108,7 +112,7 @@ func (s *Server) Run() error { func (s *Server) Publish(ctx context.Context, event *pb.CloudEvent) (*chippb.PublishResponse, error) { go func() { if s.cfg.UpstreamEndpoint != "" { - forwardCtx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second) + forwardCtx, cancelFn := context.WithTimeout(ctx, 10*time.Second) defer cancelFn() _, err := s.upstream.Publish(forwardCtx, event) if err != nil { @@ -120,6 +124,33 @@ func (s *Server) Publish(ctx context.Context, event *pb.CloudEvent) (*chippb.Pub return s.cfg.PublishFunc(ctx, event) } +func (s *Server) PublishBatch(ctx context.Context, batch *chippb.CloudEventBatch) (*chippb.PublishResponse, error) { + if batch == nil || len(batch.Events) == 0 { + return &chippb.PublishResponse{}, nil + } + + go func() { + if s.cfg.UpstreamEndpoint == "" { + return + } + + forwardCtx, cancelFn := context.WithTimeout(ctx, 10*time.Second) + defer cancelFn() + _, err := s.upstream.PublishBatch(forwardCtx, batch) + if err != nil { + log.Printf("failed to forward batch to upstream: %v", err) + } + }() + + for _, event := range batch.Events { + if _, err := s.cfg.PublishFunc(ctx, event); err != nil { + return nil, err + } + } + + return &chippb.PublishResponse{}, nil +} + func (s *Server) Shutdown(ctx context.Context) { s.grpcServer.GracefulStop() log.Println("[chip-testsink] Server shutdown") @@ 
-155,3 +186,14 @@ func notifyStarted(ch chan<- struct{}) { default: } } + +func notifyAddr(ch chan<- string, addr string) { + if ch == nil || addr == "" { + return + } + + select { + case ch <- addr: + default: + } +} diff --git a/system-tests/tests/test-helpers/chip_testsink_helpers.go b/system-tests/tests/test-helpers/chip_testsink_helpers.go index 9e4ed2e8ec2..7cf90913f0d 100644 --- a/system-tests/tests/test-helpers/chip_testsink_helpers.go +++ b/system-tests/tests/test-helpers/chip_testsink_helpers.go @@ -3,7 +3,6 @@ package helpers import ( "context" "encoding/json" - "net" "os" "path/filepath" "strings" @@ -19,21 +18,41 @@ import ( "google.golang.org/protobuf/proto" chippb "github.com/smartcontractkit/chainlink-common/pkg/chipingress/pb" - chipingressset "github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose/chip_ingress_set" + "github.com/smartcontractkit/chainlink-testing-framework/framework" commonevents "github.com/smartcontractkit/chainlink-protos/workflows/go/common" workflowevents "github.com/smartcontractkit/chainlink-protos/workflows/go/events" workfloweventsv2 "github.com/smartcontractkit/chainlink-protos/workflows/go/v2" + "github.com/smartcontractkit/chainlink/system-tests/lib/cre/chiprouter" chiptestsink "github.com/smartcontractkit/chainlink/system-tests/tests/test-helpers/chip-testsink" ) const testSinkStartupTimeout = 10 * time.Second +const helpersRelativePathToRepoRoot = "../../../../" type ChipSink interface { Shutdown(ctx context.Context) } +type registeredChipSink struct { + server *chiptestsink.Server + subscriberID string + relativePath string +} + +func (s *registeredChipSink) Shutdown(ctx context.Context) { + if s == nil { + return + } + if err := chiprouter.UnregisterSubscriber(ctx, s.relativePath, s.subscriberID); err != nil && !os.IsNotExist(err) { + framework.L.Warn().Msgf("failed to unregister chip sink subscriber: %s", err) + } + if s.server != nil { + s.server.Shutdown(ctx) + } +} + type 
baseMessageWatchCfg struct { workflowID string labelEq map[string]string @@ -111,25 +130,6 @@ func WithUserLogWorkflowID(workflowID string) UserLogWatchOpt { } } -type fanoutSubscription struct { - id string -} - -func (s *fanoutSubscription) Shutdown(_ context.Context) { - fanoutSubMu.Lock() - defer fanoutSubMu.Unlock() - delete(fanoutSubs, s.id) -} - -var ( - fanoutOnce sync.Once - fanoutServer *chiptestsink.Server - errFanout error - - fanoutSubMu sync.Mutex - fanoutSubs = make(map[string]chiptestsink.PublishFn) -) - func safeSendUserLogs(ch chan *workflowevents.UserLogs, msg *workflowevents.UserLogs) { // In fanout mode, tests may close their log channels immediately after // unsubscribing during cleanup. An in-flight publish can race with that close, @@ -160,55 +160,7 @@ func safeSendProtoMessage(ch chan proto.Message, msg proto.Message) { } func ChipSinkFanoutEnabled() bool { - v := strings.TrimSpace(strings.ToLower(os.Getenv("CRE_TEST_CHIP_SINK_FANOUT_ENABLED"))) - return v == "1" || v == "true" || v == "yes" -} - -func ensureFanoutServer(t *testing.T) { - t.Helper() - - fanoutOnce.Do(func() { - grpcListenAddr := ":" + chipingressset.DEFAULT_CHIP_INGRESS_GRPC_PORT - startCh := make(chan struct{}, 1) - fanoutServer, errFanout = chiptestsink.NewServer(chiptestsink.Config{ - GRPCListen: grpcListenAddr, - Started: startCh, - PublishFunc: func(ctx context.Context, event *pb.CloudEvent) (*chippb.PublishResponse, error) { - fanoutSubMu.Lock() - snapshot := make([]chiptestsink.PublishFn, 0, len(fanoutSubs)) - for _, fn := range fanoutSubs { - snapshot = append(snapshot, fn) - } - fanoutSubMu.Unlock() - - for _, fn := range snapshot { - if _, err := fn(ctx, event); err != nil { - // Best-effort delivery: one subscriber must not fail all. 
- continue - } - } - return &chippb.PublishResponse{}, nil - }, - }) - if errFanout != nil { - return - } - - errCh := make(chan error, 1) - go func() { - errCh <- fanoutServer.Run() - }() - - select { - case <-startCh: - case err := <-errCh: - errFanout = err - case <-time.After(testSinkStartupTimeout): - errFanout = errors.New("timeout waiting for fanout sink server to start") - } - }) - - require.NoError(t, errFanout, "failed to start fanout sink server") + return true } // WaitForUserLog monitors workflow user logs until one contains needle or the context ends. @@ -509,34 +461,18 @@ func GetLoggingPublishFn( } } -// StartChipTestSink boots the CHiP test sink and waits until it is accepting traffic. -// In fanout mode (CRE_TEST_CHIP_SINK_FANOUT_ENABLED=1), a singleton sink is started and each test -// registers its own publish function as a fanout subscriber. +// StartChipTestSink boots a per-test CHiP sink on an ephemeral port and registers it with the +// shared chip ingress router, which owns the default ingress port. func StartChipTestSink(t *testing.T, publishFn chiptestsink.PublishFn) ChipSink { - if ChipSinkFanoutEnabled() { - ensureFanoutServer(t) - subID := t.Name() + "-" + time.Now().Format("150405.000000000") - fanoutSubMu.Lock() - fanoutSubs[subID] = publishFn - fanoutSubMu.Unlock() - return &fanoutSubscription{id: subID} - } - - grpcListenAddr := ":" + chipingressset.DEFAULT_CHIP_INGRESS_GRPC_PORT - if !isPortAvailable(grpcListenAddr) { - t.Fatalf(`failed to start ChIP Ingress Test Sink. Port %s is already taken. Most probably an instance of ChIP Ingress is already running. 
-If you want to use both together start ChIP Ingress on a different port with '--grpc-port' flag -and make sure that the sink is pointing to correct upstream endpoint ('localhost:' in most cases)`, chipingressset.DEFAULT_CHIP_INGRESS_GRPC_PORT) - } - startCh := make(chan struct{}, 1) - server, err := chiptestsink.NewServer(chiptestsink.Config{ + addrCh := make(chan string, 1) + server, sErr := chiptestsink.NewServer(chiptestsink.Config{ PublishFunc: publishFn, - GRPCListen: grpcListenAddr, - Started: startCh, // signals that server is indeed listening on the GRPC port - // UpstreamEndpoint: "localhost:50052", // uncomment to forward events to ChIP, remember to start ChIP on a different port config.DefaultChipIngressPort (=50051) + GRPCListen: "127.0.0.1:0", + Started: startCh, + ActualAddr: addrCh, }) - require.NoError(t, err, "failed to create new test sink server") + require.NoError(t, sErr, "failed to create new test sink server") errCh := make(chan error, 1) go func() { @@ -551,17 +487,24 @@ and make sure that the sink is pointing to correct upstream endpoint ('localhost require.FailNow(t, "timeout waiting for test sink server to start") } - return server -} + var actualAddr string + select { + case actualAddr = <-addrCh: + case <-time.After(testSinkStartupTimeout): + server.Shutdown(t.Context()) + require.FailNow(t, "timeout waiting for test sink listen address") + } + + require.NoError(t, chiprouter.EnsureStarted(t.Context(), helpersRelativePathToRepoRoot, filepath.Join(helpersRelativePathToRepoRoot, "core/scripts/cre/environment")), "failed to ensure chip ingress router is running") -func isPortAvailable(addr string) bool { - lc := net.ListenConfig{} - l, err := lc.Listen(context.Background(), "tcp", addr) - if err != nil { - return false // already in use or permission denied + subscriberID, err := chiprouter.RegisterSubscriber(t.Context(), helpersRelativePathToRepoRoot, t.Name(), actualAddr) + require.NoError(t, err, "failed to register test sink with chip 
ingress router") + + return ®isteredChipSink{ + server: server, + subscriberID: subscriberID, + relativePath: helpersRelativePathToRepoRoot, } - _ = l.Close() - return true } // WatchWorkflowLogs enforces that the expected log appears before timeout and that poison logs abort the test. diff --git a/system-tests/tests/test-helpers/workflow_event_observer.go b/system-tests/tests/test-helpers/workflow_event_observer.go new file mode 100644 index 00000000000..154935adf3d --- /dev/null +++ b/system-tests/tests/test-helpers/workflow_event_observer.go @@ -0,0 +1,127 @@ +package helpers + +import ( + "context" + "testing" + + "github.com/rs/zerolog" + "google.golang.org/protobuf/proto" + + commonevents "github.com/smartcontractkit/chainlink-protos/workflows/go/common" + workflowevents "github.com/smartcontractkit/chainlink-protos/workflows/go/events" + ttypes "github.com/smartcontractkit/chainlink/system-tests/tests/test-helpers/configuration" +) + +type WorkflowEventObserver interface { + UserLogs() <-chan *workflowevents.UserLogs + BaseMessages() <-chan *commonevents.BaseMessage + Errors() <-chan error + Shutdown(ctx context.Context) +} + +type workflowEventObserver struct { + userLogsCh chan *workflowevents.UserLogs + baseMessageCh chan *commonevents.BaseMessage + errCh chan error + shutdownFn func(context.Context) +} + +func (o *workflowEventObserver) UserLogs() <-chan *workflowevents.UserLogs { + return o.userLogsCh +} + +func (o *workflowEventObserver) BaseMessages() <-chan *commonevents.BaseMessage { + return o.baseMessageCh +} + +func (o *workflowEventObserver) Errors() <-chan error { + return o.errCh +} + +func (o *workflowEventObserver) Shutdown(ctx context.Context) { + if o.shutdownFn != nil { + o.shutdownFn(ctx) + } +} + +func NewSinkWorkflowEventObserver(t *testing.T, testLogger zerolog.Logger) WorkflowEventObserver { + t.Helper() + + userLogsCh := make(chan *workflowevents.UserLogs, 1000) + baseMessageCh := make(chan *commonevents.BaseMessage, 1000) + sink := 
StartChipTestSink(t, GetPublishFn(testLogger, userLogsCh, baseMessageCh)) + + return &workflowEventObserver{ + userLogsCh: userLogsCh, + baseMessageCh: baseMessageCh, + errCh: make(chan error), + shutdownFn: func(ctx context.Context) { + sink.Shutdown(ctx) + close(userLogsCh) + close(baseMessageCh) + }, + } +} + +func NewBeholderWorkflowEventObserver(t *testing.T, testLogger zerolog.Logger, testEnv *ttypes.TestEnvironment) WorkflowEventObserver { + t.Helper() + + beholder, err := NewBeholder(testLogger, testEnv.TestConfig) + if err != nil { + t.Fatalf("failed to create beholder observer: %v", err) + } + + ctx, cancel := context.WithCancel(t.Context()) + messageTypes := map[string]func() proto.Message{ + "workflows.v1.UserLogs": func() proto.Message { return &workflowevents.UserLogs{} }, + "BaseMessage": func() proto.Message { return &commonevents.BaseMessage{} }, + } + msgCh, errCh := beholder.SubscribeToBeholderMessages(ctx, messageTypes) + + userLogsCh := make(chan *workflowevents.UserLogs, 1000) + baseMessageCh := make(chan *commonevents.BaseMessage, 1000) + outErrCh := make(chan error, 100) + + go func() { + defer close(userLogsCh) + defer close(baseMessageCh) + defer close(outErrCh) + for { + select { + case <-ctx.Done(): + return + case err, ok := <-errCh: + if !ok { + errCh = nil + continue + } + if err != nil { + outErrCh <- err + } + case msg, ok := <-msgCh: + if !ok { + msgCh = nil + continue + } + switch typed := msg.(type) { + case *workflowevents.UserLogs: + userLogsCh <- typed + case *commonevents.BaseMessage: + baseMessageCh <- typed + } + } + if msgCh == nil && errCh == nil { + return + } + } + }() + + return &workflowEventObserver{ + userLogsCh: userLogsCh, + baseMessageCh: baseMessageCh, + errCh: outErrCh, + shutdownFn: func(context.Context) { + cancel() + }, + } +}