diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml index 48f8baeba77..5f08c32f812 100644 --- a/.github/workflows/ci-core.yml +++ b/.github/workflows/ci-core.yml @@ -24,15 +24,6 @@ jobs: pull-requests: read outputs: affected-modules: ${{ steps.changed-modules.outputs.modules-json }} - # Runs on workflow changes, any deployment change, or any (non-ignored) core change - should-run-deployment-tests: >- - ${{ - steps.match-some.outputs.workflow == 'true' || - steps.match-some.outputs.deployment == 'true' || - steps.match-every.outputs.core-non-ignored == 'true' || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' - }} # Runs on workflow changes, and any (non-ignored) core changes should-run-core-tests: >- ${{ @@ -53,12 +44,9 @@ jobs: with: # "if any changed file matches one or more of the conditions" (https://github.com/dorny/paths-filter/issues/225) predicate-quantifier: some - # deployment - any changes in the deployment module # workflow - any changes that could affect this workflow definition # - Assume any repository action changes affect this workflow filters: | - deployment: - - 'deployment/**' workflow: - '.github/workflows/ci-core.yml' - '.github/actions/**' @@ -123,8 +111,6 @@ jobs: workflow-files: - ".github/workflows/ci-core.yml" - ".github/actions/**" - deployment-files: - - "deployment/**" ignored-files: - "core/scripts/cre/environment/examples/workflows/**" # Note: @@ -154,11 +140,6 @@ jobs: inclusion-sets: [ go-files, core-files, all-test-files, workflow-files ] paths: - "tools/bin/go_core_tests_integration" - deployment-tests: - exclusion-sets: [ e2e-tests-files, core-test-files, ignored-files ] - inclusion-sets: [ go-files, core-files, deployment-files, workflow-files ] - paths: - - "tools/bin/go_core_ccip_deployment_tests" - name: Changed modules id: changed-modules @@ -176,16 +157,18 @@ jobs: golangci: name: GolangCI Lint - needs: [filter, run-frequency] + needs: [ filter, run-frequency ] # We don't directly merge dependabot PRs to not waste the resources. - if: ${{ needs.filter.outputs.affected-modules != '[]' && github.event_name != 'merge_group' && github.actor != 'dependabot[bot]' }} + if: ${{ needs.filter.outputs.affected-modules != '[]' && github.event_name != + 'merge_group' && github.actor != 'dependabot[bot]' }} permissions: # To annotate code in the PR. checks: write contents: read # For golangci-lint-action's `only-new-issues` option. 
pull-requests: read - runs-on: runs-on=${{ github.run_id }}-${{ strategy.job-index }}/cpu=16/ram=32/family=c6gd/spot=false/image=ubuntu24-full-arm64/extras=s3-cache + runs-on: runs-on=${{ github.run_id }}-${{ strategy.job-index + }}/cpu=16/ram=32/family=c6gd/spot=false/image=ubuntu24-full-arm64/extras=s3-cache strategy: fail-fast: false matrix: @@ -226,13 +209,14 @@ jobs: golangci-matrix-results-validation: name: lint if: ${{ always() }} - needs: [filter, golangci] + needs: [ filter, golangci ] runs-on: ubuntu-latest steps: - name: Check Golangci-lint Matrix Results env: GOLANGCI_RESULT: ${{ needs.golangci.result }} - ALLOW_FAILURE: ${{ contains(join(github.event.pull_request.labels.*.name, ' '), 'allow-lint-issues') }} + ALLOW_FAILURE: ${{ contains(join(github.event.pull_request.labels.*.name, ' '), + 'allow-lint-issues') }} SKIPPED: ${{ needs.filter.outputs.affected-modules == '[]' }} run: | if [[ "${GOLANGCI_RESULT}" == "success" ]]; then @@ -270,42 +254,57 @@ jobs: matrix: type: - cmd: go_core_tests - os: runs-on=${{ github.run_id }}-unit/cpu=48/ram=96/family=c6id+c5ad/spot=false/image=ubuntu24-full-x64/extras=s3-cache + os: runs-on=${{ github.run_id + }}-unit/cpu=48/ram=96/family=c6id+c5ad/spot=false/image=ubuntu24-full-x64/extras=s3-cache should-run: ${{ needs.filter.outputs.should-run-core-tests }} trunk-auto-quarantine: "true" + go-mod-directory: "" + setup-solana: "false" + setup-aptos: "false" + setup-sui: "false" + install-loopps: "false" - cmd: go_core_tests_integration - os: runs-on=${{ github.run_id }}-integ/cpu=48/ram=96/family=c6i/spot=false/image=ubuntu24-full-x64/extras=s3-cache+tmpfs + os: runs-on=${{ github.run_id + }}-integ/cpu=48/ram=96/family=c6i/spot=false/image=ubuntu24-full-x64/extras=s3-cache+tmpfs should-run: ${{ needs.filter.outputs.should-run-core-tests }} trunk-auto-quarantine: "true" setup-solana: "true" install-loopps: "true" + go-mod-directory: "" + setup-aptos: "false" + setup-sui: "false" - cmd: go_core_fuzz - os: runs-on=${{ github.run_id}}-fuzz/cpu=8/ram=32/family=m6id+m6idn/spot=false/image=ubuntu24-full-x64/extras=s3-cache + os: runs-on=${{ + github.run_id}}-fuzz/cpu=8/ram=32/family=m6id+m6idn/spot=false/image=ubuntu24-full-x64/extras=s3-cache should-run: ${{ needs.filter.outputs.should-run-core-tests }} + trunk-auto-quarantine: "false" + go-mod-directory: "" + setup-solana: "false" + setup-aptos: "false" + setup-sui: "false" + install-loopps: "false" - cmd: go_core_race_tests - os: runs-on=${{ github.run_id}}-race/cpu=64/ram=128/family=c7i/volume=80gb/spot=false/image=ubuntu24-full-x64/extras=s3-cache + os: runs-on=${{ + github.run_id}}-race/cpu=64/ram=128/family=c7i/volume=80gb/spot=false/image=ubuntu24-full-x64/extras=s3-cache should-run: ${{ needs.filter.outputs.should-run-core-tests }} - - - cmd: go_core_deployment_tests - os: runs-on=${{ github.run_id }}-deployment/cpu=48/ram=96/family=c6id+c5ad/spot=false/image=ubuntu24-full-x64/extras=s3-cache - should-run: ${{ needs.filter.outputs.should-run-deployment-tests }} - trunk-auto-quarantine: "true" - go-mod-directory: "deployment/" - setup-solana: "true" - setup-aptos: "true" - install-loopps: "true" - setup-sui: "true" + trunk-auto-quarantine: "false" + go-mod-directory: "" + setup-solana: "false" + setup-aptos: "false" + setup-sui: "false" + install-loopps: "false" name: Core Tests (${{ matrix.type.cmd }}) # Be careful modifying the job name, as it is used to fetch the job URL # We don't directly merge dependabot PRs, so let's not waste the resources if: ${{ github.actor != 'dependabot[bot]' }} 
- needs: [filter, run-frequency] + needs: [ filter, run-frequency ] timeout-minutes: 60 # Use ubuntu-latest for jobs that will be skipped - runs-on: ${{ matrix.type.should-run == 'true' && matrix.type.os || 'ubuntu-latest' }} + runs-on: ${{ matrix.type.should-run == 'true' && matrix.type.os || + 'ubuntu-latest' }} permissions: id-token: write contents: read @@ -327,13 +326,15 @@ jobs: uses: ./.github/actions/setup-go with: # race/fuzz tests don't benefit repeated caching, so restore from develop's build cache - restore-build-cache-only: ${{ matrix.type.cmd == 'go_core_fuzz' || matrix.type.cmd == 'go_core_race_tests' }} + restore-build-cache-only: ${{ matrix.type.cmd == 'go_core_fuzz' || + matrix.type.cmd == 'go_core_race_tests' }} build-cache-version: ${{ matrix.type.cmd }} go-version-file: ${{ matrix.type.go-mod-directory }}go.mod go-module-file: ${{ matrix.type.go-mod-directory }}go.sum - name: Setup Solana - if: ${{ matrix.type.should-run == 'true' && matrix.type.setup-solana == 'true' }} + if: ${{ matrix.type.should-run == 'true' && matrix.type.setup-solana == 'true' + }} uses: ./.github/actions/setup-solana - name: Setup Aptos @@ -375,16 +376,17 @@ jobs: CL_DATABASE_URL: ${{ env.DB_URL }} - name: Install LOOP Plugins - if: ${{ matrix.type.should-run == 'true' && matrix.type.install-loopps == 'true' }} + if: ${{ matrix.type.should-run == 'true' && matrix.type.install-loopps == 'true' + }} run: make install-plugins - name: Increase Timeouts for Fuzz/Race # Increase timeouts for scheduled runs only if: ${{ github.event.schedule != '' && matrix.type.should-run == 'true' }} run: | - echo "TIMEOUT=10m" >> $GITHUB_ENV - echo "COUNT=50" >> $GITHUB_ENV - echo "FUZZ_TIMEOUT_MINUTES=10">> $GITHUB_ENV + echo "TIMEOUT=10m" >> "$GITHUB_ENV" + echo "COUNT=50" >> "$GITHUB_ENV" + echo "FUZZ_TIMEOUT_MINUTES=10" >> "$GITHUB_ENV" - name: Run tests if: ${{ matrix.type.should-run == 'true' }} @@ -404,7 +406,8 @@ jobs: # See: https://github.com/golang/go/issues/69179 - name: Analyze and upload test results - if: ${{ matrix.type.should-run == 'true' && matrix.type.trunk-auto-quarantine == 'true' && !cancelled() }} + if: ${{ matrix.type.should-run == 'true' && matrix.type.trunk-auto-quarantine == + 'true' && !cancelled() }} uses: smartcontractkit/.github/actions/branch-out-upload@branch-out-upload/v1 with: junit-file-path: "./junit.xml" @@ -418,7 +421,8 @@ jobs: - name: Print Races id: print-races - if: ${{ failure() && matrix.type.cmd == 'go_core_race_tests' && matrix.type.should-run == 'true' }} + if: ${{ failure() && matrix.type.cmd == 'go_core_race_tests' && + matrix.type.should-run == 'true' }} env: GH_REPO: ${{ github.repository }} GH_RUN_ID: ${{ github.run_id }} @@ -452,7 +456,6 @@ jobs: ./coverage.txt ./postgres_logs.txt ./junit.xml - ./deployment/junit.xml retention-days: 7 - name: Notify Slack on Race Test Failure @@ -472,7 +475,7 @@ jobs: core-scripts-tests: name: test-scripts - needs: [filter] + needs: [ filter ] runs-on: ubuntu-latest if: ${{ needs.filter.outputs.should-run-core-tests == 'true' }} steps: @@ -502,7 +505,7 @@ jobs: scan: name: SonarQube Scan - needs: [golangci, core, core-scripts-tests] + needs: [ golangci, core, core-scripts-tests ] # If core is cancelled, skip this to not delay the cancellation of the workflow. 
if: ${{ always() && !cancelled() && github.actor != 'dependabot[bot]' }} runs-on: ubuntu-latest @@ -560,13 +563,17 @@ jobs: # Check and assign paths for lint reports # To find reports in the folders named differently (because of the matrix strategy), # We need to loop through the artifacts. It allows usage of RegExp folders (skipped if not found). + sonarqube_lint_report_paths="" + shopt -s nullglob for golang_lint_artifact in golangci-lint-report* do - echo "Found golangci-lint-report artifacts" - sonarqube_lint_report_paths=$(find -type f -name 'golangci-lint-report.xml' -printf "%p,") - echo "Lint report paths: $sonarqube_lint_report_paths" - break + [[ -d "$golang_lint_artifact" ]] || continue + echo "Found golangci-lint-report artifact: $golang_lint_artifact" + part=$(find "$golang_lint_artifact" -type f -name 'golangci-lint-report.xml' -printf '%p,') + sonarqube_lint_report_paths+="$part" done + shopt -u nullglob + echo "Lint report paths: $sonarqube_lint_report_paths" ARGS="" if [[ -z "$sonarqube_tests_report_paths" ]]; then @@ -591,7 +598,7 @@ jobs: fi echo "Final SONARQUBE_ARGS: $ARGS" - echo "SONARQUBE_ARGS=$ARGS" >> $GITHUB_ENV + echo "SONARQUBE_ARGS=$ARGS" >> "$GITHUB_ENV" - name: SonarQube Scan if: ${{ env.SONARQUBE_ARGS != '' }} @@ -619,7 +626,10 @@ jobs: with: only-modules: "true" - name: Install protoc-gen-go-wsrpc - run: curl https://github.com/smartcontractkit/wsrpc/raw/main/cmd/protoc-gen-go-wsrpc/protoc-gen-go-wsrpc --output $HOME/go/bin/protoc-gen-go-wsrpc && chmod +x $HOME/go/bin/protoc-gen-go-wsrpc + run: curl + https://github.com/smartcontractkit/wsrpc/raw/main/cmd/protoc-gen-go-wsrpc/protoc-gen-go-wsrpc + --output "$HOME/go/bin/protoc-gen-go-wsrpc" && chmod +x + "$HOME/go/bin/protoc-gen-go-wsrpc" - name: make generate run: | make rm-mocked @@ -662,7 +672,7 @@ jobs: # Check if the current hour is 00 (one per day) if [ "$current_hour" -eq "00" ]; then - echo "one-per-day-frequency=true" | tee -a $GITHUB_OUTPUT + echo "one-per-day-frequency=true" | tee -a "$GITHUB_OUTPUT" fi misc: diff --git a/.github/workflows/ci-deployments.yml b/.github/workflows/ci-deployments.yml new file mode 100644 index 00000000000..29f9204569b --- /dev/null +++ b/.github/workflows/ci-deployments.yml @@ -0,0 +1,390 @@ +name: Deployment Tests +run-name: Deployment Tests + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name + }}-deployments-tests + cancel-in-progress: true + +env: + DEPLOYMENT_TEST_SHARD_COUNT: "7" + +# Run on key branches to make sure integration is good, otherwise run on all PR's +on: + push: + branches: + - develop + - "release/*" + merge_group: + pull_request: + schedule: + - cron: "0 0,6,12,18 * * *" + workflow_dispatch: + +jobs: + filter: + name: Detect Changes + permissions: + pull-requests: read + outputs: + affected-modules: ${{ steps.changed-modules.outputs.modules-json }} + # Runs on workflow changes, any deployment change, or any (non-ignored) core change + should-run: >- + ${{ + steps.match-some.outputs.workflow == 'true' || + steps.match-some.outputs.deployment == 'true' || + steps.match-every.outputs.core-non-ignored == 'true' || + github.event_name == 'schedule' || + github.event_name == 'workflow_dispatch' + }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@v6 + with: + persist-credentials: false + repository: smartcontractkit/chainlink + - uses: dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4.0.1 + id: match-some + with: + # "if any changed file matches one or more of the 
conditions" (https://github.com/dorny/paths-filter/issues/225) + predicate-quantifier: some + # deployment - any changes in the deployment module + # workflow - any changes that could affect this workflow definition + # - Assume any repository action changes affect this workflow + filters: | + deployment: + - 'deployment/**' + workflow: + - '.github/workflows/ci-deployments.yml' + - '.github/actions/**' + - uses: dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4.0.1 + id: match-every + with: + # "if any changed file match all of the conditions" (https://github.com/dorny/paths-filter/issues/225) + # - Enables listing of files matching each filter. + # - Paths to files will be available in `${FILTER_NAME}_files` output variable. + # - Paths will be formatted as JSON array + predicate-quantifier: every + # core-non-ignored - all changes except for paths which do not affect core module + # - This is opt-in on purpose. To be safe, new files are assumed to have an affect on core unless listed here specifically. + # - For example: core module does not depend on deployment or integration-tests module. + # all - changes in any directory + # - This is used resolve all affected modules based on changed files + list-files: json + filters: | + core-non-ignored: + - '**' + - '!deployment/**' + - '!integration-tests/**' + - '!tools/secrets/**' + - '!tools/docker/**' + - '!tools/benchmark/**' + - '!**/README.md' + - '!**/CHANGELOG.md' + - '!*.nix' + - '!sonar-project.properties' + - '!nix.conf' + - '!nix-darwin-shell-hook.sh' + - '!LICENSE' + - '!.github/**' + - '!core/scripts/cre/environment/examples/workflows/**' + all: + - '**' + + - name: Decide which tests to run (rollout-only) + # To validate that this properly tests, we will run this beside the dorny/paths-filter actions + # before using its output to actually gate any jobs. 
+ id: triggers + uses: smartcontractkit/.github/actions/advanced-triggers@advanced-triggers/v1 + continue-on-error: true + with: + file-sets: | + go-files: + - "**/*.go" + - "**/go.mod" + - "**/go.sum" + core-files: + - "core/**" + core-test-files: + - "testdata/**" + - "core/**/testdata/**" + - "core/**/*_test.go" + e2e-tests-files: + - "system-tests/**" + - "integration-tests/**" + workflow-files: + - ".github/workflows/ci-deployments.yml" + - ".github/actions/**" + deployment-files: + - "deployment/**" + ignored-files: + - "core/scripts/cre/environment/examples/workflows/**" + # Note: + # - for pull_request, merge_group, and push events, a trigger will resolve to true if any changed files match the path/glob patterns + # - exclusion-sets/negations are applied first, and therefore filter all changed files before inclusion sets are applied + # - by default these will resolve to true for schedule, and workflow_dispatch events + triggers: | + deployment-tests: + exclusion-sets: [ e2e-tests-files, core-test-files, ignored-files ] + inclusion-sets: [ go-files, core-files, deployment-files, workflow-files ] + paths: + - "tools/bin/go_deployment_tests" + + - name: Changed modules + id: changed-modules + uses: smartcontractkit/.github/actions/changed-modules-go@changed-modules-go/v1 + with: + # when scheduled/workflow_dispatch, run against all modules + no-change-behaviour: all + file-patterns: | + **/*.go + **/go.mod + **/go.sum + module-patterns: | + ** + !core/scripts/cre/environment/examples/workflows/** + + shard-matrices: + name: Build Shard Matrices + needs: [filter] + runs-on: ubuntu-latest + if: ${{ needs.filter.outputs.should-run == 'true' }} + permissions: {} + outputs: + deployment-test-matrix: ${{ steps.shard-matrices.outputs.deployment-test-matrix }} + steps: + - name: Build shard matrices + id: shard-matrices + shell: bash + run: | + python - <<'PY' >> "$GITHUB_OUTPUT" + import json + import os + + run_id = os.environ["GITHUB_RUN_ID"] + + rows = [] + + def add_deployment_rows(): + count = int(os.environ["DEPLOYMENT_TEST_SHARD_COUNT"]) + for i in range(count): + rows.append({ + "job-name": f"go_deployment_tests shard {i}/{count}", + "os": f"runs-on={run_id}-deployment-{i}/cpu=8/ram=32/family=m6id+m6idn/spot=false/image=ubuntu24-full-x64/extras=s3-cache", + "shard-count": str(count), + "shard-index": str(i), + "run-timeout-minutes": "25", + "logs-artifact-name": f"go_deployment_tests_logs_shard_{i}", + }) + + add_deployment_rows() + + print(f"deployment-test-matrix={json.dumps(rows)}") + PY + + deployment-shard-consistency: + name: Check Deployment Test Sharding Consistency + needs: [filter] + if: ${{ github.actor != 'dependabot[bot]' }} + runs-on: ubuntu-latest + permissions: + contents: read + steps: + - name: Checkout the repo + uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: Setup Go + uses: ./.github/actions/setup-go + with: + go-version-file: deployment/go.mod + go-module-file: deployment/go.sum + + - name: Download Go vendor packages + run: go mod download + + - name: Verify deployment test sharding + shell: bash + run: | + ( + cd deployment || exit 1 + go list -f '{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./... 
+ ) | go run ./tools/ci-testshard verify --shard-count "${DEPLOYMENT_TEST_SHARD_COUNT}" + + deployment-tests: + env: + # We explicitly have this env var not be "CL_DATABASE_URL" to avoid having it be used by core related tests + # when they should not be using it, while still allowing us to DRY up the setup + DB_URL: postgresql://postgres:postgres@localhost:5432/chainlink_test?sslmode=disable + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.shard-matrices.outputs.deployment-test-matrix) }} + name: Deployment Tests (${{ matrix.job-name }}) + if: ${{ github.actor != 'dependabot[bot]' }} + needs: [filter, shard-matrices, run-frequency] + timeout-minutes: 35 + runs-on: ${{ matrix.os }} + permissions: + id-token: write + contents: read + actions: read + steps: + - name: Enable S3 Cache for Self-Hosted Runners + uses: runs-on/action@742bf56072eb4845a0f94b3394673e4903c90ff0 # v2.1.0 + with: + metrics: cpu,network,memory,disk + - name: Checkout the repo + uses: actions/checkout@v6 + with: + persist-credentials: false + - name: Setup Go + uses: ./.github/actions/setup-go + with: + restore-build-cache-only: false + build-cache-version: go_deployment_tests + go-version-file: deployment/go.mod + go-module-file: deployment/go.sum + - name: Setup Solana + uses: ./.github/actions/setup-solana + - name: Setup Aptos + uses: aptos-labs/actions/install-aptos-cli@63740b290d839b87ecfafbcf75ed03a36a54a29f # jan 15, 2025 + with: + CLI_VERSION: 8.1.0 + - name: Setup Sui CLI v1.69.2 + uses: ./.github/actions/setup-sui + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + version: mainnet-v1.69.2 + - name: Setup wasmd + uses: ./.github/actions/setup-wasmd + - name: Setup Postgres + uses: smartcontractkit/.github/actions/setup-postgres@setup-postgres/v1 + with: + tmpfs: "true" + image-tag: "16-alpine" + - name: Download Go vendor packages + run: go mod download + - name: Setup DB + run: go run ./core/store/cmd/preparetest + env: + CL_DATABASE_URL: ${{ env.DB_URL }} + - name: Install LOOP Plugins + run: make install-plugins + - name: Run tests + timeout-minutes: ${{ fromJson(matrix.run-timeout-minutes) }} + continue-on-error: true + id: run-tests + shell: bash + env: + CL_DATABASE_URL: ${{ env.DB_URL }} + OUTPUT_FILE: ./output.txt + PRODUCE_JUNIT_XML: "true" + TRUNK_AUTO_QUARANTINE: "true" + RUN_QUARANTINED_TESTS: "true" + JUNIT_FILE: ${{ github.workspace }}/junit.xml + GO_TEST_SHARD_COUNT: ${{ matrix.shard-count }} + GO_TEST_SHARD_INDEX: ${{ matrix.shard-index }} + run: | + GODEBUG=goindex=0 "./tools/bin/go_deployment_tests" ./... 
+ # See: https://github.com/golang/go/issues/69179 + - name: Analyze and upload test results + if: ${{ !cancelled() }} + uses: smartcontractkit/.github/actions/branch-out-upload@branch-out-upload/v1 + with: + junit-file-path: "./junit.xml" + trunk-org-slug: chainlink + trunk-token: ${{ secrets.TRUNK_API_KEY }} + trunk-previous-step-outcome: ${{ steps.run-tests.outcome }} + trunk-upload-only: false + artifact-name: ${{ matrix.logs-artifact-name }} + - name: Print postgres logs + if: ${{ always() }} + uses: smartcontractkit/.github/actions/setup-postgres@setup-postgres/v1 + with: + print-logs: "true" + - name: Store logs artifacts + if: ${{ always() }} + uses: actions/upload-artifact@v7 + with: + name: ${{ matrix.logs-artifact-name }} + path: | + ./deployment/coverage.txt + ./postgres_logs.txt + ./junit.xml + ./deployment/output.txt + ./deployment/output-short.txt + retention-days: 7 + + deployment-tests-result: + # Do not modify this job name, it is used by branch protection rules + name: Core Tests (go_core_deployment_tests) + if: ${{ always() && github.actor != 'dependabot[bot]' && needs.filter.outputs.should-run == 'true' }} + needs: [filter, deployment-tests, deployment-shard-consistency] + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + steps: + - name: Check deployment shard results + env: + EXPECTED_ROWS: ${{ env.DEPLOYMENT_TEST_SHARD_COUNT }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JOB_NAME_PREFIX: "Deployment Tests (go_deployment_tests shard " + run: | + gh api \ + --paginate \ + --jq '.jobs[] | {name, conclusion} | @json' \ + "/repos/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/jobs?per_page=100" \ + > jobs.jsonl + + python - <<'PY' + import json + import os + import sys + + prefix = os.environ["JOB_NAME_PREFIX"] + expected = int(os.environ["EXPECTED_ROWS"]) + rows = [] + with open("jobs.jsonl", encoding="utf-8") as f: + for line in f: + row = json.loads(line) + if row["name"].startswith(prefix): + rows.append(row) + + if len(rows) != expected: + print(f"Expected {expected} rows for {prefix}, got {len(rows)}") + sys.exit(1) + + conclusions = [row["conclusion"] for row in rows] + if any(conclusion not in {"success", "skipped"} for conclusion in conclusions): + print(f"Deployment shard group failed with conclusions: {conclusions}") + sys.exit(1) + print("All deployment shards succeeded.") + PY + + run-frequency: + name: Run frequency + outputs: + one-per-day-frequency: ${{ steps.check-time.outputs.one-per-day-frequency || 'false' }} + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Check time and set frequencies + id: check-time + shell: bash + run: | + if [ "$GITHUB_EVENT_NAME" != "schedule" ]; then + # Not a scheduled event - no frequencies to set. They default to false. + exit 0 + fi + + # Scheduled event, check current time for frequencies + current_hour=$(date +"%H") + + # Check if the current hour is 00 (one per day) + if [ "$current_hour" -eq "00" ]; then + echo "one-per-day-frequency=true" | tee -a "$GITHUB_OUTPUT" + fi diff --git a/core/internal/testutils/testutils.go b/core/internal/testutils/testutils.go index 65ad2b4e28b..3af271e5ec0 100644 --- a/core/internal/testutils/testutils.go +++ b/core/internal/testutils/testutils.go @@ -86,17 +86,39 @@ func RandomizeName(n string) string { // DefaultWaitTimeout is the default wait timeout. If you have a *testing.T, use WaitTimeout instead. const DefaultWaitTimeout = 30 * time.Second -// WaitTimeout returns a timeout based on the test's Deadline, if available. 
+// deadlineRemainingBudget returns ~90% of time until the test deadline, or false if none. +func deadlineRemainingBudget(t *testing.T) (time.Duration, bool) { + if d, ok := t.Deadline(); ok { + return time.Until(d) * 9 / 10, true // 10% buffer for cleanup + } + return 0, false +} + +// WaitTimeout returns a timeout capped by the test's Deadline, if available. // Especially important to use in parallel tests, as their individual execution // can get paused for arbitrary amounts of time. +// +// When a deadline exists, it uses the full remaining budget (90% of time until the +// deadline), not [DefaultWaitTimeout], so long-running tests still get enough wall +// clock under package timeouts. func WaitTimeout(t *testing.T) time.Duration { - if d, ok := t.Deadline(); ok { - // 10% buffer for cleanup and scheduling delay - return time.Until(d) * 9 / 10 + if budget, ok := deadlineRemainingBudget(t); ok { + return budget } return DefaultWaitTimeout } +// WaitTimeoutCustom uses the requested duration when there is no test deadline. +// When the test has a deadline, it returns the lesser of the requested duration and +// the remaining budget (90% of time until deadline), so callers can ask for e.g. 5m +// without exceeding the test process deadline. +func WaitTimeoutCustom(t *testing.T, requested time.Duration) time.Duration { + if budget, ok := deadlineRemainingBudget(t); ok { + return min(budget, requested) + } + return requested +} + // Context returns a context with the test's deadline, if available. // Deprecated: use [testing.TB.Context] directly func Context(tb testing.TB) context.Context { diff --git a/core/internal/testutils/testutils_test.go b/core/internal/testutils/testutils_test.go new file mode 100644 index 00000000000..dbc006bc74c --- /dev/null +++ b/core/internal/testutils/testutils_test.go @@ -0,0 +1,38 @@ +package testutils + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestWaitTimeoutUsesDeadlineBudgetNotDefaultCap(t *testing.T) { + if d, ok := t.Deadline(); ok { + expectBefore := time.Until(d) * 9 / 10 + got := WaitTimeout(t) + expectAfter := time.Until(d) * 9 / 10 + require.Greater(t, got, time.Duration(0)) + // got uses time.Until(deadline) inside WaitTimeout between these snapshots + require.GreaterOrEqual(t, got, expectAfter) + require.LessOrEqual(t, got, expectBefore) + } else { + require.Equal(t, DefaultWaitTimeout, WaitTimeout(t)) + } +} + +func TestWaitTimeoutCustom(t *testing.T) { + requested := 10 * time.Second + + if d, ok := t.Deadline(); ok { + expectBefore := time.Until(d) * 9 / 10 + got := WaitTimeoutCustom(t, requested) + expectAfter := time.Until(d) * 9 / 10 + require.Greater(t, got, time.Duration(0)) + require.LessOrEqual(t, got, requested) + require.GreaterOrEqual(t, got, min(expectAfter, requested)) + require.LessOrEqual(t, got, min(expectBefore, requested)) + } else { + require.Equal(t, requested, WaitTimeoutCustom(t, requested)) + } +} diff --git a/core/services/vrf/v2/bhs_feeder_test.go b/core/services/vrf/v2/bhs_feeder_test.go index 3d504741ed5..c566d831189 100644 --- a/core/services/vrf/v2/bhs_feeder_test.go +++ b/core/services/vrf/v2/bhs_feeder_test.go @@ -1,6 +1,7 @@ package v2_test import ( + "math/big" "testing" "time" @@ -11,7 +12,6 @@ import ( "github.com/smartcontractkit/chainlink-evm/pkg/assets" "github.com/smartcontractkit/chainlink-evm/pkg/config/toml" - "github.com/smartcontractkit/chainlink-evm/pkg/types" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" 
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" @@ -27,27 +27,27 @@ func TestStartHeartbeats(t *testing.T) { vrfKey := cltest.MustGenerateRandomKey(t) sendEth(t, ownerKey, uni.backend, vrfKey.Address, 10) gasLanePriceWei := assets.GWei(1) - gasLimit := 3_000_000 + gasLimit := uint64(3_000_000) consumers := uni.vrfConsumers // generate n BHS keys to make sure BHS job rotates sending keys - var bhsKeyAddresses []string - var keySpecificOverrides []toml.KeySpecific - var keys []any + bhsKeyAddresses := make([]string, 0, len(consumers)) + keySpecificOverrides := make([]toml.KeySpecific, 0, len(consumers)+1) + keys := make([]any, 0, len(consumers)+2) for range consumers { bhsKey := cltest.MustGenerateRandomKey(t) bhsKeyAddresses = append(bhsKeyAddresses, bhsKey.Address.String()) keys = append(keys, bhsKey) keySpecificOverrides = append(keySpecificOverrides, toml.KeySpecific{ - Key: ptr[types.EIP55Address](bhsKey.EIP55Address), + Key: new(bhsKey.EIP55Address), GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }) sendEth(t, ownerKey, uni.backend, bhsKey.Address, 10) } keySpecificOverrides = append(keySpecificOverrides, toml.KeySpecific{ // Gas lane. - Key: ptr[types.EIP55Address](vrfKey.EIP55Address), + Key: new(vrfKey.EIP55Address), GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }) @@ -55,10 +55,10 @@ func TestStartHeartbeats(t *testing.T) { config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, gasLanePriceWei, keySpecificOverrides...)(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) - c.EVM[0].FinalityDepth = ptr[uint32](2) - c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(gasLimit)) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) + c.Feature.LogPoller = new(true) + c.EVM[0].FinalityDepth = new(uint32(2)) + c.EVM[0].GasEstimator.LimitDefault = new(gasLimit) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(time.Second) }) @@ -86,11 +86,18 @@ func TestStartHeartbeats(t *testing.T) { diff := heartbeatPeriod + 1*time.Second t.Logf("Sleeping %.2f seconds before checking blockhash in BHS added by BHS_Heartbeats_Service\n", diff.Seconds()) time.Sleep(diff) - // storeEarliest in BHS contract stores blocktip - 256 in the Blockhash Store (BHS) - tipHeader, err := uni.backend.Client().HeaderByNumber(testutils.Context(t), nil) - require.NoError(t, err) - // the storeEarliest transaction will end up in a new block, hence the + 1 below. - blockNumberStored := tipHeader.Number.Uint64() - 256 + 1 - verifyBlockhashStored(t, uni.coordinatorV2UniverseCommon, blockNumberStored) + // The heartbeat store tx may not reach the mempool before the first + // Commit under load, so we can't predict which block it mines in. + // Commit blocks and check current_tip-256 on each attempt until BHS + // has a blockhash stored at that offset. 
+ require.Eventually(t, func() bool { + uni.backend.Commit() + tip, tipErr := uni.backend.Client().HeaderByNumber(testutils.Context(t), nil) + if tipErr != nil || tip == nil || tip.Number.Uint64() < 256 { + return false + } + _, err := uni.bhsContract.GetBlockhash(nil, new(big.Int).SetUint64(tip.Number.Uint64()-256)) + return err == nil + }, testutils.WaitTimeoutCustom(t, 5*time.Minute), time.Second) }) } diff --git a/core/services/vrf/v2/integration_helpers_test.go b/core/services/vrf/v2/integration_helpers_test.go index 88c92cf47c2..141d2c53a16 100644 --- a/core/services/vrf/v2/integration_helpers_test.go +++ b/core/services/vrf/v2/integration_helpers_test.go @@ -68,15 +68,15 @@ func testSingleConsumerHappyPath( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane. - Key: ptr[types.EIP55Address](key1.EIP55Address), + Key: ptr(key1.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }, v2.KeySpecific{ // Gas lane. - Key: ptr[types.EIP55Address](key2.EIP55Address), + Key: ptr(key2.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1, key2) @@ -170,7 +170,7 @@ func testMultipleConsumersNeedBHS( uni coordinatorV2UniverseCommon, consumers []*bind.TransactOpts, consumerContracts []vrftesthelpers.VRFConsumerContract, - consumerContractAddresses []common.Address, + _ []common.Address, coordinator v22.CoordinatorV2_X, coordinatorAddress common.Address, batchCoordinatorAddress common.Address, @@ -180,7 +180,8 @@ func testMultipleConsumersNeedBHS( assertions ...func( t *testing.T, coordinator v22.CoordinatorV2_X, - rwfe v22.RandomWordsFulfilled), + rwfe v22.RandomWordsFulfilled, + ), ) { ctx := testutils.Context(t) nConsumers := len(consumers) @@ -188,32 +189,32 @@ func testMultipleConsumersNeedBHS( sendEth(t, ownerKey, uni.backend, vrfKey.Address, 10) // generate n BHS keys to make sure BHS job rotates sending keys - var bhsKeyAddresses []string - var keySpecificOverrides []v2.KeySpecific - var keys []any + bhsKeyAddresses := make([]string, 0, nConsumers) + keySpecificOverrides := make([]v2.KeySpecific, 0, nConsumers+1) + keys := make([]any, 0, nConsumers+2) gasLanePriceWei := assets.GWei(10) for range nConsumers { bhsKey := cltest.MustGenerateRandomKey(t) bhsKeyAddresses = append(bhsKeyAddresses, bhsKey.Address.String()) keys = append(keys, bhsKey) keySpecificOverrides = append(keySpecificOverrides, v2.KeySpecific{ - Key: ptr(bhsKey.EIP55Address), + Key: ptr(bhsKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }) sendEth(t, ownerKey, uni.backend, bhsKey.Address, 10) } keySpecificOverrides = append(keySpecificOverrides, v2.KeySpecific{ // Gas lane.
- Key: ptr(vrfKey.EIP55Address), + Key: ptr(vrfKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }) config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), keySpecificOverrides...)(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) - c.EVM[0].FinalityDepth = ptr[uint32](2) + c.EVM[0].FinalityDepth = ptr(uint32(2)) }) keys = append(keys, ownerKey, vrfKey) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, keys...) @@ -231,7 +232,8 @@ func testMultipleConsumersNeedBHS( vrfOwnerAddress, vrfVersion, false, - gasLanePriceWei) + gasLanePriceWei, + ) keyHash := vrfJobs[0].VRFSpec.PublicKey.MustHash() var ( @@ -251,7 +253,8 @@ func testMultipleConsumersNeedBHS( _ = vrftesthelpers.CreateAndStartBHSJob( t, bhsKeyAddresses, app, uni.bhsContractAddress.String(), "", - v2CoordinatorAddress, v2PlusCoordinatorAddress, "", 0, 200, 0, 100) + v2CoordinatorAddress, v2PlusCoordinatorAddress, "", 0, 200, 0, 100, + ) chain, ok := app.GetRelayers().LegacyEVMChains().Slice()[0].(legacyevm.Chain) require.True(t, ok) @@ -289,13 +292,12 @@ func testMultipleConsumersNeedBHS( topUpSubscription(t, consumer, consumerContract, uni.backend, big.NewInt(5e18 /* 5 LINK */), nativePayment) // Wait for fulfillment to be queued. - gomega.NewGomegaWithT(t).Eventually(func() bool { + require.EventuallyWithT(t, func(c *assert.CollectT) { uni.backend.Commit() runs, err := app.PipelineORM().GetAllRuns(ctx) - require.NoError(t, err) - t.Log("runs", len(runs)) - return len(runs) == 1 - }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + require.NoError(c, err) + require.Len(c, runs, 1) + }, testutils.WaitTimeout(t), time.Second) mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) @@ -315,7 +317,7 @@ func testMultipleConsumersNeedTrustedBHS( uni coordinatorV2PlusUniverse, consumers []*bind.TransactOpts, consumerContracts []vrftesthelpers.VRFConsumerContract, - consumerContractAddresses []common.Address, + _ []common.Address, coordinator v22.CoordinatorV2_X, coordinatorAddress common.Address, batchCoordinatorAddress common.Address, @@ -325,7 +327,8 @@ func testMultipleConsumersNeedTrustedBHS( assertions ...func( t *testing.T, coordinator v22.CoordinatorV2_X, - rwfe v22.RandomWordsFulfilled), + rwfe v22.RandomWordsFulfilled, + ), ) { ctx := testutils.Context(t) nConsumers := len(consumers) @@ -333,10 +336,10 @@ func testMultipleConsumersNeedTrustedBHS( sendEth(t, ownerKey, uni.backend, vrfKey.Address, 10) // generate n BHS keys to make sure BHS job rotates sending keys - var bhsKeyAddresses []common.Address - var bhsKeyAddressesStrings []string - var keySpecificOverrides []v2.KeySpecific - var keys []any + bhsKeyAddresses := make([]common.Address, 0, nConsumers) + bhsKeyAddressesStrings := make([]string, 0, nConsumers) + keySpecificOverrides := make([]v2.KeySpecific, 0, nConsumers+1) + keys := make([]any, 0, nConsumers+2) gasLanePriceWei := assets.GWei(10) for range nConsumers { bhsKey := cltest.MustGenerateRandomKey(t) @@ -344,14 +347,14 @@ func testMultipleConsumersNeedTrustedBHS( bhsKeyAddresses = append(bhsKeyAddresses, bhsKey.Address) keys = append(keys, bhsKey) keySpecificOverrides = append(keySpecificOverrides, v2.KeySpecific{ - Key:
ptr(bhsKey.EIP55Address), + Key: ptr(bhsKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }) sendEth(t, ownerKey, uni.backend, bhsKey.Address, 10) } keySpecificOverrides = append(keySpecificOverrides, v2.KeySpecific{ // Gas lane. - Key: ptr(vrfKey.EIP55Address), + Key: ptr(vrfKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }) @@ -364,11 +367,11 @@ func testMultipleConsumersNeedTrustedBHS( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), keySpecificOverrides...)(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(5_000_000)) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(5_000_000)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) - c.EVM[0].FinalityDepth = ptr[uint32](2) + c.EVM[0].FinalityDepth = ptr(uint32(2)) }) keys = append(keys, ownerKey, vrfKey) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, keys...) @@ -482,7 +485,7 @@ func verifyBlockhashStored( requestBlock uint64, ) { // Wait for the blockhash to be stored - gomega.NewGomegaWithT(t).Eventually(func() bool { + require.Eventually(t, func() bool { uni.backend.Commit() callOpts := &bind.CallOpts{ Pending: false, @@ -490,15 +493,16 @@ func verifyBlockhashStored( BlockNumber: nil, Context: nil, } - _, err := uni.bhsContract.GetBlockhash(callOpts, big.NewInt(int64(requestBlock))) + _, err := uni.bhsContract.GetBlockhash(callOpts, new(big.Int).SetUint64(requestBlock)) if err == nil { return true - } else if strings.Contains(err.Error(), "execution reverted") { + } + if strings.Contains(err.Error(), "execution reverted") { return false } - t.Fatal(err) + require.FailNowf(t, "GetBlockhash", "%v", err) return false - }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + }, testutils.WaitTimeoutCustom(t, 5*time.Minute), time.Second) } func verifyBlockhashStoredTrusted( @@ -507,7 +511,7 @@ func verifyBlockhashStoredTrusted( requestBlock uint64, ) { // Wait for the blockhash to be stored - gomega.NewGomegaWithT(t).Eventually(func() bool { + require.Eventually(t, func() bool { uni.backend.Commit() callOpts := &bind.CallOpts{ Pending: false, @@ -515,15 +519,16 @@ func verifyBlockhashStoredTrusted( BlockNumber: nil, Context: nil, } - _, err := uni.trustedBhsContract.GetBlockhash(callOpts, big.NewInt(int64(requestBlock))) + _, err := uni.trustedBhsContract.GetBlockhash(callOpts, new(big.Int).SetUint64(requestBlock)) if err == nil { return true - } else if strings.Contains(err.Error(), "execution reverted") { + } + if strings.Contains(err.Error(), "execution reverted") { return false } - t.Fatal(err) + require.FailNowf(t, "GetBlockhash (trusted BHS)", "%v", err) return false - }, time.Second*300, time.Second).Should(gomega.BeTrue()) + }, testutils.WaitTimeoutCustom(t, 5*time.Minute), time.Second) } func testSingleConsumerHappyPathBatchFulfillment( @@ -553,13 +558,13 @@ func testSingleConsumerHappyPathBatchFulfillment( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane.
- Key: ptr(key1.EIP55Address), + Key: ptr(key1.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].GasEstimator.LimitDefault = ptr[uint64](5_000_000) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(5_000_000)) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) c.EVM[0].ChainID = (*sqlutil.Big)(testutils.SimulatedChainID) - c.Feature.LogPoller = ptr(true) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) @@ -660,11 +665,11 @@ func testSingleConsumerNeedsTopUp( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(1000), v2.KeySpecific{ // Gas lane. - Key: ptr(key.EIP55Address), + Key: ptr(key.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key) @@ -740,7 +745,7 @@ func testBlockHeaderFeeder( uni coordinatorV2UniverseCommon, consumers []*bind.TransactOpts, consumerContracts []vrftesthelpers.VRFConsumerContract, - consumerContractAddresses []common.Address, + _ []common.Address, coordinator v22.CoordinatorV2_X, coordinatorAddress common.Address, batchCoordinatorAddress common.Address, @@ -767,13 +772,13 @@ func testBlockHeaderFeeder( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, gasLanePriceWei, v2.KeySpecific{ // Gas lane. - Key: ptr(vrfKey.EIP55Address), + Key: ptr(vrfKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) - c.EVM[0].FinalityDepth = ptr[uint32](2) + c.EVM[0].FinalityDepth = ptr(uint32(2)) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, vrfKey, bhfKey) require.NoError(t, app.Start(ctx)) @@ -838,13 +843,13 @@ func testBlockHeaderFeeder( topUpSubscription(t, consumer, consumerContract, uni.backend, big.NewInt(5e18), nativePayment) // Wait for fulfillment to be queued.
- gomega.NewGomegaWithT(t).Eventually(func() bool { + require.EventuallyWithT(t, func(c *assert.CollectT) { uni.backend.Commit() runs, err := app.PipelineORM().GetAllRuns(ctx) - require.NoError(t, err) + require.NoError(c, err) t.Log("runs", len(runs)) - return len(runs) == 1 - }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + require.Len(c, runs, 1) + }, testutils.WaitTimeout(t), time.Second) mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) @@ -858,20 +863,28 @@ func testBlockHeaderFeeder( } } -func createSubscriptionAndGetSubscriptionCreatedEvent( +func createSubscriptionAndGetSubID( t *testing.T, subOwner *bind.TransactOpts, coordinator v22.CoordinatorV2_X, backend types.Backend, -) v22.SubscriptionCreated { - _, err := coordinator.CreateSubscription(subOwner) +) *big.Int { + tx, err := coordinator.CreateSubscription(subOwner) require.NoError(t, err) backend.Commit() - iter, err := coordinator.FilterSubscriptionCreated(nil, nil) + receipt, err := backend.Client().TransactionReceipt(testutils.Context(t), tx.Hash()) require.NoError(t, err) - require.True(t, iter.Next(), "could not find SubscriptionCreated event for subID") - return iter.Event() + require.Equal(t, uint64(1), receipt.Status) + for _, log := range receipt.Logs { + if log.Address != coordinator.Address() { + continue + } + // SubscriptionCreated(uint64 indexed subId, address owner): Topics[1] = subId + return new(big.Int).SetBytes(log.Topics[1].Bytes()) + } + require.FailNow(t, "no SubscriptionCreated log from coordinator in CreateSubscription receipt") + return nil } func setupAndFundSubscriptionAndConsumer( @@ -884,8 +897,7 @@ func setupAndFundSubscriptionAndConsumer( vrfVersion vrfcommon.Version, fundingAmount *big.Int, ) (subID *big.Int) { - event := createSubscriptionAndGetSubscriptionCreatedEvent(t, subOwner, coordinator, uni.backend) - subID = event.SubID() + subID = createSubscriptionAndGetSubID(t, subOwner, coordinator, uni.backend) _, err := coordinator.AddConsumer(subOwner, subID, consumerAddress) require.NoError(t, err, "failed to add consumer") @@ -926,15 +938,15 @@ func testSingleConsumerForcedFulfillment( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: ptr(key1.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }, v2.KeySpecific{ // Gas lane.
- Key: ptr(key2.EIP55Address), + Key: ptr(key2.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1, key2) @@ -982,7 +994,7 @@ func testSingleConsumerForcedFulfillment( coordinatorAddress, batchCoordinatorAddress, uni.coordinatorV2UniverseCommon, - ptr(uni.vrfOwnerAddress), + ptr(uni.vrfOwnerAddress), vrfVersion, batchEnabled, gasLanePriceWei) @@ -1039,16 +1051,20 @@ func testSingleConsumerForcedFulfillment( commitment, err2 := uni.oldRootContract.GetCommitment(nil, requestID) require.NoError(t, err2) t.Log("commitment is:", hexutil.Encode(commitment[:])) + // LogPoller may not have indexed the latest block yet; skip the filter + // check rather than crashing, since the commitment check below is the real + // predicate and the filter will succeed on a later iteration. it, err2 := uni.vrfOwner.FilterRandomWordsForced(nil, []*big.Int{requestID}, []uint64{subID.Uint64()}, []common.Address{eoaConsumerAddr}) - require.NoError(t, err2) - i := 0 - for it.Next() { - i++ - require.Equal(t, requestID.String(), it.Event.RequestId.String()) - require.Equal(t, subID.Uint64(), it.Event.SubId) - require.Equal(t, eoaConsumerAddr.String(), it.Event.Sender.String()) + if err2 == nil { + i := 0 + for it.Next() { + i++ + require.Equal(t, requestID.String(), it.Event.RequestId.String()) + require.Equal(t, subID.Uint64(), it.Event.SubId) + require.Equal(t, eoaConsumerAddr.String(), it.Event.Sender.String()) + } + t.Log("num RandomWordsForced logs:", i) } - t.Log("num RandomWordsForced logs:", i) return utils.IsEmpty(commitment[:]) }, testutils.WaitTimeout(t), time.Second) @@ -1082,7 +1098,7 @@ func testSingleConsumerEIP150( ownerKey ethkey.KeyV2, uni coordinatorV2UniverseCommon, batchCoordinatorAddress common.Address, - batchEnabled bool, + _ bool, vrfVersion vrfcommon.Version, nativePayment bool, ) { @@ -1094,12 +1110,12 @@ func testSingleConsumerEIP150( config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: ptr(key1.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(3.5e6)) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(3.5e6)) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) @@ -1134,13 +1150,13 @@ func testSingleConsumerEIP150( requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, uint32(callBackGasLimit), uni.rootContract, uni.backend, nativePayment) // Wait for simulation to pass.
- gomega.NewGomegaWithT(t).Eventually(func() bool { + require.EventuallyWithT(t, func(c *assert.CollectT) { uni.backend.Commit() runs, err := app.PipelineORM().GetAllRuns(ctx) - require.NoError(t, err) + require.NoError(c, err) t.Log("runs", len(runs)) - return len(runs) == 1 - }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + require.Len(c, runs, 1) + }, testutils.WaitTimeout(t), time.Second) t.Log("Done!") } @@ -1150,14 +1166,14 @@ func testSingleConsumerEIP150Revert( ownerKey ethkey.KeyV2, uni coordinatorV2UniverseCommon, batchCoordinatorAddress common.Address, - batchEnabled bool, + _ bool, vrfVersion vrfcommon.Version, nativePayment bool, ) { ctx := testutils.Context(t) - callBackGasLimit := int64(2_500_000) // base callback gas. - eip150Fee := int64(0) // no premium given for callWithExactGas - coordinatorFulfillmentOverhead := int64(90_000) // fixed gas used in coordinator fulfillment + callBackGasLimit := uint64(2_500_000) // base callback gas. + eip150Fee := uint64(0) // no premium given for callWithExactGas + coordinatorFulfillmentOverhead := uint64(90_000) // fixed gas used in coordinator fulfillment gasLimit := callBackGasLimit + eip150Fee + coordinatorFulfillmentOverhead key1 := cltest.MustGenerateRandomKey(t) @@ -1165,12 +1181,12 @@ func testSingleConsumerEIP150Revert( config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: ptr(key1.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(gasLimit)) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].GasEstimator.LimitDefault = ptr(gasLimit) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) @@ -1230,12 +1246,12 @@ func testSingleConsumerBigGasCallbackSandwich( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(100), v2.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: ptr(key1.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].GasEstimator.LimitDefault = ptr[uint64](5_000_000) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(5_000_000)) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) @@ -1266,8 +1282,8 @@ func testSingleConsumerBigGasCallbackSandwich( // Make some randomness requests, each one block apart, which contain a single low-gas request sandwiched between two high-gas requests.
numWords := uint32(2) - reqIDs := []*big.Int{} callbackGasLimits := []uint32{2_500_000, 50_000, 1_500_000} + reqIDs := make([]*big.Int, 0, len(callbackGasLimits)) for _, limit := range callbackGasLimits { requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, limit, uni.rootContract, uni.backend, nativePayment) reqIDs = append(reqIDs, requestID) @@ -1283,13 +1299,12 @@ func testSingleConsumerBigGasCallbackSandwich( } // Wait for the 50_000 gas randomness request to be enqueued. - gomega.NewGomegaWithT(t).Eventually(func() bool { + require.EventuallyWithT(t, func(c *assert.CollectT) { uni.backend.Commit() runs, err := app.PipelineORM().GetAllRuns(ctx) - require.NoError(t, err) - t.Log("runs", len(runs)) - return len(runs) == 1 - }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + require.NoError(c, err) + require.Len(c, runs, 1) + }, testutils.WaitTimeout(t), time.Second) // After the first successful request, no more will be enqueued. gomega.NewGomegaWithT(t).Consistently(func() bool { @@ -1327,7 +1342,7 @@ func testSingleConsumerBigGasCallbackSandwich( require.NoError(t, err) t.Log("assert 1", "runs", len(runs)) return len(runs) == 1 - }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) + }, testutils.WaitTimeoutCustom(t, 5*time.Second), 1*time.Second).Should(gomega.BeTrue()) t.Log("Done!") } @@ -1348,16 +1363,16 @@ func testSingleConsumerMultipleGasLanes( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Cheap gas lane. - Key: ptr(cheapKey.EIP55Address), + Key: ptr(cheapKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: cheapGasLane}, }, v2.KeySpecific{ // Expensive gas lane. - Key: ptr(expensiveKey.EIP55Address), + Key: ptr(expensiveKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: expensiveGasLane}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.EVM[0].GasEstimator.LimitDefault = ptr[uint64](5_000_000) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(5_000_000)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) @@ -1461,7 +1476,7 @@ func testSingleConsumerAlwaysRevertingCallbackStillFulfilled( ownerKey ethkey.KeyV2, uni coordinatorV2UniverseCommon, batchCoordinatorAddress common.Address, - batchEnabled bool, + _ bool, vrfVersion vrfcommon.Version, nativePayment bool, ) { @@ -1471,11 +1486,11 @@ func testSingleConsumerAlwaysRevertingCallbackStillFulfilled( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane.
- Key: ptr(key.EIP55Address), + Key: ptr(key.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key) @@ -1510,20 +1525,18 @@ func testSingleConsumerAlwaysRevertingCallbackStillFulfilled( requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni.rootContract, uni.backend, nativePayment) // Wait for fulfillment to be queued. - gomega.NewGomegaWithT(t).Eventually(func() bool { + require.EventuallyWithT(t, func(c *assert.CollectT) { uni.backend.Commit() runs, err := app.PipelineORM().GetAllRuns(ctx) - require.NoError(t, err) - t.Log("runs", len(runs)) - return len(runs) == 1 - }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue()) + require.NoError(c, err) + require.Len(c, runs, 1) + }, testutils.WaitTimeout(t), 1*time.Second) // Mine the fulfillment that was queued. mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) // Assert correct state of RandomWordsFulfilled event. assertRandomWordsFulfilled(t, requestID, false, uni.rootContract, nativePayment) - t.Log("Done!") } func testConsumerProxyHappyPath( @@ -1541,14 +1554,14 @@ func testConsumerProxyHappyPath( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: ptr(key1.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }, v2.KeySpecific{ - Key: ptr(key2.EIP55Address), + Key: ptr(key2.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = ptr(uint32(2)) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1, key2) @@ -1634,8 +1647,6 @@ func testConsumerProxyHappyPath( n, err = uni.backend.Client().PendingNonceAt(ctx, key2.Address) require.NoError(t, err) require.EqualValues(t, 1, n) - - t.Log("Done!") } func testConsumerProxyCoordinatorZeroAddress( @@ -1670,12 +1681,12 @@ func testMaliciousConsumer( ) { ctx := testutils.Context(t) config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { - c.EVM[0].GasEstimator.LimitDefault = ptr[uint64](2_000_000) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(2_000_000)) c.EVM[0].GasEstimator.PriceMax = assets.GWei(1) c.EVM[0].GasEstimator.PriceDefault = assets.GWei(1) c.EVM[0].GasEstimator.FeeCapDefault = assets.GWei(1) c.EVM[0].ChainID = (*sqlutil.Big)(testutils.SimulatedChainID) - c.Feature.LogPoller = ptr(true) + c.Feature.LogPoller = ptr(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) carol := uni.vrfConsumers[0] @@ -1747,24 +1758,19 @@ func testMaliciousConsumer( require.NoError(t, err) require.Equal(t, uint64(1), r.Status) - // The user callback should have errored - it, err := uni.rootContract.FilterRandomWordsFulfilled(nil, nil,
nil) - require.NoError(t, err) - var fulfillments []v22.RandomWordsFulfilled - for it.Next() { - fulfillments = append(fulfillments, it.Event()) - } - require.Len(t, fulfillments, 1) - require.False(t, fulfillments[0].Success()) - - // It should not have succeeded in placing another request. - it2, err2 := uni.rootContract.FilterRandomWordsRequested(nil, nil, nil, nil) - require.NoError(t, err2) - var requests []v22.RandomWordsRequested - for it2.Next() { - requests = append(requests, it2.Event()) + // The user callback should have errored; parse the fulfillment event directly from the receipt. + var fulfillment v22.RandomWordsFulfilled + for _, log := range r.Logs { + event, parseErr := uni.rootContract.ParseRandomWordsFulfilled(*log) + if parseErr == nil { + fulfillment = event + break + } } - require.Len(t, requests, 1) + require.NotNil(t, fulfillment, "no RandomWordsFulfilled event in fulfillment receipt") + require.False(t, fulfillment.Success()) + // A reverted callback reverts all state and events within it, including any re-entrant + // requestRandomness call, so there is no separate check needed for request count. } func testReplayOldRequestsOnStartUp( @@ -1784,7 +1790,8 @@ func testReplayOldRequestsOnStartUp( t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, - subID *big.Int), + subID *big.Int, + ), ) { ctx := testutils.Context(t) sendingKey := cltest.MustGenerateRandomKey(t) @@ -1792,11 +1799,11 @@ func testReplayOldRequestsOnStartUp( config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane. - Key: ptr(sendingKey.EIP55Address), + Key: new(sendingKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) + c.Feature.LogPoller = new(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, sendingKey) @@ -1833,11 +1840,11 @@ func testReplayOldRequestsOnStartUp( config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ // Gas lane. - Key: ptr(sendingKey.EIP55Address), + Key: new(sendingKey.EIP55Address), GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) + c.Feature.LogPoller = new(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) @@ -1889,13 +1896,12 @@ func testReplayOldRequestsOnStartUp( }, testutils.WaitTimeout(t), 100*time.Millisecond) // Wait for fulfillment to be queued. - gomega.NewGomegaWithT(t).Eventually(func() bool { + require.EventuallyWithT(t, func(c *assert.CollectT) { uni.backend.Commit() runs, err := app.PipelineORM().GetAllRuns(ctx) - require.NoError(t, err) - t.Log("runs", len(runs)) - return len(runs) == 1 - }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + require.NoError(c, err) + require.Len(c, runs, 1) + }, testutils.WaitTimeout(t), time.Second) // Mine the fulfillment that was queued. 
mine(t, requestID1, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) @@ -1905,7 +1911,7 @@ func testReplayOldRequestsOnStartUp( // * success should be true // * payment should be exactly the amount specified as the premium in the coordinator fee config rwfe := assertRandomWordsFulfilled(t, requestID1, true, coordinator, nativePayment) - if len(assertions) > 0 { - assertions[0](t, coordinator, rwfe, subID) + for _, assertion := range assertions { + assertion(t, coordinator, rwfe, subID) } } diff --git a/core/services/vrf/v2/integration_v2_plus_test.go b/core/services/vrf/v2/integration_v2_plus_test.go index a7ade37a8fe..a5f898c1692 100644 --- a/core/services/vrf/v2/integration_v2_plus_test.go +++ b/core/services/vrf/v2/integration_v2_plus_test.go @@ -82,7 +82,7 @@ func newVRFCoordinatorV2PlusUniverse(t *testing.T, key ethkey.KeyV2, numConsumer reverter = evmtestutils.MustNewSimTransactor(t) submanager = evmtestutils.MustNewSimTransactor(t) nallory = oracleTransactor - vrfConsumers []*bind.TransactOpts + vrfConsumers = make([]*bind.TransactOpts, 0, numConsumers) ) // Create consumer contract deployer identities @@ -189,8 +189,8 @@ func newVRFCoordinatorV2PlusUniverse(t *testing.T, key ethkey.KeyV2, numConsumer // Create the VRF consumers. var ( - consumerContracts []vrftesthelpers.VRFConsumerContract - consumerContractAddresses []common.Address + consumerContracts = make([]vrftesthelpers.VRFConsumerContract, 0, len(vrfConsumers)) + consumerContractAddresses = make([]common.Address, 0, len(vrfConsumers)) ) for _, author := range vrfConsumers { // Deploy a VRF consumer. It has a starting balance of 500 LINK. @@ -893,7 +893,7 @@ func TestVRFV2PlusIntegration_RequestCost(t *testing.T) { big.NewInt(2000000000000000000)) // 0.2 ETH uni.backend.Commit() // Ensure even with large number of consumers its still cheap - var addrs []common.Address + addrs := make([]common.Address, 0, 99) for range 99 { addrs = append(addrs, testutils.NewAddress()) } @@ -926,15 +926,15 @@ func TestVRFV2PlusIntegration_RequestCost(t *testing.T) { require.NoError(tt, err) t.Log("gas used by proxied CreateSubscriptionAndFund:", r.GasUsed) - subId, err := consumerContract.SSubId(nil) + subID, err := consumerContract.SSubId(nil) require.NoError(tt, err) - _, err = uni.rootContract.GetSubscription(nil, subId) + _, err = uni.rootContract.GetSubscription(nil, subID) require.NoError(tt, err) theAbi := evmtypes.MustGetABI(vrf_consumer_v2_plus_upgradeable_example.VRFConsumerV2PlusUpgradeableExampleMetaData.ABI) estimate := estimateGas(tt, uni.backend, common.Address{}, consumerContractAddress, &theAbi, - "requestRandomness", vrfkey.PublicKey.MustHash(), subId, uint16(2), uint32(10000), uint32(1)) + "requestRandomness", vrfkey.PublicKey.MustHash(), subID, uint16(2), uint32(10000), uint32(1)) tt.Log("gas estimate of proxied requestRandomness:", estimate) // There is some gas overhead of the delegatecall that is made by the proxy // to the logic contract. 
See https://www.evm.codes/#f4?fork=grayGlacier for a detailed @@ -958,9 +958,9 @@ func TestVRFV2PlusIntegration_MaxConsumersCost(t *testing.T) { big.NewInt(1000000000000000000)) // 0.1 LINK require.NoError(t, err) uni.backend.Commit() - subId, err := carolContract.SSubId(nil) + subID, err := carolContract.SSubId(nil) require.NoError(t, err) - var addrs []common.Address + addrs := make([]common.Address, 0, 98) for range 98 { addrs = append(addrs, testutils.NewAddress()) } @@ -969,12 +969,12 @@ func TestVRFV2PlusIntegration_MaxConsumersCost(t *testing.T) { require.NoError(t, err) estimate := estimateGas(t, uni.backend, carolContractAddress, uni.rootContractAddress, uni.coordinatorABI, - "removeConsumer", subId, carolContractAddress) + "removeConsumer", subID, carolContractAddress) t.Log(estimate) assert.Less(t, estimate, uint64(540000)) estimate = estimateGas(t, uni.backend, carolContractAddress, uni.rootContractAddress, uni.coordinatorABI, - "addConsumer", subId, testutils.NewAddress()) + "addConsumer", subID, testutils.NewAddress()) t.Log(estimate) assert.Less(t, estimate, uint64(100000)) } @@ -1051,18 +1051,18 @@ func TestVRFV2PlusIntegration_FulfillmentCost(t *testing.T) { _, err2 = carolContract.TopUpSubscriptionNative(carol, big.NewInt(2000000000000000000)) // 0.2 ETH require.NoError(tt, err2) - gasRequested := 50_000 - nw := 1 - requestedIncomingConfs := 3 + gasRequested := uint32(50_000) + nw := uint32(1) + requestedIncomingConfs := uint16(3) t.Run("native payment", func(tt *testing.T) { requestAndEstimateFulfillmentCost( t, subID, carol, vrfkey, - uint16(requestedIncomingConfs), - uint32(gasRequested), - uint32(nw), + requestedIncomingConfs, + gasRequested, + nw, carolContract, carolContractAddress, uni.coordinatorV2UniverseCommon, @@ -1079,9 +1079,9 @@ func TestVRFV2PlusIntegration_FulfillmentCost(t *testing.T) { subID, carol, vrfkey, - uint16(requestedIncomingConfs), - uint32(gasRequested), - uint32(nw), + requestedIncomingConfs, + gasRequested, + nw, carolContract, carolContractAddress, uni.coordinatorV2UniverseCommon, @@ -1103,17 +1103,17 @@ func TestVRFV2PlusIntegration_FulfillmentCost(t *testing.T) { uni.backend.Commit() subID, err2 := consumerContract.SSubId(nil) require.NoError(t, err2) - gasRequested := 50_000 - nw := 1 - requestedIncomingConfs := 3 + gasRequested := uint32(50_000) + nw := uint32(1) + requestedIncomingConfs := uint16(3) requestAndEstimateFulfillmentCost( t, subID, consumerOwner, vrfkey, - uint16(requestedIncomingConfs), - uint32(gasRequested), - uint32(nw), + requestedIncomingConfs, + gasRequested, + nw, consumerContract, consumerContractAddress, uni.coordinatorV2UniverseCommon, @@ -1133,14 +1133,23 @@ func setupSubscriptionAndFund( consumerAddress common.Address, linkAmount *big.Int, nativeAmount *big.Int) *big.Int { - _, err := uni.rootContract.CreateSubscription(consumer) + tx, err := uni.rootContract.CreateSubscription(consumer) require.NoError(t, err) uni.backend.Commit() - iter, err := uni.rootContract.FilterSubscriptionCreated(nil, nil) + receipt, err := uni.backend.Client().TransactionReceipt(testutils.Context(t), tx.Hash()) require.NoError(t, err) - require.True(t, iter.Next(), "could not find SubscriptionCreated event for subID") - subID := iter.Event().SubID() + require.Equal(t, uint64(1), receipt.Status) + var subID *big.Int + for _, log := range receipt.Logs { + if log.Address != uni.rootContractAddress { + continue + } + // SubscriptionCreated(uint64 indexed subId, address owner): Topics[1] = subId + subID = 
new(big.Int).SetBytes(log.Topics[1].Bytes()) + break + } + require.NotNil(t, subID, "no SubscriptionCreated log from coordinator in CreateSubscription receipt") _, err = consumerContract.SetSubID(consumer, subID) require.NoError(t, err) @@ -1173,12 +1182,12 @@ func TestVRFV2PlusIntegration_Migration(t *testing.T) { config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: new(key1.EIP55Address), GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].GasEstimator.LimitDefault = ptr[uint64](5_000_000) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].GasEstimator.LimitDefault = new(uint64(5_000_000)) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) + c.Feature.LogPoller = new(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) diff --git a/core/services/vrf/v2/integration_v2_reverted_txns_test.go b/core/services/vrf/v2/integration_v2_reverted_txns_test.go index 8d40397bca7..dbc21f41711 100644 --- a/core/services/vrf/v2/integration_v2_reverted_txns_test.go +++ b/core/services/vrf/v2/integration_v2_reverted_txns_test.go @@ -95,7 +95,7 @@ func TestVRFV2Integration_ForceFulfillmentRevertedTxn_Retry(t *testing.T) { // Make VRF request without sufficient balance and send fulfillment without simulation req := makeVRFReq(t, th, th.subs[0]) - req = fulfillVRFReq(t, th, req, th.subs[0], true, ptr(uint64(7))) + req = fulfillVRFReq(t, th, req, th.subs[0], true, new(uint64(7))) waitForForceFulfillment(t, th, req, th.subs[0], true, 2) @@ -174,10 +174,9 @@ func TestUniqueReqById_WithPendingReceipts(t *testing.T) { {RequestID: common.BigToHash(big.NewInt(2)).Hex(), ForceFulfillmentAttempt: 4, EVMReceipt: types.Receipt{Status: 0}}, } - allForceTxns := []v2.TxnReceiptDB{} + allForceTxns := make([]v2.TxnReceiptDB, 0, len(revertedForceTxns)+1) allForceTxns = append(allForceTxns, revertedForceTxns...) - allForceTxns = append(allForceTxns, v2.TxnReceiptDB{RequestID: common.BigToHash(big.NewInt(2)).Hex(), - ForceFulfillmentAttempt: 5}) + allForceTxns = append(allForceTxns, v2.TxnReceiptDB{RequestID: common.BigToHash(big.NewInt(2)).Hex(), ForceFulfillmentAttempt: 5}) res := v2.UniqueByReqID(revertedForceTxns, allForceTxns) require.Len(t, res, 1) for _, r := range res { @@ -188,17 +187,20 @@ func TestUniqueReqById_WithPendingReceipts(t *testing.T) { } // Wait till force fulfillment event fired for the req passed in, till go test timeout -func waitForForceFulfillment(t *testing.T, +func waitForForceFulfillment( + t *testing.T, th *revertTxnTH, req *vrfReq, sub *vrfSub, success bool, - forceFulfilledCount int64) { + forceFulfilledCount int64, +) { uni := th.uni coordinator := th.uni.rootContract requestID := req.requestID // Wait for force-fulfillment to be queued. + // Use a longer timeout: the retry cycle (multiple reverts before success) can exceed DefaultWaitTimeout under parallel load. 
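// (Editor's aside, not part of the patch.) A sketch of the extended-timeout shape the
// comment above calls for, assuming testutils.WaitTimeoutCustom(t, d) behaves like
// testutils.WaitTimeout(t) but with a caller-chosen base duration d, as the earlier hunk uses it:
//
//	require.Eventually(t, func() bool {
//		uni.backend.Commit()
//		commitment, err := coordinator.GetCommitment(nil, requestID)
//		return err == nil && commitment == [32]byte{} // assumed: [32]byte return, zeroed once fulfilled
//	}, testutils.WaitTimeoutCustom(t, 3*time.Minute), time.Second)
//
// The 3*time.Minute bound is an illustrative value, not taken from the patch.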
require.Eventually(t, func() bool { uni.backend.Commit() commitment, err := coordinator.GetCommitment(nil, requestID) @@ -229,7 +231,7 @@ func checkForForceFulfilledEvent(t *testing.T, sub *vrfSub, numForcedLogs int) { requestID := req.requestID - it, err := th.uni.vrfOwnerNew.FilterRandomWordsForced(nil, []*big.Int{requestID}, + it, err := th.uni.vrfOwnerNew.FilterRandomWordsForced(indexedFilterOpts(t, th.uni.backend), []*big.Int{requestID}, []uint64{sub.subID}, []common.Address{th.eoaConsumerAddr}) require.NoError(t, err) i := 0 @@ -287,13 +289,13 @@ func fulfillVRFReq(t *testing.T, require.True(t, ok) metadata := &txmgr.TxMeta{ - RequestID: ptr(common.BytesToHash(req.requestID.Bytes())), + RequestID: new(common.BytesToHash(req.requestID.Bytes())), SubID: &sub.subID, RequestTxHash: req.requestTxHash, // No max link since simulation failed } if forceFulfill { - metadata.ForceFulfilled = ptr(true) + metadata.ForceFulfilled = new(true) if forceFulfilmentAttempt != nil { metadata.ForceFulfillmentAttempt = forceFulfilmentAttempt } @@ -310,7 +312,7 @@ func fulfillVRFReq(t *testing.T, ec.Commit() // wait for above tx to mine (reach state confirmed) - mine(t, req.requestID, big.NewInt(int64(sub.subID)), th.uni.backend, th.db, vrfcommon.V2, th.chainID) + mine(t, req.requestID, new(big.Int).SetUint64(sub.subID), th.uni.backend, th.db, vrfcommon.V2, th.chainID) receipts, err := getTxnReceiptDB(th.db, etx.ID) require.NoError(t, err) @@ -324,12 +326,13 @@ func fulfillVRFReq(t *testing.T, func fulfilBatchVRFReq(t *testing.T, th *revertTxnTH, reqs []*vrfReq, - sub *vrfSub) { - proofs := make([]vrf_coordinator_v2.VRFProof, 0) - reqCommitments := make([]vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment, 0) - requestIDs := make([]common.Hash, 0) - requestIDInts := make([]*big.Int, 0) - requestTxnHashes := make([]common.Hash, 0) + sub *vrfSub, +) { + proofs := make([]vrf_coordinator_v2.VRFProof, 0, len(reqs)) + reqCommitments := make([]vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment, 0, len(reqs)) + requestIDs := make([]common.Hash, 0, len(reqs)) + requestIDInts := make([]*big.Int, 0, len(reqs)) + requestTxnHashes := make([]common.Hash, 0, len(reqs)) // Generate VRF proof and commitment for i, req := range reqs { reqUpdated := genReqProofNCommitment(t, th, *req, sub) @@ -372,7 +375,7 @@ func fulfilBatchVRFReq(t *testing.T, ec.Commit() // wait for above tx to mine (reach state confirmed) - mineBatch(t, requestIDInts, big.NewInt(int64(sub.subID)), th.uni.backend, th.db, vrfcommon.V2, chainID) + mineBatch(t, requestIDInts, new(big.Int).SetUint64(sub.subID), th.uni.backend, th.db, vrfcommon.V2, chainID) receipts, err := getTxnReceiptDB(th.db, etx.ID) require.NoError(t, err) @@ -418,16 +421,15 @@ func createVRFJobsNew( gasLanePrices ...*assets.Wei, ) (jobs []job.Job, vrfKeyIDs []string) { ctx := testutils.Context(t) - if len(gasLanePrices) != len(fromKeys) { - t.Fatalf("must provide one gas lane price for each set of from addresses. 
len(gasLanePrices) != len(fromKeys) [%d != %d]", - len(gasLanePrices), len(fromKeys)) - } + require.Len(t, gasLanePrices, len(fromKeys), "must provide one gas lane price for each set of from addresses, got %d for %d sets", len(gasLanePrices), len(fromKeys)) // Create separate jobs for each gas lane and register their keys for i, keys := range fromKeys { - var keyStrs []string + keyStrs := make([]string, 0, len(keys)) for _, k := range keys { keyStrs = append(keyStrs, k.Address.String()) } + //nolint:gosec // we already checked the length of gasLanePrices above + gasLanePrice := gasLanePrices[i] vrfkey, err := app.GetKeyStore().VRF().Create(ctx) require.NoError(t, err) @@ -446,7 +448,7 @@ func createVRFJobsNew( BackoffInitialDelay: 10 * time.Millisecond, BackoffMaxDelay: time.Second, V2: true, - GasLanePrice: gasLanePrices[i], + GasLanePrice: gasLanePrice, VRFOwnerAddress: uni.vrfOwnerAddressNew.Hex(), CustomRevertsPipelineEnabled: true, EVMChainID: chainID.String(), @@ -456,7 +458,7 @@ func createVRFJobsNew( require.NoError(t, err) err = app.JobSpawner().CreateJob(ctx, nil, &jb) require.NoError(t, err) - registerProvingKeyHelper(t, uni.coordinatorV2UniverseCommon, coordinator, vrfkey, ptr(gasLanePrices[i].ToInt().Uint64())) + registerProvingKeyHelper(t, uni.coordinatorV2UniverseCommon, coordinator, vrfkey, new(gasLanePrice.ToInt().Uint64())) jobs = append(jobs, jb) vrfKeyIDs = append(vrfKeyIDs, vrfkey.ID()) } @@ -575,14 +577,14 @@ func newRevertTxnTH(t *testing.T, config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: new(key1.EIP55Address), GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, }, toml.KeySpecific{ // Gas lane. 
- Key: ptr(key2.EIP55Address), + Key: new(key2.EIP55Address), GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1, key2) @@ -678,12 +680,13 @@ func setupSub(t *testing.T, th *revertTxnTH, subID uint64, balance uint64) { b, err := evmutils.ABIEncode(`[{"type":"uint64"}]`, subID) require.NoError(t, err) _, err = uni.linkContract.TransferAndCall( - uni.sergey, coordinatorAddress, big.NewInt(int64(balance)), b) + uni.sergey, coordinatorAddress, new(big.Int).SetUint64(balance), b, + ) require.NoError(t, err, "failed to fund sub") uni.backend.Commit() // Add the consumer to the sub - subIDBig := big.NewInt(int64(subID)) + subIDBig := new(big.Int).SetUint64(subID) _, err = coordinator.AddConsumer(uni.neil, subIDBig, th.eoaConsumerAddr) require.NoError(t, err, "failed to add consumer") uni.backend.Commit() @@ -692,7 +695,7 @@ func setupSub(t *testing.T, th *revertTxnTH, subID uint64, balance uint64) { sub, err := coordinator.GetSubscription(nil, subIDBig) consumers := sub.Consumers() require.NoError(t, err, "failed to get subscription with id %d", subID) - require.Equal(t, big.NewInt(int64(balance)), sub.Balance()) + require.Equal(t, new(big.Int).SetUint64(balance), sub.Balance()) require.Len(t, consumers, 1) require.Equal(t, th.eoaConsumerAddr, consumers[0]) require.Equal(t, uni.neil.From, sub.Owner()) diff --git a/core/services/vrf/v2/integration_v2_test.go b/core/services/vrf/v2/integration_v2_test.go index 06dbf2a1c1e..d2a0410227d 100644 --- a/core/services/vrf/v2/integration_v2_test.go +++ b/core/services/vrf/v2/integration_v2_test.go @@ -28,8 +28,6 @@ import ( "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v4" - "github.com/smartcontractkit/quarantine" - commonkeystore "github.com/smartcontractkit/chainlink-common/keystore" commonassets "github.com/smartcontractkit/chainlink-common/pkg/assets" commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" @@ -178,7 +176,7 @@ func newVRFCoordinatorV2Universe(t *testing.T, key ethkey.KeyV2, numConsumers in evil = evmtestutils.MustNewSimTransactor(t) reverter = evmtestutils.MustNewSimTransactor(t) nallory = oracleTransactor - vrfConsumers []*bind.TransactOpts + vrfConsumers = make([]*bind.TransactOpts, 0, numConsumers) ) // Create consumer contract deployer identities @@ -286,14 +284,14 @@ func newVRFCoordinatorV2Universe(t *testing.T, key ethkey.KeyV2, numConsumers in // Create the VRF consumers. var ( - consumerContracts []vrftesthelpers.VRFConsumerContract - consumerContractAddresses []common.Address + consumerContracts = make([]vrftesthelpers.VRFConsumerContract, 0, len(vrfConsumers)) + consumerContractAddresses = make([]common.Address, 0, len(vrfConsumers)) ) for _, author := range vrfConsumers { // Deploy a VRF consumer. It has a starting balance of 500 LINK. 
- consumerContractAddress, _, consumerContract, err2 := - vrf_consumer_v2.DeployVRFConsumerV2( - author, backend.Client(), coordinatorAddress, linkAddress) + consumerContractAddress, _, consumerContract, err2 := vrf_consumer_v2.DeployVRFConsumerV2( + author, backend.Client(), coordinatorAddress, linkAddress, + ) require.NoError(t, err2, "failed to deploy VRFConsumer contract to simulated ethereum blockchain") backend.Commit() _, err2 = linkContract.Transfer(sergey, consumerContractAddress, assets.Ether(500).ToInt()) // Actually, LINK @@ -571,17 +569,17 @@ func createVRFJobs( gasLanePrices ...*assets.Wei, ) (jobs []job.Job) { ctx := testutils.Context(t) - if len(gasLanePrices) != len(fromKeys) { - t.Fatalf("must provide one gas lane price for each set of from addresses. len(gasLanePrices) != len(fromKeys) [%d != %d]", - len(gasLanePrices), len(fromKeys)) - } + require.Len(t, gasLanePrices, len(fromKeys), "must provide one gas lane price for each set of from addresses") // Create separate jobs for each gas lane and register their keys for i, keys := range fromKeys { - var keyStrs []string + keyStrs := make([]string, 0, len(keys)) for _, k := range keys { keyStrs = append(keyStrs, k.Address.String()) } + //nolint:gosec // we already checked the length of gasLanePrices above + gasLanePrice := gasLanePrices[i] + vrfkey, err := app.GetKeyStore().VRF().Create(ctx) require.NoError(t, err) @@ -605,7 +603,7 @@ func createVRFJobs( BackoffInitialDelay: 10 * time.Millisecond, BackoffMaxDelay: time.Second, V2: true, - GasLanePrice: gasLanePrices[i], + GasLanePrice: gasLanePrice, VRFOwnerAddress: vrfOwnerString, EVMChainID: testutils.SimulatedChainID.String(), }).Toml() @@ -615,7 +613,7 @@ func createVRFJobs( t.Log(jb.VRFSpec.PublicKey.MustHash(), vrfkey.PublicKey.MustHash()) err = app.JobSpawner().CreateJob(ctx, nil, &jb) require.NoError(t, err) - registerProvingKeyHelper(t, uni, coordinator, vrfkey, ptr(gasLanePrices[i].ToInt().Uint64())) + registerProvingKeyHelper(t, uni, coordinator, vrfkey, new(gasLanePrice.ToInt().Uint64())) jobs = append(jobs, jb) } // Wait until all jobs are active and listening for logs @@ -659,17 +657,26 @@ func requestRandomnessForWrapper( numWords, ) require.NoError(t, err) - uni.backend.Commit() + filterOpts := commitRequestAndFilterIndexBlock(t, uni.backend) - iter, err := coordinator.FilterRandomWordsRequested(nil, nil, []*big.Int{subID}, nil) - require.NoError(t, err, "could not filter RandomWordsRequested events") + // LogPoller indexes asynchronously; retry until the target block is available. 
+ var iter v22.RandomWordsRequestedIterator + require.Eventually(t, func() bool { + var filterErr error + iter, filterErr = coordinator.FilterRandomWordsRequested(filterOpts, nil, []*big.Int{subID}, nil) + if filterErr != nil { + uni.backend.Commit() + return false + } + return true + }, testutils.WaitTimeout(t), time.Second, "could not filter RandomWordsRequested events") var events []v22.RandomWordsRequested for iter.Next() { events = append(events, iter.Event()) } - wrapperIter, err := vrfWrapperConsumer.FilterWrapperRequestMade(nil, nil) + wrapperIter, err := vrfWrapperConsumer.FilterWrapperRequestMade(filterOpts, nil) require.NoError(t, err, "could not filter WrapperRequestMade events") wrapperConsumerEvents := []*vrfv2_wrapper_consumer_example.VRFV2WrapperConsumerExampleWrapperRequestMade{} @@ -716,10 +723,19 @@ func requestRandomnessAndAssertRandomWordsRequestedEvent( nativePayment, ) require.NoError(t, err) - backend.Commit() + filterOpts := commitRequestAndFilterIndexBlock(t, backend) - iter, err := coordinator.FilterRandomWordsRequested(nil, nil, []*big.Int{subID}, nil) - require.NoError(t, err, "could not filter RandomWordsRequested events") + // LogPoller indexes asynchronously; retry until the target block is available. + var iter v22.RandomWordsRequestedIterator + require.Eventually(t, func() bool { + var filterErr error + iter, filterErr = coordinator.FilterRandomWordsRequested(filterOpts, nil, []*big.Int{subID}, nil) + if filterErr != nil { + backend.Commit() + return false + } + return true + }, testutils.WaitTimeout(t), time.Second, "could not filter RandomWordsRequested events") var events []v22.RandomWordsRequested for iter.Next() { @@ -741,6 +757,30 @@ func requestRandomnessAndAssertRandomWordsRequestedEvent( return requestID, event.Raw().BlockNumber } +func commitRequestAndFilterIndexBlock(t *testing.T, backend types.Backend) *bind.FilterOpts { + ctx := testutils.Context(t) + block, err := backend.Client().BlockByHash(ctx, backend.Commit()) + require.NoError(t, err) + end := block.NumberU64() + // Geth's filter index reads end+1, so mine one more block and filter only + // through the request block. + backend.Commit() + return &bind.FilterOpts{Start: 0, End: &end, Context: ctx} +} + +func indexedFilterOpts(t *testing.T, backend types.Backend) *bind.FilterOpts { + ctx := testutils.Context(t) + header, err := backend.Client().HeaderByNumber(ctx, nil) + require.NoError(t, err) + if header.Number.Sign() == 0 { + return &bind.FilterOpts{Start: 0, Context: ctx} + } + // Geth's filter index reads end+1; latest-1 keeps the lookup bounded to an + // indexed block and avoids blocking simulated-backend mining loops. + end := header.Number.Uint64() - 1 + return &bind.FilterOpts{Start: 0, End: &end, Context: ctx} +} + // subscribeAndAssertSubscriptionCreatedEvent subscribes the given consumer contract // to VRF and funds the subscription with the given fundingJuels amount. It returns the // subscription ID of the resulting subscription. 
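[Editor's aside, not part of the patch.] A minimal sketch of how the two helpers above are meant to compose with a generated filterer, using only names that already appear in these hunks; the log line is illustrative:

	// Mine the request block plus one spare block, then filter with an explicit
	// End bound so geth's end+1 index read never waits on an unmined block.
	opts := commitRequestAndFilterIndexBlock(t, uni.backend)
	iter, err := coordinator.FilterRandomWordsRequested(opts, nil, []*big.Int{subID}, nil)
	require.NoError(t, err)
	for iter.Next() {
		t.Log("indexed request", iter.Event().RequestID())
	}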
@@ -929,7 +969,6 @@ func checkForReceipt(t *testing.T, db *sqlx.DB, txID int64) bool { } func TestVRFV2Integration_SingleConsumer_ForceFulfillment(t *testing.T) { - quarantine.Flaky(t, "DX-1875") t.Parallel() ownerKey := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) @@ -975,7 +1014,7 @@ func TestVRFV2Integration_SingleConsumer_HappyPath_BatchFulfillment(t *testing.T uni.rootContract, uni.rootContractAddress, uni.batchCoordinatorContractAddress, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), 5, // number of requests to send false, // don't send big callback vrfcommon.V2, @@ -997,7 +1036,7 @@ func TestVRFV2Integration_SingleConsumer_HappyPath_BatchFulfillment_BigGasCallba uni.rootContract, uni.rootContractAddress, uni.batchCoordinatorContractAddress, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), 5, // number of requests to send true, // send big callback vrfcommon.V2, @@ -1019,7 +1058,7 @@ func TestVRFV2Integration_SingleConsumer_HappyPath(t *testing.T) { uni.rootContract, uni.rootContractAddress, uni.batchCoordinatorContractAddress, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), vrfcommon.V2, false, func(t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, expectedSubID *big.Int) { @@ -1041,7 +1080,7 @@ func TestVRFV2Integration_SingleConsumer_EOA_Request(t *testing.T) { uni.coordinatorV2UniverseCommon, false, uni.batchBHSContractAddress, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), vrfcommon.V2, ) } @@ -1057,7 +1096,7 @@ func TestVRFV2Integration_SingleConsumer_EOA_Request_Batching_Enabled(t *testing uni.coordinatorV2UniverseCommon, true, uni.batchBHSContractAddress, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), vrfcommon.V2, ) } @@ -1072,7 +1111,7 @@ func testEoa( vrfVersion vrfcommon.Version, ) { ctx := testutils.Context(t) - gasLimit := int64(2_500_000) + gasLimit := uint64(2_500_000) finalityDepth := uint32(50) @@ -1081,17 +1120,17 @@ func testEoa( config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: new(key1.EIP55Address), GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].GasEstimator.LimitDefault = ptr(uint64(gasLimit)) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.EVM[0].FinalityDepth = ptr(finalityDepth) + c.EVM[0].GasEstimator.LimitDefault = new(gasLimit) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) + c.EVM[0].FinalityDepth = new(finalityDepth) }) app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) consumer := uni.vrfConsumers[0] - // Createa a new subscription. + // Create a new subscription. subID := setupAndFundSubscriptionAndConsumer( t, uni, @@ -1242,11 +1281,11 @@ func TestVRFV2Integration_SingleConsumer_Wrapper(t *testing.T) { config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ // Gas lane. 
- Key: ptr(key1.EIP55Address), + Key: new(key1.EIP55Address), GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].GasEstimator.LimitDefault = ptr[uint64](3_500_000) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.EVM[0].GasEstimator.LimitDefault = new(uint64(3_500_000)) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) }) ownerKey := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) @@ -1265,7 +1304,7 @@ func TestVRFV2Integration_SingleConsumer_Wrapper(t *testing.T) { uni.rootContractAddress, uni.batchCoordinatorContractAddress, uni.coordinatorV2UniverseCommon, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), vrfcommon.V2, false, gasLanePriceWei) @@ -1323,12 +1362,12 @@ func TestVRFV2Integration_Wrapper_High_Gas(t *testing.T) { config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ // Gas lane. - Key: ptr(key1.EIP55Address), + Key: new(key1.EIP55Address), GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].GasEstimator.LimitDefault = ptr[uint64](3_500_000) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) - c.Feature.LogPoller = ptr(true) + c.EVM[0].GasEstimator.LimitDefault = new(uint64(3_500_000)) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) + c.Feature.LogPoller = new(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) }) ownerKey := cltest.MustGenerateRandomKey(t) @@ -1348,7 +1387,7 @@ func TestVRFV2Integration_Wrapper_High_Gas(t *testing.T) { uni.rootContractAddress, uni.batchCoordinatorContractAddress, uni.coordinatorV2UniverseCommon, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), vrfcommon.V2, false, gasLanePriceWei) @@ -1408,7 +1447,7 @@ func TestVRFV2Integration_SingleConsumer_NeedsBlockhashStore(t *testing.T) { uni.rootContract, uni.rootContractAddress, uni.batchCoordinatorContractAddress, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), vrfcommon.V2, false, ) @@ -1468,7 +1507,7 @@ func TestVRFV2Integration_SingleConsumer_BlockHeaderFeeder(t *testing.T) { uni.rootContract, uni.rootContractAddress, uni.batchCoordinatorContractAddress, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), vrfcommon.V2, false, ) @@ -1489,7 +1528,7 @@ func TestVRFV2Integration_SingleConsumer_NeedsTopUp(t *testing.T) { uni.rootContract, uni.rootContractAddress, uni.batchCoordinatorContractAddress, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), assets.Ether(1).ToInt(), // initial funding of 1 LINK assets.Ether(100).ToInt(), // top up of 100 LINK vrfcommon.V2, @@ -1547,23 +1586,23 @@ func TestVRFV2Integration_ConsumerProxy_CoordinatorZeroAddress(t *testing.T) { func simulatedOverrides(t *testing.T, defaultGasPrice *assets.Wei, ks ...toml.KeySpecific) func(*chainlink.Config, *chainlink.Secrets) { return func(c *chainlink.Config, s *chainlink.Secrets) { require.Zero(t, testutils.SimulatedChainID.Cmp(c.EVM[0].ChainID.ToInt())) - c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + c.EVM[0].GasEstimator.Mode = new("FixedPrice") if defaultGasPrice != nil { c.EVM[0].GasEstimator.PriceDefault = defaultGasPrice } - c.EVM[0].GasEstimator.LimitDefault = ptr[uint64](3_500_000) + c.EVM[0].GasEstimator.LimitDefault = new(uint64(3_500_000)) - c.Feature.LogPoller = ptr(true) + c.Feature.LogPoller = new(true) c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) - c.EVM[0].HeadTracker.MaxBufferSize = ptr[uint32](100) + 
c.EVM[0].HeadTracker.MaxBufferSize = new(uint32(100)) c.EVM[0].HeadTracker.SamplingInterval = commonconfig.MustNewDuration(0) // Head sampling disabled c.EVM[0].Transactions.ResendAfterThreshold = commonconfig.MustNewDuration(0) c.EVM[0].Transactions.ReaperThreshold = commonconfig.MustNewDuration(100 * time.Millisecond) - c.EVM[0].FinalityDepth = ptr[uint32](15) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + c.EVM[0].FinalityDepth = new(uint32(15)) + c.EVM[0].MinIncomingConfirmations = new(uint32(1)) c.EVM[0].MinContractPayment = commonassets.NewLinkFromJuels(100) c.EVM[0].KeySpecific = ks } @@ -1708,7 +1747,7 @@ func TestIntegrationVRFV2(t *testing.T) { Key: &key.EIP55Address, GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, })(c, s) - c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.EVM[0].MinIncomingConfirmations = new(uint32(2)) }) uni := newVRFCoordinatorV2Universe(t, key, 1) carol := uni.vrfConsumers[0] @@ -1736,7 +1775,7 @@ func TestIntegrationVRFV2(t *testing.T) { uni.rootContractAddress, uni.batchCoordinatorContractAddress, uni.coordinatorV2UniverseCommon, - ptr(uni.vrfOwnerAddress), + new(uni.vrfOwnerAddress), vrfcommon.V2, false, gasLanePriceWei) @@ -1765,9 +1804,9 @@ func TestIntegrationVRFV2(t *testing.T) { assets.Ether(1).ToInt(), big.NewInt(0), }) - subId, err := carolContract.SSubId(nil) + subID, err := carolContract.SSubId(nil) require.NoError(t, err) - subStart, err := uni.rootContract.GetSubscription(nil, subId) + subStart, err := uni.rootContract.GetSubscription(nil, subID) require.NoError(t, err) // Make a request for random words. @@ -1776,7 +1815,7 @@ func TestIntegrationVRFV2(t *testing.T) { gasRequested := 500_000 nw := 10 requestedIncomingConfs := 3 - _, err = carolContract.RequestRandomness(carol, keyHash, subId, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) + _, err = carolContract.RequestRandomness(carol, keyHash, subID, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) require.NoError(t, err) // Oracle tries to withdraw before its fulfilled should fail @@ -1838,7 +1877,7 @@ func TestIntegrationVRFV2(t *testing.T) { // Assert that we were only charged for how much gas we actually used. // We should be charged for the verification + our callbacks execution in link. - subEnd, err := uni.rootContract.GetSubscription(nil, subId) + subEnd, err := uni.rootContract.GetSubscription(nil, subID) require.NoError(t, err) var ( end = decimal.RequireFromString(subEnd.Balance().String()) @@ -1852,7 +1891,7 @@ func TestIntegrationVRFV2(t *testing.T) { linkCharged := linkWeiCharged.Sub(decimal.RequireFromString("1000000000000000")).Div(wei) gasPriceD := decimal.NewFromBigInt(gasPrice.ToInt(), 0) t.Logf("subscription charged %s with gas prices of %s gwei and %s ETH per LINK\n", linkCharged, gasPriceD.Div(gwei), vrftesthelpers.WeiPerUnitLink.Div(wei)) - expected := decimal.RequireFromString(strconv.Itoa(int(fulfillReceipt.GasUsed))).Mul(gasPriceD).Div(vrftesthelpers.WeiPerUnitLink) + expected := decimal.RequireFromString(strconv.FormatUint(fulfillReceipt.GasUsed, 10)).Mul(gasPriceD).Div(vrftesthelpers.WeiPerUnitLink) t.Logf("expected sub charge gas use %v %v off by %v", fulfillReceipt.GasUsed, expected, expected.Sub(linkCharged)) // The expected sub charge should be within 200 gas of the actual gas usage. 
// wei/link * link / wei/gas = wei / (wei/gas) = gas @@ -1930,10 +1969,10 @@ func TestRequestCost(t *testing.T) { big.NewInt(1000000000000000000)) // 0.1 LINK require.NoError(tt, err) uni.backend.Commit() - subId, err := carolContract.SSubId(nil) + subID, err := carolContract.SSubId(nil) require.NoError(tt, err) // Ensure even with large number of consumers its still cheap - var addrs []common.Address + addrs := make([]common.Address, 0, 99) for range 99 { addrs = append(addrs, testutils.NewAddress()) } @@ -1941,7 +1980,7 @@ func TestRequestCost(t *testing.T) { require.NoError(tt, err) estimate := estimateGas(tt, uni.backend, common.Address{}, carolContractAddress, uni.consumerABI, - "requestRandomness", vrfkey.PublicKey.MustHash(), subId.Uint64(), uint16(2), uint32(10000), uint32(1)) + "requestRandomness", vrfkey.PublicKey.MustHash(), subID.Uint64(), uint16(2), uint32(10000), uint32(1)) tt.Log("gas estimate of non-proxied testRequestRandomness:", estimate) // V2 should be at least (87000-134000)/134000 = 35% cheaper // Note that a second call drops further to 68998 gas, but would also drop in V1. @@ -1962,15 +2001,15 @@ func TestRequestCost(t *testing.T) { require.NoError(tt, err) t.Log("gas used by proxied CreateSubscriptionAndFund:", r.GasUsed) - subId, err := consumerContract.SSubId(nil) + subID, err := consumerContract.SSubId(nil) require.NoError(tt, err) - _, err = uni.rootContract.GetSubscription(nil, subId) + _, err = uni.rootContract.GetSubscription(nil, subID) require.NoError(tt, err) theAbi := types.MustGetABI(vrf_consumer_v2_upgradeable_example.VRFConsumerV2UpgradeableExampleMetaData.ABI) estimate := estimateGas(tt, uni.backend, common.Address{}, consumerContractAddress, &theAbi, - "requestRandomness", vrfkey.PublicKey.MustHash(), subId.Uint64(), uint16(2), uint32(10000), uint32(1)) + "requestRandomness", vrfkey.PublicKey.MustHash(), subID.Uint64(), uint16(2), uint32(10000), uint32(1)) tt.Log("gas estimate of proxied requestRandomness:", estimate) // There is some gas overhead of the delegatecall that is made by the proxy // to the logic contract. 
See https://www.evm.codes/#f4?fork=grayGlacier for a detailed @@ -1995,9 +2034,9 @@ func TestMaxConsumersCost(t *testing.T) { big.NewInt(1000000000000000000)) // 0.1 LINK require.NoError(t, err) uni.backend.Commit() - subId, err := carolContract.SSubId(nil) + subID, err := carolContract.SSubId(nil) require.NoError(t, err) - var addrs []common.Address + addrs := make([]common.Address, 0, 98) for range 98 { addrs = append(addrs, testutils.NewAddress()) } @@ -2006,12 +2045,12 @@ func TestMaxConsumersCost(t *testing.T) { require.NoError(t, err) estimate := estimateGas(t, uni.backend, carolContractAddress, uni.rootContractAddress, uni.coordinatorABI, - "removeConsumer", subId.Uint64(), carolContractAddress) + "removeConsumer", subID.Uint64(), carolContractAddress) t.Log(estimate) assert.Less(t, estimate, uint64(310000)) estimate = estimateGas(t, uni.backend, carolContractAddress, uni.rootContractAddress, uni.coordinatorABI, - "addConsumer", subId.Uint64(), testutils.NewAddress()) + "addConsumer", subID.Uint64(), testutils.NewAddress()) t.Log(estimate) assert.Less(t, estimate, uint64(100000)) } @@ -2042,13 +2081,13 @@ func TestFulfillmentCost(t *testing.T) { big.NewInt(1000000000000000000)) // 0.1 LINK require.NoError(tt, err) uni.backend.Commit() - subId, err := carolContract.SSubId(nil) + subID, err := carolContract.SSubId(nil) require.NoError(tt, err) gasRequested := 50_000 nw := 1 requestedIncomingConfs := 3 - _, err = carolContract.RequestRandomness(carol, vrfkey.PublicKey.MustHash(), subId, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) + _, err = carolContract.RequestRandomness(carol, vrfkey.PublicKey.MustHash(), subID, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) require.NoError(t, err) for range requestedIncomingConfs { uni.backend.Commit() @@ -2061,7 +2100,7 @@ func TestFulfillmentCost(t *testing.T) { PreSeed: s, BlockHash: requestLog.Raw().BlockHash, BlockNum: requestLog.Raw().BlockNumber, - SubId: subId.Uint64(), + SubId: subID.Uint64(), CallbackGasLimit: uint32(gasRequested), NumWords: uint32(nw), Sender: carolContractAddress, @@ -2084,26 +2123,26 @@ func TestFulfillmentCost(t *testing.T) { _, err := consumerContract.CreateSubscriptionAndFund(consumerOwner, assets.Ether(5).ToInt()) require.NoError(t, err) uni.backend.Commit() - subId, err := consumerContract.SSubId(nil) + subID, err := consumerContract.SSubId(nil) require.NoError(t, err) gasRequested := 50_000 nw := 1 requestedIncomingConfs := 3 - _, err = consumerContract.RequestRandomness(consumerOwner, vrfkey.PublicKey.MustHash(), subId, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) + _, err = consumerContract.RequestRandomness(consumerOwner, vrfkey.PublicKey.MustHash(), subID, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) require.NoError(t, err) for range requestedIncomingConfs { uni.backend.Commit() } requestLog := FindLatestRandomnessRequestedLog(t, uni.rootContract, vrfkey.PublicKey.MustHash(), nil) - require.Equal(tt, subId, requestLog.SubID()) + require.Equal(tt, subID, requestLog.SubID()) s, err := proof.BigToSeed(requestLog.PreSeed()) require.NoError(t, err) proof, rc, err := proof.GenerateProofResponseV2(app.GetKeyStore().VRF(), vrfkey.ID(), proof.PreSeedDataV2{ PreSeed: s, BlockHash: requestLog.Raw().BlockHash, BlockNum: requestLog.Raw().BlockNumber, - SubId: subId.Uint64(), + SubId: subID.Uint64(), CallbackGasLimit: uint32(gasRequested), NumWords: uint32(nw), Sender: consumerContractAddress, @@ -2177,8 +2216,32 
@@ func TestStartingCountsV1(t *testing.T) { md2SQL := sqlutil.JSON(md2) require.NoError(t, err) chainID := sqlutil.New(testutils.SimulatedChainID) - confirmedTxes := []txmgr.Tx{ - { + // Build unconfirmed txes first so confirmedTxes can be preallocated for append(confirmed, unconfirmed...). + unconfirmedTxes := make([]txmgr.Tx, 0, 2) + for i := int64(4); i < 6; i++ { + reqID3 := evmutils.PadByteToHash(0x12) + md, err2 := json.Marshal(&txmgr.TxMeta{ + RequestID: &reqID3, + }) + require.NoError(t, err2) + mdSQL := sqlutil.JSON(md) + newNonce := types.Nonce(i + 1) + unconfirmedTxes = append(unconfirmedTxes, txmgr.Tx{ + Sequence: &newNonce, + FromAddress: k.Address, + Error: null.String{}, + CreatedAt: b, + State: txmgrcommon.TxUnconfirmed, + BroadcastAt: &b, + InitialBroadcastAt: &b, + Meta: &mdSQL, + EncodedPayload: []byte{}, + ChainID: chainID.ToInt(), + }) + } + confirmedTxes := make([]txmgr.Tx, 0, 4+len(unconfirmedTxes)) + confirmedTxes = append(confirmedTxes, + txmgr.Tx{ Sequence: &n1, FromAddress: k.Address, Error: null.String{}, @@ -2190,7 +2253,7 @@ func TestStartingCountsV1(t *testing.T) { EncodedPayload: []byte{}, ChainID: chainID.ToInt(), }, - { + txmgr.Tx{ Sequence: &n2, FromAddress: k.Address, Error: null.String{}, @@ -2202,7 +2265,7 @@ func TestStartingCountsV1(t *testing.T) { EncodedPayload: []byte{}, ChainID: chainID.ToInt(), }, - { + txmgr.Tx{ Sequence: &n3, FromAddress: k.Address, Error: null.String{}, @@ -2214,7 +2277,7 @@ func TestStartingCountsV1(t *testing.T) { EncodedPayload: []byte{}, ChainID: chainID.ToInt(), }, - { + txmgr.Tx{ Sequence: &n4, FromAddress: k.Address, Error: null.String{}, @@ -2226,30 +2289,7 @@ func TestStartingCountsV1(t *testing.T) { EncodedPayload: []byte{}, ChainID: chainID.ToInt(), }, - } - // add unconfirmed txes - unconfirmedTxes := []txmgr.Tx{} - for i := int64(4); i < 6; i++ { - reqID3 := evmutils.PadByteToHash(0x12) - md, err2 := json.Marshal(&txmgr.TxMeta{ - RequestID: &reqID3, - }) - require.NoError(t, err2) - mdSQL := sqlutil.JSON(md) - newNonce := types.Nonce(i + 1) - unconfirmedTxes = append(unconfirmedTxes, txmgr.Tx{ - Sequence: &newNonce, - FromAddress: k.Address, - Error: null.String{}, - CreatedAt: b, - State: txmgrcommon.TxUnconfirmed, - BroadcastAt: &b, - InitialBroadcastAt: &b, - Meta: &mdSQL, - EncodedPayload: []byte{}, - ChainID: chainID.ToInt(), - }) - } + ) txList := append(confirmedTxes, unconfirmedTxes...) 
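[Editor's aside, not part of the patch.] A standalone sketch of the allocation behavior the comment in the hunk above relies on: when the destination slice is preallocated with capacity for both groups, append(confirmed, unconfirmed...) fills the existing backing array instead of allocating a new one, which is why unconfirmedTxes is built first, so its length is available for the capacity hint.

	package main

	import "fmt"

	func main() {
		confirmed := make([]int, 0, 4+2) // capacity for 4 confirmed + 2 unconfirmed
		confirmed = append(confirmed, 1, 2, 3, 4)
		unconfirmed := []int{5, 6}
		all := append(confirmed, unconfirmed...) // fits in place, no reallocation
		fmt.Println(len(all), cap(all))          // prints: 6 6
	}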
for i := range txList { err = txStore.InsertTx(ctx, &txList[i]) @@ -2258,7 +2298,7 @@ func TestStartingCountsV1(t *testing.T) { // add tx attempt for confirmed broadcastBlock := int64(1) - var txAttempts []txmgr.TxAttempt + txAttempts := make([]txmgr.TxAttempt, 0, len(confirmedTxes)+len(unconfirmedTxes)) for i := range confirmedTxes { txAttempts = append(txAttempts, txmgr.TxAttempt{ TxID: int64(i + 1), @@ -2292,7 +2332,7 @@ func TestStartingCountsV1(t *testing.T) { } // add evm.receipts - receipts := []types.Receipt{} + receipts := make([]types.Receipt, 0, 4) for i := range 4 { receipts = append(receipts, types.Receipt{ BlockHash: evmutils.NewHash(), @@ -2381,8 +2421,6 @@ func AssertLinkBalances(t *testing.T, linkContract *link_token_interface.LinkTok } } -func ptr[T any](t T) *T { return &t } - func pair(x, y *big.Int) [2]*big.Int { return [2]*big.Int{x, y} } // estimateGas returns the estimated gas cost of running the given method on the diff --git a/core/services/vrf/v2/listener_v2.go b/core/services/vrf/v2/listener_v2.go index 8b912dd9047..067892ec82c 100644 --- a/core/services/vrf/v2/listener_v2.go +++ b/core/services/vrf/v2/listener_v2.go @@ -62,8 +62,8 @@ const ( // backoffFactor is the factor by which to increase the delay each time a request fails. backoffFactor = 1.3 - txMetaFieldSubId = "SubId" - txMetaGlobalSubId = "GlobalSubId" + txMetaFieldSubID = "SubId" + txMetaGlobalSubID = "GlobalSubId" ) func New( @@ -195,19 +195,15 @@ func (lsn *listenerV2) Start(ctx context.Context) error { if lsn.job.VRFSpec.CustomRevertsPipelineEnabled && lsn.vrfOwner != nil && lsn.job.VRFSpec.VRFOwnerAddress != nil { // Start reverted txns handler in background - lsn.wg.Add(1) - go func() { - defer lsn.wg.Done() + lsn.wg.Go(func() { lsn.runRevertedTxnsHandler(spec.PollPeriod) - }() + }) } // Log listener gathers request logs and processes them - lsn.wg.Add(1) - go func() { - defer lsn.wg.Done() + lsn.wg.Go(func() { lsn.runLogListener(spec.PollPeriod, spec.MinIncomingConfirmations) - }() + }) return nil }) @@ -248,6 +244,10 @@ func (lsn *listenerV2) GetStartingResponseCountsV2(ctx context.Context) (respCou continue } bi := new(big.Int).SetBytes(b) + if c.Count < 0 { + lsn.l.Errorw("unexpected negative fulfillment count from tx metadata", "count", c.Count, "reqID", c.RequestID) + continue + } respCounts[bi.String()] = uint64(c.Count) } return respCounts, nil @@ -256,6 +256,9 @@ func (lsn *listenerV2) GetStartingResponseCountsV2(ctx context.Context) (respCou func (lsn *listenerV2) setLatestHead(head logpoller.Block) { lsn.latestHeadMu.Lock() defer lsn.latestHeadMu.Unlock() + if head.BlockNumber < 0 { + return + } num := uint64(head.BlockNumber) if num > lsn.latestHeadNumber { lsn.latestHeadNumber = num diff --git a/core/services/vrf/v2/listener_v2_helpers.go b/core/services/vrf/v2/listener_v2_helpers.go index b3a3675e296..482d7a5d5b6 100644 --- a/core/services/vrf/v2/listener_v2_helpers.go +++ b/core/services/vrf/v2/listener_v2_helpers.go @@ -72,8 +72,6 @@ func observeRequestSimDuration(jobName string, extJobID uuid.UUID, vrfVersion vr } } -func ptr[T any](t T) *T { return &t } - func isProofVerificationError(errMsg string) bool { // See VRF.sol for all these messages // NOTE: it's unclear which of these errors are impossible and which diff --git a/core/services/vrf/v2/listener_v2_log_listener.go b/core/services/vrf/v2/listener_v2_log_listener.go index 3cf6fdcf1cb..30fc7a49f4d 100644 --- a/core/services/vrf/v2/listener_v2_log_listener.go +++ b/core/services/vrf/v2/listener_v2_log_listener.go @@ -5,6 
+5,7 @@ import ( "context" "errors" "fmt" + "math" "math/big" "time" @@ -172,14 +173,22 @@ func (lsn *listenerV2) initializeLastProcessedBlock(ctx context.Context) (lastPr // find request block of earliest unfulfilled request // even if this block is > latest finalized, we use latest finalized as earliest unprocessed // because re-orgs can occur on any unfinalized block. - var earliestUnfulfilledBlock = latestBlock.FinalizedBlockNumber + finalized := latestBlock.FinalizedBlockNumber + if finalized < 0 { + lsn.l.Errorw("unexpected negative finalized block number", "finalized", finalized) + finalized = 0 + } + earliestU := uint64(finalized) for _, req := range unfulfilled { - if req.Raw().BlockNumber < uint64(earliestUnfulfilledBlock) { - earliestUnfulfilledBlock = int64(req.Raw().BlockNumber) + rb := req.Raw().BlockNumber + if rb < earliestU { + earliestU = rb } } - - return earliestUnfulfilledBlock, nil + if earliestU > uint64(math.MaxInt64) { + return math.MaxInt64, nil + } + return int64(earliestU), nil } func (lsn *listenerV2) updateLastProcessedBlock(ctx context.Context, currLastProcessedBlock int64) (lastProcessedBlock int64, err error) { @@ -215,7 +224,12 @@ func (lsn *listenerV2) updateLastProcessedBlock(ctx context.Context, currLastPro // find request block of earliest unfulfilled request // even if this block is > latest finalized, we use latest finalized as earliest unprocessed // because re-orgs can occur on any unfinalized block. - var earliestUnprocessedRequestBlock = latestBlock.FinalizedBlockNumber + finalized := latestBlock.FinalizedBlockNumber + if finalized < 0 { + lsn.l.Errorw("unexpected negative finalized block number", "finalized", finalized) + finalized = 0 + } + earliestU := uint64(finalized) for i, req := range unfulfilled { // need to drop requests that have timed out otherwise the earliestUnprocessedRequestBlock // will be unnecessarily far back and our queries will be slower. @@ -226,12 +240,15 @@ func (lsn *listenerV2) updateLastProcessedBlock(ctx context.Context, currLastPro ) continue } - if req.Raw().BlockNumber < uint64(earliestUnprocessedRequestBlock) { - earliestUnprocessedRequestBlock = int64(req.Raw().BlockNumber) + rb := req.Raw().BlockNumber + if rb < earliestU { + earliestU = rb } } - - return earliestUnprocessedRequestBlock, nil + if earliestU > uint64(math.MaxInt64) { + return math.MaxInt64, nil + } + return int64(earliestU), nil } // pollLogs uses the log poller to poll for the latest VRF logs @@ -342,13 +359,24 @@ func (lsn *listenerV2) getConfirmedAt(req RandomWordsRequested, nodeMinConfs uin // Take the max(nodeMinConfs, requestedConfs + requestedConfsDelay). // Add the requested confs delay if provided in the jobspec so that we avoid an edge case // where the primary and backup VRF v2 nodes submit a proof at the same time. - minConfs := max(uint32(req.MinimumRequestConfirmations())+uint32(lsn.job.VRFSpec.RequestedConfsDelay), nodeMinConfs) + delay := lsn.job.VRFSpec.RequestedConfsDelay + var delayU32 uint32 + switch { + case delay < 0: + delayU32 = 0 + case delay > math.MaxUint32: + delayU32 = math.MaxUint32 + default: + delayU32 = uint32(delay) + } + minConfs := max(uint32(req.MinimumRequestConfirmations())+delayU32, nodeMinConfs) newConfs := min( // We cap this at 200 because solidity only supports the most recent 256 blocks // in the contract so if it was older than that, fulfillments would start failing // without the blockhash store feeder. We use 200 to give the node plenty of time // to fulfill even on fast chains. 
uint64(minConfs)*(1<<lsn.respCount[req.RequestID().String()]), 200, ) if lsn.respCount[req.RequestID().String()] > 0 { lsn.l.Warnw("Duplicate request found after fulfillment, doubling incoming confirmations", "txHash", req.Raw().TxHash, diff --git a/core/services/vrf/v2/listener_v2_log_listener_test.go b/core/services/vrf/v2/listener_v2_log_listener_test.go index 20ff3f1f73b..837618fbec1 100644 --- a/core/services/vrf/v2/listener_v2_log_listener_test.go +++ b/core/services/vrf/v2/listener_v2_log_listener_test.go @@ -832,7 +832,7 @@ func TestGetUnfulfilled_NoVRFReqs(t *testing.T) { listener, chainID := SetupGetUnfulfilledTH(t) - logs := []logpoller.Log{} + logs := make([]logpoller.Log, 0, 10) for i := range 10 { logs = append(logs, logpoller.Log{ EVMChainID: chainID, @@ -919,7 +919,7 @@ func TestGetUnfulfilled_OneUnfulfilledVRFReq(t *testing.T) { listener, chainID := SetupGetUnfulfilledTH(t) - logs := []logpoller.Log{} + logs := make([]logpoller.Log, 0, 10) for i := range 10 { eventSig := emitterABI.Events["Log1"].ID topics := [][]byte{ @@ -960,7 +960,7 @@ func TestGetUnfulfilled_SomeUnfulfilledVRFReq(t *testing.T) { listener, chainID := SetupGetUnfulfilledTH(t) - logs := []logpoller.Log{} + logs := make([]logpoller.Log, 0, 10) for i := range 10 { eventSig := emitterABI.Events["Log1"].ID topics := [][]byte{ diff --git a/core/services/vrf/v2/listener_v2_log_processor.go b/core/services/vrf/v2/listener_v2_log_processor.go index c2fdd312ac9..f769f51ee3e 100644 --- a/core/services/vrf/v2/listener_v2_log_processor.go +++ b/core/services/vrf/v2/listener_v2_log_processor.go @@ -68,12 +68,12 @@ func (lsn *listenerV2) ready(req pendingRequest, latestHead uint64) bool { req.lastTry)) } -func nextTry(retries int, initial, max time.Duration, last time.Time) time.Time { +func nextTry(retries int, initial, maxDur time.Duration, last time.Time) time.Time { expBackoffFactor := math.Pow(backoffFactor, float64(retries-1)) var delay time.Duration - if expBackoffFactor > float64(max/initial) { - delay = max + if expBackoffFactor > float64(maxDur/initial) { + delay = maxDur } else { delay = time.Duration(float64(initial) * expBackoffFactor) } @@ -83,15 +83,15 @@ func nextTry(retries int, initial, max time.Duration, last time.Time) time.Time // Remove all entries 10000 blocks or older // to avoid a memory leak.
func (lsn *listenerV2) pruneConfirmedRequestCounts() { - min := lsn.blockNumberToReqID.FindMin() - for min != nil { - m := min.(fulfilledReqV2) + minBlock := lsn.blockNumberToReqID.FindMin() + for minBlock != nil { + m := minBlock.(fulfilledReqV2) if m.blockNumber > (lsn.getLatestHead() - 10000) { break } delete(lsn.respCount, m.reqID) lsn.blockNumberToReqID.DeleteMin() - min = lsn.blockNumberToReqID.FindMin() + minBlock = lsn.blockNumberToReqID.FindMin() } } @@ -207,9 +207,9 @@ func (lsn *listenerV2) MaybeSubtractReservedLink(ctx context.Context, startBalan var metaField string switch vrfVersion { case vrfcommon.V2Plus: - metaField = txMetaGlobalSubId + metaField = txMetaGlobalSubID case vrfcommon.V2: - metaField = txMetaFieldSubId + metaField = txMetaFieldSubID default: return nil, errors.Errorf("unsupported vrf version %s", vrfVersion) } @@ -247,7 +247,7 @@ func (lsn *listenerV2) MaybeSubtractReservedEth(ctx context.Context, startBalanc var metaField string switch vrfVersion { case vrfcommon.V2Plus: - metaField = txMetaGlobalSubId + metaField = txMetaGlobalSubID case vrfcommon.V2: // native payment is not supported for v2, so returning 0 reserved ETH return big.NewInt(0), nil @@ -409,11 +409,12 @@ func (lsn *listenerV2) processRequestsPerSubBatchHelper( ll = ll.With("fromAddress", fromAddress) if p.err != nil { - if errors.Is(p.err, errBlockhashNotInStore{}) { + switch { + case errors.Is(p.err, blockhashNotInStoreError{}): // Running the blockhash store feeder in backwards mode will be required to // resolve this. ll.Criticalw("Pipeline error", "err", p.err) - } else if errors.Is(p.err, errProofVerificationFailed{}) { + case errors.Is(p.err, proofVerificationFailedError{}): // This occurs when the proof reverts in the simulation // This is almost always (if not always) due to a proof generated with an out-of-date // blockhash @@ -421,7 +422,7 @@ func (lsn *listenerV2) processRequestsPerSubBatchHelper( // process the request with the right blockhash ll.Infow("proof reverted in simulation, likely stale blockhash") processed[p.req.req.RequestID().String()] = struct{}{} - } else { + default: ll.Errorw("Pipeline error", "err", p.err) if !subIsActive { ll.Warnw("Force-fulfilling a request with insufficient funds on a cancelled sub") @@ -440,7 +441,7 @@ func (lsn *listenerV2) processRequestsPerSubBatchHelper( continue } - if startBalanceNoReserved.Cmp(p.fundsNeeded) < 0 && errors.Is(p.err, errPossiblyInsufficientFunds{}) { + if startBalanceNoReserved.Cmp(p.fundsNeeded) < 0 && errors.Is(p.err, possiblyInsufficientFundsError{}) { ll.Infow("Insufficient balance to fulfill a request based on estimate, breaking", "err", p.err) outOfBalance = true @@ -588,10 +589,7 @@ func (lsn *listenerV2) enqueueForceFulfillment( lsn.l.Infow("fulfillRandomWords payload", "proof", p.proof, "commitment", p.reqCommitment.Get(), "payload", p.payload) txData := hexutil.MustDecode(p.payload) - if err != nil { - err = fmt.Errorf("abi pack VRFOwner.fulfillRandomWords: %w", err) - return - } + estimateGasLimit, err := lsn.chain.Client().EstimateGas(ctx, ethereum.CallMsg{ From: fromAddress, To: &vrfOwnerAddressSpec, @@ -619,7 +617,7 @@ func (lsn *listenerV2) enqueueForceFulfillment( Strategy: txmgrcommon.NewSendEveryStrategy(), Meta: &txmgr.TxMeta{ RequestID: &requestID, - SubID: ptr(subID.Uint64()), + SubID: new(subID.Uint64()), RequestTxHash: &requestTxHash, // No max link since simulation failed }, @@ -632,7 +630,7 @@ func (lsn *listenerV2) enqueueForceFulfillment( func (lsn *listenerV2) 
isConsumerValidAfterFinalityDepthElapsed(ctx context.Context, req pendingRequest) bool { latestHead := lsn.getLatestHead() if latestHead-req.req.Raw().BlockNumber > uint64(lsn.cfg.FinalityDepth()) { - code, err := lsn.chain.Client().CodeAt(ctx, req.req.Sender(), big.NewInt(int64(latestHead))) + code, err := lsn.chain.Client().CodeAt(ctx, req.req.Sender(), new(big.Int).SetUint64(latestHead)) if err != nil { lsn.l.Warnw("Failed to fetch contract code", "err", err) return true // error fetching code, give the benefit of doubt to the consumer @@ -732,11 +730,12 @@ func (lsn *listenerV2) processRequestsPerSubHelper( ll = ll.With("fromAddress", fromAddress) if p.err != nil { - if errors.Is(p.err, errBlockhashNotInStore{}) { + switch { + case errors.Is(p.err, blockhashNotInStoreError{}): // Running the blockhash store feeder in backwards mode will be required to // resolve this. ll.Criticalw("Pipeline error", "err", p.err) - } else if errors.Is(p.err, errProofVerificationFailed{}) { + case errors.Is(p.err, proofVerificationFailedError{}): // This occurs when the proof reverts in the simulation // This is almost always (if not always) due to a proof generated with an out-of-date // blockhash @@ -744,7 +743,7 @@ func (lsn *listenerV2) processRequestsPerSubHelper( // process the request with the right blockhash ll.Infow("proof reverted in simulation, likely stale blockhash") processed[p.req.req.RequestID().String()] = struct{}{} - } else { + default: ll.Errorw("Pipeline error", "err", p.err) if !subIsActive { @@ -810,9 +809,9 @@ func (lsn *listenerV2) processRequestsPerSubHelper( txMetaGlobalSubID *string ) if lsn.coordinator.Version() == vrfcommon.V2Plus { - txMetaGlobalSubID = ptr(p.req.req.SubID().String()) + txMetaGlobalSubID = new(p.req.req.SubID().String()) } else if lsn.coordinator.Version() == vrfcommon.V2 { - txMetaSubID = ptr(p.req.req.SubID().Uint64()) + txMetaSubID = new(p.req.req.SubID().Uint64()) } requestID := common.BytesToHash(p.req.req.RequestID().Bytes()) coordinatorAddress := lsn.coordinator.Address() @@ -876,9 +875,9 @@ func (lsn *listenerV2) processRequestsPerSub( } var processed = make(map[string]struct{}) - chainId := lsn.chain.Client().ConfiguredChainID() + chainID := lsn.chain.Client().ConfiguredChainID() startBalanceNoReserveLink, err := lsn.MaybeSubtractReservedLink( - ctx, startLinkBalance, chainId, subID, lsn.coordinator.Version()) + ctx, startLinkBalance, chainID, subID, lsn.coordinator.Version()) if err != nil { lsn.l.Errorw("Couldn't get reserved LINK for subscription", "sub", reqs[0].req.SubID(), "err", err) return processed @@ -917,9 +916,7 @@ func (lsn *listenerV2) processRequestsPerSub( wg sync.WaitGroup nativeProcessed, linkProcessed map[string]struct{} ) - wg.Add(2) - go func() { - defer wg.Done() + wg.Go(func() { nativeProcessed = lsn.processRequestsPerSubHelper( ctx, subID, @@ -928,9 +925,8 @@ func (lsn *listenerV2) processRequestsPerSub( nativeRequests, subIsActive, true) - }() - go func() { - defer wg.Done() + }) + wg.Go(func() { linkProcessed = lsn.processRequestsPerSubHelper( ctx, subID, @@ -939,7 +935,7 @@ func (lsn *listenerV2) processRequestsPerSub( linkRequests, subIsActive, false) - }() + }) wg.Wait() // combine the native and link processed requests into the processed map maps.Copy(processed, nativeProcessed) @@ -1141,11 +1137,12 @@ func (lsn *listenerV2) simulateFulfillment( if res.run.AllErrors.HasError() { res.err = errors.WithStack(res.run.AllErrors.ToError()) - if strings.Contains(res.err.Error(), "blockhash not found in store") { - res.err = 
stderrors.Join(res.err, errBlockhashNotInStore{}) - } else if isProofVerificationError(res.err.Error()) { - res.err = stderrors.Join(res.err, errProofVerificationFailed{}) - } else if strings.Contains(res.err.Error(), "execution reverted") { + switch { + case strings.Contains(res.err.Error(), "blockhash not found in store"): + res.err = stderrors.Join(res.err, blockhashNotInStoreError{}) + case isProofVerificationError(res.err.Error()): + res.err = stderrors.Join(res.err, proofVerificationFailedError{}) + case strings.Contains(res.err.Error(), "execution reverted"): // Even if the simulation fails, we want to get the // txData for the fulfillRandomWords call, in case // we need to force fulfill. @@ -1169,7 +1166,7 @@ func (lsn *listenerV2) simulateFulfillment( res.reqCommitment = NewRequestCommitment(m["requestCommitment"]) } } - res.err = stderrors.Join(res.err, errPossiblyInsufficientFunds{}) + res.err = stderrors.Join(res.err, possiblyInsufficientFundsError{}) } return res @@ -1216,7 +1213,7 @@ func (lsn *listenerV2) simulateFulfillment( } func (lsn *listenerV2) fromAddresses() []common.Address { - var addresses []common.Address + addresses := make([]common.Address, 0, len(lsn.job.VRFSpec.FromAddresses)) for _, a := range lsn.job.VRFSpec.FromAddresses { addresses = append(addresses, a.Address()) } diff --git a/core/services/vrf/v2/listener_v2_test.go b/core/services/vrf/v2/listener_v2_test.go index 3764dcc5f7d..6f7bcede470 100644 --- a/core/services/vrf/v2/listener_v2_test.go +++ b/core/services/vrf/v2/listener_v2_test.go @@ -70,9 +70,9 @@ func txMetaSubIDs(t *testing.T, vrfVersion vrfcommon.Version, subID *big.Int) (* ) switch vrfVersion { case vrfcommon.V2Plus: - txMetaGlobalSubID = ptr(subID.String()) + txMetaGlobalSubID = new(subID.String()) case vrfcommon.V2: - txMetaSubID = ptr(subID.Uint64()) + txMetaSubID = new(subID.Uint64()) default: t.Errorf("unsupported vrf version: %s", vrfVersion) } diff --git a/core/services/vrf/v2/listener_v2_types.go b/core/services/vrf/v2/listener_v2_types.go index 2c26569006b..e70f9cfe899 100644 --- a/core/services/vrf/v2/listener_v2_types.go +++ b/core/services/vrf/v2/listener_v2_types.go @@ -16,21 +16,21 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/vrf/vrfcommon" ) -type errPossiblyInsufficientFunds struct{} +type possiblyInsufficientFundsError struct{} -func (errPossiblyInsufficientFunds) Error() string { +func (possiblyInsufficientFundsError) Error() string { return "Simulation errored, possibly insufficient funds. 
Request will remain unprocessed until funds are available" } -type errBlockhashNotInStore struct{} +type blockhashNotInStoreError struct{} -func (errBlockhashNotInStore) Error() string { +func (blockhashNotInStoreError) Error() string { return "Blockhash not in store" } -type errProofVerificationFailed struct{} +type proofVerificationFailedError struct{} -func (errProofVerificationFailed) Error() string { +func (proofVerificationFailedError) Error() string { return "Proof verification failed" } @@ -191,7 +191,7 @@ func (lsn *listenerV2) processBatch( "err", err, "proofs", batch.proofs, "commitments", batch.commitments) return } - txMetaSubID = ptr(subID.Uint64()) + txMetaSubID = new(subID.Uint64()) case vrfcommon.V2Plus: payload, err = batchCoordinatorV2PlusABI.Pack("fulfillRandomWords", ToV2PlusProofs(batch.proofs), ToV2PlusCommitments(batch.commitments)) if err != nil { @@ -200,7 +200,7 @@ func (lsn *listenerV2) processBatch( "err", err, "proofs", batch.proofs, "commitments", batch.commitments) return } - txMetaGlobalSubID = ptr(subID.String()) + txMetaGlobalSubID = new(subID.String()) default: panic("batch version should be v2 or v2plus") } diff --git a/core/services/vrf/v2/reverted_txns.go b/core/services/vrf/v2/reverted_txns.go index 6306706932d..d8a8ab20a19 100644 --- a/core/services/vrf/v2/reverted_txns.go +++ b/core/services/vrf/v2/reverted_txns.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "database/sql" + "errors" "fmt" "strconv" "strings" @@ -15,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/pkg/errors" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink-evm/gethwrappers/generated/vrf_coordinator_v2" @@ -51,8 +51,27 @@ type ( var ReqScanTimeRangeInDB = "1 hour" +// skipRevertedTxnFetchFatal reports whether a fetch error should not trigger Fatal. +// We skip on explicit cancellation and when the outer ctx is done (e.g. job stop). +// We do not skip solely on [context.DeadlineExceeded] while the outer ctx is still +// valid: sqlutil's per-query timeout uses an inner context, so that case must still +// Fatal to surface stuck/slow DB. Drivers may return unrelated errors while the outer +// ctx is already canceled — we skip Fatal then but log at Debug in the caller. 
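+//
+// Sketch of the call pattern at each fetch site (mirrors handleRevertedTxns below):
+//
+//	recentSingleTxns, err := lsn.fetchRecentSingleTxns(ctx, lsn.ds, lsn.chainID.Uint64(), pollPeriod)
+//	if err != nil {
+//		if skipRevertedTxnFetchFatal(ctx, err) {
+//			lsn.l.Debugw("Reverted txn fetch aborted", "err", err, "stage", "recent_single")
+//			return
+//		}
+//		lsn.l.Fatalw("Fetch recent txns", "err", err)
+//	}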
+func skipRevertedTxnFetchFatal(ctx context.Context, err error) bool { + if err == nil { + return false + } + if errors.Is(err, context.Canceled) { + return true + } + if ctx.Err() != nil { + return true + } + return false +} + func (lsn *listenerV2) runRevertedTxnsHandler(pollPeriod time.Duration) { - pollPeriod = pollPeriod + time.Second*3 + pollPeriod += time.Second * 3 tick := time.NewTicker(pollPeriod) defer tick.Stop() ctx, cancel := lsn.chStop.NewCtx() @@ -73,14 +92,26 @@ func (lsn *listenerV2) handleRevertedTxns(ctx context.Context, pollPeriod time.D // Fetch recent single and batch txns, that have not been force-fulfilled recentSingleTxns, err := lsn.fetchRecentSingleTxns(ctx, lsn.ds, lsn.chainID.Uint64(), pollPeriod) if err != nil { + if skipRevertedTxnFetchFatal(ctx, err) { + lsn.l.Debugw("Reverted txn fetch aborted", "err", err, "stage", "recent_single") + return + } lsn.l.Fatalw("Fetch recent txns", "err", err) } recentBatchTxns, err := lsn.fetchRecentBatchTxns(ctx, lsn.ds, lsn.chainID.Uint64(), pollPeriod) if err != nil { + if skipRevertedTxnFetchFatal(ctx, err) { + lsn.l.Debugw("Reverted txn fetch aborted", "err", err, "stage", "recent_batch") + return + } lsn.l.Fatalw("Fetch recent batch txns", "err", err) } recentForceFulfillmentTxns, err := lsn.fetchRevertedForceFulfilmentTxns(ctx, lsn.ds, lsn.chainID.Uint64(), pollPeriod) if err != nil { + if skipRevertedTxnFetchFatal(ctx, err) { + lsn.l.Debugw("Reverted txn fetch aborted", "err", err, "stage", "force_fulfillment") + return + } lsn.l.Fatalw("Fetch recent reverted force-fulfillment txns", "err", err) } recentTxns := make([]TxnReceiptDB, 0) @@ -155,9 +186,9 @@ func (lsn *listenerV2) fetchRecentSingleTxns(ctx context.Context, before := time.Now() err := ds.SelectContext(ctx, &recentReceipts, sqlQuery, chainID) - lsn.postSqlLog(ctx, before, pollPeriod, "FetchRecentSingleTxns") + lsn.postSQLLog(ctx, before, pollPeriod, "FetchRecentSingleTxns") if err != nil && !errors.Is(err, sql.ErrNoRows) { - return nil, errors.Wrap(err, "Error fetching recent non-force-fulfilled txns") + return nil, fmt.Errorf("error fetching recent non-force-fulfilled txns: %w", err) } recentReceipts = unique(recentReceipts) @@ -217,9 +248,9 @@ func (lsn *listenerV2) fetchRecentBatchTxns(ctx context.Context, before := time.Now() err := ds.SelectContext(ctx, &recentReceipts, sqlQuery, chainID) - lsn.postSqlLog(ctx, before, pollPeriod, "FetchRecentBatchTxns") + lsn.postSQLLog(ctx, before, pollPeriod, "FetchRecentBatchTxns") if err != nil && !errors.Is(err, sql.ErrNoRows) { - return nil, errors.Wrap(err, "Error fetching recent non-force-fulfilled txns") + return nil, fmt.Errorf("error fetching recent non-force-fulfilled txns: %w", err) } recentReceipts = unique(recentReceipts) @@ -270,9 +301,9 @@ func (lsn *listenerV2) fetchRevertedForceFulfilmentTxns(ctx context.Context, before := time.Now() err := ds.SelectContext(ctx, &recentReceipts, sqlQuery, chainID) - lsn.postSqlLog(ctx, before, pollPeriod, "FetchRevertedForceFulfilmentTxns") + lsn.postSQLLog(ctx, before, pollPeriod, "FetchRevertedForceFulfilmentTxns") if err != nil && !errors.Is(err, sql.ErrNoRows) { - return nil, errors.Wrap(err, "Error fetching recent reverted force-fulfilled txns") + return nil, fmt.Errorf("error fetching recent reverted force-fulfilled txns: %w", err) } sqlQueryAll := fmt.Sprintf(` @@ -299,9 +330,9 @@ func (lsn *listenerV2) fetchRevertedForceFulfilmentTxns(ctx context.Context, var allReceipts []TxnReceiptDB before = time.Now() err = ds.SelectContext(ctx, &allReceipts, 
sqlQueryAll, chainID) - lsn.postSqlLog(ctx, before, pollPeriod, "Fetch all ForceFulfilment Txns") + lsn.postSQLLog(ctx, before, pollPeriod, "Fetch all ForceFulfilment Txns") if err != nil && !errors.Is(err, sql.ErrNoRows) { - return nil, errors.Wrap(err, "Error fetching all recent force-fulfilled txns") + return nil, fmt.Errorf("error fetching all recent force-fulfilled txns: %w", err) } recentReceipts = UniqueByReqID(recentReceipts, allReceipts) @@ -379,9 +410,9 @@ func UniqueByReqID(revertedForceTxns []TxnReceiptDB, allForceTxns []TxnReceiptDB return res } -// postSqlLog logs about context cancellation and timing after a query returns. +// postSQLLog logs about context cancellation and timing after a query returns. // Queries which use their full timeout log critical level. More than 50% log error, and 10% warn. -func (lsn *listenerV2) postSqlLog(ctx context.Context, begin time.Time, pollPeriod time.Duration, queryName string) { +func (lsn *listenerV2) postSQLLog(ctx context.Context, begin time.Time, pollPeriod time.Duration, queryName string) { elapsed := time.Since(begin) if ctx.Err() != nil { lsn.l.Debugw("SQL context canceled", "ms", elapsed.Milliseconds(), "err", ctx.Err(), "sql", queryName) @@ -483,7 +514,7 @@ func (lsn *listenerV2) filterSingleRevertedTxn(ctx context.Context, ethClient := lsn.chain.Client() tx, err := ethClient.TransactionByHash(ctx, txnReceiptDB.TxHash) if err != nil { - return nil, errors.Wrap(err, "get_txn_by_hash") + return nil, fmt.Errorf("get_txn_by_hash: %w", err) } // Simulate txn to get revert error @@ -554,7 +585,7 @@ func (lsn *listenerV2) filterBatchRevertedTxn(ctx context.Context, } unpackedInputs, err := batchCoordinatorV2ABI.Methods["fulfillRandomWords"].Inputs.Unpack(txnReceiptDB.EncodedPayload[4:]) if err != nil { - return nil, errors.Wrap(err, "cannot_unpack_batch_txn") + return nil, fmt.Errorf("cannot_unpack_batch_txn: %w", err) } proofs := abi.ConvertType(unpackedInputs[0], new([]vrf_coordinator_v2.VRFProof)).(*[]vrf_coordinator_v2.VRFProof) reqCommitments := abi.ConvertType(unpackedInputs[1], new([]vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment)).(*[]vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment) @@ -654,7 +685,7 @@ func (lsn *listenerV2) enqueueForceFulfillmentForRevertedTxn( fromAddresses := lsn.fromAddresses() fromAddress, err := lsn.gethks.GetRoundRobinAddress(ctx, lsn.chainID, fromAddresses...) 
if err != nil { - return txmgr.Tx{}, errors.Wrap(err, "failed_to_get_vrf_listener_from_address") + return txmgr.Tx{}, fmt.Errorf("failed_to_get_vrf_listener_from_address: %w", err) } // fulfill the request through the VRF owner @@ -669,7 +700,7 @@ func (lsn *listenerV2) enqueueForceFulfillmentForRevertedTxn( txData, err := vrfOwnerABI.Pack("fulfillRandomWords", proof, reqCommitment) if err != nil { - return txmgr.Tx{}, errors.Wrap(err, "abi pack VRFOwner.fulfillRandomWords") + return txmgr.Tx{}, fmt.Errorf("abi pack VRFOwner.fulfillRandomWords: %w", err) } vrfOwnerCoordinator, _ := lsn.vrfOwner.GetVRFCoordinator(nil) lsn.l.Infow("RevertedTxnForceFulfilment EstimatingGas", @@ -683,7 +714,7 @@ func (lsn *listenerV2) enqueueForceFulfillmentForRevertedTxn( Data: txData, }) if err != nil { - return txmgr.Tx{}, errors.Wrap(err, "failed to estimate gas on VRFOwner.fulfillRandomWords") + return txmgr.Tx{}, fmt.Errorf("failed to estimate gas on VRFOwner.fulfillRandomWords: %w", err) } estimateGasLimit = uint64(1.4 * float64(estimateGasLimit)) diff --git a/core/services/vrf/v2/reverted_txns_test.go b/core/services/vrf/v2/reverted_txns_test.go new file mode 100644 index 00000000000..7eca4e0506b --- /dev/null +++ b/core/services/vrf/v2/reverted_txns_test.go @@ -0,0 +1,108 @@ +package v2 + +import ( + "context" + stderrors "errors" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestSkipRevertedTxnFetchFatal(t *testing.T) { + t.Parallel() + + ctxAlive := context.Background() + ctxCanceled, cancel := context.WithCancel(context.Background()) + cancel() + + dbErr := stderrors.New("connection reset") + + tests := []struct { + name string + ctxFn func() context.Context + err error + want bool + }{ + { + name: "nil error", + ctxFn: func() context.Context { + return ctxAlive + }, + err: nil, + want: false, + }, + { + name: "context.Canceled on alive ctx", + ctxFn: func() context.Context { + return ctxAlive + }, + err: context.Canceled, + want: true, + }, + { + name: "wrapped context.Canceled", + ctxFn: func() context.Context { + return ctxAlive + }, + err: errors.Wrap(context.Canceled, "pq"), + want: true, + }, + { + name: "context.DeadlineExceeded on alive ctx inner query timeout", + ctxFn: func() context.Context { + return ctxAlive + }, + err: context.DeadlineExceeded, + want: false, + }, + { + name: "wrapped DeadlineExceeded on alive ctx", + ctxFn: func() context.Context { + return ctxAlive + }, + err: errors.Wrap(context.DeadlineExceeded, "timeout"), + want: false, + }, + { + name: "DeadlineExceeded while outer ctx canceled", + ctxFn: func() context.Context { + return ctxCanceled + }, + err: context.DeadlineExceeded, + want: true, + }, + { + name: "generic DB error on alive ctx", + ctxFn: func() context.Context { + return ctxAlive + }, + err: dbErr, + want: false, + }, + { + name: "generic DB error while outer ctx canceled", + ctxFn: func() context.Context { + return ctxCanceled + }, + err: dbErr, + want: true, + }, + { + name: "context.Canceled while outer ctx also canceled", + ctxFn: func() context.Context { + return ctxCanceled + }, + err: context.Canceled, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got := skipRevertedTxnFetchFatal(tt.ctxFn(), tt.err) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/deployment/ccip/changeset/solana_v0_1_1/artifact_versions.go b/deployment/ccip/changeset/solana_v0_1_1/artifact_versions.go index ccfa1318c9d..fb7304da29e 100644 --- 
a/deployment/ccip/changeset/solana_v0_1_1/artifact_versions.go +++ b/deployment/ccip/changeset/solana_v0_1_1/artifact_versions.go @@ -2,6 +2,8 @@ package solana // Mapping between Semver Tags and commit SHA from chainlink-ccip repository for Solana Contracts. // Source tags (GitHub releases): +// - solana-v1.6.2 +// - solana-v1.6.1 // - solana-v1.6.0 // - solana-v0.1.2 // - solana-v0.1.1-cctp @@ -11,6 +13,7 @@ package solana // Public version constants. const ( // Versions for Chainlink CCIP Solana contracts + VersionSolanaV1_6_2 = "solana-v1.6.2" VersionSolanaV1_6_1 = "solana-v1.6.1" VersionSolanaV1_6_0 = "solana-v1.6.0" VersionSolanaV0_1_2 = "solana-v0.1.2" @@ -21,6 +24,7 @@ const ( // VersionToShortCommitSHA maps a version tag to its corresponding short commit SHA. var VersionToShortCommitSHA = map[string]string{ + VersionSolanaV1_6_2: "9546a59bd0a3", VersionSolanaV1_6_1: "cb23ec38649f", VersionSolanaV1_6_0: "d0d81df31957", VersionSolanaV0_1_2: "b96a80a69ad2", @@ -30,6 +34,7 @@ var VersionToShortCommitSHA = map[string]string{ } var VersionToFullCommitSHA = map[string]string{ + VersionSolanaV1_6_2: "9546a59bd0a3cee4ddc8ae4042da533e62225b78", VersionSolanaV1_6_1: "cb23ec38649f9d23aabd0350e30d3d649ebc2174", VersionSolanaV1_6_0: "d0d81df3195728091cad1b0569a2980201a92e97", VersionSolanaV0_1_2: "b96a80a69ad2696c48d645d0cf7807fd02a212c8", diff --git a/deployment/ccip/shared/stateview/state.go b/deployment/ccip/shared/stateview/state.go index 08cc82c16e7..cdfcfa8588c 100644 --- a/deployment/ccip/shared/stateview/state.go +++ b/deployment/ccip/shared/stateview/state.go @@ -31,6 +31,7 @@ import ( cldf_evm "github.com/smartcontractkit/chainlink-deployments-framework/chain/evm" cldf_chain_utils "github.com/smartcontractkit/chainlink-deployments-framework/chain/utils" + "github.com/smartcontractkit/chainlink-deployments-framework/datastore" cldf "github.com/smartcontractkit/chainlink-deployments-framework/deployment" "github.com/smartcontractkit/chainlink-evm/gethwrappers/generated/link_token_interface" @@ -96,7 +97,6 @@ import ( factoryBurnMintERC20v1_6_2 "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_6_2/factory_burn_mint_erc20" usdc_token_pool_v1_6_2 "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_6_2/usdc_token_pool" "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_6_3/fee_quoter" - "github.com/smartcontractkit/chainlink-deployments-framework/datastore" capabilities_registry "github.com/smartcontractkit/chainlink-evm/gethwrappers/keystone/generated/capabilities_registry_1_1_0" "github.com/smartcontractkit/chainlink-evm/gethwrappers/shared/generated/1_5_0/burn_mint_erc20_pausable_freezable_transparent" "github.com/smartcontractkit/chainlink-evm/gethwrappers/shared/generated/1_5_0/burn_mint_erc20_transparent" @@ -1504,7 +1504,8 @@ func LoadChainState(ctx context.Context, chain cldf_evm.Chain, addresses map[str state.ERC677Tokens[ccipshared.TokenSymbol(symbol)] = tok state.ABIByAddress[address] = erc677.ERC677ABI // legacy addresses below are commented out to avoid loading them by default, to be uncommented for migrations - case cldf.NewTypeAndVersion(ccipshared.OnRamp, deployment.Version1_5_0).String(): + case cldf.NewTypeAndVersion(ccipshared.OnRamp, deployment.Version1_5_0).String(), + cldf.NewTypeAndVersion(ccipshared.EVM2EVMOnRamp, deployment.Version1_5_0).String(): if !config.loadLegacyContracts { continue } @@ -1521,7 +1522,8 @@ func LoadChainState(ctx context.Context, chain cldf_evm.Chain, addresses 
map[str } state.EVM2EVMOnRamp[sCfg.DestChainSelector] = onRampC state.ABIByAddress[address] = evm_2_evm_onramp.EVM2EVMOnRampABI - case cldf.NewTypeAndVersion(ccipshared.OffRamp, deployment.Version1_5_0).String(): + case cldf.NewTypeAndVersion(ccipshared.OffRamp, deployment.Version1_5_0).String(), + cldf.NewTypeAndVersion(ccipshared.EVM2EVMOffRamp, deployment.Version1_5_0).String(): if !config.loadLegacyContracts { continue } diff --git a/deployment/ccip/shared/stateview/state_test.go b/deployment/ccip/shared/stateview/state_test.go index 17c9d7e1669..358ad70c088 100644 --- a/deployment/ccip/shared/stateview/state_test.go +++ b/deployment/ccip/shared/stateview/state_test.go @@ -2,6 +2,7 @@ package stateview_test import ( "crypto/ecdsa" + "math/big" "testing" "time" @@ -9,7 +10,11 @@ import ( chain_selectors "github.com/smartcontractkit/chain-selectors" "github.com/stretchr/testify/require" + "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_5_0/commit_store" + "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_5_0/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_5_0/evm_2_evm_onramp" "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_6_0/ccip_home" + onrampv16 "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_6_0/onramp" "github.com/smartcontractkit/chainlink-common/pkg/logger" capabilities_registry "github.com/smartcontractkit/chainlink-evm/gethwrappers/keystone/generated/capabilities_registry_1_1_0" "github.com/smartcontractkit/chainlink-evm/pkg/utils" @@ -44,6 +49,246 @@ func TestLoadChainState_MultipleFeeQuoters(t *testing.T) { require.Equal(t, deployment.Version1_2_0, *state.FeeQuoterVersion, "expected latest fee quoter version to be selected") } +func TestLoadChainState_LegacyV15EVM2EVMDatastoreKeys(t *testing.T) { + t.Parallel() + + srcSel := chain_selectors.TEST_90000001.Selector + dstSel := chain_selectors.TEST_90000002.Selector + + e, err := environment.New(t.Context(), + environment.WithEVMSimulated(t, []uint64{srcSel, dstSel}), + environment.WithLogger(logger.Test(t)), + ) + require.NoError(t, err) + + chain := e.BlockChains.EVMChains()[srcSel] + + _, tx, onRamp, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp( + chain.DeployerKey, chain.Client, + evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: common.HexToAddress("0x1"), + ChainSelector: chain.Selector, + DestChainSelector: dstSel, + DefaultTxGasLimit: 10, + MaxNopFeesJuels: big.NewInt(10), + PrevOnRamp: common.Address{}, + RmnProxy: common.HexToAddress("0x2"), + TokenAdminRegistry: common.HexToAddress("0x3"), + }, + evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: common.HexToAddress("0x4"), + MaxNumberOfTokensPerMsg: 0, + DestGasOverhead: 0, + DestGasPerPayloadByte: 0, + DestDataAvailabilityOverheadGas: 0, + DestGasPerDataAvailabilityByte: 0, + DestDataAvailabilityMultiplierBps: 0, + PriceRegistry: common.HexToAddress("0x5"), + MaxDataBytes: 0, + MaxPerMsgGasLimit: 0, + DefaultTokenFeeUSDCents: 0, + DefaultTokenDestGasOverhead: 0, + EnforceOutOfOrder: false, + }, + evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: big.NewInt(100), + Rate: big.NewInt(10), + }, + []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{}, + []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{}, + []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{}, + ) + _, err = cldf.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + _, tx, cs, err := 
commit_store.DeployCommitStore( + chain.DeployerKey, chain.Client, commit_store.CommitStoreStaticConfig{ + ChainSelector: dstSel, + SourceChainSelector: srcSel, + OnRamp: common.HexToAddress("0x4"), + RmnProxy: common.HexToAddress("0x1"), + }) + _, err = cldf.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + offRampStatic := evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{ + ChainSelector: dstSel, + SourceChainSelector: srcSel, + RmnProxy: common.HexToAddress("0x1"), + CommitStore: cs.Address(), + TokenAdminRegistry: common.HexToAddress("0x3"), + OnRamp: common.HexToAddress("0x4"), + } + rl := evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: big.NewInt(100), + Rate: big.NewInt(10), + } + _, tx, offRamp, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp( + chain.DeployerKey, chain.Client, offRampStatic, rl) + _, err = cldf.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + evm2evmAddrs := map[string]cldf.TypeAndVersion{ + onRamp.Address().Hex(): cldf.NewTypeAndVersion(shared.EVM2EVMOnRamp, deployment.Version1_5_0), + offRamp.Address().Hex(): cldf.NewTypeAndVersion(shared.EVM2EVMOffRamp, deployment.Version1_5_0), + } + + legacyDisabled, err := stateview.LoadChainState(t.Context(), chain, evm2evmAddrs) + require.NoError(t, err) + require.Nil(t, legacyDisabled.EVM2EVMOnRamp) + require.Nil(t, legacyDisabled.EVM2EVMOffRamp) + + lcs, err := stateview.LoadChainState(t.Context(), chain, evm2evmAddrs, stateview.WithLoadLegacyContracts(true)) + require.NoError(t, err) + require.NotNil(t, lcs.EVM2EVMOnRamp[dstSel]) + require.Equal(t, onRamp.Address(), lcs.EVM2EVMOnRamp[dstSel].Address()) + require.NotNil(t, lcs.EVM2EVMOffRamp[srcSel]) + require.Equal(t, offRamp.Address(), lcs.EVM2EVMOffRamp[srcSel].Address()) + require.Equal(t, evm_2_evm_onramp.EVM2EVMOnRampABI, lcs.ABIByAddress[onRamp.Address().Hex()]) + require.Equal(t, evm_2_evm_offramp.EVM2EVMOffRampABI, lcs.ABIByAddress[offRamp.Address().Hex()]) + + legacyNamesAddrs := map[string]cldf.TypeAndVersion{ + onRamp.Address().Hex(): cldf.NewTypeAndVersion(shared.OnRamp, deployment.Version1_5_0), + offRamp.Address().Hex(): cldf.NewTypeAndVersion(shared.OffRamp, deployment.Version1_5_0), + } + byLegacyKeys, err := stateview.LoadChainState(t.Context(), chain, legacyNamesAddrs, stateview.WithLoadLegacyContracts(true)) + require.NoError(t, err) + require.Equal(t, onRamp.Address(), byLegacyKeys.EVM2EVMOnRamp[dstSel].Address()) + require.Equal(t, offRamp.Address(), byLegacyKeys.EVM2EVMOffRamp[srcSel].Address()) +} + +// TestLoadChainState_LegacyEVM2EVMAndV16OnRampCoexist verifies that a chain can list both v1.5 EVM2EVM ramps +// (loaded only with WithLoadLegacyContracts) and a v1.6 OnRamp in the address map without either clobbering the other. 
+func TestLoadChainState_LegacyEVM2EVMAndV16OnRampCoexist(t *testing.T) { + t.Parallel() + + srcSel := chain_selectors.TEST_90000001.Selector + dstSel := chain_selectors.TEST_90000002.Selector + + e, err := environment.New(t.Context(), + environment.WithEVMSimulated(t, []uint64{srcSel, dstSel}), + environment.WithLogger(logger.Test(t)), + ) + require.NoError(t, err) + + chain := e.BlockChains.EVMChains()[srcSel] + + _, tx, evmOnRamp, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp( + chain.DeployerKey, chain.Client, + evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: common.HexToAddress("0x1"), + ChainSelector: chain.Selector, + DestChainSelector: dstSel, + DefaultTxGasLimit: 10, + MaxNopFeesJuels: big.NewInt(10), + PrevOnRamp: common.Address{}, + RmnProxy: common.HexToAddress("0x2"), + TokenAdminRegistry: common.HexToAddress("0x3"), + }, + evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: common.HexToAddress("0x4"), + MaxNumberOfTokensPerMsg: 0, + DestGasOverhead: 0, + DestGasPerPayloadByte: 0, + DestDataAvailabilityOverheadGas: 0, + DestGasPerDataAvailabilityByte: 0, + DestDataAvailabilityMultiplierBps: 0, + PriceRegistry: common.HexToAddress("0x5"), + MaxDataBytes: 0, + MaxPerMsgGasLimit: 0, + DefaultTokenFeeUSDCents: 0, + DefaultTokenDestGasOverhead: 0, + EnforceOutOfOrder: false, + }, + evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: big.NewInt(100), + Rate: big.NewInt(10), + }, + []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{}, + []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{}, + []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{}, + ) + _, err = cldf.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + _, tx, cs, err := commit_store.DeployCommitStore( + chain.DeployerKey, chain.Client, commit_store.CommitStoreStaticConfig{ + ChainSelector: dstSel, + SourceChainSelector: srcSel, + OnRamp: common.HexToAddress("0x4"), + RmnProxy: common.HexToAddress("0x1"), + }) + _, err = cldf.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + offRampStatic := evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{ + ChainSelector: dstSel, + SourceChainSelector: srcSel, + RmnProxy: common.HexToAddress("0x1"), + CommitStore: cs.Address(), + TokenAdminRegistry: common.HexToAddress("0x3"), + OnRamp: common.HexToAddress("0x4"), + } + rl := evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: big.NewInt(100), + Rate: big.NewInt(10), + } + _, tx, evmOffRamp, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp( + chain.DeployerKey, chain.Client, offRampStatic, rl) + _, err = cldf.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + v16Static := onrampv16.OnRampStaticConfig{ + ChainSelector: chain.Selector, + RmnRemote: common.HexToAddress("0xb0"), + NonceManager: common.HexToAddress("0xb1"), + TokenAdminRegistry: common.HexToAddress("0xb2"), + } + v16Dynamic := onrampv16.OnRampDynamicConfig{ + FeeQuoter: common.HexToAddress("0xb3"), + ReentrancyGuardEntered: false, + MessageInterceptor: common.Address{}, + FeeAggregator: common.HexToAddress("0xb4"), + AllowlistAdmin: common.HexToAddress("0xb5"), + } + v16DestArgs := []onrampv16.OnRampDestChainConfigArgs{ + { + DestChainSelector: dstSel, + Router: common.HexToAddress("0xb6"), + AllowlistEnabled: false, + }, + } + v16Addr, tx, _, err := onrampv16.DeployOnRamp(chain.DeployerKey, chain.Client, v16Static, v16Dynamic, v16DestArgs) + _, err = cldf.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + combined := map[string]cldf.TypeAndVersion{ + evmOnRamp.Address().Hex(): 
cldf.NewTypeAndVersion(shared.EVM2EVMOnRamp, deployment.Version1_5_0), + evmOffRamp.Address().Hex(): cldf.NewTypeAndVersion(shared.EVM2EVMOffRamp, deployment.Version1_5_0), + v16Addr.Hex(): cldf.NewTypeAndVersion(shared.OnRamp, deployment.Version1_6_0), + } + + withoutLegacy, err := stateview.LoadChainState(t.Context(), chain, combined) + require.NoError(t, err) + require.Nil(t, withoutLegacy.EVM2EVMOnRamp) + require.Nil(t, withoutLegacy.EVM2EVMOffRamp) + require.NotNil(t, withoutLegacy.OnRamp) + require.Equal(t, v16Addr, withoutLegacy.OnRamp.Address()) + + withLegacy, err := stateview.LoadChainState(t.Context(), chain, combined, stateview.WithLoadLegacyContracts(true)) + require.NoError(t, err) + require.NotNil(t, withLegacy.OnRamp) + require.Equal(t, v16Addr, withLegacy.OnRamp.Address()) + require.NotNil(t, withLegacy.EVM2EVMOnRamp[dstSel]) + require.Equal(t, evmOnRamp.Address(), withLegacy.EVM2EVMOnRamp[dstSel].Address()) + require.NotNil(t, withLegacy.EVM2EVMOffRamp[srcSel]) + require.Equal(t, evmOffRamp.Address(), withLegacy.EVM2EVMOffRamp[srcSel].Address()) +} + func TestSmokeState(t *testing.T) { tenv, _ := testhelpers.NewMemoryEnvironment(t, testhelpers.WithNumOfChains(3)) state, err := stateview.LoadOnchainState(tenv.Env) diff --git a/deployment/ccip/shared/types.go b/deployment/ccip/shared/types.go index 1a080e96e96..ba5be30fee2 100644 --- a/deployment/ccip/shared/types.go +++ b/deployment/ccip/shared/types.go @@ -9,6 +9,9 @@ var ( CommitStore deployment.ContractType = "CommitStore" PriceRegistry deployment.ContractType = "PriceRegistry" RMN deployment.ContractType = "RMN" + // Legacy v1.5 datastore aliases (same bindings as OnRamp/OffRamp @ v1.5.0; matches on-chain typeAndVersion). + EVM2EVMOnRamp deployment.ContractType = "EVM2EVMOnRamp" + EVM2EVMOffRamp deployment.ContractType = "EVM2EVMOffRamp" // Not legacy MockRMN deployment.ContractType = "MockRMN" diff --git a/tools/bin/go_core_deployment_tests b/tools/bin/go_deployment_tests similarity index 68% rename from tools/bin/go_core_deployment_tests rename to tools/bin/go_deployment_tests index 46c420f7f59..5a299db9e2e 100755 --- a/tools/bin/go_core_deployment_tests +++ b/tools/bin/go_deployment_tests @@ -6,6 +6,8 @@ SCRIPT_PATH=`dirname "$0"`; SCRIPT_PATH=`eval "cd \"$SCRIPT_PATH\" && pwd"` OUTPUT_FILE=${OUTPUT_FILE:-"./output.txt"} JUNIT_FILE=${JUNIT_FILE:-"./junit.xml"} GO_TEST_TIMEOUT=${GO_TEST_TIMEOUT:-"15m"} +GO_TEST_SHARD_COUNT=${GO_TEST_SHARD_COUNT:?GO_TEST_SHARD_COUNT must be set} +GO_TEST_SHARD_INDEX=${GO_TEST_SHARD_INDEX:?GO_TEST_SHARD_INDEX must be set} # Adjusting freeport port block size for tests. export CL_RESERVE_PORTS=128 @@ -40,6 +42,30 @@ echo "Using GO_TEST_FLAGS: $GO_TEST_FLAGS" echo "Using RERUN_FLAGS: $RERUN_FLAGS" echo "Using JUNIT_FLAG: $JUNIT_FLAG" +TEST_PACKAGE_OUTPUT=$(go list -f '{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./... | sed '/^$/d') +if [[ $? -ne 0 ]]; then + exit 1 +fi +if [[ -z "$TEST_PACKAGE_OUTPUT" ]]; then + echo "No deployment test packages found." + exit 1 +fi +mapfile -t TEST_PACKAGES <<<"$TEST_PACKAGE_OUTPUT" + +SHARD_PACKAGE_OUTPUT=$(printf '%s\n' "${TEST_PACKAGES[@]}" | go run github.com/smartcontractkit/chainlink/v2/tools/ci-testshard list --shard-count "$GO_TEST_SHARD_COUNT" --shard-index "$GO_TEST_SHARD_INDEX") +if [[ $? -ne 0 ]]; then + exit 1 +fi +if [[ -z "$SHARD_PACKAGE_OUTPUT" ]]; then + echo "No packages assigned to shard ${GO_TEST_SHARD_INDEX}/${GO_TEST_SHARD_COUNT}." 
+ exit 0 +fi +mapfile -t SHARD_PACKAGES <<<"$SHARD_PACKAGE_OUTPUT" + +echo "Using GO_TEST_SHARD_COUNT: $GO_TEST_SHARD_COUNT" +echo "Using GO_TEST_SHARD_INDEX: $GO_TEST_SHARD_INDEX" +echo "Shard package count: ${#SHARD_PACKAGES[@]}" + echo "Test execution results: ---------------------" echo "" @@ -49,11 +75,10 @@ gotestsum \ --format='standard-quiet' \ --hide-summary=skipped \ $RERUN_FLAGS \ - --packages='./...' \ --jsonfile "$OUTPUT_FILE" \ --post-run-command "sh -c \"echo; echo 'Slowest tests'; echo '----------------------------------------------'; echo; gotestsum tool slowest --num 20 --jsonfile ${OUTPUT_FILE}\"" \ "$JUNIT_FLAG" \ - -- $GO_TEST_FLAGS + -- $GO_TEST_FLAGS "${SHARD_PACKAGES[@]}" EXITCODE=${PIPESTATUS[0]} diff --git a/tools/ci-testshard/main.go b/tools/ci-testshard/main.go new file mode 100644 index 00000000000..1541e43d700 --- /dev/null +++ b/tools/ci-testshard/main.go @@ -0,0 +1,173 @@ +package main + +import ( + "bufio" + "errors" + "flag" + "fmt" + "hash/fnv" + "io" + "os" + "strings" +) + +func main() { + if err := run(os.Args[1:], os.Stdin, os.Stdout); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func run(args []string, stdin io.Reader, stdout io.Writer) error { + if len(args) == 0 { + return usageError("expected subcommand: list or verify") + } + + switch args[0] { + case "list": + return runList(args[1:], stdin, stdout) + case "verify": + return runVerify(args[1:], stdin, stdout) + case "-h", "--help", "help": + printUsage(stdout) + return nil + default: + return usageError("unknown subcommand %q", args[0]) + } +} + +func runList(args []string, stdin io.Reader, stdout io.Writer) error { + fs := flag.NewFlagSet("list", flag.ContinueOnError) + fs.SetOutput(io.Discard) + + shardCount := fs.Int("shard-count", 1, "total number of shards") + shardIndex := fs.Int("shard-index", 0, "zero-based shard index") + + if err := fs.Parse(args); err != nil { + return usageError("%v", err) + } + if fs.NArg() != 0 { + return usageError("list takes no positional arguments") + } + + packages, err := readPackages(stdin) + if err != nil { + return err + } + if err := validateShardArgs(*shardCount, *shardIndex); err != nil { + return err + } + + for _, pkg := range packages { + if shardForPackage(pkg, *shardCount) == *shardIndex { + if _, err := fmt.Fprintln(stdout, pkg); err != nil { + return err + } + } + } + + return nil +} + +func runVerify(args []string, stdin io.Reader, stdout io.Writer) error { + fs := flag.NewFlagSet("verify", flag.ContinueOnError) + fs.SetOutput(io.Discard) + + shardCount := fs.Int("shard-count", 1, "total number of shards") + + if err := fs.Parse(args); err != nil { + return usageError("%v", err) + } + if fs.NArg() != 0 { + return usageError("verify takes no positional arguments") + } + + packages, err := readPackages(stdin) + if err != nil { + return err + } + if *shardCount < 1 { + return fmt.Errorf("invalid --shard-count %d: must be >= 1", *shardCount) + } + + shardSizes := make([]int, *shardCount) + seen := make(map[string]int, len(packages)) + for _, pkg := range packages { + shardIndex := shardForPackage(pkg, *shardCount) + shardSizes[shardIndex]++ + seen[pkg]++ + } + + for _, pkg := range packages { + if seen[pkg] != 1 { + return fmt.Errorf("package %q assigned %d times", pkg, seen[pkg]) + } + } + + if _, err := fmt.Fprintf(stdout, "verified %d packages across %d shards\n", len(packages), *shardCount); err != nil { + return err + } + for shardIndex, size := range shardSizes { + if _, err := fmt.Fprintf(stdout, "shard %d: %d 
packages\n", shardIndex, size); err != nil { + return err + } + } + + return nil +} + +func readPackages(r io.Reader) ([]string, error) { + scanner := bufio.NewScanner(r) + packages := make([]string, 0) + seen := make(map[string]struct{}) + + for scanner.Scan() { + pkg := strings.TrimSpace(scanner.Text()) + if pkg == "" { + continue + } + + if _, exists := seen[pkg]; exists { + return nil, fmt.Errorf("duplicate package path %q", pkg) + } + seen[pkg] = struct{}{} + packages = append(packages, pkg) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + if len(packages) == 0 { + return nil, errors.New("no package paths provided on stdin") + } + + return packages, nil +} + +func validateShardArgs(shardCount, shardIndex int) error { + if shardCount < 1 { + return fmt.Errorf("invalid --shard-count %d: must be >= 1", shardCount) + } + if shardIndex < 0 || shardIndex >= shardCount { + return fmt.Errorf("invalid --shard-index %d: must be in [0,%d)", shardIndex, shardCount) + } + return nil +} + +func shardForPackage(pkg string, shardCount int) int { + hasher := fnv.New32a() + _, _ = hasher.Write([]byte(pkg)) // hash.Hash.Write on fnv (Fowler-Noll-Vo) never returns an error + return int(int64(hasher.Sum32()) % int64(shardCount)) +} + +func printUsage(w io.Writer) { + fmt.Fprintln(w, "usage: ci-testshard [flags]") + fmt.Fprintln(w, "") + fmt.Fprintln(w, "Commands:") + fmt.Fprintln(w, " list read newline-delimited package paths from stdin and emit one shard") + fmt.Fprintln(w, " verify read newline-delimited package paths from stdin and verify shard coverage") +} + +func usageError(format string, args ...any) error { + return fmt.Errorf(format, args...) +} diff --git a/tools/ci-testshard/main_test.go b/tools/ci-testshard/main_test.go new file mode 100644 index 00000000000..5f820d85ddb --- /dev/null +++ b/tools/ci-testshard/main_test.go @@ -0,0 +1,271 @@ +package main + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "testing" +) + +func TestReadPackagesRejectsDuplicatePaths(t *testing.T) { + _, err := readPackages(strings.NewReader("pkg/a\npkg/a\n")) + if err == nil || !strings.Contains(err.Error(), `duplicate package path "pkg/a"`) { + t.Fatalf("expected duplicate package error, got %v", err) + } +} + +func TestReadPackagesRejectsEmptyInput(t *testing.T) { + _, err := readPackages(strings.NewReader("\n\n")) + if err == nil || !strings.Contains(err.Error(), "no package paths provided on stdin") { + t.Fatalf("expected empty input error, got %v", err) + } +} + +func TestReadPackagesTrimsWhitespace(t *testing.T) { + packages, err := readPackages(strings.NewReader(" pkg/a \n\tpkg/b\t\n")) + if err != nil { + t.Fatalf("readPackages failed: %v", err) + } + if len(packages) != 2 || packages[0] != "pkg/a" || packages[1] != "pkg/b" { + t.Fatalf("unexpected packages: %#v", packages) + } +} + +func TestReadPackagesIgnoresBlankLinesBetweenPackages(t *testing.T) { + packages, err := readPackages(strings.NewReader("pkg/a\n\n \n\t\npkg/b\n")) + if err != nil { + t.Fatalf("readPackages failed: %v", err) + } + if len(packages) != 2 || packages[0] != "pkg/a" || packages[1] != "pkg/b" { + t.Fatalf("unexpected packages: %#v", packages) + } +} + +func TestReadPackagesRejectsDuplicatePathsAfterTrimming(t *testing.T) { + _, err := readPackages(strings.NewReader("pkg/a\n pkg/a \n")) + if err == nil || !strings.Contains(err.Error(), `duplicate package path "pkg/a"`) { + t.Fatalf("expected duplicate package error after trimming, got %v", err) + } +} + +func TestListReturnsPartitionWithoutOverlap(t 
*testing.T) { + input := "pkg/a\npkg/b\npkg/c\npkg/d\n" + + seen := make(map[string]struct{}) + for shardIndex := 0; shardIndex < 4; shardIndex++ { + packages := runListForTest(t, input, 4, shardIndex) + for _, pkg := range packages { + if _, exists := seen[pkg]; exists { + t.Fatalf("package %s appeared in multiple shards", pkg) + } + seen[pkg] = struct{}{} + } + } + + for _, pkg := range []string{"pkg/a", "pkg/b", "pkg/c", "pkg/d"} { + if _, exists := seen[pkg]; !exists { + t.Fatalf("package %s missing from shard union", pkg) + } + } +} + +func TestListWithSingleShardReturnsEntireInputInOrder(t *testing.T) { + input := "pkg/a\npkg/b\npkg/c\n" + packages := runListForTest(t, input, 1, 0) + want := []string{"pkg/a", "pkg/b", "pkg/c"} + if len(packages) != len(want) { + t.Fatalf("unexpected package count: got %d want %d (%v)", len(packages), len(want), packages) + } + for i := range want { + if packages[i] != want[i] { + t.Fatalf("unexpected package at %d: got %q want %q", i, packages[i], want[i]) + } + } +} + +func TestListProducesDeterministicOutput(t *testing.T) { + input := "pkg/a\npkg/b\npkg/c\npkg/d\npkg/e\n" + first := runListOutputForTest(t, input, 4, 2) + second := runListOutputForTest(t, input, 4, 2) + if first != second { + t.Fatalf("list output changed between runs:\nfirst:\n%s\nsecond:\n%s", first, second) + } +} + +func TestListCanProduceEmptyShard(t *testing.T) { + input := "pkg/a\npkg/b\n" + foundEmpty := false + for shardIndex := 0; shardIndex < 10; shardIndex++ { + if output := runListOutputForTest(t, input, 10, shardIndex); output == "" { + foundEmpty = true + break + } + } + if !foundEmpty { + t.Fatal("expected at least one empty shard for 2 packages across 10 shards") + } +} + +func TestListAndVerifyAgreeOnPartition(t *testing.T) { + inputPackages := []string{ + "pkg/a", + "pkg/b", + "pkg/c", + "pkg/d", + "pkg/e", + "pkg/f", + } + input := strings.Join(inputPackages, "\n") + "\n" + seen := make(map[string]struct{}, len(inputPackages)) + + for shardIndex := 0; shardIndex < 4; shardIndex++ { + for _, pkg := range runListForTest(t, input, 4, shardIndex) { + if _, exists := seen[pkg]; exists { + t.Fatalf("package %s appeared in multiple shards", pkg) + } + seen[pkg] = struct{}{} + } + } + + for _, pkg := range inputPackages { + if _, exists := seen[pkg]; !exists { + t.Fatalf("package %s missing from shard union", pkg) + } + } + + var stdout bytes.Buffer + if err := run([]string{"verify", "--shard-count", "4"}, strings.NewReader(input), &stdout); err != nil { + t.Fatalf("verify failed: %v", err) + } +} + +func TestVerifyAllowsEmptyShard(t *testing.T) { + var stdout bytes.Buffer + err := run([]string{"verify", "--shard-count", "10"}, strings.NewReader("pkg/a\npkg/b\n"), &stdout) + if err != nil { + t.Fatalf("verify failed: %v", err) + } + if !strings.Contains(stdout.String(), "verified 2 packages across 10 shards") { + t.Fatalf("unexpected verify output: %q", stdout.String()) + } +} + +func TestVerifyWithSingleShardCoversEntireInput(t *testing.T) { + var stdout bytes.Buffer + err := run([]string{"verify", "--shard-count", "1"}, strings.NewReader("pkg/a\npkg/b\npkg/c\n"), &stdout) + if err != nil { + t.Fatalf("verify failed: %v", err) + } + + output := stdout.String() + if !strings.Contains(output, "verified 3 packages across 1 shards") { + t.Fatalf("unexpected verify summary: %q", output) + } + if !strings.Contains(output, "shard 0: 3 packages") { + t.Fatalf("unexpected shard coverage: %q", output) + } +} + +func TestVerifyRejectsDuplicatePaths(t *testing.T) { + var stdout 
bytes.Buffer + err := run([]string{"verify", "--shard-count", "2"}, strings.NewReader("pkg/a\npkg/a\n"), &stdout) + if err == nil || !strings.Contains(err.Error(), `duplicate package path "pkg/a"`) { + t.Fatalf("expected duplicate package failure, got %v", err) + } +} + +func TestVerifyRejectsDuplicatePathsAmongOthers(t *testing.T) { + var stdout bytes.Buffer + err := run([]string{"verify", "--shard-count", "2"}, strings.NewReader("pkg/a\npkg/b\npkg/c\npkg/d\npkg/e\npkg/a\n"), &stdout) + if err == nil || !strings.Contains(err.Error(), `duplicate package path "pkg/a"`) { + t.Fatalf("expected duplicate package failure, got %v", err) + } +} + +func TestInvalidShardParamsFail(t *testing.T) { + tests := []struct { + name string + args []string + }{ + {name: "zero-count", args: []string{"list", "--shard-count", "0", "--shard-index", "0"}}, + {name: "negative-index", args: []string{"list", "--shard-count", "2", "--shard-index", "-1"}}, + {name: "index-out-of-range", args: []string{"list", "--shard-count", "2", "--shard-index", "2"}}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := run(tc.args, strings.NewReader("pkg/a\n"), &bytes.Buffer{}) + if err == nil { + t.Fatal("expected error") + } + }) + } +} + +func TestUnknownSubcommandFails(t *testing.T) { + err := run([]string{"wat"}, strings.NewReader("pkg/a\n"), &bytes.Buffer{}) + if err == nil || !strings.Contains(err.Error(), `unknown subcommand "wat"`) { + t.Fatalf("expected unknown subcommand error, got %v", err) + } +} + +func TestExtraPositionalArgsFail(t *testing.T) { + tests := []struct { + name string + args []string + want string + }{ + {name: "list", args: []string{"list", "--shard-count", "2", "--shard-index", "0", "extra"}, want: "list takes no positional arguments"}, + {name: "verify", args: []string{"verify", "--shard-count", "2", "extra"}, want: "verify takes no positional arguments"}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := run(tc.args, strings.NewReader("pkg/a\n"), &bytes.Buffer{}) + if err == nil || !strings.Contains(err.Error(), tc.want) { + t.Fatalf("expected %q, got %v", tc.want, err) + } + }) + } +} + +func TestLargePackageListParses(t *testing.T) { + var builder strings.Builder + for i := 0; i < 500; i++ { + fmt.Fprintf(&builder, "pkg/%03d\n", i) + } + + packages, err := readPackages(strings.NewReader(builder.String())) + if err != nil { + t.Fatalf("readPackages failed: %v", err) + } + if len(packages) != 500 { + t.Fatalf("unexpected package count: got %d want 500", len(packages)) + } + if packages[0] != "pkg/000" || packages[499] != "pkg/499" { + t.Fatalf("unexpected package boundaries: first=%q last=%q", packages[0], packages[499]) + } +} + +func runListForTest(t *testing.T, input string, shardCount, shardIndex int) []string { + t.Helper() + output := runListOutputForTest(t, input, shardCount, shardIndex) + if output == "" { + return nil + } + return strings.Fields(output) +} + +func runListOutputForTest(t *testing.T, input string, shardCount, shardIndex int) string { + t.Helper() + var stdout bytes.Buffer + if err := run( + []string{"list", "--shard-count", strconv.Itoa(shardCount), "--shard-index", strconv.Itoa(shardIndex)}, + strings.NewReader(input), + &stdout, + ); err != nil { + t.Fatalf("list failed for shard %d/%d: %v", shardIndex, shardCount, err) + } + return stdout.String() +}
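A minimal usage sketch (not part of the diff) that drives the tool through its run entrypoint from a sibling file in package main; the package paths are hypothetical:

package main

import (
	"bytes"
	"fmt"
	"strconv"
	"strings"
)

// demoShardPartition feeds a fixed package list through the "list" subcommand
// once per shard and prints each shard's slice. The union of the slices covers
// the input exactly once, which is the invariant the "verify" subcommand checks.
func demoShardPartition() {
	input := "pkg/a\npkg/b\npkg/c\npkg/d\n" // hypothetical package paths
	const shards = 2
	for i := 0; i < shards; i++ {
		var out bytes.Buffer
		args := []string{"list", "--shard-count", strconv.Itoa(shards), "--shard-index", strconv.Itoa(i)}
		if err := run(args, strings.NewReader(input), &out); err != nil {
			panic(err)
		}
		fmt.Printf("shard %d: %v\n", i, strings.Fields(out.String()))
	}
}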