diff --git a/libbitcoinkernel-sys/bitcoin/.github/ci-lint-exec.py b/libbitcoinkernel-sys/bitcoin/.github/ci-lint-exec.py deleted file mode 100755 index 73749acc..00000000 --- a/libbitcoinkernel-sys/bitcoin/.github/ci-lint-exec.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or https://opensource.org/license/mit. - -import os -import shlex -import subprocess -import sys -import time - - -def run(cmd, **kwargs): - print("+ " + shlex.join(cmd), flush=True) - kwargs.setdefault("check", True) - try: - return subprocess.run(cmd, **kwargs) - except Exception as e: - sys.exit(e) - - -def main(): - CONTAINER_NAME = os.environ["CONTAINER_NAME"] - - build_cmd = [ - "docker", "buildx", "build", - f"--tag={CONTAINER_NAME}", - *shlex.split(os.getenv("DOCKER_BUILD_CACHE_ARG", "")), - "--file=./ci/lint_imagefile", - "." - ] - - if run(build_cmd, check=False).returncode != 0: - print("Retry building image tag after failure") - time.sleep(3) - run(build_cmd) - - extra_env = [] - if os.environ["GITHUB_EVENT_NAME"] == "pull_request": - extra_env = ["--env", "LINT_CI_IS_PR=1"] - if os.environ["GITHUB_EVENT_NAME"] != "pull_request" and os.environ["GITHUB_REPOSITORY"] == "bitcoin/bitcoin": - extra_env = ["--env", "LINT_CI_SANITY_CHECK_COMMIT_SIG=1"] - - run([ - "docker", - "run", - "--rm", - *extra_env, - f"--volume={os.getcwd()}:/bitcoin", - CONTAINER_NAME, - ]) - - -if __name__ == "__main__": - main() diff --git a/libbitcoinkernel-sys/bitcoin/.github/ci-test-each-commit-exec.py b/libbitcoinkernel-sys/bitcoin/.github/ci-test-each-commit-exec.py index b81241bc..aed1526b 100755 --- a/libbitcoinkernel-sys/bitcoin/.github/ci-test-each-commit-exec.py +++ b/libbitcoinkernel-sys/bitcoin/.github/ci-test-each-commit-exec.py @@ -10,10 +10,11 @@ def run(cmd, **kwargs): print("+ " + shlex.join(cmd), flush=True) + kwargs.setdefault("check", True) try: - return 
subprocess.run(cmd, check=True, **kwargs) + return subprocess.run(cmd, **kwargs) except Exception as e: - sys.exit(e) + sys.exit(str(e)) def main(): @@ -37,12 +38,16 @@ def main(): "-DAPPEND_CXXFLAGS='-O3 -g2'", "-DAPPEND_CFLAGS='-O3 -g2'", "-DCMAKE_BUILD_TYPE=Debug", - "-DWERROR=ON", + "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON", "--preset=dev-mode", # Tolerate unused member functions in intermediate commits in a pull request "-DCMAKE_CXX_FLAGS=-Wno-error=unused-member-function", ]) - run(["cmake", "--build", build_dir, "-j", str(num_procs)]) + + if run(["cmake", "--build", build_dir, "-j", str(num_procs)], check=False).returncode != 0: + print("Build failure. Verbose build follows.") + run(["cmake", "--build", build_dir, "-j1", "--verbose"]) + run([ "ctest", "--output-on-failure", @@ -57,6 +62,7 @@ def main(): f"./{build_dir}/test/functional/test_runner.py", "-j", str(num_procs * 2), + "--failfast", "--combinedlogslen=99999999", ]) diff --git a/libbitcoinkernel-sys/bitcoin/.github/ci-windows-cross.py b/libbitcoinkernel-sys/bitcoin/.github/ci-windows-cross.py new file mode 100755 index 00000000..13ca3b49 --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/.github/ci-windows-cross.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. 
+ +import argparse +import os +import shlex +import subprocess +import sys +from pathlib import Path + + +def run(cmd, **kwargs): + print("+ " + shlex.join(cmd), flush=True) + kwargs.setdefault("check", True) + try: + return subprocess.run(cmd, **kwargs) + except Exception as e: + sys.exit(str(e)) + + +def print_version(): + bitcoind = Path.cwd() / "bin" / "bitcoind.exe" + run([str(bitcoind), "-version"]) + + +def check_manifests(): + release_dir = Path.cwd() / "bin" + manifest_path = release_dir / "bitcoind.manifest" + + cmd_bitcoind_manifest = [ + "mt.exe", + "-nologo", + f"-inputresource:{release_dir / 'bitcoind.exe'}", + f"-out:{manifest_path}", + ] + run(cmd_bitcoind_manifest) + print(manifest_path.read_text()) + + skipped = { # Skip as they currently do not have manifests + "fuzz.exe", + "bench_bitcoin.exe", + "test_kernel.exe", + } + for entry in release_dir.iterdir(): + if entry.suffix.lower() != ".exe": + continue + if entry.name in skipped: + print(f"Skipping {entry.name} (no manifest present)") + continue + print(f"Checking {entry.name}") + run(["mt.exe", "-nologo", f"-inputresource:{entry}", "-validate_manifest"]) + + +def prepare_tests(): + workspace = Path.cwd() + config_path = workspace / "test" / "config.ini" + rpcauth_path = workspace / "share" / "rpcauth" / "rpcauth.py" + replacements = { + "SRCDIR=": f"SRCDIR={workspace}", + "BUILDDIR=": f"BUILDDIR={workspace}", + "RPCAUTH=": f"RPCAUTH={rpcauth_path}", + } + lines = config_path.read_text().splitlines() + for index, line in enumerate(lines): + for prefix, new_value in replacements.items(): + if line.startswith(prefix): + lines[index] = new_value + break + content = "\n".join(lines) + "\n" + config_path.write_text(content) + print(content) + previous_releases_dir = Path(os.environ["PREVIOUS_RELEASES_DIR"]) + cmd_download_prev_rel = [ + sys.executable, + str(workspace / "test" / "get_previous_releases.py"), + "--target-dir", + str(previous_releases_dir), + ] + run(cmd_download_prev_rel) + 
run([sys.executable, "-m", "pip", "install", "pyzmq"]) + + +def run_functional_tests(): + workspace = Path.cwd() + num_procs = str(os.process_cpu_count()) + test_runner_cmd = [ + sys.executable, + str(workspace / "test" / "functional" / "test_runner.py"), + "--jobs", + num_procs, + "--quiet", + f"--tmpdirprefix={workspace}", + "--combinedlogslen=99999999", + *shlex.split(os.environ.get("TEST_RUNNER_EXTRA", "").strip()), + # feature_unsupported_utxo_db.py fails on Windows because of emojis in the test data directory. + "--exclude", + "feature_unsupported_utxo_db.py", + # See https://github.com/bitcoin/bitcoin/issues/31409. + "--exclude", + "wallet_multiwallet.py", + ] + run(test_runner_cmd) + + # Run feature_unsupported_utxo_db sequentially in ASCII-only tmp dir, + # because it is excluded above due to lack of UTF-8 support in the + # ancient release. + cmd_feature_unsupported_db = [ + sys.executable, + str(workspace / "test" / "functional" / "feature_unsupported_utxo_db.py"), + "--previous-releases", + "--tmpdir", + str(Path(workspace) / "test_feature_unsupported_utxo_db"), + ] + run(cmd_feature_unsupported_db) + + +def run_unit_tests(): + # Can't use ctest here like other jobs as we don't have a CMake build tree. 
+ commands = [ + ["./bin/test_bitcoin-qt.exe"], + # Intentionally run sequentially here, to catch test case failures caused by dirty global state from prior test cases: + ["./bin/test_bitcoin.exe", "-l", "test_suite"], + ["./src/secp256k1/bin/exhaustive_tests.exe"], + ["./src/secp256k1/bin/noverify_tests.exe"], + ["./src/secp256k1/bin/tests.exe"], + ["./src/univalue/object.exe"], + ["./src/univalue/unitester.exe"], + ] + for cmd in commands: + run(cmd) + + +def main(): + parser = argparse.ArgumentParser(description="Utility to run Windows CI steps.") + steps = [ + "print_version", + "check_manifests", + "prepare_tests", + "run_unit_tests", + "run_functional_tests", + ] + parser.add_argument("step", choices=steps, help="CI step to perform.") + args = parser.parse_args() + + os.environ.setdefault( + "PREVIOUS_RELEASES_DIR", + str(Path.cwd() / "previous_releases"), + ) + + if args.step == "print_version": + print_version() + elif args.step == "check_manifests": + check_manifests() + elif args.step == "prepare_tests": + prepare_tests() + elif args.step == "run_unit_tests": + run_unit_tests() + elif args.step == "run_functional_tests": + run_functional_tests() + + +if __name__ == "__main__": + main() diff --git a/libbitcoinkernel-sys/bitcoin/.github/ci-windows.py b/libbitcoinkernel-sys/bitcoin/.github/ci-windows.py new file mode 100755 index 00000000..caa2d52c --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/.github/ci-windows.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python3 +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. 
+ +import argparse +import os +import shlex +import subprocess +import sys +from pathlib import Path + + +def run(cmd, **kwargs): + print("+ " + shlex.join(cmd), flush=True) + kwargs.setdefault("check", True) + try: + return subprocess.run(cmd, **kwargs) + except Exception as e: + sys.exit(str(e)) + + +GENERATE_OPTIONS = { + "standard": [ + "-DBUILD_BENCH=ON", + "-DBUILD_KERNEL_LIB=ON", + "-DBUILD_UTIL_CHAINSTATE=ON", + "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON", + ], + "fuzz": [ + "-DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON", + "-DVCPKG_MANIFEST_FEATURES=wallet", + "-DBUILD_GUI=OFF", + "-DWITH_ZMQ=OFF", + "-DBUILD_FOR_FUZZING=ON", + "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON", + ], +} + + +def generate(ci_type): + command = [ + "cmake", + "-B", + "build", + "-Werror=dev", + "--preset", + "vs2026", + ] + GENERATE_OPTIONS[ci_type] + run(command) + + +def build(): + command = [ + "cmake", + "--build", + "build", + "--config", + "Release", + ] + if run(command + ["-j", str(os.process_cpu_count())], check=False).returncode != 0: + print("Build failure. 
Verbose build follows.") + run(command + ["-j1", "--verbose"]) + + +def check_manifests(ci_type): + if ci_type != "standard": + print(f"Skipping manifest validation for '{ci_type}' ci type.") + return + + release_dir = Path.cwd() / "build" / "bin" / "Release" + manifest_path = release_dir / "bitcoind.manifest" + cmd_bitcoind_manifest = [ + "mt.exe", + "-nologo", + f"-inputresource:{release_dir / 'bitcoind.exe'}", + f"-out:{manifest_path}", + ] + run(cmd_bitcoind_manifest) + print(manifest_path.read_text()) + + skips = { # Skip as they currently do not have manifests + "fuzz.exe", + "bench_bitcoin.exe", + "test_bitcoin-qt.exe", + "test_kernel.exe", + "bitcoin-chainstate.exe", + } + for entry in release_dir.iterdir(): + if entry.suffix.lower() != ".exe": + continue + if entry.name in skips: + print(f"Skipping {entry.name} (no manifest present)") + continue + print(f"Checking {entry.name}") + cmd_check_manifest = [ + "mt.exe", + "-nologo", + f"-inputresource:{entry}", + "-validate_manifest", + ] + run(cmd_check_manifest) + + +def prepare_tests(ci_type): + if ci_type == "standard": + run([sys.executable, "-m", "pip", "install", "pyzmq"]) + elif ci_type == "fuzz": + repo_dir = str(Path.cwd() / "qa-assets") + clone_cmd = [ + "git", + "clone", + "--depth=1", + "https://github.com/bitcoin-core/qa-assets", + repo_dir, + ] + run(clone_cmd) + print("Using qa-assets repo from commit ...") + run(["git", "-C", repo_dir, "log", "-1"]) + + +def run_tests(ci_type): + build_dir = Path.cwd() / "build" + num_procs = str(os.process_cpu_count()) + release_bin = build_dir / "bin" / "Release" + + if ci_type == "standard": + test_envs = { + "BITCOIN_BIN": "bitcoin.exe", + "BITCOIND": "bitcoind.exe", + "BITCOINCLI": "bitcoin-cli.exe", + "BITCOIN_BENCH": "bench_bitcoin.exe", + "BITCOINTX": "bitcoin-tx.exe", + "BITCOINUTIL": "bitcoin-util.exe", + "BITCOINWALLET": "bitcoin-wallet.exe", + "BITCOINCHAINSTATE": "bitcoin-chainstate.exe", + } + for var, exe in test_envs.items(): + os.environ[var] = 
str(release_bin / exe) + + ctest_cmd = [ + "ctest", + "--test-dir", + str(build_dir), + "--output-on-failure", + "--stop-on-failure", + "-j", + num_procs, + "--build-config", + "Release", + ] + run(ctest_cmd) + + test_cmd = [ + sys.executable, + str(build_dir / "test" / "functional" / "test_runner.py"), + "--jobs", + num_procs, + "--quiet", + f"--tmpdirprefix={Path.cwd()}", + "--combinedlogslen=99999999", + *shlex.split(os.environ.get("TEST_RUNNER_EXTRA", "").strip()), + ] + run(test_cmd) + + elif ci_type == "fuzz": + os.environ["BITCOINFUZZ"] = str(release_bin / "fuzz.exe") + fuzz_cmd = [ + sys.executable, + str(build_dir / "test" / "fuzz" / "test_runner.py"), + "--par", + num_procs, + "--loglevel", + "DEBUG", + str(Path.cwd() / "qa-assets" / "fuzz_corpora"), + ] + run(fuzz_cmd) + + +def main(): + parser = argparse.ArgumentParser(description="Utility to run Windows CI steps.") + parser.add_argument("ci_type", choices=GENERATE_OPTIONS, help="CI type to run.") + steps = [ + "generate", + "build", + "check_manifests", + "prepare_tests", + "run_tests", + ] + parser.add_argument("step", choices=steps, help="CI step to perform.") + args = parser.parse_args() + + if args.step == "generate": + generate(args.ci_type) + elif args.step == "build": + build() + elif args.step == "check_manifests": + check_manifests(args.ci_type) + elif args.step == "prepare_tests": + prepare_tests(args.ci_type) + elif args.step == "run_tests": + run_tests(args.ci_type) + + +if __name__ == "__main__": + main() diff --git a/libbitcoinkernel-sys/bitcoin/.github/workflows/ci.yml b/libbitcoinkernel-sys/bitcoin/.github/workflows/ci.yml index f54e0661..ebd0986b 100644 --- a/libbitcoinkernel-sys/bitcoin/.github/workflows/ci.yml +++ b/libbitcoinkernel-sys/bitcoin/.github/workflows/ci.yml @@ -31,7 +31,7 @@ defaults: jobs: runners: name: '[meta] determine runners' - runs-on: ubuntu-latest + runs-on: ubuntu-slim outputs: provider: ${{ steps.runners.outputs.provider }} steps: @@ -56,12 +56,11 @@ jobs: fi 
test-each-commit: - name: 'test max 6 ancestor commits' - runs-on: ubuntu-24.04 + name: 'test ancestor commits' + needs: runners + runs-on: ${{ needs.runners.outputs.provider == 'cirrus' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' || 'ubuntu-24.04' }} if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1 - timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. Assuming a worst case time of 1 hour per commit, this leads to a --max-count=6 below. - env: - MAX_COUNT: 6 # Keep in sync with name above + timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. steps: - name: Determine fetch depth run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV" @@ -72,25 +71,35 @@ jobs: fetch-depth: ${{ env.FETCH_DEPTH }} - name: Determine commit range run: | - # Checkout HEAD~ and find the test base commit - # Checkout HEAD~ because it would be wasteful to rerun tests on the PR - # head commit that are already run by other jobs. + # Checkout HEAD~ and find the test base commit. + # Checkout HEAD~ because it would be wasteful to rerun + # tests on the PR head commit that are already run + # by other jobs. git checkout HEAD~ - # Figure out test base commit by listing ancestors of HEAD, excluding - # ancestors of the most recent merge commit, limiting the list to the - # newest MAX_COUNT ancestors, ordering it from oldest to newest, and - # taking the first one. + # Moreover, pull requests that contain a merge commit + # are generally draft pull requests that merge in other + # pull requests, so only check the relevant commits + # after the last merge commit. A merge commit could + # also be a subtree merge commit, which may be + # worthwhile to check. 
However, it is rare that the + # subtree merge commit is not the top commit (which + # would be skipped anyway by this task, because it is + # run by all other tasks). Also, `git rebase --exec` + # does not work on merge commits, so if this was + # important to check, the logic would have to be + # rewritten. # - # If the branch contains up to MAX_COUNT ancestor commits after the - # most recent merge commit, all of those commits will be tested. If it - # contains more, only the most recent MAX_COUNT commits will be - # tested. + # Figure out test base commit by listing ancestors of + # HEAD, excluding ancestors of the most recent merge + # commit, ordering them from oldest to newest, and + # taking the first one. # - # In the command below, the ^@ suffix is used to refer to all parents - # of the merge commit as described in: + # In the command below, the ^@ suffix is used to refer + # to all parents of the merge commit as described in: # https://git-scm.com/docs/git-rev-parse#_other_rev_parent_shorthand_notations - # and the ^ prefix is used to exclude these parents and all their - # ancestors from the rev-list output as described in: + # and the ^ prefix is used to exclude these parents + # and all their ancestors from the rev-list output + # as described in: # https://git-scm.com/docs/git-rev-list MERGE_BASE=$(git rev-list -n1 --merges HEAD) EXCLUDE_MERGE_BASE_ANCESTORS= @@ -98,7 +107,7 @@ jobs: if test -n "$MERGE_BASE"; then EXCLUDE_MERGE_BASE_ANCESTORS=^${MERGE_BASE}^@ fi - echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD $EXCLUDE_MERGE_BASE_ANCESTORS | head -1)" >> "$GITHUB_ENV" + echo "TEST_BASE=$(git rev-list -n${{ github.event.pull_request.commits }} --reverse HEAD $EXCLUDE_MERGE_BASE_ANCESTORS | head -1)" >> "$GITHUB_ENV" - run: | git fetch origin "${GITHUB_BASE_REF}" git config user.email "ci@example.com" @@ -168,7 +177,7 @@ jobs: run: | # A workaround for "The `brew link` step did not complete successfully" error. 
brew install --quiet python@3 || brew link --overwrite python@3 - brew install --quiet coreutils ninja pkgconf gnu-getopt ccache boost libevent zeromq qt@6 qrencode capnp + brew install --quiet coreutils ninja pkgconf ccache boost libevent zeromq qt@6 qrencode capnp - name: Set Ccache directory run: echo "CCACHE_DIR=${RUNNER_TEMP}/ccache_dir" >> "$GITHUB_ENV" @@ -204,7 +213,7 @@ jobs: windows-native-dll: name: ${{ matrix.job-name }} - runs-on: windows-2022 + runs-on: windows-2025-vs2026 if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} @@ -218,11 +227,9 @@ jobs: job-type: [standard, fuzz] include: - job-type: standard - generate-options: '-DBUILD_BENCH=ON -DBUILD_KERNEL_LIB=ON -DBUILD_UTIL_CHAINSTATE=ON -DWERROR=ON' - job-name: 'Windows native, VS 2022' + job-name: 'Windows native, VS' - job-type: fuzz - generate-options: '-DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON -DVCPKG_MANIFEST_FEATURES="wallet" -DBUILD_GUI=OFF -DWITH_ZMQ=OFF -DBUILD_FOR_FUZZING=ON -DWERROR=ON' - job-name: 'Windows native, fuzz, VS 2022' + job-name: 'Windows native, fuzz, VS' steps: - *ANNOTATION_PR_NUMBER @@ -257,11 +264,17 @@ jobs: # Workaround for libevent, which requires CMake 3.1 but is incompatible with CMake >= 4.0. 
sed -i '1s/^/set(ENV{CMAKE_POLICY_VERSION_MINIMUM} 3.5)\n/' "${VCPKG_INSTALLATION_ROOT}/scripts/ports.cmake" - - name: vcpkg tools cache - uses: actions/cache@v5 + - name: Set VCPKG_ROOT + run: | + echo "VCPKG_ROOT=${VCPKG_INSTALLATION_ROOT}" >> "$GITHUB_ENV" + + - name: Restore vcpkg tools cache + id: vcpkg-tools-cache + uses: actions/cache/restore@v5 with: path: C:/vcpkg/downloads/tools - key: ${{ github.job }}-vcpkg-tools + key: ${{ github.job }}-vcpkg-tools-${{ github.run_id }} + restore-keys: ${{ github.job }}-vcpkg-tools- - name: Restore vcpkg binary cache uses: actions/cache/restore@v4 @@ -272,7 +285,7 @@ jobs: - name: Generate build system run: | - cmake -B build -Werror=dev --preset vs2022 -DCMAKE_TOOLCHAIN_FILE="${VCPKG_INSTALLATION_ROOT}/scripts/buildsystems/vcpkg.cmake" ${{ matrix.generate-options }} + py -3 .github/ci-windows.py ${{ matrix.job-type }} generate - name: Save vcpkg binary cache uses: actions/cache/save@v4 @@ -281,76 +294,36 @@ jobs: path: ~/AppData/Local/vcpkg/archives key: ${{ github.job }}-vcpkg-binary-${{ hashFiles('cmake_version', 'msbuild_version', 'toolset_version', 'vcpkg.json') }} + - name: Save vcpkg tools cache + uses: actions/cache/save@v5 + if: github.event_name != 'pull_request' && github.ref_name == github.event.repository.default_branch && steps.vcpkg-tools-cache.outputs.cache-hit != 'true' + with: + path: C:/vcpkg/downloads/tools + key: ${{ github.job }}-vcpkg-tools-${{ github.run_id }} + - name: Build - working-directory: build run: | - cmake --build . 
-j $NUMBER_OF_PROCESSORS --config Release + py -3 .github/ci-windows.py ${{ matrix.job-type }} build - name: Check executable manifests - if: matrix.job-type == 'standard' - working-directory: build - shell: pwsh -Command "$PSVersionTable; $PSNativeCommandUseErrorActionPreference = $true; $ErrorActionPreference = 'Stop'; & '{0}'" run: | - mt.exe -nologo -inputresource:bin\Release\bitcoind.exe -out:bitcoind.manifest - Get-Content bitcoind.manifest - - Get-ChildItem -Filter "bin\Release\*.exe" | ForEach-Object { - $exeName = $_.Name - - # Skip as they currently do not have manifests - if ($exeName -eq "fuzz.exe" -or $exeName -eq "bench_bitcoin.exe" -or $exeName -eq "test_bitcoin-qt.exe" -or $exeName -eq "test_kernel.exe" -or $exeName -eq "bitcoin-chainstate.exe") { - Write-Host "Skipping $exeName (no manifest present)" - return - } - - Write-Host "Checking $exeName" - & mt.exe -nologo -inputresource:$_.FullName -validate_manifest - } + py -3 .github/ci-windows.py ${{ matrix.job-type }} check_manifests - - name: Run test suite - if: matrix.job-type == 'standard' - working-directory: build + - name: Prepare tests run: | - ctest --output-on-failure --stop-on-failure -j $NUMBER_OF_PROCESSORS -C Release + py -3 .github/ci-windows.py ${{ matrix.job-type }} prepare_tests - - name: Run functional tests - if: matrix.job-type == 'standard' - working-directory: build + - name: Run tests env: - BITCOIN_BIN: '${{ github.workspace }}\build\bin\Release\bitcoin.exe' - BITCOIND: '${{ github.workspace }}\build\bin\Release\bitcoind.exe' - BITCOINCLI: '${{ github.workspace }}\build\bin\Release\bitcoin-cli.exe' - BITCOIN_BENCH: '${{ github.workspace }}\build\bin\Release\bench_bitcoin.exe' - BITCOINTX: '${{ github.workspace }}\build\bin\Release\bitcoin-tx.exe' - BITCOINUTIL: '${{ github.workspace }}\build\bin\Release\bitcoin-util.exe' - BITCOINWALLET: '${{ github.workspace }}\build\bin\Release\bitcoin-wallet.exe' - BITCOINCHAINSTATE: '${{ github.workspace 
}}\build\bin\Release\bitcoin-chainstate.exe' - TEST_RUNNER_EXTRA: ${{ github.event_name != 'pull_request' && '--extended' || '' }} - run: | - py -3 -m pip install pyzmq - py -3 test/functional/test_runner.py --jobs $NUMBER_OF_PROCESSORS --quiet --tmpdirprefix="${RUNNER_TEMP}" --combinedlogslen=99999999 --timeout-factor=${TEST_RUNNER_TIMEOUT_FACTOR} ${TEST_RUNNER_EXTRA} - - - name: Clone corpora - if: matrix.job-type == 'fuzz' + TEST_RUNNER_EXTRA: "--timeout-factor=${{ env.TEST_RUNNER_TIMEOUT_FACTOR }} ${{ case(github.event_name == 'pull_request', '', '--extended') }}" run: | - git clone --depth=1 https://github.com/bitcoin-core/qa-assets "${RUNNER_TEMP}/qa-assets" - cd "${RUNNER_TEMP}/qa-assets" - echo "Using qa-assets repo from commit ..." - git log -1 - - - name: Run fuzz tests - if: matrix.job-type == 'fuzz' - working-directory: build - env: - BITCOINFUZZ: '${{ github.workspace }}\build\bin\Release\fuzz.exe' - run: | - py -3 test/fuzz/test_runner.py --par $NUMBER_OF_PROCESSORS --loglevel DEBUG "${RUNNER_TEMP}/qa-assets/fuzz_corpora" + py -3 .github/ci-windows.py ${{ matrix.job-type }} run_tests record-frozen-commit: # Record frozen commit, so that the native tests on cross-builds can run on # the exact same commit id of the build. 
name: '[meta] record frozen commit' - runs-on: ubuntu-latest + runs-on: ubuntu-slim outputs: commit: ${{ steps.record-commit.outputs.commit }} steps: @@ -452,67 +425,25 @@ jobs: name: ${{ matrix.artifact-name }}-${{ github.run_id }} - name: Run bitcoind.exe - run: ./bin/bitcoind.exe -version + run: py -3 .github/ci-windows-cross.py print_version - *SET_UP_VS - name: Check executable manifests - shell: pwsh -Command "$PSVersionTable; $PSNativeCommandUseErrorActionPreference = $true; $ErrorActionPreference = 'Stop'; & '{0}'" - run: | - mt.exe -nologo -inputresource:bin\bitcoind.exe -out:bitcoind.manifest - Get-Content bitcoind.manifest - - Get-ChildItem -Filter "bin\*.exe" | ForEach-Object { - $exeName = $_.Name - - # Skip as they currently do not have manifests - if ($exeName -eq "fuzz.exe" -or $exeName -eq "bench_bitcoin.exe" -or $exeName -eq "test_kernel.exe") { - Write-Host "Skipping $exeName (no manifest present)" - return - } - - Write-Host "Checking $exeName" - & mt.exe -nologo -inputresource:$_.FullName -validate_manifest - } + run: py -3 .github/ci-windows-cross.py check_manifests - name: Run unit tests - # Can't use ctest here like other jobs as we don't have a CMake build tree. - run: | - ./bin/test_bitcoin-qt.exe - ./bin/test_bitcoin.exe -l test_suite # Intentionally run sequentially here, to catch test case failures caused by dirty global state from prior test cases. 
- ./src/secp256k1/bin/exhaustive_tests.exe - ./src/secp256k1/bin/noverify_tests.exe - ./src/secp256k1/bin/tests.exe - ./src/univalue/object.exe - ./src/univalue/unitester.exe - - - name: Adjust paths in test/config.ini - shell: pwsh - run: | - (Get-Content "test/config.ini") -replace '(?<=^SRCDIR=).*', '${{ github.workspace }}' -replace '(?<=^BUILDDIR=).*', '${{ github.workspace }}' -replace '(?<=^RPCAUTH=).*', '${{ github.workspace }}/share/rpcauth/rpcauth.py' | Set-Content "test/config.ini" - Get-Content "test/config.ini" + run: py -3 .github/ci-windows-cross.py run_unit_tests - - name: Set previous release directory + - name: Prepare Windows test environment run: | - echo "PREVIOUS_RELEASES_DIR=${{ runner.temp }}/previous_releases" >> "$GITHUB_ENV" - - - name: Get previous releases - run: ./test/get_previous_releases.py --target-dir $PREVIOUS_RELEASES_DIR + py -3 .github/ci-windows-cross.py prepare_tests - name: Run functional tests env: - TEST_RUNNER_EXTRA: ${{ github.event_name != 'pull_request' && '--extended' || '' }} + TEST_RUNNER_EXTRA: "--timeout-factor=${{ env.TEST_RUNNER_TIMEOUT_FACTOR }} ${{ case(github.event_name == 'pull_request', '', '--extended') }}" run: | - py -3 -m pip install pyzmq - py -3 test/functional/test_runner.py --jobs $NUMBER_OF_PROCESSORS --quiet --tmpdirprefix="$RUNNER_TEMP" --combinedlogslen=99999999 --timeout-factor=$TEST_RUNNER_TIMEOUT_FACTOR $TEST_RUNNER_EXTRA \ - `# feature_unsupported_utxo_db.py fails on Windows because of emojis in the test data directory.` \ - --exclude feature_unsupported_utxo_db.py \ - `# See https://github.com/bitcoin/bitcoin/issues/31409.` \ - --exclude wallet_multiwallet.py - # Run feature_unsupported_utxo_db sequentially in ASCII-only tmp dir, - # because it is excluded above due to lack of UTF-8 support in the - # ancient release. 
- py -3 test/functional/feature_unsupported_utxo_db.py --previous-releases --tmpdir="${RUNNER_TEMP}/test_feature_unsupported_utxo_db" + py -3 .github/ci-windows-cross.py run_functional_tests ci-matrix: name: ${{ matrix.name }} @@ -578,12 +509,6 @@ jobs: timeout-minutes: 240 file-env: './ci/test/00_setup_env_native_fuzz.sh' - - name: 'Valgrind, fuzz' - cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' - fallback-runner: 'ubuntu-24.04' - timeout-minutes: 240 - file-env: './ci/test/00_setup_env_native_fuzz_with_valgrind.sh' - - name: 'previous releases' cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' fallback-runner: 'ubuntu-24.04' @@ -681,4 +606,6 @@ jobs: cache-provider: ${{ needs.runners.outputs.provider }} - name: CI script - run: python .github/ci-lint-exec.py + run: | + git worktree add ../lint-worktree HEAD + ../lint-worktree/ci/lint.py diff --git a/libbitcoinkernel-sys/bitcoin/.tx/config b/libbitcoinkernel-sys/bitcoin/.tx/config index a7550d08..41051254 100644 --- a/libbitcoinkernel-sys/bitcoin/.tx/config +++ b/libbitcoinkernel-sys/bitcoin/.tx/config @@ -1,7 +1,7 @@ [main] host = https://www.transifex.com -[o:bitcoin:p:bitcoin:r:qt-translation-030x] +[o:bitcoin:p:bitcoin:r:qt-translation-031x] file_filter = src/qt/locale/bitcoin_.xlf source_file = src/qt/locale/bitcoin_en.xlf source_lang = en diff --git a/libbitcoinkernel-sys/bitcoin/CMakeLists.txt b/libbitcoinkernel-sys/bitcoin/CMakeLists.txt index 1b1df77b..f0f101b2 100644 --- a/libbitcoinkernel-sys/bitcoin/CMakeLists.txt +++ b/libbitcoinkernel-sys/bitcoin/CMakeLists.txt @@ -119,7 +119,7 @@ endif() cmake_dependent_option(BUILD_WALLET_TOOL "Build bitcoin-wallet tool." ${BUILD_TESTS} "ENABLE_WALLET" OFF) option(REDUCE_EXPORTS "Attempt to reduce exported symbols in the resulting executables." OFF) -option(WERROR "Treat compiler warnings as errors." OFF) +option(CMAKE_COMPILE_WARNING_AS_ERROR "Treat compiler warnings as errors." 
OFF) option(WITH_CCACHE "Attempt to use ccache for compiling." ON) option(WITH_ZMQ "Enable ZMQ notifications." OFF) @@ -127,6 +127,8 @@ if(WITH_ZMQ) find_package(ZeroMQ 4.0.0 MODULE REQUIRED) endif() +option(WITH_EMBEDDED_ASMAP "Embed default ASMap data." ON) + option(WITH_USDT "Enable tracepoints for Userspace, Statically Defined Tracing." OFF) if(WITH_USDT) find_package(USDT MODULE REQUIRED) @@ -216,6 +218,7 @@ if(BUILD_FOR_FUZZING) set(BUILD_GUI OFF) set(ENABLE_EXTERNAL_SIGNER OFF) set(WITH_ZMQ OFF) + set(WITH_EMBEDDED_ASMAP OFF) set(BUILD_TESTS OFF) set(BUILD_GUI_TESTS OFF) set(BUILD_BENCH OFF) @@ -483,14 +486,8 @@ configure_file(contrib/filter-lcov.py filter-lcov.py USE_SOURCE_PERMISSIONS COPY # Don't allow extended (non-ASCII) symbols in identifiers. This is easier for code review. try_append_cxx_flags("-fno-extended-identifiers" TARGET core_interface SKIP_LINK) -# Avoiding the `-ffile-prefix-map` compiler option because it implies -# `-fcoverage-prefix-map` on Clang or `-fprofile-prefix-map` on GCC, -# which can cause issues with coverage builds, particularly when using -# Clang in the OSS-Fuzz environment due to its use of other options -# and a third party script, or with GCC. -try_append_cxx_flags("-fdebug-prefix-map=A=B" TARGET core_interface SKIP_LINK - IF_CHECK_PASSED "-fdebug-prefix-map=${PROJECT_SOURCE_DIR}/src=." -) +# Set `-fmacro-prefix-map`, so that source file names are expanded without the +# src prefix. try_append_cxx_flags("-fmacro-prefix-map=A=B" TARGET core_interface SKIP_LINK IF_CHECK_PASSED "-fmacro-prefix-map=${PROJECT_SOURCE_DIR}/src=." 
) @@ -574,19 +571,6 @@ if(REDUCE_EXPORTS) try_append_linker_flag("-Wl,-no_exported_symbols" VAR CMAKE_EXE_LINKER_FLAGS) endif() -if(WERROR) - if(MSVC) - set(werror_flag "/WX") - else() - set(werror_flag "-Werror") - endif() - try_append_cxx_flags(${werror_flag} TARGET core_interface SKIP_LINK RESULT_VAR compiler_supports_werror) - if(NOT compiler_supports_werror) - message(FATAL_ERROR "WERROR set but ${werror_flag} is not usable.") - endif() - unset(werror_flag) -endif() - # Prefer Unix-style package components over frameworks on macOS. # This improves compatibility with Python version managers. set(Python3_FIND_FRAMEWORK LAST CACHE STRING "") @@ -673,6 +657,7 @@ else() set(ipc_status OFF) endif() message(" IPC ................................. ${ipc_status}") +message(" Embedded ASMap ...................... ${WITH_EMBEDDED_ASMAP}") message(" USDT tracing ........................ ${WITH_USDT}") message(" QR code (GUI) ....................... ${WITH_QRENCODE}") message(" DBus (GUI) .......................... ${WITH_DBUS}") @@ -691,7 +676,7 @@ message("Cross compiling ....................... ${cross_status}") message("C++ compiler .......................... ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}, ${CMAKE_CXX_COMPILER}") include(FlagsSummary) flags_summary() -message("Treat compiler warnings as errors ..... ${WERROR}") +message("Treat compiler warnings as errors ..... ${CMAKE_COMPILE_WARNING_AS_ERROR}") message("Use ccache for compiling .............. 
${WITH_CCACHE}") message("\n") if(configure_warnings) diff --git a/libbitcoinkernel-sys/bitcoin/CMakePresets.json b/libbitcoinkernel-sys/bitcoin/CMakePresets.json index ae9d06da..a7103e29 100644 --- a/libbitcoinkernel-sys/bitcoin/CMakePresets.json +++ b/libbitcoinkernel-sys/bitcoin/CMakePresets.json @@ -2,14 +2,14 @@ "version": 3, "configurePresets": [ { - "name": "vs2022", - "displayName": "Build using 'Visual Studio 17 2022' generator and 'x64-windows' triplet", + "name": "vs2026", + "displayName": "Build using 'Visual Studio 18 2026' generator and 'x64-windows' triplet", "condition": { "type": "equals", "lhs": "${hostSystemName}", "rhs": "Windows" }, - "generator": "Visual Studio 17 2022", + "generator": "Visual Studio 18 2026", "architecture": "x64", "toolchainFile": "$env{VCPKG_ROOT}\\scripts\\buildsystems\\vcpkg.cmake", "cacheVariables": { @@ -19,14 +19,14 @@ } }, { - "name": "vs2022-static", - "displayName": "Build using 'Visual Studio 17 2022' generator and 'x64-windows-static' triplet", + "name": "vs2026-static", + "displayName": "Build using 'Visual Studio 18 2026' generator and 'x64-windows-static' triplet", "condition": { "type": "equals", "lhs": "${hostSystemName}", "rhs": "Windows" }, - "generator": "Visual Studio 17 2022", + "generator": "Visual Studio 18 2026", "architecture": "x64", "toolchainFile": "$env{VCPKG_ROOT}\\scripts\\buildsystems\\vcpkg.cmake", "cacheVariables": { diff --git a/libbitcoinkernel-sys/bitcoin/CONTRIBUTING.md b/libbitcoinkernel-sys/bitcoin/CONTRIBUTING.md index 7f42f0be..83a0919a 100644 --- a/libbitcoinkernel-sys/bitcoin/CONTRIBUTING.md +++ b/libbitcoinkernel-sys/bitcoin/CONTRIBUTING.md @@ -78,6 +78,13 @@ The codebase is maintained using the "contributor workflow" where everyone without exception contributes patch proposals using "pull requests" (PRs). This facilitates social contribution, easy testing and peer review. +Pull request authors must fully and confidently understand their own changes +and must have tested them. 
Contributors should mention which tests cover their +changes, or include the manual steps they used to confirm the change. +Contributors are expected to be prepared to clearly motivate and explain their +changes. If there is doubt, the pull request may be closed. +Please refer to the [peer review](#peer-review) section below for more details. + To contribute a patch, the workflow is as follows: 1. Fork repository ([only for the first time](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo)) @@ -338,6 +345,11 @@ reviewers that the changes warrant the review effort, and if reviewers are "Concept NACK'ing" the PR, the author may need to present arguments and/or do research backing their suggested changes. +Moreover, if there is reasonable doubt that the pull request author does not +fully understand the changes they are submitting themselves, or if it becomes +clear that they have not tested the changes on a basic level themselves, the +pull request may be closed immediately. + #### Conceptual Review A review can be a conceptual review, where the reviewer leaves a comment diff --git a/libbitcoinkernel-sys/bitcoin/ci/lint.py b/libbitcoinkernel-sys/bitcoin/ci/lint.py new file mode 100755 index 00000000..6c7e0e8a --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/ci/lint.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. 
+ +import os +import shlex +import subprocess +import sys +import time +from pathlib import Path + + +def run(cmd, **kwargs): + print("+ " + shlex.join(cmd), flush=True) + kwargs.setdefault("check", True) + try: + return subprocess.run(cmd, **kwargs) + except Exception as e: + sys.exit(str(e)) + + +def get_worktree_mounts(repo_root): + git_path = repo_root / ".git" + if not git_path.is_file(): + return [] + content = git_path.read_text().strip() + if not content.startswith("gitdir: "): + return [] + gitdir = (repo_root / content.removeprefix("gitdir: ")).resolve() + main_gitdir = gitdir.parent.parent + return [ + f"--volume={gitdir}:{gitdir}", + f"--volume={main_gitdir}:{main_gitdir}", + ] + + +def main(): + repo_root = Path(__file__).resolve().parent.parent + is_ci = os.environ.get("GITHUB_ACTIONS") == "true" + container = "bitcoin-linter" + + build_cmd = [ + "docker", + "buildx", + "build", + "--platform=linux", + f"--tag={container}", + *shlex.split(os.environ.get("DOCKER_BUILD_CACHE_ARG", "")), + f"--file={repo_root}/ci/lint_imagefile", + str(repo_root), + ] + if run(build_cmd, check=False).returncode != 0: + if is_ci: + print("Retry building image after failure") + time.sleep(3) + run(build_cmd) + + extra_env = [] + if is_ci: + if os.environ.get("GITHUB_EVENT_NAME") == "pull_request": + extra_env = ["--env", "LINT_CI_IS_PR=1"] + elif os.environ.get("GITHUB_REPOSITORY") == "bitcoin/bitcoin": + extra_env = ["--env", "LINT_CI_SANITY_CHECK_COMMIT_SIG=1"] + + run( + [ + "docker", + "run", + "--rm", + *extra_env, + f"--volume={repo_root}:/bitcoin", + *get_worktree_mounts(repo_root), + *([] if is_ci else ["-it"]), + container, + "./ci/lint/06_script.sh", + *sys.argv[1:], + ] + ) + + +if __name__ == "__main__": + main() diff --git a/libbitcoinkernel-sys/bitcoin/ci/lint/01_install.sh b/libbitcoinkernel-sys/bitcoin/ci/lint/01_install.sh index 0cf9fbb4..44563d42 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/lint/01_install.sh +++ 
b/libbitcoinkernel-sys/bitcoin/ci/lint/01_install.sh @@ -8,7 +8,8 @@ export LC_ALL=C set -o errexit -o pipefail -o xtrace -export CI_RETRY_EXE="/ci_retry --" +export DEBIAN_FRONTEND=noninteractive +export CI_RETRY_EXE="/ci_retry" pushd "/" diff --git a/libbitcoinkernel-sys/bitcoin/ci/lint/06_script.sh b/libbitcoinkernel-sys/bitcoin/ci/lint/06_script.sh index 82e499c2..1b36fada 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/lint/06_script.sh +++ b/libbitcoinkernel-sys/bitcoin/ci/lint/06_script.sh @@ -8,6 +8,12 @@ export LC_ALL=C set -o errexit -o pipefail -o xtrace +# Fixes permission issues when there is a container UID/GID mismatch with the owner +# of the mounted bitcoin src dir. +git config --global --add safe.directory /bitcoin + +export PATH="/python_build/bin:${PATH}" + if [ -n "${LINT_CI_IS_PR}" ]; then export COMMIT_RANGE="HEAD~..HEAD" if [ "$(git rev-list -1 HEAD)" != "$(git rev-list -1 --merges HEAD)" ]; then @@ -16,7 +22,7 @@ if [ -n "${LINT_CI_IS_PR}" ]; then fi fi -RUST_BACKTRACE=1 cargo run --manifest-path "./test/lint/test_runner/Cargo.toml" +RUST_BACKTRACE=1 cargo run --manifest-path "./test/lint/test_runner/Cargo.toml" -- "$@" if [ "${LINT_CI_SANITY_CHECK_COMMIT_SIG}" = "1" ] ; then # Sanity check only the last few commits to get notified of missing sigs, diff --git a/libbitcoinkernel-sys/bitcoin/ci/lint/container-entrypoint.sh b/libbitcoinkernel-sys/bitcoin/ci/lint/container-entrypoint.sh deleted file mode 100755 index 84e60be2..00000000 --- a/libbitcoinkernel-sys/bitcoin/ci/lint/container-entrypoint.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or https://opensource.org/license/mit/. - -export LC_ALL=C - -# Fixes permission issues when there is a container UID/GID mismatch with the owner -# of the mounted bitcoin src dir. 
-git config --global --add safe.directory /bitcoin - -export PATH="/python_build/bin:${PATH}" - -if [ -z "$1" ]; then - bash -ic "./ci/lint/06_script.sh" -else - exec "$@" -fi diff --git a/libbitcoinkernel-sys/bitcoin/ci/lint_imagefile b/libbitcoinkernel-sys/bitcoin/ci/lint_imagefile index b3238076..77e9688c 100644 --- a/libbitcoinkernel-sys/bitcoin/ci/lint_imagefile +++ b/libbitcoinkernel-sys/bitcoin/ci/lint_imagefile @@ -6,19 +6,11 @@ FROM mirror.gcr.io/ubuntu:24.04 -ENV DEBIAN_FRONTEND=noninteractive -ENV LC_ALL=C.UTF-8 - COPY ./ci/retry/retry /ci_retry COPY ./.python-version /.python-version -COPY ./ci/lint/container-entrypoint.sh /entrypoint.sh COPY ./ci/lint/01_install.sh /install.sh RUN /install.sh && \ - echo 'alias lint="./ci/lint/06_script.sh"' >> ~/.bashrc && \ - chmod 755 /entrypoint.sh && \ rm -rf /var/lib/apt/lists/* - WORKDIR /bitcoin -ENTRYPOINT ["/entrypoint.sh"] diff --git a/libbitcoinkernel-sys/bitcoin/ci/retry/retry b/libbitcoinkernel-sys/bitcoin/ci/retry/retry index 3c06519d..37021f00 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/retry/retry +++ b/libbitcoinkernel-sys/bitcoin/ci/retry/retry @@ -1,8 +1,5 @@ #!/usr/bin/env bash -GETOPT_BIN=$IN_GETOPT_BIN -GETOPT_BIN=${GETOPT_BIN:-getopt} - __sleep_amount() { if [ -n "$constant_sleep" ]; then sleep_time=$constant_sleep @@ -71,93 +68,10 @@ retry() exit $return_code } -# If we're being sourced, don't worry about such things -if [ "$BASH_SOURCE" == "$0" ]; then - # Prints the help text - help() - { - local retry=$(basename $0) - cat < /dev/null - if [[ $? -ne 4 ]]; then - echo "I’m sorry, 'getopt --test' failed in this environment. Please load GNU getopt." - exit 1 - fi - - OPTIONS=vt:s:m:x:f: - LONGOPTIONS=verbose,tries:,sleep:,min:,max:,fail: - - PARSED=$($GETOPT_BIN --options="$OPTIONS" --longoptions="$LONGOPTIONS" --name "$0" -- "$@") - if [[ $? -ne 0 ]]; then - # e.g. $? 
== 1 - # then getopt has complained about wrong arguments to stdout - exit 2 - fi - # read getopt’s output this way to handle the quoting right: - eval set -- "$PARSED" - max_tries=10 min_sleep=0.3 max_sleep=60.0 constant_sleep= fail_script= - # now enjoy the options in order and nicely split until we see -- - while true; do - case "$1" in - -v|--verbose) - VERBOSE=true - shift - ;; - -t|--tries) - max_tries="$2" - shift 2 - ;; - -s|--sleep) - constant_sleep="$2" - shift 2 - ;; - -m|--min) - min_sleep="$2" - shift 2 - ;; - -x|--max) - max_sleep="$2" - shift 2 - ;; - -f|--fail) - fail_script="$2" - shift 2 - ;; - --) - shift - break - ;; - *) - echo "Programming error" - exit 3 - ;; - esac - done - retry "$max_tries" "$min_sleep" "$max_sleep" "$constant_sleep" "$fail_script" "$@" - -fi diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env.sh b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env.sh index 890bccde..8d9a16ab 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env.sh +++ b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env.sh @@ -48,7 +48,7 @@ export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false} export BOOST_TEST_RANDOM=${BOOST_TEST_RANDOM:-1} # See man 7 debconf export DEBIAN_FRONTEND=noninteractive -export CCACHE_MAXSIZE=${CCACHE_MAXSIZE:-500M} +export CCACHE_MAXSIZE=${CCACHE_MAXSIZE:-2G} export CCACHE_TEMPDIR=${CCACHE_TEMPDIR:-/tmp/.ccache-temp} export CCACHE_COMPRESS=${CCACHE_COMPRESS:-1} # The cache dir. @@ -62,7 +62,7 @@ export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/prev_releas export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential pkgconf curl ca-certificates ccache python3-dev rsync git procps bison e2fsprogs cmake ninja-build} export GOAL=${GOAL:-install} export DIR_QA_ASSETS=${DIR_QA_ASSETS:-${BASE_SCRATCH_DIR}/qa-assets} -export CI_RETRY_EXE=${CI_RETRY_EXE:-"retry --"} +export CI_RETRY_EXE=${CI_RETRY_EXE:-"retry"} # The --platform argument used with `docker build` and `docker run`. 
export CI_IMAGE_PLATFORM=${CI_IMAGE_PLATFORM:-"linux"} # Force linux, but use native arch by default diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_mac_cross.sh b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_mac_cross.sh index 5b33c127..63d89fb9 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_mac_cross.sh +++ b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_mac_cross.sh @@ -12,8 +12,8 @@ export CONTAINER_NAME=ci_macos_cross export CI_IMAGE_NAME_TAG="mirror.gcr.io/debian:trixie" # Check that https://packages.debian.org/trixie/clang (version 19, similar to guix) can cross-compile export HOST=arm64-apple-darwin export PACKAGES="clang lld llvm zip" -export XCODE_VERSION=15.0 -export XCODE_BUILD_ID=15A240d +export XCODE_VERSION=26.1.1 +export XCODE_BUILD_ID=17B100 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export GOAL="deploy" diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_mac_cross_intel.sh b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_mac_cross_intel.sh index 28eb7a09..4b07e14b 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_mac_cross_intel.sh +++ b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_mac_cross_intel.sh @@ -12,8 +12,8 @@ export CONTAINER_NAME=ci_macos_cross_intel export CI_IMAGE_NAME_TAG="mirror.gcr.io/debian:trixie" # Check that https://packages.debian.org/trixie/clang (version 19, similar to guix) can cross-compile export HOST=x86_64-apple-darwin export PACKAGES="clang lld llvm zip" -export XCODE_VERSION=15.0 -export XCODE_BUILD_ID=15A240d +export XCODE_VERSION=26.1.1 +export XCODE_BUILD_ID=17B100 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export GOAL="deploy" diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_alpine_musl.sh b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_alpine_musl.sh index ce7614ab..e42cb4a3 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_alpine_musl.sh +++ 
b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_alpine_musl.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_alpine_musl -export CI_IMAGE_NAME_TAG="mirror.gcr.io/alpine:3.22" +export CI_IMAGE_NAME_TAG="mirror.gcr.io/alpine:3.23" export CI_BASE_PACKAGES="build-base musl-dev pkgconf curl ccache make ninja git python3-dev py3-pip which patch xz procps rsync util-linux bison e2fsprogs cmake dash linux-headers" export PIP_PACKAGES="--break-system-packages pyzmq pycapnp" export DEP_OPTS="DEBUG=1" diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_chimera_lto.sh b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_chimera_lto.sh new file mode 100755 index 00000000..fb5a343d --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_chimera_lto.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit. + +# This config is experimental, and may not be reproducible, given +# the use of a rolling distro. 
+ +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_native_chimera_musl +export CI_IMAGE_NAME_TAG="mirror.gcr.io/chimeralinux/chimera" +export CI_BASE_PACKAGES="ccache chimerautils chimerautils-extra clang cmake curl e2fsprogs git gmake gtar linux-headers ninja pkgconf procps python-devel python-pip rsync util-linux util-linux-lscpu xz" +export PIP_PACKAGES="--break-system-packages pyzmq pycapnp" +export DEP_OPTS="build_CC=clang build_CXX=clang++ build_TAR=gtar AR=llvm-ar CC=clang CXX=clang++ NM=llvm-nm RANLIB=llvm-ranlib STRIP=llvm-strip NO_QT=1" +export GOAL="install" +export BITCOIN_CONFIG="\ + --preset=dev-mode \ + -DBUILD_GUI=OFF \ + -DREDUCE_EXPORTS=ON \ + -DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON \ +" diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_nowallet.sh b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_nowallet.sh index 28446a70..0b21ab22 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_nowallet.sh +++ b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_nowallet.sh @@ -17,4 +17,5 @@ export BITCOIN_CONFIG="\ --preset=dev-mode \ -DREDUCE_EXPORTS=ON \ -DENABLE_WALLET=OFF \ + -DWITH_EMBEDDED_ASMAP=OFF \ " diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_previous_releases.sh b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_previous_releases.sh index a44a51c3..d6af52c4 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_previous_releases.sh +++ b/libbitcoinkernel-sys/bitcoin/ci/test/00_setup_env_native_previous_releases.sh @@ -15,13 +15,14 @@ export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude fe export GOAL="install" export CI_LIMIT_STACK_SIZE=1 export DOWNLOAD_PREVIOUS_RELEASES="true" +# Use -Werror as the CMake version does not support CMAKE_COMPILE_WARNING_AS_ERROR export BITCOIN_CONFIG="\ --preset=dev-mode \ -DREDUCE_EXPORTS=ON \ -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_C_FLAGS='-funsigned-char' \ + 
-DCMAKE_C_FLAGS='-funsigned-char -Werror' \ -DCMAKE_C_FLAGS_DEBUG='-g2 -O2' \ - -DCMAKE_CXX_FLAGS='-funsigned-char' \ + -DCMAKE_CXX_FLAGS='-funsigned-char -Werror' \ -DCMAKE_CXX_FLAGS_DEBUG='-g2 -O2' \ -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE' \ " diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/01_base_install.sh b/libbitcoinkernel-sys/bitcoin/ci/test/01_base_install.sh index 0ac56208..ea1ce07b 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/01_base_install.sh +++ b/libbitcoinkernel-sys/bitcoin/ci/test/01_base_install.sh @@ -32,7 +32,7 @@ if [ -n "${APT_LLVM_V}" ]; then ) fi -if [[ $CI_IMAGE_NAME_TAG == *alpine* ]]; then +if command -v apk >/dev/null 2>&1; then ${CI_RETRY_EXE} apk update # shellcheck disable=SC2086 ${CI_RETRY_EXE} apk add --no-cache $CI_BASE_PACKAGES $PACKAGES diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/01_iwyu.patch b/libbitcoinkernel-sys/bitcoin/ci/test/01_iwyu.patch index e7d75f4e..209e133f 100644 --- a/libbitcoinkernel-sys/bitcoin/ci/test/01_iwyu.patch +++ b/libbitcoinkernel-sys/bitcoin/ci/test/01_iwyu.patch @@ -535,7 +535,7 @@ See: https://github.com/include-what-you-use/include-what-you-use/blob/clang_21/ { "", kPrivate, "", kPublic }, // Exports guaranteed by the C standard - { "", kPublic, "", kPublic }, -+ { "", kPrivate, "", kPrivate }, ++ { "", kPublic, "", kPublic }, }; const IncludeMapEntry stdlib_c_include_map[] = { diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/02_run_container.py b/libbitcoinkernel-sys/bitcoin/ci/test/02_run_container.py index 64077c4b..dce3730a 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/02_run_container.py +++ b/libbitcoinkernel-sys/bitcoin/ci/test/02_run_container.py @@ -17,7 +17,7 @@ def run(cmd, **kwargs): try: return subprocess.run(cmd, **kwargs) except Exception as e: - sys.exit(e) + sys.exit(str(e)) def main(): @@ -58,14 +58,6 @@ def main(): # Modify PATH to prepend the retry script, needed for CI_RETRY_EXE os.environ["PATH"] = 
f"{os.environ['BASE_ROOT_DIR']}/ci/retry:{os.environ['PATH']}" - # GNU getopt is required for the CI_RETRY_EXE script - if os.getenv("CI_OS_NAME") == "macos": - prefix = run( - ["brew", "--prefix", "gnu-getopt"], - stdout=subprocess.PIPE, - text=True, - ).stdout.strip() - os.environ["IN_GETOPT_BIN"] = f"{prefix}/bin/getopt" else: CI_IMAGE_LABEL = "bitcoin-ci-test" diff --git a/libbitcoinkernel-sys/bitcoin/ci/test/03_test_script.sh b/libbitcoinkernel-sys/bitcoin/ci/test/03_test_script.sh index 050d9e6a..d81bd0b9 100755 --- a/libbitcoinkernel-sys/bitcoin/ci/test/03_test_script.sh +++ b/libbitcoinkernel-sys/bitcoin/ci/test/03_test_script.sh @@ -44,32 +44,6 @@ echo "=== BEGIN env ===" env echo "=== END env ===" -# Don't apply patches in the iwyu job, because it relies on the `git diff` -# command to detect IWYU errors. It is safe to skip this patch in the iwyu job -# because it doesn't run a UB detector. -if [[ "${RUN_IWYU}" != true ]]; then - # compact->outputs[i].file_size is uninitialized memory, so reading it is UB. - # The statistic bytes_written is only used for logging, which is disabled in - # CI, so as a temporary minimal fix to work around UB and CI failures, leave - # bytes_written unmodified. - # See https://github.com/bitcoin/bitcoin/pull/28359#issuecomment-1698694748 - # Tee patch to stdout to make it clear CI is testing modified code. - tee >(patch -p1) <<'EOF' ---- a/src/leveldb/db/db_impl.cc -+++ b/src/leveldb/db/db_impl.cc -@@ -1028,9 +1028,6 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { - stats.bytes_read += compact->compaction->input(which, i)->file_size; - } - } -- for (size_t i = 0; i < compact->outputs.size(); i++) { -- stats.bytes_written += compact->outputs[i].file_size; -- } - - mutex_.Lock(); - stats_[compact->compaction->level() + 1].Add(stats); -EOF -fi - if [ "$RUN_FUZZ_TESTS" = "true" ]; then export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_corpora/ if [ ! 
-d "$DIR_FUZZ_IN" ]; then @@ -107,16 +81,12 @@ if [ "$DOWNLOAD_PREVIOUS_RELEASES" = "true" ]; then test/get_previous_releases.py --target-dir "$PREVIOUS_RELEASES_DIR" fi -BITCOIN_CONFIG_ALL="-DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON" +BITCOIN_CONFIG_ALL="-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON" if [ -z "$NO_DEPENDS" ]; then BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} -DCMAKE_TOOLCHAIN_FILE=$DEPENDS_DIR/$HOST/toolchain.cmake" fi -if [ -z "$NO_WERROR" ]; then - BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} -DWERROR=ON" -fi ccache --zero-stats -PRINT_CCACHE_STATISTICS="ccache --version | head -n 1 && ccache --show-stats" # Folder where the build is done. BASE_BUILD_DIR=${BASE_BUILD_DIR:-$BASE_SCRATCH_DIR/build-$HOST} @@ -147,12 +117,10 @@ cmake --build "${BASE_BUILD_DIR}" "$MAKEJOBS" --target $GOAL || ( false ) -bash -c "${PRINT_CCACHE_STATISTICS}" -if [ "$CI" = "true" ]; then - hit_rate=$(ccache -s | grep "Hits:" | head -1 | sed 's/.*(\(.*\)%).*/\1/') - if [ "${hit_rate%.*}" -lt 75 ]; then - echo "::notice title=low ccache hitrate::Ccache hit-rate in $CONTAINER_NAME was $hit_rate%" - fi +ccache --version | head -n 1 && ccache --show-stats --verbose +hit_rate=$(ccache --show-stats | grep "Hits:" | head -1 | sed 's/.*(\(.*\)%).*/\1/') +if [ "${hit_rate%.*}" -lt 75 ]; then + echo "::notice title=low ccache hitrate::Ccache hit-rate in $CONTAINER_NAME was $hit_rate%" fi du -sh "${DEPENDS_DIR}"/*/ du -sh "${PREVIOUS_RELEASES_DIR}" @@ -215,7 +183,7 @@ fi if [[ "${RUN_IWYU}" == true ]]; then # TODO: Consider enforcing IWYU across the entire codebase. 
- FILES_WITH_ENFORCED_IWYU="/src/((crypto|index|kernel)/.*\\.cpp|node/blockstorage.cpp|node/utxo_snapshot.cpp|core_io.cpp|signet.cpp)" + FILES_WITH_ENFORCED_IWYU="/src/((crypto|index|kernel|primitives|univalue/(lib|test)|zmq)/.*\\.cpp|node/blockstorage\\.cpp|node/utxo_snapshot\\.cpp|core_io\\.cpp|signet\\.cpp)" jq --arg patterns "$FILES_WITH_ENFORCED_IWYU" 'map(select(.file | test($patterns)))' "${BASE_BUILD_DIR}/compile_commands.json" > "${BASE_BUILD_DIR}/compile_commands_iwyu_errors.json" jq --arg patterns "$FILES_WITH_ENFORCED_IWYU" 'map(select(.file | test($patterns) | not))' "${BASE_BUILD_DIR}/compile_commands.json" > "${BASE_BUILD_DIR}/compile_commands_iwyu_warnings.json" @@ -227,9 +195,10 @@ if [[ "${RUN_IWYU}" == true ]]; then -p "${BASE_BUILD_DIR}" "${MAKEJOBS}" \ -- -Xiwyu --cxx17ns -Xiwyu --mapping_file="${BASE_ROOT_DIR}/contrib/devtools/iwyu/bitcoin.core.imp" \ -Xiwyu --max_line_length=160 \ + -Xiwyu --check_also="*/primitives/*.h" \ 2>&1 | tee /tmp/iwyu_ci.out python3 "/include-what-you-use/fix_includes.py" --nosafe_headers < /tmp/iwyu_ci.out - git diff -U0 | ./contrib/devtools/clang-format-diff.py -binary="clang-format-${TIDY_LLVM_V}" -p1 -i -v + git diff -U1 | ./contrib/devtools/clang-format-diff.py -binary="clang-format-${TIDY_LLVM_V}" -p1 -i -v } run_iwyu "compile_commands_iwyu_errors.json" diff --git a/libbitcoinkernel-sys/bitcoin/cmake/module/AddBoostIfNeeded.cmake b/libbitcoinkernel-sys/bitcoin/cmake/module/AddBoostIfNeeded.cmake index b3f24800..80a6d2e8 100644 --- a/libbitcoinkernel-sys/bitcoin/cmake/module/AddBoostIfNeeded.cmake +++ b/libbitcoinkernel-sys/bitcoin/cmake/module/AddBoostIfNeeded.cmake @@ -32,12 +32,14 @@ function(add_boost_if_needed) find_package(Boost 1.74.0 REQUIRED CONFIG) mark_as_advanced(Boost_INCLUDE_DIR boost_headers_DIR) # Workaround for a bug in NetBSD pkgsrc. - # See: https://github.com/NetBSD/pkgsrc/issues/167. + # See https://gnats.netbsd.org/59856. 
if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD") get_filename_component(_boost_include_dir "${boost_headers_DIR}/../../../include/" ABSOLUTE) - set_target_properties(Boost::headers PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES ${_boost_include_dir} - ) + if(_boost_include_dir MATCHES "^/usr/pkg/") + set_target_properties(Boost::headers PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES ${_boost_include_dir} + ) + endif() unset(_boost_include_dir) endif() set_target_properties(Boost::headers PROPERTIES IMPORTED_GLOBAL TRUE) diff --git a/libbitcoinkernel-sys/bitcoin/cmake/script/GenerateHeaderFromRaw.cmake b/libbitcoinkernel-sys/bitcoin/cmake/script/GenerateHeaderFromRaw.cmake index d373d1c4..2c40e419 100644 --- a/libbitcoinkernel-sys/bitcoin/cmake/script/GenerateHeaderFromRaw.cmake +++ b/libbitcoinkernel-sys/bitcoin/cmake/script/GenerateHeaderFromRaw.cmake @@ -18,6 +18,5 @@ ${formatted_bytes} }; inline constexpr std::span ${raw_source_basename}{detail_${raw_source_basename}_raw}; -} -") +}") file(WRITE ${HEADER_PATH} "${header_content}") diff --git a/libbitcoinkernel-sys/bitcoin/cmake/secp256k1.cmake b/libbitcoinkernel-sys/bitcoin/cmake/secp256k1.cmake index 5302f516..c82f361a 100644 --- a/libbitcoinkernel-sys/bitcoin/cmake/secp256k1.cmake +++ b/libbitcoinkernel-sys/bitcoin/cmake/secp256k1.cmake @@ -9,6 +9,11 @@ function(add_secp256k1 subdir) message("Configuring secp256k1 subtree...") set(BUILD_SHARED_LIBS OFF) set(CMAKE_EXPORT_COMPILE_COMMANDS OFF) + + # Unconditionally prevent secp's symbols from being exported by our libs + set(CMAKE_C_VISIBILITY_PRESET hidden) + set(SECP256K1_ENABLE_API_VISIBILITY_ATTRIBUTES OFF CACHE BOOL "" FORCE) + set(SECP256K1_ENABLE_MODULE_ECDH OFF CACHE BOOL "" FORCE) set(SECP256K1_ENABLE_MODULE_RECOVERY ON CACHE BOOL "" FORCE) set(SECP256K1_ENABLE_MODULE_MUSIG ON CACHE BOOL "" FORCE) diff --git a/libbitcoinkernel-sys/bitcoin/contrib/README.md b/libbitcoinkernel-sys/bitcoin/contrib/README.md index f23d7ac5..037ea2f0 100644 --- 
a/libbitcoinkernel-sys/bitcoin/contrib/README.md +++ b/libbitcoinkernel-sys/bitcoin/contrib/README.md @@ -18,6 +18,9 @@ A Linux bash script that will set up traffic control (tc) to limit the outgoing ### [Seeds](/contrib/seeds) ### Utility to generate the pnSeed[] array that is compiled into the client. +### [ASMap](/contrib/asmap) ### +Utilities to analyze and process asmap files. + Build Tools and Keys --------------------- diff --git a/libbitcoinkernel-sys/bitcoin/contrib/asmap/asmap.py b/libbitcoinkernel-sys/bitcoin/contrib/asmap/asmap.py index 2ae84a3f..292048b6 100644 --- a/libbitcoinkernel-sys/bitcoin/contrib/asmap/asmap.py +++ b/libbitcoinkernel-sys/bitcoin/contrib/asmap/asmap.py @@ -157,7 +157,7 @@ class _Instruction(Enum): JUMP = 1 # A match instruction, encoded as [1,1,0] inspects 1 or more of the next unused bits # in the input with its argument. If they all match, execution continues. If they do - # not, failure is returned. If a default instruction has been executed before, instead + # not, failure (represented by 0) is returned. If a default instruction has been executed before, instead # of failure the default instruction's argument is returned. It is followed by an # integer in match encoding, and a subprogram. That value is at least 2 bits and at # most 9 bits. An n-bit value signifies matching (n-1) bits in the input with the lower diff --git a/libbitcoinkernel-sys/bitcoin/contrib/devtools/iwyu/bitcoin.core.imp b/libbitcoinkernel-sys/bitcoin/contrib/devtools/iwyu/bitcoin.core.imp index 9067bc32..960cb05d 100644 --- a/libbitcoinkernel-sys/bitcoin/contrib/devtools/iwyu/bitcoin.core.imp +++ b/libbitcoinkernel-sys/bitcoin/contrib/devtools/iwyu/bitcoin.core.imp @@ -6,15 +6,13 @@ { "include": [ "", "private", "", "public" ] }, # libc symbols. + # See: https://github.com/include-what-you-use/include-what-you-use/issues/1809. 
{ "symbol": ["AT_HWCAP", "private", "", "public"] }, { "symbol": ["AT_HWCAP2", "private", "", "public"] }, - # Fixed in https://github.com/include-what-you-use/include-what-you-use/pull/1706. - { "symbol": ["SEEK_CUR", "private", "", "public"] }, - { "symbol": ["SEEK_END", "private", "", "public"] }, - { "symbol": ["SEEK_SET", "private", "", "public"] }, - - # IWYU bug. + # Workarounds for IWYU issues. + # See: https://github.com/include-what-you-use/include-what-you-use/issues/1616. + { "symbol": ["std::pair", "private", "", "public"] }, # See: https://github.com/include-what-you-use/include-what-you-use/issues/1863. { "symbol": ["std::vector", "private", "", "public"] }, ] diff --git a/libbitcoinkernel-sys/bitcoin/contrib/guix/README.md b/libbitcoinkernel-sys/bitcoin/contrib/guix/README.md index aadc231e..088ed807 100644 --- a/libbitcoinkernel-sys/bitcoin/contrib/guix/README.md +++ b/libbitcoinkernel-sys/bitcoin/contrib/guix/README.md @@ -103,6 +103,18 @@ worktree to save disk space: ./contrib/guix/guix-clean ``` +## Gathering shasums of build outputs + +After a successful build, the shasums of the build outputs are gathered +into files named `SHA256SUMS`. These files are located in each of the +architecture-specific output directories. + +To gather all shasums and output them together to console, for e.g. +inclusion in a Guix pull request comment, run: + +``` sh +source contrib/shell/git-utils.bash && uname -m && find guix-build-$(git_head_version)/output/ -type f -print0 | env LC_ALL=C sort -z | xargs -r0 sha256sum +``` ## Attesting to build outputs @@ -249,7 +261,7 @@ details. Set the path where _extracted_ SDKs can be found. This is passed through to the depends tree. Note that this should be set to the _parent_ directory of the actual SDK (e.g. `SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of - `$HOME/Downloads/macOS-SDKs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers`). 
+ `$HOME/Downloads/macOS-SDKs/Xcode-26.1.1-17B100-extracted-SDK-with-libcxx-headers`). The path that this environment variable points to **must be a directory**, and **NOT a symlink to a directory**. diff --git a/libbitcoinkernel-sys/bitcoin/contrib/guix/guix-codesign b/libbitcoinkernel-sys/bitcoin/contrib/guix/guix-codesign index ec8fbc0c..791b75c5 100755 --- a/libbitcoinkernel-sys/bitcoin/contrib/guix/guix-codesign +++ b/libbitcoinkernel-sys/bitcoin/contrib/guix/guix-codesign @@ -289,7 +289,7 @@ INFO: Codesigning ${VERSION:?not set} for platform triple ${HOST:?not set}: EOF - # Run the build script 'contrib/guix/libexec/build.sh' in the build + # Run the build script 'contrib/guix/libexec/codesign.sh' in the build # container specified by 'contrib/guix/manifest.scm'. # # Explanation of `guix shell` flags: diff --git a/libbitcoinkernel-sys/bitcoin/contrib/guix/libexec/build.sh b/libbitcoinkernel-sys/bitcoin/contrib/guix/libexec/build.sh index f4db25f4..78976711 100755 --- a/libbitcoinkernel-sys/bitcoin/contrib/guix/libexec/build.sh +++ b/libbitcoinkernel-sys/bitcoin/contrib/guix/libexec/build.sh @@ -4,6 +4,9 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C set -e -o pipefail + +# Environment variables for determinism +export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" export TZ=UTC # Although Guix _does_ set umask when building its own packages (in our case, @@ -157,10 +160,6 @@ case "$HOST" in ;; esac -# Environment variables for determinism -export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" -export TZ="UTC" - #################### # Depends Building # #################### @@ -211,6 +210,7 @@ CONFIGFLAGS="-DREDUCE_EXPORTS=ON -DBUILD_BENCH=OFF -DBUILD_GUI_TESTS=OFF -DBUILD # CFLAGS HOST_CFLAGS="-O2 -g" HOST_CFLAGS+=$(find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) +HOST_CFLAGS+=" -fdebug-prefix-map=${DISTSRC}/src=." case "$HOST" in *mingw*) HOST_CFLAGS+=" -fno-ident" ;; *darwin*) unset HOST_CFLAGS ;; diff --git a/libbitcoinkernel-sys/bitcoin/contrib/macdeploy/README.md b/libbitcoinkernel-sys/bitcoin/contrib/macdeploy/README.md index 1763c6cb..4633246d 100644 --- a/libbitcoinkernel-sys/bitcoin/contrib/macdeploy/README.md +++ b/libbitcoinkernel-sys/bitcoin/contrib/macdeploy/README.md @@ -15,14 +15,14 @@ When complete, it will have produced `Bitcoin-Core.zip`. A free Apple Developer Account is required to proceed. Our macOS SDK can be extracted from -[Xcode_15.xip](https://download.developer.apple.com/Developer_Tools/Xcode_15/Xcode_15.xip). +[Xcode_26.1.1_Apple_silicon.xip](https://download.developer.apple.com/Developer_Tools/Xcode_26.1.1/Xcode_26.1.1_Apple_silicon.xip). Alternatively, after logging in to your account go to 'Downloads', then 'More' -and search for [`Xcode 15`](https://developer.apple.com/download/all/?q=Xcode%2015). +and search for [`Xcode 26.1.1`](https://developer.apple.com/download/all/?q=Xcode%2026.1.1). An Apple ID and cookies enabled for the hostname are needed to download this. 
-The `sha256sum` of the downloaded XIP archive should be `4daaed2ef2253c9661779fa40bfff50655dc7ec45801aba5a39653e7bcdde48e`. +The `sha256sum` of the downloaded XIP archive should be `f4c65b01e2807372b61553c71036dbfef492d7c79d4c380a5afb61aa1018e555`. To extract the `.xip` on Linux: @@ -33,13 +33,13 @@ git clone https://github.com/bitcoin-core/apple-sdk-tools.git # Unpack the .xip and place the resulting Xcode.app in your current # working directory -python3 apple-sdk-tools/extract_xcode.py -f Xcode_15.xip | cpio -d -i +python3 apple-sdk-tools/extract_xcode.py -f Xcode_26.1.1_Apple_silicon.xip | cpio -d -i ``` On macOS: ```bash -xip -x Xcode_15.xip +xip -x Xcode_26.1.1_Apple_silicon.xip ``` ### Step 2: Generating the SDK tarball from `Xcode.app` @@ -51,8 +51,8 @@ path to `Xcode.app` (extracted in the previous stage) as the first argument. ./contrib/macdeploy/gen-sdk.py '/path/to/Xcode.app' ``` -The generated archive should be: `Xcode-15.0-15A240d-extracted-SDK-with-libcxx-headers.tar`. -The `sha256sum` should be `95b00dc41fa090747dc0a7907a5031a2fcb2d7f95c9584ba6bccdb99b6e3d498`. +The generated archive should be: `Xcode-26.1.1-17B100-extracted-SDK-with-libcxx-headers.tar`. +The `sha256sum` should be `9600fa93644df674ee916b5e2c8a6ba8dacf631996a65dc922d003b98b5ea3b1`. ## Deterministic macOS App Notes diff --git a/libbitcoinkernel-sys/bitcoin/contrib/tracing/README.md b/libbitcoinkernel-sys/bitcoin/contrib/tracing/README.md index 252053e7..3c85a2ad 100644 --- a/libbitcoinkernel-sys/bitcoin/contrib/tracing/README.md +++ b/libbitcoinkernel-sys/bitcoin/contrib/tracing/README.md @@ -22,7 +22,7 @@ corresponding packages. See [installing bpftrace] and [installing BCC] for more information. For development there exist a [bpftrace Reference Guide], a [BCC Reference Guide], and a [bcc Python Developer Tutorial]. 
-[installing bpftrace]: https://github.com/iovisor/bpftrace/blob/master/INSTALL.md +[installing bpftrace]: https://github.com/bpftrace/bpftrace/blob/master/README.md#quick-start [installing BCC]: https://github.com/iovisor/bcc/blob/master/INSTALL.md [bpftrace Reference Guide]: https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md [BCC Reference Guide]: https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md @@ -246,10 +246,10 @@ $ python3 contrib/tracing/log_utxocache_flush.py $(pidof bitcoind) ``` Logging utxocache flushes. Ctrl-C to end... -Duration (µs) Mode Coins Count Memory Usage Prune -730451 IF_NEEDED 22990 3323.54 kB True -637657 ALWAYS 122320 17124.80 kB False -81349 ALWAYS 0 1383.49 kB False +Duration (µs) Mode Coins Count Memory Usage Flush for Prune +2556340 IF_NEEDED 2899141 394844.34 kB False +2005788 FORCE_FLUSH 2238117 310189.68 kB False +2685 FORCE_FLUSH 0 262.24 kB False ``` ### log_utxos.bt diff --git a/libbitcoinkernel-sys/bitcoin/contrib/tracing/log_utxocache_flush.py b/libbitcoinkernel-sys/bitcoin/contrib/tracing/log_utxocache_flush.py index 230b38e9..fd35e7f6 100755 --- a/libbitcoinkernel-sys/bitcoin/contrib/tracing/log_utxocache_flush.py +++ b/libbitcoinkernel-sys/bitcoin/contrib/tracing/log_utxocache_flush.py @@ -10,7 +10,7 @@ """Example logging Bitcoin Core utxo set cache flushes utilizing the utxocache:flush tracepoint.""" -# USAGE: ./contrib/tracing/log_utxocache_flush.py path/to/bitcoind +# USAGE: ./contrib/tracing/log_utxocache_flush.py # BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into # a sandboxed Linux kernel VM. 
@@ -45,7 +45,8 @@ 'NONE', 'IF_NEEDED', 'PERIODIC', - 'ALWAYS' + 'FORCE_FLUSH', + 'FORCE_SYNC', ] @@ -61,7 +62,7 @@ class Data(ctypes.Structure): def print_event(event): - print("%-15d %-10s %-15d %-15s %-8s" % ( + print("%-15d %-12s %-15d %-15s %-8s" % ( event.duration, FLUSH_MODES[event.mode], event.coins_count, @@ -88,7 +89,7 @@ def handle_flush(_, data, size): b["flush"].open_perf_buffer(handle_flush) print("Logging utxocache flushes. Ctrl-C to end...") - print("%-15s %-10s %-15s %-15s %-8s" % ("Duration (µs)", "Mode", + print("%-15s %-12s %-15s %-15s %-8s" % ("Duration (µs)", "Mode", "Coins Count", "Memory Usage", "Flush for Prune")) diff --git a/libbitcoinkernel-sys/bitcoin/contrib/utxo-tools/utxo_to_sqlite.py b/libbitcoinkernel-sys/bitcoin/contrib/utxo-tools/utxo_to_sqlite.py index 4758fe39..56de7369 100755 --- a/libbitcoinkernel-sys/bitcoin/contrib/utxo-tools/utxo_to_sqlite.py +++ b/libbitcoinkernel-sys/bitcoin/contrib/utxo-tools/utxo_to_sqlite.py @@ -9,6 +9,9 @@ The created database contains a table `utxos` with the following schema: (txid TEXT, vout INT, value INT, coinbase INT, height INT, scriptpubkey TEXT) + +If --txid=raw or --txid=rawle is specified, txid will be BLOB instead; +if --spk=raw, then scriptpubkey will be BLOB instead. 
""" import argparse import os @@ -111,7 +114,9 @@ def main(): parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('infile', help='filename of compact-serialized UTXO set (input)') parser.add_argument('outfile', help='filename of created SQLite3 database (output)') - parser.add_argument('-v', '--verbose', action='store_true', help='show details about each UTXO') + parser.add_argument('--verbose', action='store_true', help='show details about each UTXO') + parser.add_argument('--spk', choices=['hex', 'raw'], default='hex', help='encode scriptPubKey as hex or raw bytes') + parser.add_argument('--txid', choices=['hex', 'raw', 'rawle'], default='hex', help='encode txid as hex, raw bytes (sha256 byteorder), or reversed raw bytes (little endian)') args = parser.parse_args() if not os.path.exists(args.infile): @@ -122,9 +127,15 @@ def main(): print(f"Error: provided output file '{args.outfile}' already exists.") sys.exit(1) + spk_hex = (args.spk == 'hex') + txid_hex = (args.txid == 'hex') + txid_reverse = (args.txid != 'raw') + # create database table + txid_fmt = "TEXT" if txid_hex else "BLOB" + spk_fmt = "TEXT" if spk_hex else "BLOB" con = sqlite3.connect(args.outfile) - con.execute("CREATE TABLE utxos(txid TEXT, vout INT, value INT, coinbase INT, height INT, scriptpubkey TEXT)") + con.execute(f"CREATE TABLE utxos(txid {txid_fmt}, vout INT, value INT, coinbase INT, height INT, scriptpubkey {spk_fmt})") # read metadata (magic bytes, version, network magic, block hash, UTXO count) f = open(args.infile, 'rb') @@ -153,7 +164,7 @@ def main(): for coin_idx in range(1, num_utxos+1): # read key (COutPoint) if coins_per_hash_left == 0: # read next prevout hash - prevout_hash = f.read(32)[::-1].hex() + prevout_hash = f.read(32) coins_per_hash_left = read_compactsize(f) prevout_index = read_compactsize(f) # read value (Coin) @@ -161,17 +172,21 @@ def main(): height = code >> 1 is_coinbase = code & 1 amount 
= decompress_amount(read_varint(f)) - scriptpubkey = decompress_script(f).hex() - write_batch.append((prevout_hash, prevout_index, amount, is_coinbase, height, scriptpubkey)) + scriptpubkey = decompress_script(f) + + scriptpubkey_write = scriptpubkey.hex() if spk_hex else scriptpubkey + txid_write = prevout_hash[::-1] if txid_reverse else prevout_hash + txid_write = txid_write.hex() if txid_hex else txid_write + write_batch.append((txid_write, prevout_index, amount, is_coinbase, height, scriptpubkey_write)) if height > max_height: max_height = height coins_per_hash_left -= 1 if args.verbose: print(f"Coin {coin_idx}/{num_utxos}:") - print(f" prevout = {prevout_hash}:{prevout_index}") + print(f" prevout = {prevout_hash[::-1].hex()}:{prevout_index}") print(f" amount = {amount}, height = {height}, coinbase = {is_coinbase}") - print(f" scriptPubKey = {scriptpubkey}\n") + print(f" scriptPubKey = {scriptpubkey.hex()}\n") if coin_idx % (16*1024) == 0 or coin_idx == num_utxos: # write utxo batch to database diff --git a/libbitcoinkernel-sys/bitcoin/contrib/valgrind.supp b/libbitcoinkernel-sys/bitcoin/contrib/valgrind.supp index 7960f9c8..ef53f380 100644 --- a/libbitcoinkernel-sys/bitcoin/contrib/valgrind.supp +++ b/libbitcoinkernel-sys/bitcoin/contrib/valgrind.supp @@ -56,9 +56,3 @@ ... fun:_ZN5BCLog6Logger12StartLoggingEv } -{ - Suppress https://bugs.kde.org/show_bug.cgi?id=472219 - fixed in Valgrind 3.22. 
- Memcheck:Param - ppoll(ufds.events) - obj:/lib/ld-musl-aarch64.so.1 -} diff --git a/libbitcoinkernel-sys/bitcoin/contrib/verify-commits/trusted-git-root b/libbitcoinkernel-sys/bitcoin/contrib/verify-commits/trusted-git-root index 7ec318e1..ec52b7f3 100644 --- a/libbitcoinkernel-sys/bitcoin/contrib/verify-commits/trusted-git-root +++ b/libbitcoinkernel-sys/bitcoin/contrib/verify-commits/trusted-git-root @@ -1 +1 @@ -437dfe1c26e752c280014a30f809e62c684ad99e +88a7294356e75bbaa136c9427c64e239f7c6fd40 diff --git a/libbitcoinkernel-sys/bitcoin/contrib/verify-commits/trusted-keys b/libbitcoinkernel-sys/bitcoin/contrib/verify-commits/trusted-keys index 0121f290..b4e4095b 100644 --- a/libbitcoinkernel-sys/bitcoin/contrib/verify-commits/trusted-keys +++ b/libbitcoinkernel-sys/bitcoin/contrib/verify-commits/trusted-keys @@ -1,6 +1,5 @@ E777299FC265DD04793070EB944D35F9AC3DB76A D1DBF2C4B96F2DEBF4C16654410108112E7EA81F 152812300785C96444D3334D17565732E08E5E41 -6B002C6EA3F91B1B0DF0C9BC8F617F1200A6D25C 4D1B3D5ECBA1A7E05371EEBE46800E30FC748A66 A8FC55F3B04BA3146F3492E79303B33A305224CB diff --git a/libbitcoinkernel-sys/bitcoin/depends/hosts/darwin.mk b/libbitcoinkernel-sys/bitcoin/depends/hosts/darwin.mk index d33b681b..71fac7cc 100644 --- a/libbitcoinkernel-sys/bitcoin/depends/hosts/darwin.mk +++ b/libbitcoinkernel-sys/bitcoin/depends/hosts/darwin.mk @@ -1,7 +1,7 @@ OSX_MIN_VERSION=14.0 OSX_SDK_VERSION=14.0 -XCODE_VERSION=15.0 -XCODE_BUILD_ID=15A240d +XCODE_VERSION=26.1.1 +XCODE_BUILD_ID=17B100 LLD_VERSION=711 OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers @@ -50,6 +50,12 @@ darwin_STRIP=$(shell $(SHELL) $(.SHELLFLAGS) "command -v llvm-strip") # # Disable adhoc codesigning (for now) when using LLVM tooling, to avoid # non-determinism issues with the Identifier field. +# +# -Xclang -fno-cxx-modules +# +# Disable C++ modules. 
We don't use these, and modules cause definition issues +# in the SDK, where __has_feature(modules) is used to define USE_CLANG_TYPES, +# which is in turn used as an include guard. darwin_CC=$(clang_prog) --target=$(host) \ -isysroot$(OSX_SDK) -nostdlibinc \ @@ -61,7 +67,7 @@ darwin_CXX=$(clangxx_prog) --target=$(host) \ -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks darwin_CFLAGS=-mmacos-version-min=$(OSX_MIN_VERSION) -darwin_CXXFLAGS=-mmacos-version-min=$(OSX_MIN_VERSION) +darwin_CXXFLAGS=-mmacos-version-min=$(OSX_MIN_VERSION) -Xclang -fno-cxx-modules darwin_LDFLAGS=-Wl,-platform_version,macos,$(OSX_MIN_VERSION),$(OSX_SDK_VERSION) ifneq ($(build_os),darwin) diff --git a/libbitcoinkernel-sys/bitcoin/depends/packages/boost.mk b/libbitcoinkernel-sys/bitcoin/depends/packages/boost.mk index 2d027c68..02f59abe 100644 --- a/libbitcoinkernel-sys/bitcoin/depends/packages/boost.mk +++ b/libbitcoinkernel-sys/bitcoin/depends/packages/boost.mk @@ -13,6 +13,8 @@ define $(package)_set_vars $(package)_config_opts += -DBOOST_INSTALL_LAYOUT=system $(package)_config_opts += -DBUILD_TESTING=OFF $(package)_config_opts += -DCMAKE_DISABLE_FIND_PACKAGE_ICU=ON + # Install to a unique path to prevent accidental inclusion via other dependencies' -I flags. 
+ $(package)_config_opts += -DCMAKE_INSTALL_INCLUDEDIR=$(package)/include endef define $(package)_config_cmds diff --git a/libbitcoinkernel-sys/bitcoin/depends/packages/systemtap.mk b/libbitcoinkernel-sys/bitcoin/depends/packages/systemtap.mk index 668099b0..a9f5e354 100644 --- a/libbitcoinkernel-sys/bitcoin/depends/packages/systemtap.mk +++ b/libbitcoinkernel-sys/bitcoin/depends/packages/systemtap.mk @@ -6,7 +6,11 @@ $(package)_sha256_hash=966a360fb73a4b65a8d0b51b389577b3c4f92a327e84aae58682103e8 $(package)_patches=remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch define $(package)_preprocess_cmds - patch -p1 < $($(package)_patch_dir)/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch && \ - mkdir -p $($(package)_staging_prefix_dir)/include/sys && \ - cp includes/sys/sdt.h $($(package)_staging_prefix_dir)/include/sys/sdt.h + patch -p1 < $($(package)_patch_dir)/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch +endef + +# Install to a unique path to prevent accidental inclusion via other dependencies' -I flags. 
+define $(package)_stage_cmds + mkdir -p $($(package)_staging_prefix_dir)/$(package)/include/sys && \ + cp includes/sys/sdt.h $($(package)_staging_prefix_dir)/$(package)/include/sys/sdt.h endef diff --git a/libbitcoinkernel-sys/bitcoin/depends/toolchain.cmake.in b/libbitcoinkernel-sys/bitcoin/depends/toolchain.cmake.in index e31d9eef..87189efa 100644 --- a/libbitcoinkernel-sys/bitcoin/depends/toolchain.cmake.in +++ b/libbitcoinkernel-sys/bitcoin/depends/toolchain.cmake.in @@ -163,6 +163,7 @@ if("@usdt_packages@" MATCHES "^[ ]*$") set(WITH_USDT OFF CACHE BOOL "") else() set(WITH_USDT ON CACHE BOOL "") + set(USDT_INCLUDE_DIR "${CMAKE_CURRENT_LIST_DIR}/systemtap/include" CACHE PATH "") endif() set(ipc_packages @ipc_packages@) diff --git a/libbitcoinkernel-sys/bitcoin/doc/asmap-data.md b/libbitcoinkernel-sys/bitcoin/doc/asmap-data.md new file mode 100644 index 00000000..09e2f95c --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/doc/asmap-data.md @@ -0,0 +1,59 @@ +# Embedded ASMap data + +## Background + +The ASMap feature (available via `-asmap`) makes it possible to use a peer's AS Number (ASN), an ISP/hoster identifier, +in netgroup bucketing in order to ensure a higher diversity in the peer +set. When not using this, the default behavior is to have the buckets formed +based on IP prefixes but this does not +prevent having connections dominated by peers at the same large-scale hoster, +for example, since such companies usually control many diverse IP ranges. +In order to use ASMap, the mapping between IP prefixes and AS Numbers needs +to be available. This mapping data can be provided through an external file +but Bitcoin Core also embeds a default map in its builds to make the feature +available to users when they are unable to provide a file. + +## Data sourcing and tools + +ASMap is a mapping of IP prefix to ASN, essentially a snapshot of the +internet routing table at some point in time. 
Due to the high volatility
+of parts of this routing table and the known vulnerabilities in the BGP
+protocol it is challenging to collect this data and prove its consistency.
+Sourcing the data from a single trusted source is problematic as well.
+
+The [Kartograf](https://github.com/asmap/kartograf) tool was created to
+deal with these uncertainties as well as possible. The mapping data is sourced from RPKI, IRR and
+Routeviews. The former two are themselves used as security mechanisms to
+protect against BGP security issues, which is why they are considered more secure and
+their data takes precedence. The latter is a trusted collector of BGP traffic
+and only used for IP space that is not covered by RPKI and IRR.
+
+The process in which the Kartograf project parses, processes and merges these
+data sources is deterministic. Given the raw download files from these
+different sources, anyone can build their own map file and verify the content
+matches with other users' results. Before the map is usable by Bitcoin Core
+it needs to be encoded as well. This is done using `asmap-tool.py` in `contrib/asmap`
+and this step is deterministic as well.
+
+When it comes to obtaining the initial input data, the high volatility remains
+a challenge if users don't want to trust a single creator of the used ASMap file.
+To overcome this, multiple users can start the download process at the exact
+same time which leads to a high likelihood that their downloaded data will be
+similar enough that they receive the same output at the end of the process.
+This process is regularly coordinated at the [asmap-data](https://github.com/asmap/asmap-data)
+project. If enough participants have joined the effort (5 or more is recommended) and a majority of the
+participants have received the same result, the resulting ASMap file is added
+to the repository for public use. 
Files will not be merged to the repository +without at least two additional reviewers confirming that the process described +above was followed as expected and that the encoding step yielded the same +file hash. New files are created on an ongoing basis but without any central planning +or an explicit schedule. + +## Release process + +As an upcoming release approaches the embedded ASMap data should be updated +by replacing the `ip_asn.dat` with a newer ASMap file from the asmap-data +repository so that its data is embedded in the release. Ideally, there may be a file +already created recently that can be selected for an upcoming release. Alternatively, +a new creation process can be initiated with the goal of obtaining a fresh map +for use in the upcoming release. diff --git a/libbitcoinkernel-sys/bitcoin/doc/build-windows-msvc.md b/libbitcoinkernel-sys/bitcoin/doc/build-windows-msvc.md index 5b75a62f..86639396 100644 --- a/libbitcoinkernel-sys/bitcoin/doc/build-windows-msvc.md +++ b/libbitcoinkernel-sys/bitcoin/doc/build-windows-msvc.md @@ -6,25 +6,49 @@ For cross-compiling options, please see [`build-windows.md`](./build-windows.md) ## Preparation -### 1. Visual Studio +### 1. Install Required Dependencies + +The first step is to install the required build applications. The instructions below use WinGet to install the applications. + +WinGet is available on all supported Windows versions. The applications mentioned can also be installed manually. + +#### Visual Studio This guide relies on using CMake and vcpkg package manager provided with the Visual Studio installation. -Here are requirements for the Visual Studio installation: -1. Minimum required version: Visual Studio 2022 version 17.13. -2. Installed components: -- The "Desktop development with C++" workload. -The commands in this guide should be executed in "Developer PowerShell for VS 2022" or "Developer Command Prompt for VS 2022". 
+Minimum required version: Visual Studio 2026 version 18.3 with the "Desktop development with C++" workload. + +To install Visual Studio Community Edition with the necessary components, run: + +```powershell +winget install --id Microsoft.VisualStudio.Community --override "--wait --quiet --add Microsoft.VisualStudio.Workload.NativeDesktop --add Microsoft.VisualStudio.Component.Git --includeRecommended" +``` + +This installs: +- Visual Studio +- The "Desktop development with C++" workload (NativeDesktop) +- Git component + +After installation, the commands in this guide should be executed in "Developer PowerShell for VS" or "Developer Command Prompt for VS". The former is assumed hereinafter. -### 2. Git +#### Python -Download and install [Git for Windows](https://git-scm.com/downloads/win). Once installed, Git is available from PowerShell or the Command Prompt. +Python is required for running the test suite. -### 3. Clone Bitcoin Repository +To install Python, run: -Clone the Bitcoin Core repository to a directory. All build scripts and commands will run from this directory. +```powershell +winget install python3 ``` + +### 2. Clone Bitcoin Repository + +`git` should already be installed as a component of Visual Studio. If not, download and install [Git for Windows](https://git-scm.com/downloads/win). + +Clone the Bitcoin Core repository to a directory. All build scripts and commands will run from this directory. + +```powershell git clone https://github.com/bitcoin/bitcoin.git ``` @@ -38,7 +62,7 @@ The Bitcoin Core project supports the following vcpkg triplets: To facilitate build process, the Bitcoin Core project provides presets, which are used in this guide. Available presets can be listed as follows: -``` +```powershell cmake --list-presets ``` @@ -52,24 +76,24 @@ In the following instructions, the "Debug" configuration can be specified instea Run `cmake -B build -LH` to see the full list of available options. -### 4. 
Building with Static Linking with GUI +### Building with Static Linking with GUI -``` -cmake -B build --preset vs2022-static # It might take a while if the vcpkg binary cache is unpopulated or invalidated. +```powershell +cmake -B build --preset vs2026-static # It might take a while if the vcpkg binary cache is unpopulated or invalidated. cmake --build build --config Release # Append "-j N" for N parallel jobs. ctest --test-dir build --build-config Release # Append "-j N" for N parallel tests. cmake --install build --config Release # Optional. ``` -### 5. Building with Dynamic Linking without GUI +### Building with Dynamic Linking without GUI -``` -cmake -B build --preset vs2022 -DBUILD_GUI=OFF # It might take a while if the vcpkg binary cache is unpopulated or invalidated. +```powershell +cmake -B build --preset vs2026 -DBUILD_GUI=OFF # It might take a while if the vcpkg binary cache is unpopulated or invalidated. cmake --build build --config Release # Append "-j N" for N parallel jobs. ctest --test-dir build --build-config Release # Append "-j N" for N parallel tests. ``` -### 6. vcpkg-specific Issues and Workarounds +### vcpkg-specific Issues and Workarounds vcpkg installation during the configuration step might fail for various reasons unrelated to Bitcoin Core. 
@@ -79,7 +103,7 @@ specify a shorter path to store intermediate build files by using the [`--x-buildtrees-root`](https://learn.microsoft.com/en-us/vcpkg/commands/common-options#buildtrees-root) option: ```powershell -cmake -B build --preset vs2022-static -DVCPKG_INSTALL_OPTIONS="--x-buildtrees-root=C:\vcpkg" +cmake -B build --preset vs2026-static -DVCPKG_INSTALL_OPTIONS="--x-buildtrees-root=C:\vcpkg" ``` If vcpkg installation fails with the message "Paths with embedded space may be handled incorrectly", which @@ -87,21 +111,21 @@ can occur if your local Bitcoin Core repository path contains spaces, you can ov by setting the [`VCPKG_INSTALLED_DIR`](https://github.com/microsoft/vcpkg-docs/blob/main/vcpkg/users/buildsystems/cmake-integration.md#vcpkg_installed_dir) variable: ```powershell -cmake -B build --preset vs2022-static -DVCPKG_INSTALLED_DIR="C:\path_without_spaces" +cmake -B build --preset vs2026-static -DVCPKG_INSTALLED_DIR="C:\path_without_spaces" ``` ## Performance Notes -### 7. vcpkg Manifest Default Features +### vcpkg Manifest Default Features -One can skip vcpkg manifest default features to speedup the configuration step. +One can skip vcpkg manifest default features to speed up the configuration step. For example, the following invocation will skip all features except for "wallet" and "tests" and their dependencies: -``` -cmake -B build --preset vs2022 -DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON -DVCPKG_MANIFEST_FEATURES="wallet;tests" -DBUILD_GUI=OFF -DWITH_ZMQ=OFF +```powershell +cmake -B build --preset vs2026 -DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON -DVCPKG_MANIFEST_FEATURES="wallet;tests" -DBUILD_GUI=OFF -DWITH_ZMQ=OFF ``` Available features are listed in the [`vcpkg.json`](/vcpkg.json) file. -### 8. Antivirus Software +### Antivirus Software To improve the build process performance, one might add the Bitcoin repository directory to the Microsoft Defender Antivirus exclusions. 
diff --git a/libbitcoinkernel-sys/bitcoin/doc/descriptors.md b/libbitcoinkernel-sys/bitcoin/doc/descriptors.md index 292773fc..1b79c99f 100644 --- a/libbitcoinkernel-sys/bitcoin/doc/descriptors.md +++ b/libbitcoinkernel-sys/bitcoin/doc/descriptors.md @@ -67,8 +67,8 @@ Output descriptors currently support: - `wsh(sortedmulti(1,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1/0/*,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/0/0/*))` describes a set of *1-of-2* P2WSH multisig outputs where one multisig key is the *1/0/`i`* child of the first specified xpub and the other multisig key is the *0/0/`i`* child of the second specified xpub, and `i` is any number in a configurable range (`0-1000` by default). The order of public keys in the resulting witnessScripts is determined by the lexicographic order of the public keys at that index. - `tr(c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5,{pk(fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556),pk(e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13)})` describes a P2TR output with the `c6...` x-only pubkey as internal key, and two script paths. - `tr(c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5,sortedmulti_a(2,2f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,5cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc))` describes a P2TR output with the `c6...` x-only pubkey as internal key, and a single `multi_a` script that needs 2 signatures with 2 specified x-only keys, which will be sorted lexicographically. 
-- `wsh(sortedmulti(2,[6f53d49c/44h/1h/0h]tpubDDjsCRDQ9YzyaAq9rspCfq8RZFrWoBpYnLxK6sS2hS2yukqSczgcYiur8Scx4Hd5AZatxTuzMtJQJhchufv1FRFanLqUP7JHwusSSpfcEp2/0/*,[e6807791/44h/1h/0h]tpubDDAfvogaaAxaFJ6c15ht7Tq6ZmiqFYfrSmZsHu7tHXBgnjMZSHAeHSwhvjARNA6Qybon4ksPksjRbPDVp7yXA1KjTjSd5x18KHqbppnXP1s/0/*,[367c9cfa/44h/1h/0h]tpubDDtPnSgWYk8dDnaDwnof4ehcnjuL5VoUt1eW2MoAed1grPHuXPDnkX1fWMvXfcz3NqFxPbhqNZ3QBdYjLz2hABeM9Z2oqMR1Gt2HHYDoCgh/0/*))#av0kxgw0` describes a *2-of-3* multisig. For brevity, the internal "change" descriptor accompanying the above external "receiving" descriptor is not included here, but it typically differs only in the xpub derivation steps, ending in `/1/*` for change addresses. -- `wsh(thresh(4,pk([7258e4f9/44h/1h/0h]tpubDCZrkQoEU3845aFKUu9VQBYWZtrTwxMzcxnBwKFCYXHD6gEXvtFcxddCCLFsEwmxQaG15izcHxj48SXg1QS5FQGMBx5Ak6deXKPAL7wauBU/0/*),s:pk([c80b1469/44h/1h/0h]tpubDD3UwwHoNUF4F3Vi5PiUVTc3ji1uThuRfFyBexTSHoAcHuWW2z8qEE2YujegcLtgthr3wMp3ZauvNG9eT9xfJyxXCfNty8h6rDBYU8UU1qq/0/*),s:pk([4e5024fe/44h/1h/0h]tpubDDLrpPymPLSCJyCMLQdmcWxrAWwsqqssm5NdxT2WSdEBPSXNXxwbeKtsHAyXPpLkhUyKovtZgCi47QxVpw9iVkg95UUgeevyAqtJ9dqBqa1/0/*),s:pk([3b1d1ee9/44h/1h/0h]tpubDCmDTANBWPzf6d8Ap1J5Ku7J1Ay92MpHMrEV7M5muWxCrTBN1g5f1NPcjMEL6dJHxbvEKNZtYCdowaSTN81DAyLsmv6w6xjJHCQNkxrsrfu/0/*),sln:after(840000),sln:after(1050000),sln:after(1260000)))#k28080kv` describes a Miniscript multisig with spending policy: `thresh(4,pk(key_1),pk(key_2),pk(key_3),pk(key_4),after(t1),after(t2),after(t3))` that starts as 4-of-4 and "decays" to 3-of-4, 2-of-4, and finally 1-of-4 at each future halvening block height. For brevity, the internal "change" descriptor accompanying the above external "receiving" descriptor is not included here, but it typically differs only in the xpub derivation steps, ending in `/1/*` for change addresses. 
+- `wsh(sortedmulti(2,[6f53d49c/44h/1h/0h]tpubDDjsCRDQ9YzyaAq9rspCfq8RZFrWoBpYnLxK6sS2hS2yukqSczgcYiur8Scx4Hd5AZatxTuzMtJQJhchufv1FRFanLqUP7JHwusSSpfcEp2/<0;1>/*,[e6807791/44h/1h/0h]tpubDDAfvogaaAxaFJ6c15ht7Tq6ZmiqFYfrSmZsHu7tHXBgnjMZSHAeHSwhvjARNA6Qybon4ksPksjRbPDVp7yXA1KjTjSd5x18KHqbppnXP1s/<0;1>/*,[367c9cfa/44h/1h/0h]tpubDDtPnSgWYk8dDnaDwnof4ehcnjuL5VoUt1eW2MoAed1grPHuXPDnkX1fWMvXfcz3NqFxPbhqNZ3QBdYjLz2hABeM9Z2oqMR1Gt2HHYDoCgh/<0;1>/*))` describes a *2-of-3* multisig with a multipath descriptor specifying both receiving (/0) and change (/1) address derivation paths. +- `wsh(thresh(4,pk([7258e4f9/44h/1h/0h]tpubDCZrkQoEU3845aFKUu9VQBYWZtrTwxMzcxnBwKFCYXHD6gEXvtFcxddCCLFsEwmxQaG15izcHxj48SXg1QS5FQGMBx5Ak6deXKPAL7wauBU/<0;1>/*),s:pk([c80b1469/44h/1h/0h]tpubDD3UwwHoNUF4F3Vi5PiUVTc3ji1uThuRfFyBexTSHoAcHuWW2z8qEE2YujegcLtgthr3wMp3ZauvNG9eT9xfJyxXCfNty8h6rDBYU8UU1qq/<0;1>/*),s:pk([4e5024fe/44h/1h/0h]tpubDDLrpPymPLSCJyCMLQdmcWxrAWwsqqssm5NdxT2WSdEBPSXNXxwbeKtsHAyXPpLkhUyKovtZgCi47QxVpw9iVkg95UUgeevyAqtJ9dqBqa1/<0;1>/*),s:pk([3b1d1ee9/44h/1h/0h]tpubDCmDTANBWPzf6d8Ap1J5Ku7J1Ay92MpHMrEV7M5muWxCrTBN1g5f1NPcjMEL6dJHxbvEKNZtYCdowaSTN81DAyLsmv6w6xjJHCQNkxrsrfu/<0;1>/*),sln:after(840000),sln:after(1050000),sln:after(1260000)))` describes a Miniscript multisig with spending policy: `thresh(4,pk(key_1),pk(key_2),pk(key_3),pk(key_4),after(t1),after(t2),after(t3))` that starts as 4-of-4 and "decays" to 3-of-4, 2-of-4, and finally 1-of-4 at each future halvening block height. This uses a multipath descriptor specifying both receiving (/0) and change (/1) address derivation paths. - `tr(musig(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y)/0/*)` describes a MuSig2 multisig with key derivation. The internal keys are derived at `m/0/*` from the aggregate key computed from the 2 participants. 
## Reference @@ -175,9 +175,9 @@ The basic steps are: the participant's signer wallet. Avoid reusing this wallet for any purpose other than signing transactions from the corresponding multisig we are about to create. Hint: extract the wallet's xpubs using `listdescriptors` and pick the one from the `pkh` descriptor since it's least likely to be accidentally reused (legacy addresses) - 2. Create a watch-only descriptor wallet (blank, private keys disabled). Now the multisig is created by importing the external and internal descriptors: - `wsh(sortedmulti(,XPUB1/0/*,XPUB2/0/*,…,XPUBN/0/*))` and `wsh(sortedmulti(,XPUB1/1/*,XPUB2/1/*,…,XPUBN/1/*))` - (one descriptor w/ `0` for receiving addresses and another w/ `1` for change). Every participant does this. All key origin information (master key fingerprint and all derivation steps) should be included with xpubs for proper support of hardware devices / external signers + 2. Create a watch-only descriptor wallet (blank, private keys disabled). Now the multisig is created by importing a single multipath descriptor: + `wsh(sortedmulti(,XPUB1/<0;1>/*,XPUB2/<0;1>/*,…,XPUBN/<0;1>/*))` + This single descriptor specifies both receiving (`/0`) and change (`/1`) addresses. Every participant does this. All key origin information (master key fingerprint and all derivation steps) should be included with xpubs for proper support of hardware devices / external signers 3. A receiving address is generated for the multisig. As a check to ensure step 2 was done correctly, every participant should verify they get the same addresses 4. 
Funds are sent to the resulting address diff --git a/libbitcoinkernel-sys/bitcoin/doc/developer-notes.md b/libbitcoinkernel-sys/bitcoin/doc/developer-notes.md index b8a16ec0..97ffc0cd 100644 --- a/libbitcoinkernel-sys/bitcoin/doc/developer-notes.md +++ b/libbitcoinkernel-sys/bitcoin/doc/developer-notes.md @@ -305,37 +305,6 @@ If you need to build exclusively for debugging, set the `-DCMAKE_BUILD_TYPE` to `Debug` (i.e. `-DCMAKE_BUILD_TYPE=Debug`). You can always check the cmake build options of an existing build with `ccmake build`. -### Show sources in debugging - -If you have ccache enabled, absolute paths are stripped from debug information -with the `-fdebug-prefix-map` and `-fmacro-prefix-map` options (if supported by the -compiler). This might break source file detection in case you move binaries -after compilation, debug from the directory other than the project root or use -an IDE that only supports absolute paths for debugging (e.g. it won't stop at breakpoints). - -There are a few possible fixes: - -1. Configure source file mapping. - -For `gdb` create or append to [`.gdbinit` file](https://sourceware.org/gdb/current/onlinedocs/gdb#gdbinit-man): -``` -set substitute-path ./src /path/to/project/root/src -``` - -For `lldb` create or append to [`.lldbinit` file](https://lldb.llvm.org/man/lldb.html#configuration-files): -``` -settings set target.source-map ./src /path/to/project/root/src -``` - -2. Add a symlink to the `./src` directory: -``` -ln -s /path/to/project/root/src src -``` - -3. Use `debugedit` to modify debug information in the binary. - -4. If your IDE has an option for this, change your breakpoints to use the file name only. 
- ### debug.log If the code is behaving strangely, take a look in the `debug.log` file in the data directory; diff --git a/libbitcoinkernel-sys/bitcoin/doc/files.md b/libbitcoinkernel-sys/bitcoin/doc/files.md index 12c6cefb..27fe5615 100644 --- a/libbitcoinkernel-sys/bitcoin/doc/files.md +++ b/libbitcoinkernel-sys/bitcoin/doc/files.md @@ -55,6 +55,7 @@ Subdirectory | File(s) | Description `blocks/` | `xor.dat` | Rolling XOR pattern for block and undo data files `chainstate/` | LevelDB database | Blockchain state (a compact representation of all currently unspent transaction outputs (UTXOs) and metadata about the transactions they are from) `indexes/txindex/` | LevelDB database | Transaction index; *optional*, used if `-txindex=1` +`indexes/txospenderindex/` | LevelDB database | Transaction spender index; *optional*, used if `-txospenderindex=1` `indexes/blockfilter/basic/db/` | LevelDB database | Blockfilter index LevelDB database for the basic filtertype; *optional*, used if `-blockfilterindex=basic` `indexes/blockfilter/basic/` | `fltrNNNNN.dat`[\[2\]](#note2) | Blockfilter index filters for the basic filtertype; *optional*, used if `-blockfilterindex=basic` `indexes/coinstatsindex/db/` | LevelDB database | Coinstats index; *optional*, used if `-coinstatsindex=1` diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-notes-24539.md b/libbitcoinkernel-sys/bitcoin/doc/release-notes-24539.md new file mode 100644 index 00000000..63b4d70e --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/doc/release-notes-24539.md @@ -0,0 +1,14 @@ +New settings +------------ +- `-txospenderindex` enables the creation of a transaction output spender + index that, if present, will be scanned by `gettxspendingprevout` if a + spending transaction was not found in the mempool. + (#24539) + +Updated RPCs +------------ +- `gettxspendingprevout` has 2 new optional arguments: `mempool_only` and `return_spending_tx`. 
+ If `mempool_only` is true it will limit scans to the mempool even if `txospenderindex` is available. + If `return_spending_tx` is true, the full spending tx will be returned. + In addition if `txospenderindex` is available and a confirmed spending transaction is found, + its block hash will be returned. (#24539) diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-notes-29415.md b/libbitcoinkernel-sys/bitcoin/doc/release-notes-29415.md index d5040a31..c0e0f3dc 100644 --- a/libbitcoinkernel-sys/bitcoin/doc/release-notes-29415.md +++ b/libbitcoinkernel-sys/bitcoin/doc/release-notes-29415.md @@ -12,3 +12,8 @@ P2P and network changes 2. If the originator sends two otherwise unrelated transactions, they will not be linkable. This is because a separate connection is used for broadcasting each transaction. (#29415) + +- New RPCs have been added to introspect and control private broadcast: + `getprivatebroadcastinfo` reports transactions currently being privately + broadcast, and `abortprivatebroadcast` removes matching + transactions from the private broadcast queue. diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-notes-32138.md b/libbitcoinkernel-sys/bitcoin/doc/release-notes-32138.md new file mode 100644 index 00000000..56699850 --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/doc/release-notes-32138.md @@ -0,0 +1,3 @@ +RPC and Startup Option +--- +The `-paytxfee` startup option and the `settxfee` RPC are now deleted after being deprecated in Bitcoin Core 30.0. They used to allow the user to set a static fee rate for wallet transactions, which could potentially lead to overpaying or underpaying. Users should instead rely on fee estimation or specify a fee rate per transaction using the `fee_rate` argument in RPCs such as `fundrawtransaction`, `sendtoaddress`, `send`, `sendall`, and `sendmany`. 
(#32138) diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-notes-33199.md b/libbitcoinkernel-sys/bitcoin/doc/release-notes-33199.md new file mode 100644 index 00000000..90246d78 --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/doc/release-notes-33199.md @@ -0,0 +1,9 @@ +Fee Estimation +======================== + +- The Bitcoin Core fee estimator minimum fee rate bucket was updated from **1 sat/vB** to **0.1 sat/vB**, + which matches the node’s default `minrelayfee`. + This means that for a given confirmation target, if a sub-1 sat/vB fee rate bucket is the minimum tracked + with sufficient data, its average value will be returned as the fee rate estimate. + +- Note: Restarting a node with this change invalidates previously saved estimates in `fee_estimates.dat`, the fee estimator will start tracking fresh stats. diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-notes-33819.md b/libbitcoinkernel-sys/bitcoin/doc/release-notes-33819.md deleted file mode 100644 index 79ed1f70..00000000 --- a/libbitcoinkernel-sys/bitcoin/doc/release-notes-33819.md +++ /dev/null @@ -1,8 +0,0 @@ -Mining IPC ----------- - -- The `getCoinbaseTx()` method is renamed to `getCoinbaseRawTx()` and deprecated. - IPC clients do not use the function name, so they're not affected. (#33819) -- Adds `getCoinbaseTx()` which clients should use instead of `getCoinbaseRawTx()`. It - contains all fields required to construct a coinbase transaction, and omits the - dummy output which Bitcoin Core uses internally. (#33819) diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-notes-34512.md b/libbitcoinkernel-sys/bitcoin/doc/release-notes-34512.md new file mode 100644 index 00000000..b8634488 --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/doc/release-notes-34512.md @@ -0,0 +1,8 @@ +Updated RPCs +------------ + +- The `getblock` RPC now returns a `coinbase_tx` object at verbosity levels 1, 2, + and 3. It contains `version`, `locktime`, `sequence`, `coinbase` and + `witness`. 
This allows for efficiently querying coinbase + transaction properties without fetching the full transaction data at + verbosity 2+. (#34512) diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-notes-34568.md b/libbitcoinkernel-sys/bitcoin/doc/release-notes-34568.md new file mode 100644 index 00000000..e4877233 --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/doc/release-notes-34568.md @@ -0,0 +1,11 @@ +Mining IPC +---------- + +The IPC mining interface now requires mining clients to use the latest `mining.capnp` schema. Clients built against older schemas will fail when calling `Init.makeMining` and receive an RPC error indicating the old mining interface is no longer supported. Mining clients must update to the latest schema and regenerate bindings to continue working. (#34568) + +Notable IPC mining interface changes since the last release: +- `Mining.createNewBlock` and `Mining.checkBlock` now require a `context` parameter. +- `Mining.waitTipChanged` now has a default `timeout` (effectively infinite / `maxDouble`) if the client omits it. +- `BlockTemplate.getCoinbaseTx()` now returns a structured `CoinbaseTx` instead of raw bytes. +- Removed `BlockTemplate.getCoinbaseCommitment()` and `BlockTemplate.getWitnessCommitmentIndex()`. +- Cap’n Proto default values were updated to match the corresponding C++ defaults for mining-related option structs (e.g. `BlockCreateOptions`, `BlockWaitOptions`, `BlockCheckOptions`). diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-notes/release-notes-29.3.md b/libbitcoinkernel-sys/bitcoin/doc/release-notes/release-notes-29.3.md new file mode 100644 index 00000000..98713c6c --- /dev/null +++ b/libbitcoinkernel-sys/bitcoin/doc/release-notes/release-notes-29.3.md @@ -0,0 +1,105 @@ +Bitcoin Core version 29.3 is now available from: + + + +This release includes various bug fixes and performance +improvements, as well as updated translations. 
+ +Please report bugs using the issue tracker at GitHub: + + + +To receive security and update notifications, please subscribe to: + + + +How to Upgrade +============== + +If you are running an older version, shut it down. Wait until it has completely +shut down (which might take a few minutes in some cases), then run the +installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on macOS) +or `bitcoind`/`bitcoin-qt` (on Linux). + +Upgrading directly from a version of Bitcoin Core that has reached its EOL is +possible, but it might take some time if the data directory needs to be migrated. Old +wallet versions of Bitcoin Core are generally supported. + +Compatibility +============== + +Bitcoin Core is supported and tested on operating systems using the +Linux Kernel 3.17+, macOS 13+, and Windows 10+. Bitcoin +Core should also work on most other Unix-like systems but is not as +frequently tested on them. It is not recommended to use Bitcoin Core on +unsupported systems. + +Notable changes +=============== + +### P2P + +- #33050 net, validation: don't punish peers for consensus-invalid txs +- #33723 chainparams: remove dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us + +### Validation + +- #32473 Introduce per-txin sighash midstate cache for legacy/p2sh/segwitv0 scripts +- #33105 validation: detect witness stripping without re-running Script checks + +### Wallet + +- #33268 wallet: Identify transactions spending 0-value outputs, and add tests for anchor outputs in a wallet +- #34156 wallet: fix unnamed legacy wallet migration failure +- #34226 wallet: test: Relative wallet failed migration cleanup +- #34123 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet +- #34215 wallettool: fix unnamed createfromdump failure walletsdir deletion +- #34370 wallet: Additional cleanups for migration, and fixes for createfromdump with BDB + +### Mining + +- #33475 bugfix: miner: fix `addPackageTxs` unsigned integer overflow + +### Build + +- 
#34227 guix: Fix `osslsigncode` tests + +### Documentation + +- #33623 doc: document capnproto and libmultiprocess deps in 29.x + +### Test + +- #33612 test: change log rate limit version gate + +### Misc + +- #32513 ci: remove 3rd party js from windows dll gha job +- #33508 ci: fix buildx gha cache authentication on forks +- #33581 ci: Properly include $FILE_ENV in DEPENDS_HASH +- #34344 ci: update GitHub Actions versions + +Credits +======= + +Thanks to everyone who directly contributed to this release: + +- Anthony Towns +- Antoine Poinsot +- Ava Chow +- David Gumberg +- Eugene Siegel +- fanquake +- furszy +- Hennadii Stepanov +- ismaelsadeeq +- luke-jr +- m3dwards +- Padraic Slattery +- Pieter Wuille +- SatsAndSports +- sedited +- willcl-ark + +As well as to everyone that helped with translations on +[Transifex](https://explore.transifex.com/bitcoin/bitcoin/). diff --git a/libbitcoinkernel-sys/bitcoin/doc/release-process.md b/libbitcoinkernel-sys/bitcoin/doc/release-process.md index 272f36ea..90ffd852 100644 --- a/libbitcoinkernel-sys/bitcoin/doc/release-process.md +++ b/libbitcoinkernel-sys/bitcoin/doc/release-process.md @@ -30,6 +30,7 @@ Release Process * Update translations see [translation_process.md](/doc/translation_process.md#synchronising-translations). * Update hardcoded [seeds](/contrib/seeds/README.md), see [this pull request](https://github.com/bitcoin/bitcoin/pull/27488) for an example. +* Update embedded asmap data at `/src/node/data/ip_asn.dat`, see [asmap data documentation](./asmap-data.md). * Update the following variables in [`src/kernel/chainparams.cpp`](/src/kernel/chainparams.cpp) for mainnet, testnet, and signet: - `m_assumed_blockchain_size` and `m_assumed_chain_state_size` with the current size plus some overhead (see [this](#how-to-calculate-assumed-blockchain-and-chain-state-size) for information on how to calculate them). 
diff --git a/libbitcoinkernel-sys/bitcoin/doc/tracing.md b/libbitcoinkernel-sys/bitcoin/doc/tracing.md index 927fd34b..f2599708 100644 --- a/libbitcoinkernel-sys/bitcoin/doc/tracing.md +++ b/libbitcoinkernel-sys/bitcoin/doc/tracing.md @@ -185,8 +185,8 @@ Is called *after* the in-memory UTXO cache is flushed. Arguments passed: 1. Time it took to flush the cache microseconds as `int64` -2. Flush state mode as `uint32`. It's an enumerator class with values `0` - (`NONE`), `1` (`IF_NEEDED`), `2` (`PERIODIC`), `3` (`ALWAYS`) +2. Flush state mode as `uint32`. It's an enumerator class with values + `0` (`NONE`), `1` (`IF_NEEDED`), `2` (`PERIODIC`), `3` (`FORCE_FLUSH`), `4` (`FORCE_SYNC`) 3. Cache size (number of coins) before the flush as `uint64` 4. Cache memory usage in bytes as `uint64` 5. If pruning caused the flush as `bool` diff --git a/libbitcoinkernel-sys/bitcoin/src/.clang-format b/libbitcoinkernel-sys/bitcoin/src/.clang-format index 2b74e40b..72a20228 100644 --- a/libbitcoinkernel-sys/bitcoin/src/.clang-format +++ b/libbitcoinkernel-sys/bitcoin/src/.clang-format @@ -108,6 +108,9 @@ IncludeCategories: - Regex: '^$' + Priority: 2 + CaseSensitive: true - Regex: '^<[^>.]*>' Priority: 3 CaseSensitive: false diff --git a/libbitcoinkernel-sys/bitcoin/src/.clang-tidy b/libbitcoinkernel-sys/bitcoin/src/.clang-tidy index f54e07fa..9bdcc03f 100644 --- a/libbitcoinkernel-sys/bitcoin/src/.clang-tidy +++ b/libbitcoinkernel-sys/bitcoin/src/.clang-tidy @@ -10,6 +10,7 @@ bugprone-unhandled-self-assignment, bugprone-unused-return-value, misc-unused-using-decls, misc-no-recursion, +modernize-avoid-bind, modernize-deprecated-headers, modernize-use-default-member-init, modernize-use-emplace, @@ -36,7 +37,7 @@ CheckOptions: - key: modernize-deprecated-headers.CheckHeaderFile value: false - key: performance-move-const-arg.CheckTriviallyCopyableMove - value: false + value: false # Disabled, to allow the bugprone-use-after-move rule on trivially copyable types, to catch logic bugs - 
key: bugprone-unhandled-self-assignment.WarnOnlyIfThisHasSuspiciousField value: false - key: bugprone-unused-return-value.CheckedReturnTypes diff --git a/libbitcoinkernel-sys/bitcoin/src/CMakeLists.txt b/libbitcoinkernel-sys/bitcoin/src/CMakeLists.txt index cf1f26c9..ad18115b 100644 --- a/libbitcoinkernel-sys/bitcoin/src/CMakeLists.txt +++ b/libbitcoinkernel-sys/bitcoin/src/CMakeLists.txt @@ -193,6 +193,7 @@ add_library(bitcoin_node STATIC EXCLUDE_FROM_ALL index/blockfilterindex.cpp index/coinstatsindex.cpp index/txindex.cpp + index/txospenderindex.cpp init.cpp kernel/chain.cpp kernel/checks.cpp @@ -286,6 +287,13 @@ target_link_libraries(bitcoin_node $ $ ) +if(WITH_EMBEDDED_ASMAP) + target_compile_definitions(bitcoin_node PRIVATE ENABLE_EMBEDDED_ASMAP=1) + include(TargetDataSources) + target_raw_data_sources(bitcoin_node NAMESPACE node::data + node/data/ip_asn.dat + ) +endif() # Bitcoin wrapper executable that can call other executables. if(BUILD_BITCOIN_BIN) diff --git a/libbitcoinkernel-sys/bitcoin/src/addrman.cpp b/libbitcoinkernel-sys/bitcoin/src/addrman.cpp index 206b5411..2e514909 100644 --- a/libbitcoinkernel-sys/bitcoin/src/addrman.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/addrman.cpp @@ -156,7 +156,7 @@ void AddrManImpl::Serialize(Stream& s_) const * * for each new bucket: * * number of elements * * for each element: index in the serialized "all new addresses" - * * asmap checksum + * * asmap version * * 2**30 is xorred with the number of buckets to make addrman deserializer v0 detect it * as incompatible. This is necessary because it did not check the version number on @@ -222,9 +222,9 @@ void AddrManImpl::Serialize(Stream& s_) const } } } - // Store asmap checksum after bucket entries so that it + // Store asmap version after bucket entries so that it // can be ignored by older clients for backward compatibility. 
- s << m_netgroupman.GetAsmapChecksum(); + s << m_netgroupman.GetAsmapVersion(); } template @@ -330,16 +330,16 @@ void AddrManImpl::Unserialize(Stream& s_) } } - // If the bucket count and asmap checksum haven't changed, then attempt + // If the bucket count and asmap version haven't changed, then attempt // to restore the entries to the buckets/positions they were in before // serialization. - uint256 supplied_asmap_checksum{m_netgroupman.GetAsmapChecksum()}; - uint256 serialized_asmap_checksum; + uint256 supplied_asmap_version{m_netgroupman.GetAsmapVersion()}; + uint256 serialized_asmap_version; if (format >= Format::V2_ASMAP) { - s >> serialized_asmap_checksum; + s >> serialized_asmap_version; } const bool restore_bucketing{nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && - serialized_asmap_checksum == supplied_asmap_checksum}; + serialized_asmap_version == supplied_asmap_version}; if (!restore_bucketing) { LogDebug(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n"); diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/addrman.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/addrman.cpp index 907c7d2e..d28030e4 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/addrman.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/addrman.cpp @@ -24,7 +24,7 @@ static constexpr size_t NUM_SOURCES = 64; static constexpr size_t NUM_ADDRESSES_PER_SOURCE = 256; -static NetGroupManager EMPTY_NETGROUPMAN{std::vector()}; +static auto EMPTY_NETGROUPMAN{NetGroupManager::NoAsmap()}; static constexpr uint32_t ADDRMAN_CONSISTENCY_CHECK_RATIO{0}; static std::vector g_sources; diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/block_assemble.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/block_assemble.cpp index 297465be..702f2c09 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/block_assemble.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/block_assemble.cpp @@ -30,6 +30,7 @@ static void AssembleBlock(benchmark::Bench& bench) 
witness.stack.push_back(WITNESS_STACK_ELEM_OP_TRUE); BlockAssembler::Options options; options.coinbase_output_script = P2WSH_OP_TRUE; + options.include_dummy_extranonce = true; // Collect some loose transactions that spend the coinbases of our mined blocks constexpr size_t NUM_BLOCKS{200}; diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/blockencodings.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/blockencodings.cpp index 3f6be56b..ce968dbc 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/blockencodings.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/blockencodings.cpp @@ -22,7 +22,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { LockPoints lp; - TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); + TryAddToMempool(pool, CTxMemPoolEntry(tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); } namespace { diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/cluster_linearize.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/cluster_linearize.cpp index 88f8bf28..d345a7ca 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/cluster_linearize.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/cluster_linearize.cpp @@ -55,7 +55,7 @@ void BenchLinearizeOptimallyTotal(benchmark::Bench& bench, const std::string& na // Benchmark the total time to optimal. uint64_t rng_seed = 0; bench.name(bench_name).run([&] { - auto [_lin, optimal, _cost] = Linearize(depgraph, /*max_iterations=*/10000000, rng_seed++); + auto [_lin, optimal, _cost] = Linearize(depgraph, /*max_iterations=*/10000000, rng_seed++, IndexTxOrder{}); assert(optimal); }); } @@ -72,7 +72,7 @@ void BenchLinearizeOptimallyPerCost(benchmark::Bench& bench, const std::string& // Determine the cost of 100 rng_seeds. 
uint64_t total_cost = 0; for (uint64_t iter = 0; iter < 100; ++iter) { - auto [_lin, optimal, cost] = Linearize(depgraph, /*max_iterations=*/10000000, /*rng_seed=*/iter); + auto [_lin, optimal, cost] = Linearize(depgraph, /*max_iterations=*/10000000, /*rng_seed=*/iter, IndexTxOrder{}); total_cost += cost; } @@ -80,7 +80,7 @@ void BenchLinearizeOptimallyPerCost(benchmark::Bench& bench, const std::string& bench.name(bench_name).unit("cost").batch(total_cost).run([&] { uint64_t recompute_cost = 0; for (uint64_t iter = 0; iter < 100; ++iter) { - auto [_lin, optimal, cost] = Linearize(depgraph, /*max_iterations=*/10000000, /*rng_seed=*/iter); + auto [_lin, optimal, cost] = Linearize(depgraph, /*max_iterations=*/10000000, /*rng_seed=*/iter, IndexTxOrder{}); assert(optimal); recompute_cost += cost; } diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/mempool_ephemeral_spends.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/mempool_ephemeral_spends.cpp index 2f89f0da..f0d8eb0b 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/mempool_ephemeral_spends.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/mempool_ephemeral_spends.cpp @@ -29,7 +29,7 @@ static void AddTx(const CTransactionRef& tx, CTxMemPool& pool) EXCLUSIVE_LOCKS_R unsigned int sigOpCost{4}; uint64_t fee{0}; LockPoints lp; - TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), + TryAddToMempool(pool, CTxMemPoolEntry( tx, fee, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); } diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/mempool_eviction.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/mempool_eviction.cpp index ad3ab08a..37bb7215 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/mempool_eviction.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/mempool_eviction.cpp @@ -27,7 +27,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& nFee, CTxMemPool& po bool spendsCoinbase = false; unsigned int sigOpCost = 4; LockPoints lp; - TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), + 
TryAddToMempool(pool, CTxMemPoolEntry( tx, nFee, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); } diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/mempool_stress.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/mempool_stress.cpp index 768913ac..1f582081 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/mempool_stress.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/mempool_stress.cpp @@ -29,7 +29,7 @@ static void AddTx(const CTransactionRef& tx, CTxMemPool& pool, FastRandomContext bool spendsCoinbase = false; unsigned int sigOpCost = 4; LockPoints lp; - TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), tx, det_rand.randrange(10000)+1000, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); + TryAddToMempool(pool, CTxMemPoolEntry(tx, det_rand.randrange(10000)+1000, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); } struct Available { @@ -106,7 +106,7 @@ static void MemPoolAddTransactions(benchmark::Bench& bench) std::vector transactions; // Create 1000 clusters of 100 transactions each for (int i=0; i<100; i++) { - auto new_txs = CreateCoinCluster(det_rand, childTxs, /*min_ancestors*/ 1); + auto new_txs = CreateCoinCluster(det_rand, childTxs, /*min_ancestors=*/ 1); transactions.insert(transactions.end(), new_txs.begin(), new_txs.end()); } @@ -156,7 +156,7 @@ static void ComplexMemPool(benchmark::Bench& bench) // in the same state at the end of the function, so we benchmark both // mining a block and reorging the block's contents back into the mempool. 
bench.run([&]() NO_THREAD_SAFETY_ANALYSIS { - pool.removeForBlock(tx_remove_for_block, /*nBlockHeight*/100); + pool.removeForBlock(tx_remove_for_block, /*nBlockHeight=*/100); for (auto& tx: tx_remove_for_block) { AddTx(tx, pool, det_rand); } diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/readwriteblock.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/readwriteblock.cpp index e1372a26..b8e226c6 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/readwriteblock.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/readwriteblock.cpp @@ -21,9 +21,8 @@ static CBlock CreateTestBlock() { - DataStream stream{benchmark::data::block413567}; CBlock block; - stream >> TX_WITH_WITNESS(block); + SpanReader{benchmark::data::block413567} >> TX_WITH_WITNESS(block); return block; } diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/rpc_mempool.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/rpc_mempool.cpp index b27a7e72..f319c961 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/rpc_mempool.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/rpc_mempool.cpp @@ -22,7 +22,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { LockPoints lp; - TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); + TryAddToMempool(pool, CTxMemPoolEntry(tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); } static void RpcMempool(benchmark::Bench& bench) diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/txgraph.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/txgraph.cpp index 078f77b4..00074db6 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/txgraph.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/txgraph.cpp @@ -12,6 +12,11 @@ namespace { +std::strong_ordering PointerComparator(const TxGraph::Ref& a, const TxGraph::Ref& b) noexcept +{ + return (&a) <=> 
(&b); +} + void BenchTxGraphTrim(benchmark::Bench& bench) { // The from-block transactions consist of 1000 fully linear clusters, each with 64 @@ -60,14 +65,14 @@ void BenchTxGraphTrim(benchmark::Bench& bench) std::vector top_components; InsecureRandomContext rng(11); - auto graph = MakeTxGraph(MAX_CLUSTER_COUNT, MAX_CLUSTER_SIZE, NUM_ACCEPTABLE_ITERS); + auto graph = MakeTxGraph(MAX_CLUSTER_COUNT, MAX_CLUSTER_SIZE, NUM_ACCEPTABLE_ITERS, PointerComparator); // Construct the top chains. for (int chain = 0; chain < NUM_TOP_CHAINS; ++chain) { for (int chaintx = 0; chaintx < NUM_TX_PER_TOP_CHAIN; ++chaintx) { int64_t fee = rng.randbits<27>() + 100; FeePerWeight feerate{fee, 1}; - top_refs.push_back(graph->AddTransaction(feerate)); + graph->AddTransaction(top_refs.emplace_back(), feerate); // Add internal dependencies linking the chain transactions together. if (chaintx > 0) { graph->AddDependency(*(top_refs.rbegin()), *(top_refs.rbegin() + 1)); @@ -85,7 +90,8 @@ void BenchTxGraphTrim(benchmark::Bench& bench) // Construct the transaction. int64_t fee = rng.randbits<27>() + 100; FeePerWeight feerate{fee, 1}; - auto bottom_tx = graph->AddTransaction(feerate); + TxGraph::Ref bottom_tx; + graph->AddTransaction(bottom_tx, feerate); // Determine the number of dependencies this transaction will have. 
int deps = std::min(NUM_DEPS_PER_BOTTOM_TX, top_components.size()); for (int dep = 0; dep < deps; ++dep) { diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/wallet_ismine.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/wallet_ismine.cpp index d26f893b..6bfbb42d 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/wallet_ismine.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/wallet_ismine.cpp @@ -36,7 +36,7 @@ static void WalletIsMine(benchmark::Bench& bench, int num_combo = 0) // Loading the wallet will also create it uint64_t create_flags = WALLET_FLAG_DESCRIPTORS; auto database = CreateMockableWalletDatabase(); - auto wallet = TestLoadWallet(std::move(database), context, create_flags); + auto wallet = TestCreateWallet(std::move(database), context, create_flags); // For a descriptor wallet, fill with num_combo combo descriptors with random keys // This benchmarks a non-HD wallet migrated to descriptors diff --git a/libbitcoinkernel-sys/bitcoin/src/bench/wallet_loading.cpp b/libbitcoinkernel-sys/bitcoin/src/bench/wallet_loading.cpp index f7c78806..20997ff8 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bench/wallet_loading.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bench/wallet_loading.cpp @@ -43,7 +43,7 @@ static void WalletLoadingDescriptors(benchmark::Bench& bench) // Loading the wallet will also create it uint64_t create_flags = WALLET_FLAG_DESCRIPTORS; auto database = CreateMockableWalletDatabase(); - auto wallet = TestLoadWallet(std::move(database), context, create_flags); + auto wallet = TestCreateWallet(std::move(database), context, create_flags); // Generate a bunch of transactions and addresses to put into the wallet for (int i = 0; i < 1000; ++i) { @@ -56,7 +56,7 @@ static void WalletLoadingDescriptors(benchmark::Bench& bench) TestUnloadWallet(std::move(wallet)); bench.epochs(5).run([&] { - wallet = TestLoadWallet(std::move(database), context, create_flags); + wallet = TestLoadWallet(std::move(database), context); // Cleanup database = 
DuplicateMockDatabase(wallet->GetDatabase()); diff --git a/libbitcoinkernel-sys/bitcoin/src/bitcoin-cli.cpp b/libbitcoinkernel-sys/bitcoin/src/bitcoin-cli.cpp index 724620aa..909ed09f 100644 --- a/libbitcoinkernel-sys/bitcoin/src/bitcoin-cli.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/bitcoin-cli.cpp @@ -367,7 +367,6 @@ struct GetinfoRequestHandler : BaseRequestHandler { if (!batch[ID_WALLETINFO]["result"]["unlocked_until"].isNull()) { result.pushKV("unlocked_until", batch[ID_WALLETINFO]["result"]["unlocked_until"]); } - result.pushKV("paytxfee", batch[ID_WALLETINFO]["result"]["paytxfee"]); } if (!batch[ID_BALANCES]["result"].isNull()) { result.pushKV("balance", batch[ID_BALANCES]["result"]["mine"]["trusted"]); @@ -1152,7 +1151,6 @@ static void ParseGetInfoResult(UniValue& result) if (!result["unlocked_until"].isNull()) { result_string += strprintf("Unlocked until: %s\n", result["unlocked_until"].getValStr()); } - result_string += strprintf("Transaction fee rate (-paytxfee) (%s/kvB): %s\n\n", CURRENCY_UNIT, result["paytxfee"].getValStr()); } if (!result["balance"].isNull()) { result_string += strprintf("%sBalance:%s %s\n\n", CYAN, RESET, result["balance"].getValStr()); diff --git a/libbitcoinkernel-sys/bitcoin/src/chain.h b/libbitcoinkernel-sys/bitcoin/src/chain.h index eb4ff719..c2782920 100644 --- a/libbitcoinkernel-sys/bitcoin/src/chain.h +++ b/libbitcoinkernel-sys/bitcoin/src/chain.h @@ -77,8 +77,7 @@ enum BlockStatus : uint32_t { BLOCK_HAVE_MASK = BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO, BLOCK_FAILED_VALID = 32, //!< stage after last reached validness failed - BLOCK_FAILED_CHILD = 64, //!< descends from failed block - BLOCK_FAILED_MASK = BLOCK_FAILED_VALID | BLOCK_FAILED_CHILD, + BLOCK_FAILED_CHILD = 64, //!< Unused flag that was previously set when descending from failed block BLOCK_OPT_WITNESS = 128, //!< block data in blk*.dat was received with a witness-enforcing client @@ -253,7 +252,7 @@ class CBlockIndex { AssertLockHeld(::cs_main); assert(!(nUpTo & 
~BLOCK_VALID_MASK)); // Only validity flags allowed. - if (nStatus & BLOCK_FAILED_MASK) + if (nStatus & BLOCK_FAILED_VALID) return false; return ((nStatus & BLOCK_VALID_MASK) >= nUpTo); } @@ -264,7 +263,7 @@ class CBlockIndex { AssertLockHeld(::cs_main); assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed. - if (nStatus & BLOCK_FAILED_MASK) return false; + if (nStatus & BLOCK_FAILED_VALID) return false; if ((nStatus & BLOCK_VALID_MASK) < nUpTo) { nStatus = (nStatus & ~BLOCK_VALID_MASK) | nUpTo; @@ -428,6 +427,15 @@ class CChain return int(vChain.size()) - 1; } + /** Check whether this chain's tip exists, has enough work, and is recent. */ + bool IsTipRecent(const arith_uint256& min_chain_work, std::chrono::seconds max_tip_age) const EXCLUSIVE_LOCKS_REQUIRED(::cs_main) + { + const auto tip{Tip()}; + return tip && + tip->nChainWork >= min_chain_work && + tip->Time() >= Now() - max_tip_age; + } + /** Set/initialize a chain with a given tip. */ void SetTip(CBlockIndex& block); diff --git a/libbitcoinkernel-sys/bitcoin/src/checkqueue.h b/libbitcoinkernel-sys/bitcoin/src/checkqueue.h index 037023ee..5258d711 100644 --- a/libbitcoinkernel-sys/bitcoin/src/checkqueue.h +++ b/libbitcoinkernel-sys/bitcoin/src/checkqueue.h @@ -5,9 +5,9 @@ #ifndef BITCOIN_CHECKQUEUE_H #define BITCOIN_CHECKQUEUE_H -#include #include #include +#include #include #include diff --git a/libbitcoinkernel-sys/bitcoin/src/cluster_linearize.h b/libbitcoinkernel-sys/bitcoin/src/cluster_linearize.h index 8be16625..1fe737a7 100644 --- a/libbitcoinkernel-sys/bitcoin/src/cluster_linearize.h +++ b/libbitcoinkernel-sys/bitcoin/src/cluster_linearize.h @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -461,6 +462,16 @@ std::vector ChunkLinearization(const DepGraph& depgraph, std:: return ret; } +/** Concept for function objects that return std::strong_ordering when invoked with two Args. 
*/ +template +concept StrongComparator = + std::regular_invocable && + std::is_same_v, std::strong_ordering>; + +/** Simple default transaction ordering function for SpanningForestState::GetLinearization() and + * Linearize(), which just sorts by DepGraphIndex. */ +using IndexTxOrder = std::compare_three_way; + /** Class to represent the internal state of the spanning-forest linearization (SFL) algorithm. * * At all times, each dependency is marked as either "active" or "inactive". The subset of active @@ -620,6 +631,11 @@ std::vector ChunkLinearization(const DepGraph& depgraph, std:: * - Inside the selected chunk (see above), among the dependencies whose top feerate is strictly * higher than its bottom feerate in the selected chunk, if any, a uniformly random dependency * is deactivated. + * - After every split, it is possible that the top and the bottom chunk merge with each other + * again in the merge sequence (through a top->bottom dependency, not through the deactivated + * one, which was bottom->top). Call this a self-merge. If a self-merge does not occur after + * a split, the resulting linearization is strictly improved (the area under the convexified + * feerate diagram increases by at least gain/2), while self-merges do not change it. * * - How to decide the exact output linearization: * - When there are multiple equal-feerate chunks with no dependencies between them, output a @@ -635,58 +651,65 @@ class SpanningForestState InsecureRandomContext m_rng; /** Data type to represent indexing into m_tx_data. */ - using TxIdx = uint32_t; - /** Data type to represent indexing into m_dep_data. */ - using DepIdx = uint32_t; - - /** Structure with information about a single transaction. For transactions that are the - * representative for the chunk they are in, this also stores chunk information. */ + using TxIdx = DepGraphIndex; + /** Data type to represent indexing into m_set_info. Use the smallest type possible to improve + * cache locality. 
*/ + using SetIdx = std::conditional_t<(SetType::Size() <= 0xff), + uint8_t, + std::conditional_t<(SetType::Size() <= 0xffff), + uint16_t, + uint32_t>>; + /** An invalid SetIdx. */ + static constexpr SetIdx INVALID_SET_IDX = SetIdx(-1); + + /** Structure with information about a single transaction. */ struct TxData { - /** The dependencies to children of this transaction. Immutable after construction. */ - std::vector child_deps; + /** The top set for every active child dependency this transaction has, indexed by child + * TxIdx. Only defined for indexes in active_children. */ + std::array dep_top_idx; /** The set of parent transactions of this transaction. Immutable after construction. */ SetType parents; /** The set of child transactions of this transaction. Immutable after construction. */ SetType children; - /** Which transaction holds the chunk_setinfo for the chunk this transaction is in - * (the representative for the chunk). */ - TxIdx chunk_rep; - /** (Only if this transaction is the representative for the chunk it is in) the total - * chunk set and feerate. */ - SetInfo chunk_setinfo; - }; - - /** Structure with information about a single dependency. */ - struct DepData { - /** Whether this dependency is active. */ - bool active; - /** What the parent and child transactions are. Immutable after construction. */ - TxIdx parent, child; - /** (Only if this dependency is active) the would-be top chunk and its feerate that would - * be formed if this dependency were to be deactivated. */ - SetInfo top_setinfo; + /** The set of child transactions reachable through an active dependency. */ + SetType active_children; + /** Which chunk this transaction belongs to. */ + SetIdx chunk_idx; }; /** The set of all TxIdx's of transactions in the cluster indexing into m_tx_data. */ SetType m_transaction_idxs; + /** The set of all chunk SetIdx's. This excludes the SetIdxs that refer to active + * dependencies' tops. 
*/ + SetType m_chunk_idxs; + /** The set of all SetIdx's that appear in m_suboptimal_chunks. Note that they do not need to + * be chunks: some of these sets may have been converted to a dependency's top set since being + * added to m_suboptimal_chunks. */ + SetType m_suboptimal_idxs; /** Information about each transaction (and chunks). Keeps the "holes" from DepGraph during * construction. Indexed by TxIdx. */ std::vector m_tx_data; - /** Information about each dependency. Indexed by DepIdx. */ - std::vector m_dep_data; - /** A FIFO of chunk representatives of chunks that may be improved still. */ - VecDeque m_suboptimal_chunks; - /** A FIFO of chunk representatives with a pivot transaction in them, and a flag to indicate - * their status: + /** Information about each set (chunk, or active dependency top set). Indexed by SetIdx. */ + std::vector> m_set_info; + /** For each chunk, indexed by SetIdx, the set of out-of-chunk reachable transactions, in the + * upwards (.first) and downwards (.second) direction. */ + std::vector> m_reachable; + /** A FIFO of chunk SetIdxs for chunks that may be improved still. */ + VecDeque m_suboptimal_chunks; + /** A FIFO of chunk indexes with a pivot transaction in them, and a flag to indicate their + * status: * - bit 1: currently attempting to move the pivot down, rather than up. * - bit 2: this is the second stage, so we have already tried moving the pivot in the other * direction. */ - VecDeque> m_nonminimal_chunks; + VecDeque> m_nonminimal_chunks; /** The number of updated transactions in activations/deactivations. */ uint64_t m_cost{0}; + /** The DepGraph we are trying to linearize. */ + const DepGraph& m_depgraph; + /** Pick a random transaction within a set (which must be non-empty). */ TxIdx PickRandomTx(const SetType& tx_idxs) noexcept { @@ -700,58 +723,39 @@ class SpanningForestState return TxIdx(-1); } - /** Update a chunk: - * - All transactions have their chunk representative set to `chunk_rep`. 
- * - All dependencies which have `query` in their top_setinfo get `dep_change` added to it - * (if `!Subtract`) or removed from it (if `Subtract`). - */ - template - void UpdateChunk(const SetType& chunk, TxIdx query, TxIdx chunk_rep, const SetInfo& dep_change) noexcept + /** Find the set of out-of-chunk transactions reachable from tx_idxs, both in upwards and + * downwards direction. Only used by SanityCheck to verify the precomputed reachable sets in + * m_reachable that are maintained by Activate/Deactivate. */ + std::pair GetReachable(const SetType& tx_idxs) const noexcept { - // Iterate over all the chunk's transactions. - for (auto tx_idx : chunk) { - auto& tx_data = m_tx_data[tx_idx]; - // Update the chunk representative. - tx_data.chunk_rep = chunk_rep; - // Iterate over all active dependencies with tx_idx as parent. Combined with the outer - // loop this iterates over all internal active dependencies of the chunk. - auto child_deps = std::span{tx_data.child_deps}; - for (auto dep_idx : child_deps) { - auto& dep_entry = m_dep_data[dep_idx]; - Assume(dep_entry.parent == tx_idx); - // Skip inactive dependencies. - if (!dep_entry.active) continue; - // If this dependency's top_setinfo contains query, update it to add/remove - // dep_change. - if (dep_entry.top_setinfo.transactions[query]) { - if constexpr (Subtract) { - dep_entry.top_setinfo -= dep_change; - } else { - dep_entry.top_setinfo |= dep_change; - } - } - } + SetType parents, children; + for (auto tx_idx : tx_idxs) { + const auto& tx_data = m_tx_data[tx_idx]; + parents |= tx_data.parents; + children |= tx_data.children; } + return {parents - tx_idxs, children - tx_idxs}; } - /** Make a specified inactive dependency active. Returns the merged chunk representative. */ - TxIdx Activate(DepIdx dep_idx) noexcept + /** Make the inactive dependency from child to parent, which must not be in the same chunk + * already, active. Returns the merged chunk idx. 
*/ + SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept { - auto& dep_data = m_dep_data[dep_idx]; - Assume(!dep_data.active); - auto& child_tx_data = m_tx_data[dep_data.child]; - auto& parent_tx_data = m_tx_data[dep_data.parent]; - - // Gather information about the parent and child chunks. - Assume(parent_tx_data.chunk_rep != child_tx_data.chunk_rep); - auto& par_chunk_data = m_tx_data[parent_tx_data.chunk_rep]; - auto& chl_chunk_data = m_tx_data[child_tx_data.chunk_rep]; - TxIdx top_rep = parent_tx_data.chunk_rep; - auto top_part = par_chunk_data.chunk_setinfo; - auto bottom_part = chl_chunk_data.chunk_setinfo; - // Update the parent chunk to also contain the child. - par_chunk_data.chunk_setinfo |= bottom_part; - m_cost += par_chunk_data.chunk_setinfo.transactions.Count(); + // Gather and check information about the parent and child transactions. + auto& parent_data = m_tx_data[parent_idx]; + auto& child_data = m_tx_data[child_idx]; + Assume(parent_data.children[child_idx]); + Assume(!parent_data.active_children[child_idx]); + // Get the set index of the chunks the parent and child are currently in. The parent chunk + // will become the top set of the newly activated dependency, while the child chunk will be + // grown to become the merged chunk. + auto parent_chunk_idx = parent_data.chunk_idx; + auto child_chunk_idx = child_data.chunk_idx; + Assume(parent_chunk_idx != child_chunk_idx); + Assume(m_chunk_idxs[parent_chunk_idx]); + Assume(m_chunk_idxs[child_chunk_idx]); + auto& top_info = m_set_info[parent_chunk_idx]; + auto& bottom_info = m_set_info[child_chunk_idx]; // Consider the following example: // @@ -768,233 +772,340 @@ class SpanningForestState // dependency being activated (E->C here) in its top set, will have the opposite part added // to it. This is true for B->A and F->E, but not for C->A and F->D. 
// - // Let UpdateChunk traverse the old parent chunk top_part (ABC in example), and add - // bottom_part (DEF) to every dependency's top_set which has the parent (C) in it. The - // representative of each of these transactions was already top_rep, so that is not being - // changed here. - UpdateChunk(/*chunk=*/top_part.transactions, /*query=*/dep_data.parent, - /*chunk_rep=*/top_rep, /*dep_change=*/bottom_part); - // Let UpdateChunk traverse the old child chunk bottom_part (DEF in example), and add - // top_part (ABC) to every dependency's top_set which has the child (E) in it. At the same - // time, change the representative of each of these transactions to be top_rep, which - // becomes the representative for the merged chunk. - UpdateChunk(/*chunk=*/bottom_part.transactions, /*query=*/dep_data.child, - /*chunk_rep=*/top_rep, /*dep_change=*/top_part); - // Make active. - dep_data.active = true; - dep_data.top_setinfo = top_part; - return top_rep; + // Traverse the old parent chunk top_info (ABC in example), and add bottom_info (DEF) to + // every dependency's top set which has the parent (C) in it. At the same time, change the + // chunk_idx for each to be child_chunk_idx, which becomes the set for the merged chunk. + for (auto tx_idx : top_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + tx_data.chunk_idx = child_chunk_idx; + for (auto dep_child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]]; + if (dep_top_info.transactions[parent_idx]) dep_top_info |= bottom_info; + } + } + // Traverse the old child chunk bottom_info (DEF in example), and add top_info (ABC) to + // every dependency's top set which has the child (E) in it. 
+ for (auto tx_idx : bottom_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + for (auto dep_child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]]; + if (dep_top_info.transactions[child_idx]) dep_top_info |= top_info; + } + } + // Merge top_info into bottom_info, which becomes the merged chunk. + bottom_info |= top_info; + m_cost += bottom_info.transactions.Count(); + // Compute merged sets of reachable transactions from the new chunk, based on the input + // chunks' reachable sets. + m_reachable[child_chunk_idx].first |= m_reachable[parent_chunk_idx].first; + m_reachable[child_chunk_idx].second |= m_reachable[parent_chunk_idx].second; + m_reachable[child_chunk_idx].first -= bottom_info.transactions; + m_reachable[child_chunk_idx].second -= bottom_info.transactions; + // Make parent chunk the set for the new active dependency. + parent_data.dep_top_idx[child_idx] = parent_chunk_idx; + parent_data.active_children.Set(child_idx); + m_chunk_idxs.Reset(parent_chunk_idx); + // Return the newly merged chunk. + return child_chunk_idx; } - /** Make a specified active dependency inactive. */ - void Deactivate(DepIdx dep_idx) noexcept + /** Make a specified active dependency inactive. Returns the created parent and child chunk + * indexes. */ + std::pair Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept { - auto& dep_data = m_dep_data[dep_idx]; - Assume(dep_data.active); - auto& parent_tx_data = m_tx_data[dep_data.parent]; - // Make inactive. - dep_data.active = false; - // Update representatives. 
- auto& chunk_data = m_tx_data[parent_tx_data.chunk_rep]; - m_cost += chunk_data.chunk_setinfo.transactions.Count(); - auto top_part = dep_data.top_setinfo; - auto bottom_part = chunk_data.chunk_setinfo - top_part; - TxIdx bottom_rep = dep_data.child; - auto& bottom_chunk_data = m_tx_data[bottom_rep]; - bottom_chunk_data.chunk_setinfo = bottom_part; - TxIdx top_rep = dep_data.parent; - auto& top_chunk_data = m_tx_data[top_rep]; - top_chunk_data.chunk_setinfo = top_part; - - // See the comment above in Activate(). We perform the opposite operations here, - // removing instead of adding. - // - // Let UpdateChunk traverse the old parent chunk top_part, and remove bottom_part from - // every dependency's top_set which has the parent in it. At the same time, change the - // representative of each of these transactions to be top_rep. - UpdateChunk(/*chunk=*/top_part.transactions, /*query=*/dep_data.parent, - /*chunk_rep=*/top_rep, /*dep_change=*/bottom_part); - // Let UpdateChunk traverse the old child chunk bottom_part, and remove top_part from every - // dependency's top_set which has the child in it. At the same time, change the - // representative of each of these transactions to be bottom_rep. - UpdateChunk(/*chunk=*/bottom_part.transactions, /*query=*/dep_data.child, - /*chunk_rep=*/bottom_rep, /*dep_change=*/top_part); + // Gather and check information about the parent transactions. + auto& parent_data = m_tx_data[parent_idx]; + Assume(parent_data.children[child_idx]); + Assume(parent_data.active_children[child_idx]); + // Get the top set of the active dependency (which will become the parent chunk) and the + // chunk set the transactions are currently in (which will become the bottom chunk). 
+ auto parent_chunk_idx = parent_data.dep_top_idx[child_idx]; + auto child_chunk_idx = parent_data.chunk_idx; + Assume(parent_chunk_idx != child_chunk_idx); + Assume(m_chunk_idxs[child_chunk_idx]); + Assume(!m_chunk_idxs[parent_chunk_idx]); // top set, not a chunk + auto& top_info = m_set_info[parent_chunk_idx]; + auto& bottom_info = m_set_info[child_chunk_idx]; + + // Remove the active dependency. + parent_data.active_children.Reset(child_idx); + m_chunk_idxs.Set(parent_chunk_idx); + m_cost += bottom_info.transactions.Count(); + // Subtract the top_info from the bottom_info, as it will become the child chunk. + bottom_info -= top_info; + // See the comment above in Activate(). We perform the opposite operations here, removing + // instead of adding. Simultaneously, aggregate the top/bottom's union of parents/children. + SetType top_parents, top_children; + for (auto tx_idx : top_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + tx_data.chunk_idx = parent_chunk_idx; + top_parents |= tx_data.parents; + top_children |= tx_data.children; + for (auto dep_child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]]; + if (dep_top_info.transactions[parent_idx]) dep_top_info -= bottom_info; + } + } + SetType bottom_parents, bottom_children; + for (auto tx_idx : bottom_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + bottom_parents |= tx_data.parents; + bottom_children |= tx_data.children; + for (auto dep_child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]]; + if (dep_top_info.transactions[child_idx]) dep_top_info -= top_info; + } + } + // Compute the new sets of reachable transactions for each new chunk, based on the + // top/bottom parents and children computed above. 
+ m_reachable[parent_chunk_idx].first = top_parents - top_info.transactions; + m_reachable[parent_chunk_idx].second = top_children - top_info.transactions; + m_reachable[child_chunk_idx].first = bottom_parents - bottom_info.transactions; + m_reachable[child_chunk_idx].second = bottom_children - bottom_info.transactions; + // Return the two new set idxs. + return {parent_chunk_idx, child_chunk_idx}; } - /** Activate a dependency from the chunk represented by bottom_idx to the chunk represented by - * top_idx. Return the representative of the merged chunk, or TxIdx(-1) if no merge is - * possible. */ - TxIdx MergeChunks(TxIdx top_rep, TxIdx bottom_rep) noexcept + /** Activate a dependency from the bottom set to the top set, which must exist. Return the + * index of the merged chunk. */ + SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept { - auto& top_chunk = m_tx_data[top_rep]; - Assume(top_chunk.chunk_rep == top_rep); - auto& bottom_chunk = m_tx_data[bottom_rep]; - Assume(bottom_chunk.chunk_rep == bottom_rep); + Assume(m_chunk_idxs[top_idx]); + Assume(m_chunk_idxs[bottom_idx]); + auto& top_chunk_info = m_set_info[top_idx]; + auto& bottom_chunk_info = m_set_info[bottom_idx]; // Count the number of dependencies between bottom_chunk and top_chunk. - TxIdx num_deps{0}; - for (auto tx : top_chunk.chunk_setinfo.transactions) { - auto& tx_data = m_tx_data[tx]; - num_deps += (tx_data.children & bottom_chunk.chunk_setinfo.transactions).Count(); + unsigned num_deps{0}; + for (auto tx_idx : top_chunk_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + num_deps += (tx_data.children & bottom_chunk_info.transactions).Count(); } - if (num_deps == 0) return TxIdx(-1); + Assume(num_deps > 0); // Uniformly randomly pick one of them and activate it. 
- TxIdx pick = m_rng.randrange(num_deps); - for (auto tx : top_chunk.chunk_setinfo.transactions) { - auto& tx_data = m_tx_data[tx]; - auto intersect = tx_data.children & bottom_chunk.chunk_setinfo.transactions; + unsigned pick = m_rng.randrange(num_deps); + for (auto tx_idx : top_chunk_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + auto intersect = tx_data.children & bottom_chunk_info.transactions; auto count = intersect.Count(); if (pick < count) { - for (auto dep : tx_data.child_deps) { - auto& dep_data = m_dep_data[dep]; - if (bottom_chunk.chunk_setinfo.transactions[dep_data.child]) { - if (pick == 0) return Activate(dep); - --pick; - } + for (auto child_idx : intersect) { + if (pick == 0) return Activate(tx_idx, child_idx); + --pick; } + Assume(false); break; } pick -= count; } Assume(false); - return TxIdx(-1); + return INVALID_SET_IDX; + } + + /** Activate a dependency from chunk_idx to merge_chunk_idx (if !DownWard), or a dependency + * from merge_chunk_idx to chunk_idx (if DownWard). Return the index of the merged chunk. */ + template + SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept + { + if constexpr (DownWard) { + return MergeChunks(chunk_idx, merge_chunk_idx); + } else { + return MergeChunks(merge_chunk_idx, chunk_idx); + } } - /** Perform an upward or downward merge step, on the specified chunk representative. Returns - * the representative of the merged chunk, or TxIdx(-1) if no merge took place. */ + /** Determine which chunk to merge chunk_idx with, or INVALID_SET_IDX if none. */ template - TxIdx MergeStep(TxIdx chunk_rep) noexcept + SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept { - /** Information about the chunk that tx_idx is currently in. */ - auto& chunk_data = m_tx_data[chunk_rep]; - SetType chunk_txn = chunk_data.chunk_setinfo.transactions; - // Iterate over all transactions in the chunk, figuring out which other chunk each - // depends on, but only testing each other chunk once. 
For those depended-on chunks, + /** Information about the chunk. */ + Assume(m_chunk_idxs[chunk_idx]); + auto& chunk_info = m_set_info[chunk_idx]; + // Iterate over all chunks reachable from this one. For those depended-on chunks, // remember the highest-feerate (if DownWard) or lowest-feerate (if !DownWard) one. // If multiple equal-feerate candidate chunks to merge with exist, pick a random one // among them. - /** Which transactions have been reached from this chunk already. Initialize with the - * chunk itself, so internal dependencies within the chunk are ignored. */ - SetType explored = chunk_txn; /** The minimum feerate (if downward) or maximum feerate (if upward) to consider when * looking for candidate chunks to merge with. Initially, this is the original chunk's * feerate, but is updated to be the current best candidate whenever one is found. */ - FeeFrac best_other_chunk_feerate = chunk_data.chunk_setinfo.feerate; - /** The representative for the best candidate chunk to merge with. -1 if none. */ - TxIdx best_other_chunk_rep = TxIdx(-1); + FeeFrac best_other_chunk_feerate = chunk_info.feerate; + /** The chunk index for the best candidate chunk to merge with. INVALID_SET_IDX if none. */ + SetIdx best_other_chunk_idx = INVALID_SET_IDX; /** We generate random tiebreak values to pick between equal-feerate candidate chunks. * This variable stores the tiebreak of the current best candidate. */ uint64_t best_other_chunk_tiebreak{0}; - for (auto tx : chunk_txn) { - auto& tx_data = m_tx_data[tx]; - /** The transactions reached by following dependencies from tx that have not been - * explored before. */ - auto newly_reached = (DownWard ? tx_data.children : tx_data.parents) - explored; - explored |= newly_reached; - while (newly_reached.Any()) { - // Find a chunk inside newly_reached, and remove it from newly_reached. 
- auto reached_chunk_rep = m_tx_data[newly_reached.First()].chunk_rep; - auto& reached_chunk = m_tx_data[reached_chunk_rep].chunk_setinfo; - newly_reached -= reached_chunk.transactions; - // See if it has an acceptable feerate. - auto cmp = DownWard ? FeeRateCompare(best_other_chunk_feerate, reached_chunk.feerate) - : FeeRateCompare(reached_chunk.feerate, best_other_chunk_feerate); - if (cmp > 0) continue; - uint64_t tiebreak = m_rng.rand64(); - if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) { - best_other_chunk_feerate = reached_chunk.feerate; - best_other_chunk_rep = reached_chunk_rep; - best_other_chunk_tiebreak = tiebreak; - } + + /** Which parent/child transactions we still need to process the chunks for. */ + auto todo = DownWard ? m_reachable[chunk_idx].second : m_reachable[chunk_idx].first; + unsigned steps = 0; + while (todo.Any()) { + ++steps; + // Find a chunk for a transaction in todo, and remove all its transactions from todo. + auto reached_chunk_idx = m_tx_data[todo.First()].chunk_idx; + auto& reached_chunk_info = m_set_info[reached_chunk_idx]; + todo -= reached_chunk_info.transactions; + // See if it has an acceptable feerate. + auto cmp = DownWard ? FeeRateCompare(best_other_chunk_feerate, reached_chunk_info.feerate) + : FeeRateCompare(reached_chunk_info.feerate, best_other_chunk_feerate); + if (cmp > 0) continue; + uint64_t tiebreak = m_rng.rand64(); + if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) { + best_other_chunk_feerate = reached_chunk_info.feerate; + best_other_chunk_idx = reached_chunk_idx; + best_other_chunk_tiebreak = tiebreak; } } - // Stop if there are no candidate chunks to merge with. 
- if (best_other_chunk_rep == TxIdx(-1)) return TxIdx(-1); - if constexpr (DownWard) { - chunk_rep = MergeChunks(chunk_rep, best_other_chunk_rep); - } else { - chunk_rep = MergeChunks(best_other_chunk_rep, chunk_rep); - } - Assume(chunk_rep != TxIdx(-1)); - return chunk_rep; + Assume(steps <= m_set_info.size()); + + return best_other_chunk_idx; } + /** Perform an upward or downward merge step, on the specified chunk. Returns the merged chunk, + * or INVALID_SET_IDX if no merge took place. */ + template + SetIdx MergeStep(SetIdx chunk_idx) noexcept + { + auto merge_chunk_idx = PickMergeCandidate(chunk_idx); + if (merge_chunk_idx == INVALID_SET_IDX) return INVALID_SET_IDX; + chunk_idx = MergeChunksDirected(chunk_idx, merge_chunk_idx); + Assume(chunk_idx != INVALID_SET_IDX); + return chunk_idx; + } - /** Perform an upward or downward merge sequence on the specified transaction. */ + /** Perform an upward or downward merge sequence on the specified chunk. */ template - void MergeSequence(TxIdx tx_idx) noexcept + void MergeSequence(SetIdx chunk_idx) noexcept { - auto chunk_rep = m_tx_data[tx_idx].chunk_rep; + Assume(m_chunk_idxs[chunk_idx]); while (true) { - auto merged_rep = MergeStep(chunk_rep); - if (merged_rep == TxIdx(-1)) break; - chunk_rep = merged_rep; + auto merged_chunk_idx = MergeStep(chunk_idx); + if (merged_chunk_idx == INVALID_SET_IDX) break; + chunk_idx = merged_chunk_idx; + } + // Add the chunk to the queue of improvable chunks, if it wasn't already there. + if (!m_suboptimal_idxs[chunk_idx]) { + m_suboptimal_idxs.Set(chunk_idx); + m_suboptimal_chunks.push_back(chunk_idx); } - // Add the chunk to the queue of improvable chunks. - m_suboptimal_chunks.push_back(chunk_rep); } /** Split a chunk, and then merge the resulting two chunks to make the graph topological * again. 
*/ - void Improve(DepIdx dep_idx) noexcept + void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept { - auto& dep_data = m_dep_data[dep_idx]; - Assume(dep_data.active); // Deactivate the specified dependency, splitting it into two new chunks: a top containing // the parent, and a bottom containing the child. The top should have a higher feerate. - Deactivate(dep_idx); + auto [parent_chunk_idx, child_chunk_idx] = Deactivate(parent_idx, child_idx); // At this point we have exactly two chunks which may violate topology constraints (the - // parent chunk and child chunk that were produced by deactivating dep_idx). We can fix + // parent chunk and child chunk that were produced by deactivation). We can fix // these using just merge sequences, one upwards and one downwards, avoiding the need for a // full MakeTopological. + const auto& parent_reachable = m_reachable[parent_chunk_idx].first; + const auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions; + if (parent_reachable.Overlaps(child_chunk_txn)) { + // The parent chunk has a dependency on a transaction in the child chunk. In this case, + // the parent needs to merge back with the child chunk (a self-merge), and no other + // merges are needed. Special-case this, so the overhead of PickMergeCandidate and + // MergeSequence can be avoided. + + // In the self-merge, the roles reverse: the parent chunk (from the split) depends + // on the child chunk, so child_chunk_idx is the "top" and parent_chunk_idx is the + // "bottom" for MergeChunks. + auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx); + if (!m_suboptimal_idxs[merged_chunk_idx]) { + m_suboptimal_idxs.Set(merged_chunk_idx); + m_suboptimal_chunks.push_back(merged_chunk_idx); + } + } else { + // Merge the top chunk with lower-feerate chunks it depends on. + MergeSequence(parent_chunk_idx); + // Merge the bottom chunk with higher-feerate chunks that depend on it. 
+ MergeSequence(child_chunk_idx); + } + } - // Merge the top chunk with lower-feerate chunks it depends on (which may be the bottom it - // was just split from, or other pre-existing chunks). - MergeSequence(dep_data.parent); - // Merge the bottom chunk with higher-feerate chunks that depend on it. - MergeSequence(dep_data.child); + /** Determine the next chunk to optimize, or INVALID_SET_IDX if none. */ + SetIdx PickChunkToOptimize() noexcept + { + while (!m_suboptimal_chunks.empty()) { + // Pop an entry from the potentially-suboptimal chunk queue. + SetIdx chunk_idx = m_suboptimal_chunks.front(); + Assume(m_suboptimal_idxs[chunk_idx]); + m_suboptimal_idxs.Reset(chunk_idx); + m_suboptimal_chunks.pop_front(); + if (m_chunk_idxs[chunk_idx]) return chunk_idx; + // If what was popped is not currently a chunk, continue. This may + // happen when a split chunk merges in Improve() with one or more existing chunks that + // are themselves on the suboptimal queue already. + } + return INVALID_SET_IDX; + } + + /** Find a (parent, child) dependency to deactivate in chunk_idx, or (-1, -1) if none. */ + std::pair PickDependencyToSplit(SetIdx chunk_idx) noexcept + { + Assume(m_chunk_idxs[chunk_idx]); + auto& chunk_info = m_set_info[chunk_idx]; + + // Remember the best dependency {par, chl} seen so far. + std::pair candidate_dep = {TxIdx(-1), TxIdx(-1)}; + uint64_t candidate_tiebreak = 0; + // Iterate over all transactions. + for (auto tx_idx : chunk_info.transactions) { + const auto& tx_data = m_tx_data[tx_idx]; + // Iterate over all active child dependencies of the transaction. + for (auto child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]]; + // Skip if this dependency is ineligible (the top chunk that would be created + // does not have higher feerate than the chunk it is currently part of). 
+ auto cmp = FeeRateCompare(dep_top_info.feerate, chunk_info.feerate); + if (cmp <= 0) continue; + // Generate a random tiebreak for this dependency, and reject it if its tiebreak + // is worse than the best so far. This means that among all eligible + // dependencies, a uniformly random one will be chosen. + uint64_t tiebreak = m_rng.rand64(); + if (tiebreak < candidate_tiebreak) continue; + // Remember this as our (new) candidate dependency. + candidate_dep = {tx_idx, child_idx}; + candidate_tiebreak = tiebreak; + } + } + return candidate_dep; } public: /** Construct a spanning forest for the given DepGraph, with every transaction in its own chunk * (not topological). */ - explicit SpanningForestState(const DepGraph& depgraph, uint64_t rng_seed) noexcept : m_rng(rng_seed) + explicit SpanningForestState(const DepGraph& depgraph LIFETIMEBOUND, uint64_t rng_seed) noexcept : + m_rng(rng_seed), m_depgraph(depgraph) { m_transaction_idxs = depgraph.Positions(); auto num_transactions = m_transaction_idxs.Count(); m_tx_data.resize(depgraph.PositionRange()); - // Reserve the maximum number of (reserved) dependencies the cluster can have, so - // m_dep_data won't need any reallocations during construction. For a cluster with N - // transactions, the worst case consists of two sets of transactions, the parents and the - // children, where each child depends on each parent and nothing else. For even N, both - // sets can be sized N/2, which means N^2/4 dependencies. For odd N, one can be (N + 1)/2 - // and the other can be (N - 1)/2, meaning (N^2 - 1)/4 dependencies. Because N^2 is odd in - // this case, N^2/4 (with rounding-down division) is the correct value in both cases. - m_dep_data.reserve((num_transactions * num_transactions) / 4); - for (auto tx : m_transaction_idxs) { + m_set_info.resize(num_transactions); + m_reachable.resize(num_transactions); + size_t num_chunks = 0; + for (auto tx_idx : m_transaction_idxs) { // Fill in transaction data. 
- auto& tx_data = m_tx_data[tx]; - tx_data.chunk_rep = tx; - tx_data.chunk_setinfo.transactions = SetType::Singleton(tx); - tx_data.chunk_setinfo.feerate = depgraph.FeeRate(tx); - // Add its dependencies. - SetType parents = depgraph.GetReducedParents(tx); - for (auto par : parents) { - auto& par_tx_data = m_tx_data[par]; - auto dep_idx = m_dep_data.size(); - // Construct new dependency. - auto& dep = m_dep_data.emplace_back(); - dep.active = false; - dep.parent = par; - dep.child = tx; - // Add it as parent of the child. - tx_data.parents.Set(par); - // Add it as child of the parent. - par_tx_data.child_deps.push_back(dep_idx); - par_tx_data.children.Set(tx); + auto& tx_data = m_tx_data[tx_idx]; + tx_data.parents = depgraph.GetReducedParents(tx_idx); + for (auto parent_idx : tx_data.parents) { + m_tx_data[parent_idx].children.Set(tx_idx); } + // Create a singleton chunk for it. + tx_data.chunk_idx = num_chunks; + m_set_info[num_chunks++] = SetInfo(depgraph, tx_idx); + } + // Set the reachable transactions for each chunk to the transactions' parents and children. + for (SetIdx chunk_idx = 0; chunk_idx < num_transactions; ++chunk_idx) { + auto& tx_data = m_tx_data[m_set_info[chunk_idx].transactions.First()]; + m_reachable[chunk_idx].first = tx_data.parents; + m_reachable[chunk_idx].second = tx_data.children; } + Assume(num_chunks == num_transactions); + // Mark all chunk sets as chunks. + m_chunk_idxs = SetType::Fill(num_chunks); } /** Load an existing linearization. Must be called immediately after constructor. The result is @@ -1003,12 +1114,12 @@ class SpanningForestState void LoadLinearization(std::span old_linearization) noexcept { // Add transactions one by one, in order of existing linearization. - for (DepGraphIndex tx : old_linearization) { - auto chunk_rep = m_tx_data[tx].chunk_rep; + for (DepGraphIndex tx_idx : old_linearization) { + auto chunk_idx = m_tx_data[tx_idx].chunk_idx; // Merge the chunk upwards, as long as merging succeeds. 
while (true) { - chunk_rep = MergeStep(chunk_rep); - if (chunk_rep == TxIdx(-1)) break; + chunk_idx = MergeStep(chunk_idx); + if (chunk_idx == INVALID_SET_IDX) break; } } } @@ -1016,39 +1127,61 @@ class SpanningForestState /** Make state topological. Can be called after constructing, or after LoadLinearization. */ void MakeTopological() noexcept { - for (auto tx : m_transaction_idxs) { - auto& tx_data = m_tx_data[tx]; - if (tx_data.chunk_rep == tx) { - m_suboptimal_chunks.emplace_back(tx); - // Randomize the initial order of suboptimal chunks in the queue. - TxIdx j = m_rng.randrange(m_suboptimal_chunks.size()); - if (j != m_suboptimal_chunks.size() - 1) { - std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]); - } + Assume(m_suboptimal_chunks.empty()); + /** What direction to initially merge chunks in; one of the two directions is enough. This + * is sufficient because if a non-topological inactive dependency exists between two + * chunks, at least one of the two chunks will eventually be processed in a direction that + * discovers it - either the lower chunk tries upward, or the upper chunk tries downward. + * Chunks that are the result of the merging are always tried in both directions. */ + unsigned init_dir = m_rng.randbool(); + /** Which chunks are the result of merging, and thus need merge attempts in both + * directions. */ + SetType merged_chunks; + // Mark chunks as suboptimal. + m_suboptimal_idxs = m_chunk_idxs; + for (auto chunk_idx : m_chunk_idxs) { + m_suboptimal_chunks.emplace_back(chunk_idx); + // Randomize the initial order of suboptimal chunks in the queue. + SetIdx j = m_rng.randrange(m_suboptimal_chunks.size()); + if (j != m_suboptimal_chunks.size() - 1) { + std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]); } } while (!m_suboptimal_chunks.empty()) { // Pop an entry from the potentially-suboptimal chunk queue. 
- TxIdx chunk = m_suboptimal_chunks.front(); + SetIdx chunk_idx = m_suboptimal_chunks.front(); m_suboptimal_chunks.pop_front(); - auto& chunk_data = m_tx_data[chunk]; - // If what was popped is not currently a chunk representative, continue. This may + Assume(m_suboptimal_idxs[chunk_idx]); + m_suboptimal_idxs.Reset(chunk_idx); + // If what was popped is not currently a chunk, continue. This may // happen when it was merged with something else since being added. - if (chunk_data.chunk_rep != chunk) continue; + if (!m_chunk_idxs[chunk_idx]) continue; + /** What direction(s) to attempt merging in. 1=up, 2=down, 3=both. */ + unsigned direction = merged_chunks[chunk_idx] ? 3 : init_dir + 1; int flip = m_rng.randbool(); for (int i = 0; i < 2; ++i) { if (i ^ flip) { + if (!(direction & 1)) continue; // Attempt to merge the chunk upwards. - auto result_up = MergeStep(chunk); - if (result_up != TxIdx(-1)) { - m_suboptimal_chunks.push_back(result_up); + auto result_up = MergeStep(chunk_idx); + if (result_up != INVALID_SET_IDX) { + if (!m_suboptimal_idxs[result_up]) { + m_suboptimal_idxs.Set(result_up); + m_suboptimal_chunks.push_back(result_up); + } + merged_chunks.Set(result_up); break; } } else { + if (!(direction & 2)) continue; // Attempt to merge the chunk downwards. - auto result_down = MergeStep(chunk); - if (result_down != TxIdx(-1)) { - m_suboptimal_chunks.push_back(result_down); + auto result_down = MergeStep(chunk_idx); + if (result_down != INVALID_SET_IDX) { + if (!m_suboptimal_idxs[result_down]) { + m_suboptimal_idxs.Set(result_down); + m_suboptimal_chunks.push_back(result_down); + } + merged_chunks.Set(result_down); break; } } @@ -1059,16 +1192,15 @@ class SpanningForestState /** Initialize the data structure for optimization. It must be topological already. */ void StartOptimizing() noexcept { + Assume(m_suboptimal_chunks.empty()); // Mark chunks suboptimal. 
- for (auto tx : m_transaction_idxs) { - auto& tx_data = m_tx_data[tx]; - if (tx_data.chunk_rep == tx) { - m_suboptimal_chunks.push_back(tx); - // Randomize the initial order of suboptimal chunks in the queue. - TxIdx j = m_rng.randrange(m_suboptimal_chunks.size()); - if (j != m_suboptimal_chunks.size() - 1) { - std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]); - } + m_suboptimal_idxs = m_chunk_idxs; + for (auto chunk_idx : m_chunk_idxs) { + m_suboptimal_chunks.push_back(chunk_idx); + // Randomize the initial order of suboptimal chunks in the queue. + SetIdx j = m_rng.randrange(m_suboptimal_chunks.size()); + if (j != m_suboptimal_chunks.size() - 1) { + std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]); } } } @@ -1076,49 +1208,20 @@ class SpanningForestState /** Try to improve the forest. Returns false if it is optimal, true otherwise. */ bool OptimizeStep() noexcept { - while (!m_suboptimal_chunks.empty()) { - // Pop an entry from the potentially-suboptimal chunk queue. - TxIdx chunk = m_suboptimal_chunks.front(); - m_suboptimal_chunks.pop_front(); - auto& chunk_data = m_tx_data[chunk]; - // If what was popped is not currently a chunk representative, continue. This may - // happen when a split chunk merges in Improve() with one or more existing chunks that - // are themselves on the suboptimal queue already. - if (chunk_data.chunk_rep != chunk) continue; - // Remember the best dependency seen so far. - DepIdx candidate_dep = DepIdx(-1); - uint64_t candidate_tiebreak = 0; - // Iterate over all transactions. - for (auto tx : chunk_data.chunk_setinfo.transactions) { - const auto& tx_data = m_tx_data[tx]; - // Iterate over all active child dependencies of the transaction. 
- const auto children = std::span{tx_data.child_deps}; - for (DepIdx dep_idx : children) { - const auto& dep_data = m_dep_data[dep_idx]; - if (!dep_data.active) continue; - // Skip if this dependency is ineligible (the top chunk that would be created - // does not have higher feerate than the chunk it is currently part of). - auto cmp = FeeRateCompare(dep_data.top_setinfo.feerate, chunk_data.chunk_setinfo.feerate); - if (cmp <= 0) continue; - // Generate a random tiebreak for this dependency, and reject it if its tiebreak - // is worse than the best so far. This means that among all eligible - // dependencies, a uniformly random one will be chosen. - uint64_t tiebreak = m_rng.rand64(); - if (tiebreak < candidate_tiebreak) continue; - // Remember this as our (new) candidate dependency. - candidate_dep = dep_idx; - candidate_tiebreak = tiebreak; - } - } - // If a candidate with positive gain was found, deactivate it and then make the state - // topological again with a sequence of merges. - if (candidate_dep != DepIdx(-1)) Improve(candidate_dep); - // Stop processing for now, even if nothing was activated, as the loop above may have - // had a nontrivial cost. + auto chunk_idx = PickChunkToOptimize(); + if (chunk_idx == INVALID_SET_IDX) { + // No improvable chunk was found, we are done. + return false; + } + auto [parent_idx, child_idx] = PickDependencyToSplit(chunk_idx); + if (parent_idx == TxIdx(-1)) { + // Nothing to improve in chunk_idx. Need to continue with other chunks, if any. return !m_suboptimal_chunks.empty(); } - // No improvable chunk was found, we are done. - return false; + // Deactivate the found dependency and then make the state topological again with a + // sequence of merges. + Improve(parent_idx, child_idx); + return true; } /** Initialize data structure for minimizing the chunks. 
Can only be called if state is known @@ -1129,16 +1232,13 @@ class SpanningForestState m_nonminimal_chunks.reserve(m_transaction_idxs.Count()); // Gather all chunks, and for each, add it with a random pivot in it, and a random initial // direction, to m_nonminimal_chunks. - for (auto tx : m_transaction_idxs) { - auto& tx_data = m_tx_data[tx]; - if (tx_data.chunk_rep == tx) { - TxIdx pivot_idx = PickRandomTx(tx_data.chunk_setinfo.transactions); - m_nonminimal_chunks.emplace_back(tx, pivot_idx, m_rng.randbits<1>()); - // Randomize the initial order of nonminimal chunks in the queue. - TxIdx j = m_rng.randrange(m_nonminimal_chunks.size()); - if (j != m_nonminimal_chunks.size() - 1) { - std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[j]); - } + for (auto chunk_idx : m_chunk_idxs) { + TxIdx pivot_idx = PickRandomTx(m_set_info[chunk_idx].transactions); + m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, m_rng.randbits<1>()); + // Randomize the initial order of nonminimal chunks in the queue. + SetIdx j = m_rng.randrange(m_nonminimal_chunks.size()); + if (j != m_nonminimal_chunks.size() - 1) { + std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[j]); } } } @@ -1149,10 +1249,9 @@ class SpanningForestState // If the queue of potentially-non-minimal chunks is empty, we are done. if (m_nonminimal_chunks.empty()) return false; // Pop an entry from the potentially-non-minimal chunk queue. - auto [chunk_rep, pivot_idx, flags] = m_nonminimal_chunks.front(); + auto [chunk_idx, pivot_idx, flags] = m_nonminimal_chunks.front(); m_nonminimal_chunks.pop_front(); - auto& chunk_data = m_tx_data[chunk_rep]; - Assume(chunk_data.chunk_rep == chunk_rep); + auto& chunk_info = m_set_info[chunk_idx]; /** Whether to move the pivot down rather than up. */ bool move_pivot_down = flags & 1; /** Whether this is already the second stage. 
*/ @@ -1160,29 +1259,27 @@ class SpanningForestState // Find a random dependency whose top and bottom set feerates are equal, and which has // pivot in bottom set (if move_pivot_down) or in top set (if !move_pivot_down). - DepIdx candidate_dep = DepIdx(-1); + std::pair candidate_dep; uint64_t candidate_tiebreak{0}; bool have_any = false; // Iterate over all transactions. - for (auto tx_idx : chunk_data.chunk_setinfo.transactions) { + for (auto tx_idx : chunk_info.transactions) { const auto& tx_data = m_tx_data[tx_idx]; // Iterate over all active child dependencies of the transaction. - for (auto dep_idx : tx_data.child_deps) { - auto& dep_data = m_dep_data[dep_idx]; - // Skip inactive child dependencies. - if (!dep_data.active) continue; + for (auto child_idx : tx_data.active_children) { + const auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]]; // Skip if this dependency does not have equal top and bottom set feerates. Note // that the top cannot have higher feerate than the bottom, or OptimizeSteps would // have dealt with it. - if (dep_data.top_setinfo.feerate << chunk_data.chunk_setinfo.feerate) continue; + if (dep_top_info.feerate << chunk_info.feerate) continue; have_any = true; // Skip if this dependency does not have pivot in the right place. - if (move_pivot_down == dep_data.top_setinfo.transactions[pivot_idx]) continue; + if (move_pivot_down == dep_top_info.transactions[pivot_idx]) continue; // Remember this as our chosen dependency if it has a better tiebreak. uint64_t tiebreak = m_rng.rand64() | 1; if (tiebreak > candidate_tiebreak) { candidate_tiebreak = tiebreak; - candidate_dep = dep_idx; + candidate_dep = {tx_idx, child_idx}; } } } @@ -1193,23 +1290,24 @@ class SpanningForestState if (candidate_tiebreak == 0) { // Switch to other direction, and to second phase. 
flags ^= 3; - if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_rep, pivot_idx, flags); + if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, flags); return true; } // Otherwise, deactivate the dependency that was found. - Deactivate(candidate_dep); - auto& dep_data = m_dep_data[candidate_dep]; - auto parent_chunk_rep = m_tx_data[dep_data.parent].chunk_rep; - auto child_chunk_rep = m_tx_data[dep_data.child].chunk_rep; - // Try to activate a dependency between the new bottom and the new top (opposite from the + auto [parent_chunk_idx, child_chunk_idx] = Deactivate(candidate_dep.first, candidate_dep.second); + // Determine if there is a dependency from the new bottom to the new top (opposite from the // dependency that was just deactivated). - auto merged_chunk_rep = MergeChunks(child_chunk_rep, parent_chunk_rep); - if (merged_chunk_rep != TxIdx(-1)) { - // A self-merge happened. - // Re-insert the chunk into the queue, in the same direction. Note that the chunk_rep + auto& parent_reachable = m_reachable[parent_chunk_idx].first; + auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions; + if (parent_reachable.Overlaps(child_chunk_txn)) { + // A self-merge is needed. Note that the child_chunk_idx is the top, and + // parent_chunk_idx is the bottom, because we activate a dependency in the reverse + // direction compared to the deactivation above. + auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx); + // Re-insert the chunk into the queue, in the same direction. Note that the chunk_idx // will have changed. - m_nonminimal_chunks.emplace_back(merged_chunk_rep, pivot_idx, flags); + m_nonminimal_chunks.emplace_back(merged_chunk_idx, pivot_idx, flags); } else { // No self-merge happens, and thus we have found a way to split the chunk. Create two // smaller chunks, and add them to the queue. The one that contains the current pivot @@ -1219,13 +1317,13 @@ class SpanningForestState // possible already. 
The new chunk without the current pivot gets a new randomly-chosen // one. if (move_pivot_down) { - auto parent_pivot_idx = PickRandomTx(m_tx_data[parent_chunk_rep].chunk_setinfo.transactions); - m_nonminimal_chunks.emplace_back(parent_chunk_rep, parent_pivot_idx, m_rng.randbits<1>()); - m_nonminimal_chunks.emplace_back(child_chunk_rep, pivot_idx, flags); + auto parent_pivot_idx = PickRandomTx(m_set_info[parent_chunk_idx].transactions); + m_nonminimal_chunks.emplace_back(parent_chunk_idx, parent_pivot_idx, m_rng.randbits<1>()); + m_nonminimal_chunks.emplace_back(child_chunk_idx, pivot_idx, flags); } else { - auto child_pivot_idx = PickRandomTx(m_tx_data[child_chunk_rep].chunk_setinfo.transactions); - m_nonminimal_chunks.emplace_back(parent_chunk_rep, pivot_idx, flags); - m_nonminimal_chunks.emplace_back(child_chunk_rep, child_pivot_idx, m_rng.randbits<1>()); + auto child_pivot_idx = PickRandomTx(m_set_info[child_chunk_idx].transactions); + m_nonminimal_chunks.emplace_back(parent_chunk_idx, pivot_idx, flags); + m_nonminimal_chunks.emplace_back(child_chunk_idx, child_pivot_idx, m_rng.randbits<1>()); } if (m_rng.randbool()) { std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[m_nonminimal_chunks.size() - 2]); @@ -1235,82 +1333,129 @@ class SpanningForestState } /** Construct a topologically-valid linearization from the current forest state. Must be - * topological. */ - std::vector GetLinearization() noexcept + * topological. fallback_order is a comparator that defines a strong order for DepGraphIndexes + * in this cluster, used to order equal-feerate transactions and chunks. 
+ * + * Specifically, the resulting order consists of: + * - The chunks of the current SFL state, sorted by (in decreasing order of priority): + * - topology (parents before children) + * - highest chunk feerate first + * - smallest chunk size first + * - the chunk with the lowest maximum transaction, by fallback_order, first + * - The transactions within a chunk, sorted by (in decreasing order of priority): + * - topology (parents before children) + * - highest tx feerate first + * - smallest tx size first + * - the lowest transaction, by fallback_order, first + */ + std::vector GetLinearization(const StrongComparator auto& fallback_order) const noexcept { /** The output linearization. */ std::vector ret; - ret.reserve(m_transaction_idxs.Count()); - /** A heap with all chunks (by representative) that can currently be included, sorted by - * chunk feerate and a random tie-breaker. */ - std::vector> ready_chunks; - /** Information about chunks: - * - The first value is only used for chunk representatives, and counts the number of - * unmet dependencies this chunk has on other chunks (not including dependencies within - * the chunk itself). - * - The second value is the number of unmet dependencies overall. - */ - std::vector> chunk_deps(m_tx_data.size(), {0, 0}); - /** The set of all chunk representatives. */ - SetType chunk_reps; - /** A list with all transactions within the current chunk that can be included. */ + ret.reserve(m_set_info.size()); + /** A heap with all chunks (by set index) that can currently be included, sorted by + * chunk feerate (high to low), chunk size (small to large), and by least maximum element + * according to the fallback order (which is the second pair element). */ + std::vector> ready_chunks; + /** For every chunk, indexed by SetIdx, the number of unmet dependencies the chunk has on + * other chunks (not including dependencies within the chunk itself). 
*/ + std::vector chunk_deps(m_set_info.size(), 0); + /** For every transaction, indexed by TxIdx, the number of unmet dependencies the + * transaction has. */ + std::vector tx_deps(m_tx_data.size(), 0); + /** A heap with all transactions within the current chunk that can be included, sorted by + * tx feerate (high to low), tx size (small to large), and fallback order. */ std::vector ready_tx; - // Populate chunk_deps[c] with the number of {out-of-chunk dependencies, dependencies} the - // child has. + // Populate chunk_deps and tx_deps. for (TxIdx chl_idx : m_transaction_idxs) { const auto& chl_data = m_tx_data[chl_idx]; - chunk_deps[chl_idx].second = chl_data.parents.Count(); - auto chl_chunk_rep = chl_data.chunk_rep; - chunk_reps.Set(chl_chunk_rep); - for (auto par_idx : chl_data.parents) { - auto par_chunk_rep = m_tx_data[par_idx].chunk_rep; - chunk_deps[chl_chunk_rep].first += (par_chunk_rep != chl_chunk_rep); - } + tx_deps[chl_idx] = chl_data.parents.Count(); + auto chl_chunk_idx = chl_data.chunk_idx; + auto& chl_chunk_info = m_set_info[chl_chunk_idx]; + chunk_deps[chl_chunk_idx] += (chl_data.parents - chl_chunk_info.transactions).Count(); } + /** Function to compute the highest element of a chunk, by fallback_order. */ + auto max_fallback_fn = [&](SetIdx chunk_idx) noexcept { + auto& chunk = m_set_info[chunk_idx].transactions; + auto it = chunk.begin(); + DepGraphIndex ret = *it; + ++it; + while (it != chunk.end()) { + if (fallback_order(*it, ret) > 0) ret = *it; + ++it; + } + return ret; + }; + /** Comparison function for the transaction heap. Note that it is a max-heap, so + * tx_cmp_fn(a, b) == true means "a appears after b in the linearization". */ + auto tx_cmp_fn = [&](const auto& a, const auto& b) noexcept { + // Bail out for identical transactions. + if (a == b) return false; + // First sort by increasing transaction feerate. 
+ auto& a_feerate = m_depgraph.FeeRate(a); + auto& b_feerate = m_depgraph.FeeRate(b); + auto feerate_cmp = FeeRateCompare(a_feerate, b_feerate); + if (feerate_cmp != 0) return feerate_cmp < 0; + // Then by decreasing transaction size. + if (a_feerate.size != b_feerate.size) { + return a_feerate.size > b_feerate.size; + } + // Tie-break by decreasing fallback_order. + auto fallback_cmp = fallback_order(a, b); + if (fallback_cmp != 0) return fallback_cmp > 0; + // This should not be hit, because fallback_order defines a strong ordering. + Assume(false); + return a < b; + }; // Construct a heap with all chunks that have no out-of-chunk dependencies. - /** Comparison function for the heap. */ - auto chunk_cmp_fn = [&](const std::pair& a, const std::pair& b) noexcept { - auto& chunk_a = m_tx_data[a.first]; - auto& chunk_b = m_tx_data[b.first]; - Assume(chunk_a.chunk_rep == a.first); - Assume(chunk_b.chunk_rep == b.first); - // First sort by chunk feerate. - if (chunk_a.chunk_setinfo.feerate != chunk_b.chunk_setinfo.feerate) { - return chunk_a.chunk_setinfo.feerate < chunk_b.chunk_setinfo.feerate; + /** Comparison function for the chunk heap. Note that it is a max-heap, so + * chunk_cmp_fn(a, b) == true means "a appears after b in the linearization". */ + auto chunk_cmp_fn = [&](const auto& a, const auto& b) noexcept { + // Bail out for identical chunks. + if (a.first == b.first) return false; + // First sort by increasing chunk feerate. + auto& chunk_feerate_a = m_set_info[a.first].feerate; + auto& chunk_feerate_b = m_set_info[b.first].feerate; + auto feerate_cmp = FeeRateCompare(chunk_feerate_a, chunk_feerate_b); + if (feerate_cmp != 0) return feerate_cmp < 0; + // Then by decreasing chunk size. + if (chunk_feerate_a.size != chunk_feerate_b.size) { + return chunk_feerate_a.size > chunk_feerate_b.size; } - // Tie-break randomly. - if (a.second != b.second) return a.second < b.second; - // Lastly, tie-break by chunk representative. 
- return a.first < b.first; + // Tie-break by decreasing fallback_order. + auto fallback_cmp = fallback_order(a.second, b.second); + if (fallback_cmp != 0) return fallback_cmp > 0; + // This should not be hit, because fallback_order defines a strong ordering. + Assume(false); + return a.second < b.second; }; - for (TxIdx chunk_rep : chunk_reps) { - if (chunk_deps[chunk_rep].first == 0) ready_chunks.emplace_back(chunk_rep, m_rng.rand64()); + // Construct a heap with all chunks that have no out-of-chunk dependencies. + for (SetIdx chunk_idx : m_chunk_idxs) { + if (chunk_deps[chunk_idx] == 0) { + ready_chunks.emplace_back(chunk_idx, max_fallback_fn(chunk_idx)); + } } std::make_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn); - // Pop chunks off the heap, highest-feerate ones first. + // Pop chunks off the heap. while (!ready_chunks.empty()) { - auto [chunk_rep, _rnd] = ready_chunks.front(); + auto [chunk_idx, _rnd] = ready_chunks.front(); std::pop_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn); ready_chunks.pop_back(); - Assume(m_tx_data[chunk_rep].chunk_rep == chunk_rep); - Assume(chunk_deps[chunk_rep].first == 0); - const auto& chunk_txn = m_tx_data[chunk_rep].chunk_setinfo.transactions; + Assume(chunk_deps[chunk_idx] == 0); + const auto& chunk_txn = m_set_info[chunk_idx].transactions; // Build heap of all includable transactions in chunk. + Assume(ready_tx.empty()); for (TxIdx tx_idx : chunk_txn) { - if (chunk_deps[tx_idx].second == 0) { - ready_tx.push_back(tx_idx); - } + if (tx_deps[tx_idx] == 0) ready_tx.push_back(tx_idx); } Assume(!ready_tx.empty()); - // Pick transactions from the ready queue, append them to linearization, and decrement + std::make_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn); + // Pick transactions from the ready heap, append them to linearization, and decrement // dependency counts. while (!ready_tx.empty()) { - // Move a random queue element to the back. 
- auto pos = m_rng.randrange(ready_tx.size()); - if (pos != ready_tx.size() - 1) std::swap(ready_tx.back(), ready_tx[pos]); - // Pop from the back. - auto tx_idx = ready_tx.back(); - Assume(chunk_txn[tx_idx]); + // Pop an element from the tx_ready heap. + auto tx_idx = ready_tx.front(); + std::pop_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn); ready_tx.pop_back(); // Append to linearization. ret.push_back(tx_idx); @@ -1319,24 +1464,25 @@ class SpanningForestState for (TxIdx chl_idx : tx_data.children) { auto& chl_data = m_tx_data[chl_idx]; // Decrement tx dependency count. - Assume(chunk_deps[chl_idx].second > 0); - if (--chunk_deps[chl_idx].second == 0 && chunk_txn[chl_idx]) { - // Child tx has no dependencies left, and is in this chunk. Add it to the tx queue. + Assume(tx_deps[chl_idx] > 0); + if (--tx_deps[chl_idx] == 0 && chunk_txn[chl_idx]) { + // Child tx has no dependencies left, and is in this chunk. Add it to the tx heap. ready_tx.push_back(chl_idx); + std::push_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn); } // Decrement chunk dependency count if this is out-of-chunk dependency. - if (chl_data.chunk_rep != chunk_rep) { - Assume(chunk_deps[chl_data.chunk_rep].first > 0); - if (--chunk_deps[chl_data.chunk_rep].first == 0) { + if (chl_data.chunk_idx != chunk_idx) { + Assume(chunk_deps[chl_data.chunk_idx] > 0); + if (--chunk_deps[chl_data.chunk_idx] == 0) { // Child chunk has no dependencies left. Add it to the chunk heap. 
- ready_chunks.emplace_back(chl_data.chunk_rep, m_rng.rand64()); + ready_chunks.emplace_back(chl_data.chunk_idx, max_fallback_fn(chl_data.chunk_idx)); std::push_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn); } } } } } - Assume(ret.size() == m_transaction_idxs.Count()); + Assume(ret.size() == m_set_info.size()); return ret; } @@ -1356,10 +1502,8 @@ class SpanningForestState std::vector GetDiagram() const noexcept { std::vector ret; - for (auto tx : m_transaction_idxs) { - if (m_tx_data[tx].chunk_rep == tx) { - ret.push_back(m_tx_data[tx].chunk_setinfo.feerate); - } + for (auto chunk_idx : m_chunk_idxs) { + ret.push_back(m_set_info[chunk_idx].feerate); } std::sort(ret.begin(), ret.end(), std::greater{}); return ret; @@ -1369,148 +1513,152 @@ class SpanningForestState uint64_t GetCost() const noexcept { return m_cost; } /** Verify internal consistency of the data structure. */ - void SanityCheck(const DepGraph& depgraph) const + void SanityCheck() const { // // Verify dependency parent/child information, and build list of (active) dependencies. // std::vector> expected_dependencies; - std::vector> all_dependencies; - std::vector> active_dependencies; - for (auto parent_idx : depgraph.Positions()) { - for (auto child_idx : depgraph.GetReducedChildren(parent_idx)) { + std::vector> all_dependencies; + std::vector> active_dependencies; + for (auto parent_idx : m_depgraph.Positions()) { + for (auto child_idx : m_depgraph.GetReducedChildren(parent_idx)) { expected_dependencies.emplace_back(parent_idx, child_idx); } } - for (DepIdx dep_idx = 0; dep_idx < m_dep_data.size(); ++dep_idx) { - const auto& dep_data = m_dep_data[dep_idx]; - all_dependencies.emplace_back(dep_data.parent, dep_data.child, dep_idx); - // Also add to active_dependencies if it is active. 
- if (m_dep_data[dep_idx].active) { - active_dependencies.emplace_back(dep_data.parent, dep_data.child, dep_idx); + for (auto tx_idx : m_transaction_idxs) { + for (auto child_idx : m_tx_data[tx_idx].children) { + all_dependencies.emplace_back(tx_idx, child_idx); + if (m_tx_data[tx_idx].active_children[child_idx]) { + active_dependencies.emplace_back(tx_idx, child_idx); + } } } std::sort(expected_dependencies.begin(), expected_dependencies.end()); std::sort(all_dependencies.begin(), all_dependencies.end()); - assert(expected_dependencies.size() == all_dependencies.size()); - for (size_t i = 0; i < expected_dependencies.size(); ++i) { - assert(expected_dependencies[i] == - std::make_pair(std::get<0>(all_dependencies[i]), - std::get<1>(all_dependencies[i]))); - } + assert(expected_dependencies == all_dependencies); // // Verify the chunks against the list of active dependencies // - for (auto tx_idx: depgraph.Positions()) { - // Only process chunks for now. - if (m_tx_data[tx_idx].chunk_rep == tx_idx) { - const auto& chunk_data = m_tx_data[tx_idx]; - // Verify that transactions in the chunk point back to it. This guarantees - // that chunks are non-overlapping. - for (auto chunk_tx : chunk_data.chunk_setinfo.transactions) { - assert(m_tx_data[chunk_tx].chunk_rep == tx_idx); - } - // Verify the chunk's transaction set: it must contain the representative, and for - // every active dependency, if it contains the parent or child, it must contain - // both. It must have exactly N-1 active dependencies in it, guaranteeing it is - // acyclic. 
- SetType expected_chunk = SetType::Singleton(tx_idx); - while (true) { - auto old = expected_chunk; - size_t active_dep_count{0}; - for (const auto& [par, chl, _dep] : active_dependencies) { - if (expected_chunk[par] || expected_chunk[chl]) { - expected_chunk.Set(par); - expected_chunk.Set(chl); - ++active_dep_count; - } - } - if (old == expected_chunk) { - assert(expected_chunk.Count() == active_dep_count + 1); - break; + SetType chunk_cover; + for (auto chunk_idx : m_chunk_idxs) { + const auto& chunk_info = m_set_info[chunk_idx]; + // Verify that transactions in the chunk point back to it. This guarantees + // that chunks are non-overlapping. + for (auto tx_idx : chunk_info.transactions) { + assert(m_tx_data[tx_idx].chunk_idx == chunk_idx); + } + assert(!chunk_cover.Overlaps(chunk_info.transactions)); + chunk_cover |= chunk_info.transactions; + // Verify the chunk's transaction set: start from an arbitrary chunk transaction, + // and for every active dependency, if it contains the parent or child, add the + // other. It must have exactly N-1 active dependencies in it, guaranteeing it is + // acyclic. + assert(chunk_info.transactions.Any()); + SetType expected_chunk = SetType::Singleton(chunk_info.transactions.First()); + while (true) { + auto old = expected_chunk; + size_t active_dep_count{0}; + for (const auto& [par, chl] : active_dependencies) { + if (expected_chunk[par] || expected_chunk[chl]) { + expected_chunk.Set(par); + expected_chunk.Set(chl); + ++active_dep_count; } } - assert(chunk_data.chunk_setinfo.transactions == expected_chunk); - // Verify the chunk's feerate. - assert(chunk_data.chunk_setinfo.feerate == - depgraph.FeeRate(chunk_data.chunk_setinfo.transactions)); + if (old == expected_chunk) { + assert(expected_chunk.Count() == active_dep_count + 1); + break; + } } + assert(chunk_info.transactions == expected_chunk); + // Verify the chunk's feerate. 
+ assert(chunk_info.feerate == m_depgraph.FeeRate(chunk_info.transactions)); + // Verify the chunk's reachable transactions. + assert(m_reachable[chunk_idx] == GetReachable(expected_chunk)); + // Verify that the chunk's reachable transactions don't include its own transactions. + assert(!m_reachable[chunk_idx].first.Overlaps(chunk_info.transactions)); + assert(!m_reachable[chunk_idx].second.Overlaps(chunk_info.transactions)); } + // Verify that together, the chunks cover all transactions. + assert(chunk_cover == m_depgraph.Positions()); // - // Verify other transaction data. + // Verify transaction data. // - assert(m_transaction_idxs == depgraph.Positions()); + assert(m_transaction_idxs == m_depgraph.Positions()); for (auto tx_idx : m_transaction_idxs) { const auto& tx_data = m_tx_data[tx_idx]; - // Verify it has a valid chunk representative, and that chunk includes this - // transaction. - assert(m_tx_data[tx_data.chunk_rep].chunk_rep == tx_data.chunk_rep); - assert(m_tx_data[tx_data.chunk_rep].chunk_setinfo.transactions[tx_idx]); + // Verify it has a valid chunk index, and that chunk includes this transaction. + assert(m_chunk_idxs[tx_data.chunk_idx]); + assert(m_set_info[tx_data.chunk_idx].transactions[tx_idx]); // Verify parents/children. - assert(tx_data.parents == depgraph.GetReducedParents(tx_idx)); - assert(tx_data.children == depgraph.GetReducedChildren(tx_idx)); - // Verify list of child dependencies. - std::vector expected_child_deps; - for (const auto& [par_idx, chl_idx, dep_idx] : all_dependencies) { - if (tx_idx == par_idx) { - assert(tx_data.children[chl_idx]); - expected_child_deps.push_back(dep_idx); - } + assert(tx_data.parents == m_depgraph.GetReducedParents(tx_idx)); + assert(tx_data.children == m_depgraph.GetReducedChildren(tx_idx)); + // Verify active_children is a subset of children. + assert(tx_data.active_children.IsSubsetOf(tx_data.children)); + // Verify each active child's dep_top_idx points to a valid non-chunk set. 
+ for (auto child_idx : tx_data.active_children) { + assert(tx_data.dep_top_idx[child_idx] < m_set_info.size()); + assert(!m_chunk_idxs[tx_data.dep_top_idx[child_idx]]); } - std::sort(expected_child_deps.begin(), expected_child_deps.end()); - auto child_deps_copy = tx_data.child_deps; - std::sort(child_deps_copy.begin(), child_deps_copy.end()); - assert(expected_child_deps == child_deps_copy); } // - // Verify active dependencies' top_setinfo. + // Verify active dependencies' top sets. // - for (const auto& [par_idx, chl_idx, dep_idx] : active_dependencies) { - const auto& dep_data = m_dep_data[dep_idx]; - // Verify the top_info's transactions: it must contain the parent, and for every - // active dependency, except dep_idx itself, if it contains the parent or child, it - // must contain both. + for (const auto& [par_idx, chl_idx] : active_dependencies) { + // Verify the top set's transactions: it must contain the parent, and for every + // active dependency, except the chl_idx->par_idx dependency itself, if it contains the + // parent or child, it must contain both. It must have exactly N-1 active dependencies + // in it, guaranteeing it is acyclic. SetType expected_top = SetType::Singleton(par_idx); while (true) { auto old = expected_top; - for (const auto& [par2_idx, chl2_idx, dep2_idx] : active_dependencies) { - if (dep2_idx != dep_idx && (expected_top[par2_idx] || expected_top[chl2_idx])) { + size_t active_dep_count{0}; + for (const auto& [par2_idx, chl2_idx] : active_dependencies) { + if (par_idx == par2_idx && chl_idx == chl2_idx) continue; + if (expected_top[par2_idx] || expected_top[chl2_idx]) { expected_top.Set(par2_idx); expected_top.Set(chl2_idx); + ++active_dep_count; } } - if (old == expected_top) break; + if (old == expected_top) { + assert(expected_top.Count() == active_dep_count + 1); + break; + } } assert(!expected_top[chl_idx]); - assert(dep_data.top_setinfo.transactions == expected_top); - // Verify the top_info's feerate. 
- assert(dep_data.top_setinfo.feerate == - depgraph.FeeRate(dep_data.top_setinfo.transactions)); + auto& dep_top_info = m_set_info[m_tx_data[par_idx].dep_top_idx[chl_idx]]; + assert(dep_top_info.transactions == expected_top); + // Verify the top set's feerate. + assert(dep_top_info.feerate == m_depgraph.FeeRate(dep_top_info.transactions)); } // // Verify m_suboptimal_chunks. // + SetType suboptimal_idxs; for (size_t i = 0; i < m_suboptimal_chunks.size(); ++i) { - auto tx_idx = m_suboptimal_chunks[i]; - assert(m_transaction_idxs[tx_idx]); + auto chunk_idx = m_suboptimal_chunks[i]; + assert(!suboptimal_idxs[chunk_idx]); + suboptimal_idxs.Set(chunk_idx); } + assert(m_suboptimal_idxs == suboptimal_idxs); // // Verify m_nonminimal_chunks. // - SetType nonminimal_reps; + SetType nonminimal_idxs; for (size_t i = 0; i < m_nonminimal_chunks.size(); ++i) { - auto [chunk_rep, pivot, flags] = m_nonminimal_chunks[i]; - assert(m_tx_data[chunk_rep].chunk_rep == chunk_rep); - assert(m_tx_data[pivot].chunk_rep == chunk_rep); - assert(!nonminimal_reps[chunk_rep]); - nonminimal_reps.Set(chunk_rep); + auto [chunk_idx, pivot, flags] = m_nonminimal_chunks[i]; + assert(m_tx_data[pivot].chunk_idx == chunk_idx); + assert(!nonminimal_idxs[chunk_idx]); + nonminimal_idxs.Set(chunk_idx); } - assert(nonminimal_reps.IsSubsetOf(m_transaction_idxs)); + assert(nonminimal_idxs.IsSubsetOf(m_chunk_idxs)); } }; @@ -1521,6 +1669,9 @@ class SpanningForestState * @param[in] rng_seed A random number seed to control search order. This prevents peers * from predicting exactly which clusters would be hard for us to * linearize. + * @param[in] fallback_order A comparator to order transactions, used to sort equal-feerate + * chunks and transactions. See SpanningForestState::GetLinearization + * for details. * @param[in] old_linearization An existing linearization for the cluster, or empty. 
* @param[in] is_topological (Only relevant if old_linearization is not empty) Whether * old_linearization is topologically valid. @@ -1532,7 +1683,13 @@ class SpanningForestState * - How many optimization steps were actually performed. */ template -std::tuple, bool, uint64_t> Linearize(const DepGraph& depgraph, uint64_t max_iterations, uint64_t rng_seed, std::span old_linearization = {}, bool is_topological = true) noexcept +std::tuple, bool, uint64_t> Linearize( + const DepGraph& depgraph, + uint64_t max_iterations, + uint64_t rng_seed, + const StrongComparator auto& fallback_order, + std::span old_linearization = {}, + bool is_topological = true) noexcept { /** Initialize a spanning forest data structure for this cluster. */ SpanningForestState forest(depgraph, rng_seed); @@ -1562,7 +1719,7 @@ std::tuple, bool, uint64_t> Linearize(const DepGraph< } } while (forest.GetCost() < max_iterations); } - return {forest.GetLinearization(), optimal, forest.GetCost()}; + return {forest.GetLinearization(fallback_order), optimal, forest.GetCost()}; } /** Improve a given linearization. 
diff --git a/libbitcoinkernel-sys/bitcoin/src/coins.cpp b/libbitcoinkernel-sys/bitcoin/src/coins.cpp index 7f2ffc38..a6552283 100644 --- a/libbitcoinkernel-sys/bitcoin/src/coins.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/coins.cpp @@ -5,8 +5,9 @@ #include #include -#include #include +#include +#include #include TRACEPOINT_SEMAPHORE(utxocache, add); @@ -14,6 +15,7 @@ TRACEPOINT_SEMAPHORE(utxocache, spent); TRACEPOINT_SEMAPHORE(utxocache, uncache); std::optional CCoinsView::GetCoin(const COutPoint& outpoint) const { return std::nullopt; } +std::optional CCoinsView::PeekCoin(const COutPoint& outpoint) const { return GetCoin(outpoint); } uint256 CCoinsView::GetBestBlock() const { return uint256(); } std::vector CCoinsView::GetHeadBlocks() const { return std::vector(); } void CCoinsView::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& hashBlock) @@ -30,6 +32,7 @@ bool CCoinsView::HaveCoin(const COutPoint &outpoint) const CCoinsViewBacked::CCoinsViewBacked(CCoinsView *viewIn) : base(viewIn) { } std::optional CCoinsViewBacked::GetCoin(const COutPoint& outpoint) const { return base->GetCoin(outpoint); } +std::optional CCoinsViewBacked::PeekCoin(const COutPoint& outpoint) const { return base->PeekCoin(outpoint); } bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base->HaveCoin(outpoint); } uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); } std::vector CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); } @@ -38,6 +41,14 @@ void CCoinsViewBacked::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& h std::unique_ptr CCoinsViewBacked::Cursor() const { return base->Cursor(); } size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); } +std::optional CCoinsViewCache::PeekCoin(const COutPoint& outpoint) const +{ + if (auto it{cacheCoins.find(outpoint)}; it != cacheCoins.end()) { + return it->second.coin.IsSpent() ? 
std::nullopt : std::optional{it->second.coin}; + } + return base->PeekCoin(outpoint); +} + CCoinsViewCache::CCoinsViewCache(CCoinsView* baseIn, bool deterministic) : CCoinsViewBacked(baseIn), m_deterministic(deterministic), cacheCoins(0, SaltedOutpointHasher(/*deterministic=*/deterministic), CCoinsMap::key_equal{}, &m_cache_coins_memory_resource) @@ -49,16 +60,18 @@ size_t CCoinsViewCache::DynamicMemoryUsage() const { return memusage::DynamicUsage(cacheCoins) + cachedCoinsUsage; } +std::optional CCoinsViewCache::FetchCoinFromBase(const COutPoint& outpoint) const +{ + return base->GetCoin(outpoint); +} + CCoinsMap::iterator CCoinsViewCache::FetchCoin(const COutPoint &outpoint) const { const auto [ret, inserted] = cacheCoins.try_emplace(outpoint); if (inserted) { - if (auto coin{base->GetCoin(outpoint)}) { + if (auto coin{FetchCoinFromBase(outpoint)}) { ret->second.coin = std::move(*coin); cachedCoinsUsage += ret->second.coin.DynamicMemoryUsage(); - if (ret->second.coin.IsSpent()) { // TODO GetCoin cannot return spent coins - // The parent only has an empty entry for this outpoint; we can consider our version as fresh. 
- CCoinsCacheEntry::SetFresh(*ret, m_sentinel); - } + Assert(!ret->second.coin.IsSpent()); } else { cacheCoins.erase(ret); return cacheCoins.end(); @@ -100,10 +113,12 @@ void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possi fresh = !it->second.IsDirty(); } if (!inserted) { + m_dirty_count -= it->second.IsDirty(); cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); } it->second.coin = std::move(coin); CCoinsCacheEntry::SetDirty(*it, m_sentinel); + ++m_dirty_count; if (fresh) CCoinsCacheEntry::SetFresh(*it, m_sentinel); cachedCoinsUsage += it->second.coin.DynamicMemoryUsage(); TRACEPOINT(utxocache, add, @@ -119,6 +134,7 @@ void CCoinsViewCache::EmplaceCoinInternalDANGER(COutPoint&& outpoint, Coin&& coi auto [it, inserted] = cacheCoins.try_emplace(std::move(outpoint), std::move(coin)); if (inserted) { CCoinsCacheEntry::SetDirty(*it, m_sentinel); + ++m_dirty_count; cachedCoinsUsage += mem_usage; } } @@ -137,6 +153,7 @@ void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, bool bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) { CCoinsMap::iterator it = FetchCoin(outpoint); if (it == cacheCoins.end()) return false; + m_dirty_count -= it->second.IsDirty(); cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); TRACEPOINT(utxocache, spent, outpoint.hash.data(), @@ -151,6 +168,7 @@ bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) { cacheCoins.erase(it); } else { CCoinsCacheEntry::SetDirty(*it, m_sentinel); + ++m_dirty_count; it->second.coin.Clear(); } return true; @@ -209,8 +227,9 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha } else { entry.coin = it->second.coin; } - cachedCoinsUsage += entry.coin.DynamicMemoryUsage(); CCoinsCacheEntry::SetDirty(*itUs, m_sentinel); + ++m_dirty_count; + cachedCoinsUsage += entry.coin.DynamicMemoryUsage(); // We can mark it FRESH in the parent if it was FRESH in the child // Otherwise it might have 
just been flushed from the parent's cache // and already exist in the grandparent @@ -229,6 +248,7 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha if (itUs->second.IsFresh() && it->second.coin.IsSpent()) { // The grandparent cache does not have an entry, and the coin // has been spent. We can just delete it from the parent cache. + m_dirty_count -= itUs->second.IsDirty(); cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage(); cacheCoins.erase(itUs); } else { @@ -242,7 +262,10 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha itUs->second.coin = it->second.coin; } cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage(); - CCoinsCacheEntry::SetDirty(*itUs, m_sentinel); + if (!itUs->second.IsDirty()) { + CCoinsCacheEntry::SetDirty(*itUs, m_sentinel); + ++m_dirty_count; + } // NOTE: It isn't safe to mark the coin as FRESH in the parent // cache. If it already existed and was spent in the parent // cache then marking it FRESH would prevent that spentness @@ -250,15 +273,16 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha } } } - hashBlock = hashBlockIn; + SetBestBlock(hashBlockIn); } -void CCoinsViewCache::Flush(bool will_reuse_cache) +void CCoinsViewCache::Flush(bool reallocate_cache) { - auto cursor{CoinsViewCacheCursor(m_sentinel, cacheCoins, /*will_erase=*/true)}; + auto cursor{CoinsViewCacheCursor(m_dirty_count, m_sentinel, cacheCoins, /*will_erase=*/true)}; base->BatchWrite(cursor, hashBlock); + Assume(m_dirty_count == 0); cacheCoins.clear(); - if (will_reuse_cache) { + if (reallocate_cache) { ReallocateCache(); } cachedCoinsUsage = 0; @@ -266,18 +290,27 @@ void CCoinsViewCache::Flush(bool will_reuse_cache) void CCoinsViewCache::Sync() { - auto cursor{CoinsViewCacheCursor(m_sentinel, cacheCoins, /*will_erase=*/false)}; + auto cursor{CoinsViewCacheCursor(m_dirty_count, m_sentinel, cacheCoins, /*will_erase=*/false)}; base->BatchWrite(cursor, hashBlock); + 
Assume(m_dirty_count == 0); if (m_sentinel.second.Next() != &m_sentinel) { /* BatchWrite must clear flags of all entries */ throw std::logic_error("Not all unspent flagged entries were cleared"); } } +void CCoinsViewCache::Reset() noexcept +{ + cacheCoins.clear(); + cachedCoinsUsage = 0; + m_dirty_count = 0; + SetBestBlock(uint256::ZERO); +} + void CCoinsViewCache::Uncache(const COutPoint& hash) { CCoinsMap::iterator it = cacheCoins.find(hash); - if (it != cacheCoins.end() && !it->second.IsDirty() && !it->second.IsFresh()) { + if (it != cacheCoins.end() && !it->second.IsDirty()) { cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); TRACEPOINT(utxocache, uncache, hash.hash.data(), @@ -318,20 +351,19 @@ void CCoinsViewCache::ReallocateCache() void CCoinsViewCache::SanityCheck() const { size_t recomputed_usage = 0; - size_t count_flagged = 0; + size_t count_dirty = 0; for (const auto& [_, entry] : cacheCoins) { - unsigned attr = 0; - if (entry.IsDirty()) attr |= 1; - if (entry.IsFresh()) attr |= 2; - if (entry.coin.IsSpent()) attr |= 4; - // Only 5 combinations are possible. - assert(attr != 2 && attr != 4 && attr != 7); + if (entry.coin.IsSpent()) { + assert(entry.IsDirty() && !entry.IsFresh()); // A spent coin must be dirty and cannot be fresh + } else { + assert(entry.IsDirty() || !entry.IsFresh()); // An unspent coin must not be fresh if not dirty + } // Recompute cachedCoinsUsage. recomputed_usage += entry.coin.DynamicMemoryUsage(); // Count the number of entries we expect in the linked list. - if (entry.IsDirty() || entry.IsFresh()) ++count_flagged; + if (entry.IsDirty()) ++count_dirty; } // Iterate over the linked list of flagged entries. size_t count_linked = 0; @@ -340,11 +372,11 @@ void CCoinsViewCache::SanityCheck() const assert(it->second.Next()->second.Prev() == it); assert(it->second.Prev()->second.Next() == it); // Verify they are actually flagged. 
- assert(it->second.IsDirty() || it->second.IsFresh()); + assert(it->second.IsDirty()); // Count the number of entries actually in the list. ++count_linked; } - assert(count_linked == count_flagged); + assert(count_dirty == count_linked && count_dirty == m_dirty_count); assert(recomputed_usage == cachedCoinsUsage); } @@ -389,3 +421,8 @@ bool CCoinsViewErrorCatcher::HaveCoin(const COutPoint& outpoint) const { return ExecuteBackedWrapper([&]() { return CCoinsViewBacked::HaveCoin(outpoint); }, m_err_callbacks); } + +std::optional CCoinsViewErrorCatcher::PeekCoin(const COutPoint& outpoint) const +{ + return ExecuteBackedWrapper>([&]() { return CCoinsViewBacked::PeekCoin(outpoint); }, m_err_callbacks); +} diff --git a/libbitcoinkernel-sys/bitcoin/src/coins.h b/libbitcoinkernel-sys/bitcoin/src/coins.h index 6da53829..ba23e3d3 100644 --- a/libbitcoinkernel-sys/bitcoin/src/coins.h +++ b/libbitcoinkernel-sys/bitcoin/src/coins.h @@ -6,6 +6,7 @@ #ifndef BITCOIN_COINS_H #define BITCOIN_COINS_H +#include #include #include #include @@ -102,7 +103,6 @@ using CoinsCachePair = std::pair; * - unspent, FRESH, DIRTY (e.g. a new coin created in the cache) * - unspent, not FRESH, DIRTY (e.g. a coin changed in the cache during a reorg) * - unspent, not FRESH, not DIRTY (e.g. an unspent coin fetched from the parent cache) - * - spent, FRESH, not DIRTY (e.g. a spent coin fetched from the parent cache) * - spent, not FRESH, DIRTY (e.g. a coin is spent and spentness needs to be flushed to the parent) */ struct CCoinsCacheEntry @@ -117,12 +117,6 @@ struct CCoinsCacheEntry * the parent cache for batch writing. This is a performance optimization * compared to giving all entries in the cache to the parent and having the * parent scan for only modified entries. - * - * FRESH-but-not-DIRTY coins can not occur in practice, since that would - * mean a spent coin exists in the parent CCoinsView and not in the child - * CCoinsViewCache. 
Nevertheless, if a spent coin is retrieved from the - * parent cache, the FRESH-but-not-DIRTY coin will be tracked by the linked - * list and deleted when Sync or Flush is called on the CCoinsViewCache. */ CoinsCachePair* m_prev{nullptr}; CoinsCachePair* m_next{nullptr}; @@ -271,10 +265,11 @@ struct CoinsViewCacheCursor //! This is an optimization compared to erasing all entries as the cursor iterates them when will_erase is set. //! Calling CCoinsMap::clear() afterwards is faster because a CoinsCachePair cannot be coerced back into a //! CCoinsMap::iterator to be erased, and must therefore be looked up again by key in the CCoinsMap before being erased. - CoinsViewCacheCursor(CoinsCachePair& sentinel LIFETIMEBOUND, + CoinsViewCacheCursor(size_t& dirty_count LIFETIMEBOUND, + CoinsCachePair& sentinel LIFETIMEBOUND, CCoinsMap& map LIFETIMEBOUND, bool will_erase) noexcept - : m_sentinel(sentinel), m_map(map), m_will_erase(will_erase) {} + : m_dirty_count(dirty_count), m_sentinel(sentinel), m_map(map), m_will_erase(will_erase) {} inline CoinsCachePair* Begin() const noexcept { return m_sentinel.second.Next(); } inline CoinsCachePair* End() const noexcept { return &m_sentinel; } @@ -283,6 +278,7 @@ struct CoinsViewCacheCursor inline CoinsCachePair* NextAndMaybeErase(CoinsCachePair& current) noexcept { const auto next_entry{current.second.Next()}; + m_dirty_count -= current.second.IsDirty(); // If we are not going to erase the cache, we must still erase spent entries. // Otherwise, clear the state of the entry. 
if (!m_will_erase) { @@ -297,7 +293,10 @@ struct CoinsViewCacheCursor } inline bool WillErase(CoinsCachePair& current) const noexcept { return m_will_erase || current.second.coin.IsSpent(); } + size_t GetDirtyCount() const noexcept { return m_dirty_count; } + size_t GetTotalCount() const noexcept { return m_map.size(); } private: + size_t& m_dirty_count; CoinsCachePair& m_sentinel; CCoinsMap& m_map; bool m_will_erase; @@ -308,9 +307,15 @@ class CCoinsView { public: //! Retrieve the Coin (unspent transaction output) for a given outpoint. + //! May populate the cache. Use PeekCoin() to perform a non-caching lookup. virtual std::optional GetCoin(const COutPoint& outpoint) const; + //! Retrieve the Coin (unspent transaction output) for a given outpoint, without caching results. + //! Does not populate the cache. Use GetCoin() to cache the result. + virtual std::optional PeekCoin(const COutPoint& outpoint) const; + //! Just check whether a given outpoint is unspent. + //! May populate the cache. Use PeekCoin() to perform a non-caching lookup. virtual bool HaveCoin(const COutPoint &outpoint) const; //! Retrieve the block hash whose state this CCoinsView currently represents @@ -346,6 +351,7 @@ class CCoinsViewBacked : public CCoinsView public: CCoinsViewBacked(CCoinsView *viewIn); std::optional GetCoin(const COutPoint& outpoint) const override; + std::optional PeekCoin(const COutPoint& outpoint) const override; bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; std::vector GetHeadBlocks() const override; @@ -375,6 +381,17 @@ class CCoinsViewCache : public CCoinsViewBacked /* Cached dynamic memory usage for the inner Coin objects. */ mutable size_t cachedCoinsUsage{0}; + /* Running count of dirty Coin cache entries. */ + mutable size_t m_dirty_count{0}; + + /** + * Discard all modifications made to this cache without flushing to the base view. 
+ * This can be used to efficiently reuse a cache instance across multiple operations. + */ + void Reset() noexcept; + + /* Fetch the coin from base. Used for cache misses in FetchCoin. */ + virtual std::optional FetchCoinFromBase(const COutPoint& outpoint) const; public: CCoinsViewCache(CCoinsView *baseIn, bool deterministic = false); @@ -386,6 +403,7 @@ class CCoinsViewCache : public CCoinsViewBacked // Standard CCoinsView methods std::optional GetCoin(const COutPoint& outpoint) const override; + std::optional PeekCoin(const COutPoint& outpoint) const override; bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; void SetBestBlock(const uint256 &hashBlock); @@ -439,10 +457,10 @@ class CCoinsViewCache : public CCoinsViewBacked * Push the modifications applied to this cache to its base and wipe local state. * Failure to call this method or Sync() before destruction will cause the changes * to be forgotten. - * If will_reuse_cache is false, the cache will retain the same memory footprint + * If reallocate_cache is false, the cache will retain the same memory footprint * after flushing and should be destroyed to deallocate. */ - void Flush(bool will_reuse_cache = true); + void Flush(bool reallocate_cache = true); /** * Push the modifications applied to this cache to its base while retaining @@ -458,9 +476,12 @@ class CCoinsViewCache : public CCoinsViewBacked */ void Uncache(const COutPoint &outpoint); - //! Calculate the size of the cache (in number of transaction outputs) + //! Size of the cache (in number of transaction outputs) unsigned int GetCacheSize() const; + //! Number of dirty cache entries (transaction outputs) + size_t GetDirtyCount() const noexcept { return m_dirty_count; } + //! Calculate the size of the cache (in bytes) size_t DynamicMemoryUsage() const; @@ -477,6 +498,25 @@ class CCoinsViewCache : public CCoinsViewBacked //! Run an internal sanity check on the cache data structure. 
*/ void SanityCheck() const; + class ResetGuard + { + private: + friend CCoinsViewCache; + CCoinsViewCache& m_cache; + explicit ResetGuard(CCoinsViewCache& cache LIFETIMEBOUND) noexcept : m_cache{cache} {} + + public: + ResetGuard(const ResetGuard&) = delete; + ResetGuard& operator=(const ResetGuard&) = delete; + ResetGuard(ResetGuard&&) = delete; + ResetGuard& operator=(ResetGuard&&) = delete; + + ~ResetGuard() { m_cache.Reset(); } + }; + + //! Create a scoped guard that will call `Reset()` on this cache when it goes out of scope. + [[nodiscard]] ResetGuard CreateResetGuard() noexcept { return ResetGuard{*this}; } + private: /** * @note this is marked const, but may actually append to `cacheCoins`, increasing @@ -485,6 +525,27 @@ class CCoinsViewCache : public CCoinsViewBacked CCoinsMap::iterator FetchCoin(const COutPoint &outpoint) const; }; +/** + * CCoinsViewCache overlay that avoids populating/mutating parent cache layers on cache misses. + * + * This is achieved by fetching coins from the base view using PeekCoin() instead of GetCoin(), + * so intermediate CCoinsViewCache layers are not filled. + * + * Used during ConnectBlock() as an ephemeral, resettable top-level view that is flushed only + * on success, so invalid blocks don't pollute the underlying cache. + */ +class CoinsViewOverlay : public CCoinsViewCache +{ +private: + std::optional FetchCoinFromBase(const COutPoint& outpoint) const override + { + return base->PeekCoin(outpoint); + } + +public: + using CCoinsViewCache::CCoinsViewCache; +}; + //! Utility function to add all of a transaction's outputs to a cache. //! When check is false, this assumes that overwrites are only possible for coinbase transactions. //! 
When check is true, the underlying view may be queried to determine whether an addition is @@ -517,6 +578,7 @@ class CCoinsViewErrorCatcher final : public CCoinsViewBacked std::optional GetCoin(const COutPoint& outpoint) const override; bool HaveCoin(const COutPoint &outpoint) const override; + std::optional PeekCoin(const COutPoint& outpoint) const override; private: /** A list of callbacks to execute upon leveldb read error. */ diff --git a/libbitcoinkernel-sys/bitcoin/src/common/args.cpp b/libbitcoinkernel-sys/bitcoin/src/common/args.cpp index 3ffa4d3f..5c8589cf 100644 --- a/libbitcoinkernel-sys/bitcoin/src/common/args.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/common/args.cpp @@ -483,29 +483,33 @@ std::string SettingToString(const common::SettingsValue& value, const std::strin return SettingToString(value).value_or(strDefault); } -int64_t ArgsManager::GetIntArg(const std::string& strArg, int64_t nDefault) const +template +Int ArgsManager::GetArg(const std::string& strArg, Int nDefault) const { - return GetIntArg(strArg).value_or(nDefault); + return GetArg(strArg).value_or(nDefault); } -std::optional ArgsManager::GetIntArg(const std::string& strArg) const +template +std::optional ArgsManager::GetArg(const std::string& strArg) const { const common::SettingsValue value = GetSetting(strArg); - return SettingToInt(value); + return SettingTo(value); } -std::optional SettingToInt(const common::SettingsValue& value) +template +std::optional SettingTo(const common::SettingsValue& value) { if (value.isNull()) return std::nullopt; if (value.isFalse()) return 0; if (value.isTrue()) return 1; - if (value.isNum()) return value.getInt(); - return LocaleIndependentAtoi(value.get_str()); + if (value.isNum()) return value.getInt(); + return LocaleIndependentAtoi(value.get_str()); } -int64_t SettingToInt(const common::SettingsValue& value, int64_t nDefault) +template +Int SettingTo(const common::SettingsValue& value, Int nDefault) { - return 
SettingToInt(value).value_or(nDefault); + return SettingTo(value).value_or(nDefault); } bool ArgsManager::GetBoolArg(const std::string& strArg, bool fDefault) const @@ -531,6 +535,23 @@ bool SettingToBool(const common::SettingsValue& value, bool fDefault) return SettingToBool(value).value_or(fDefault); } +#define INSTANTIATE_INT_TYPE(Type) \ + template Type ArgsManager::GetArg(const std::string&, Type) const; \ + template std::optional ArgsManager::GetArg(const std::string&) const; \ + template Type SettingTo(const common::SettingsValue&, Type); \ + template std::optional SettingTo(const common::SettingsValue&) + +INSTANTIATE_INT_TYPE(int8_t); +INSTANTIATE_INT_TYPE(uint8_t); +INSTANTIATE_INT_TYPE(int16_t); +INSTANTIATE_INT_TYPE(uint16_t); +INSTANTIATE_INT_TYPE(int32_t); +INSTANTIATE_INT_TYPE(uint32_t); +INSTANTIATE_INT_TYPE(int64_t); +INSTANTIATE_INT_TYPE(uint64_t); + +#undef INSTANTIATE_INT_TYPE + bool ArgsManager::SoftSetArg(const std::string& strArg, const std::string& strValue) { LOCK(cs_args); diff --git a/libbitcoinkernel-sys/bitcoin/src/common/args.h b/libbitcoinkernel-sys/bitcoin/src/common/args.h index 1b9233ec..ea4e173b 100644 --- a/libbitcoinkernel-sys/bitcoin/src/common/args.h +++ b/libbitcoinkernel-sys/bitcoin/src/common/args.h @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -89,8 +90,11 @@ struct SectionInfo { std::string SettingToString(const common::SettingsValue&, const std::string&); std::optional SettingToString(const common::SettingsValue&); -int64_t SettingToInt(const common::SettingsValue&, int64_t); -std::optional SettingToInt(const common::SettingsValue&); +template +Int SettingTo(const common::SettingsValue&, Int); + +template +std::optional SettingTo(const common::SettingsValue&); bool SettingToBool(const common::SettingsValue&, bool); std::optional SettingToBool(const common::SettingsValue&); @@ -293,8 +297,14 @@ class ArgsManager * @param nDefault (e.g. 
1) * @return command-line argument (0 if invalid number) or default value */ - int64_t GetIntArg(const std::string& strArg, int64_t nDefault) const; - std::optional GetIntArg(const std::string& strArg) const; + template + Int GetArg(const std::string& strArg, Int nDefault) const; + + template + std::optional GetArg(const std::string& strArg) const; + + int64_t GetIntArg(const std::string& strArg, int64_t nDefault) const { return GetArg(strArg, nDefault); } + std::optional GetIntArg(const std::string& strArg) const { return GetArg(strArg); } /** * Return boolean argument or default value diff --git a/libbitcoinkernel-sys/bitcoin/src/common/messages.cpp b/libbitcoinkernel-sys/bitcoin/src/common/messages.cpp index 123db93c..637ec62a 100644 --- a/libbitcoinkernel-sys/bitcoin/src/common/messages.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/common/messages.cpp @@ -4,11 +4,11 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include - #include -#include #include +#include #include +#include #include #include #include @@ -33,7 +33,6 @@ std::string StringForFeeReason(FeeReason reason) {FeeReason::DOUBLE_ESTIMATE, "Double Target 95% Threshold"}, {FeeReason::CONSERVATIVE, "Conservative Double Target longer horizon"}, {FeeReason::MEMPOOL_MIN, "Mempool Min Fee"}, - {FeeReason::PAYTXFEE, "PayTxFee set"}, {FeeReason::FALLBACK, "Fallback fee"}, {FeeReason::REQUIRED, "Minimum Required Fee"}, }; @@ -68,7 +67,6 @@ std::string FeeModeInfo(const std::pair& mode, std "less responsive to short-term drops in the prevailing fee market. This mode\n" "potentially returns a higher fee rate estimate.\n", mode.first); default: - // Other modes apart from the ones handled are fee rate units; they should not be clarified. 
assert(false); } } diff --git a/libbitcoinkernel-sys/bitcoin/src/common/pcp.cpp b/libbitcoinkernel-sys/bitcoin/src/common/pcp.cpp index 4864136b..a640b823 100644 --- a/libbitcoinkernel-sys/bitcoin/src/common/pcp.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/common/pcp.cpp @@ -4,6 +4,7 @@ #include +#include #include #include #include @@ -81,6 +82,8 @@ constexpr size_t NATPMP_MAP_RESPONSE_LIFETIME_OFS = 12; constexpr uint8_t NATPMP_RESULT_SUCCESS = 0; //! Result code representing unsupported version. constexpr uint8_t NATPMP_RESULT_UNSUPP_VERSION = 1; +//! Result code representing not authorized (router doesn't support port mapping). +constexpr uint8_t NATPMP_RESULT_NOT_AUTHORIZED = 2; //! Result code representing lack of resources. constexpr uint8_t NATPMP_RESULT_NO_RESOURCES = 4; @@ -144,6 +147,8 @@ constexpr size_t PCP_MAP_EXTERNAL_IP_OFS = 20; //! Result code representing success (RFC6887 7.4), shared with NAT-PMP. constexpr uint8_t PCP_RESULT_SUCCESS = NATPMP_RESULT_SUCCESS; +//! Result code representing not authorized (RFC6887 7.4), shared with NAT-PMP. +constexpr uint8_t PCP_RESULT_NOT_AUTHORIZED = NATPMP_RESULT_NOT_AUTHORIZED; //! Result code representing lack of resources (RFC6887 7.4). 
constexpr uint8_t PCP_RESULT_NO_RESOURCES = 8; @@ -374,7 +379,16 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g Assume(response.size() >= NATPMP_MAP_RESPONSE_SIZE); uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS); if (result_code != NATPMP_RESULT_SUCCESS) { - LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + if (result_code == NATPMP_RESULT_NOT_AUTHORIZED) { + static std::atomic warned{false}; + if (!warned.exchange(true)) { + LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + } else { + LogDebug(BCLog::NET, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + } + } else { + LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + } if (result_code == NATPMP_RESULT_NO_RESOURCES) { return MappingError::NO_RESOURCES; } @@ -508,7 +522,16 @@ std::variant PCPRequestPortMap(const PCPMappingNonc uint16_t external_port = ReadBE16(response.data() + PCP_HDR_SIZE + PCP_MAP_EXTERNAL_PORT_OFS); CNetAddr external_addr{PCPUnwrapAddress(response.subspan(PCP_HDR_SIZE + PCP_MAP_EXTERNAL_IP_OFS, ADDR_IPV6_SIZE))}; if (result_code != PCP_RESULT_SUCCESS) { - LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + if (result_code == PCP_RESULT_NOT_AUTHORIZED) { + static std::atomic warned{false}; + if (!warned.exchange(true)) { + LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + } else { + LogDebug(BCLog::NET, "pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + } + } else { + LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + } if (result_code == PCP_RESULT_NO_RESOURCES) { return MappingError::NO_RESOURCES; } diff --git a/libbitcoinkernel-sys/bitcoin/src/common/run_command.cpp b/libbitcoinkernel-sys/bitcoin/src/common/run_command.cpp index 57683e03..86f89e17 100644 --- 
a/libbitcoinkernel-sys/bitcoin/src/common/run_command.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/common/run_command.cpp @@ -8,12 +8,13 @@ #include #include +#include #ifdef ENABLE_EXTERNAL_SIGNER #include #endif // ENABLE_EXTERNAL_SIGNER -UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in) +UniValue RunCommandParseJSON(const std::vector& cmd_args, const std::string& str_std_in) { #ifdef ENABLE_EXTERNAL_SIGNER namespace sp = subprocess; @@ -22,9 +23,9 @@ UniValue RunCommandParseJSON(const std::string& str_command, const std::string& std::istringstream stdout_stream; std::istringstream stderr_stream; - if (str_command.empty()) return UniValue::VNULL; + if (cmd_args.empty()) return UniValue::VNULL; - auto c = sp::Popen(str_command, sp::input{sp::PIPE}, sp::output{sp::PIPE}, sp::error{sp::PIPE}); + auto c = sp::Popen(cmd_args, sp::input{sp::PIPE}, sp::output{sp::PIPE}, sp::error{sp::PIPE}); if (!str_std_in.empty()) { c.send(str_std_in); } @@ -38,7 +39,7 @@ UniValue RunCommandParseJSON(const std::string& str_command, const std::string& std::getline(stderr_stream, error); const int n_error = c.retcode(); - if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error)); + if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", util::Join(cmd_args, " "), n_error, error)); if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result); return result_json; diff --git a/libbitcoinkernel-sys/bitcoin/src/common/run_command.h b/libbitcoinkernel-sys/bitcoin/src/common/run_command.h index 56c94f83..9162c704 100644 --- a/libbitcoinkernel-sys/bitcoin/src/common/run_command.h +++ b/libbitcoinkernel-sys/bitcoin/src/common/run_command.h @@ -6,16 +6,17 @@ #define BITCOIN_COMMON_RUN_COMMAND_H #include +#include class UniValue; /** * Execute a command which returns JSON, and parse the result. 
* - * @param str_command The command to execute, including any arguments + * @param cmd_args The command and arguments * @param str_std_in string to pass to stdin * @return parsed JSON */ -UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in=""); +UniValue RunCommandParseJSON(const std::vector& cmd_args, const std::string& str_std_in = ""); #endif // BITCOIN_COMMON_RUN_COMMAND_H diff --git a/libbitcoinkernel-sys/bitcoin/src/common/system.cpp b/libbitcoinkernel-sys/bitcoin/src/common/system.cpp index 72e9de10..08c0c692 100644 --- a/libbitcoinkernel-sys/bitcoin/src/common/system.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/common/system.cpp @@ -37,9 +37,6 @@ using util::ReplaceAll; -// Application startup time (used for uptime calculation) -const int64_t nStartupTime = GetTime(); - #ifndef WIN32 std::string ShellEscape(const std::string& arg) { @@ -130,8 +127,8 @@ std::optional GetTotalRAM() return std::nullopt; } -// Obtain the application startup time (used for uptime calculation) -int64_t GetStartupTime() -{ - return nStartupTime; -} +namespace { + const auto g_startup_time{SteadyClock::now()}; +} // namespace + +SteadyClock::duration GetUptime() { return SteadyClock::now() - g_startup_time; } diff --git a/libbitcoinkernel-sys/bitcoin/src/common/system.h b/libbitcoinkernel-sys/bitcoin/src/common/system.h index 2184f1d4..a3100fec 100644 --- a/libbitcoinkernel-sys/bitcoin/src/common/system.h +++ b/libbitcoinkernel-sys/bitcoin/src/common/system.h @@ -7,13 +7,15 @@ #define BITCOIN_COMMON_SYSTEM_H #include // IWYU pragma: keep +#include +#include #include #include #include -// Application startup time (used for uptime calculation) -int64_t GetStartupTime(); +/// Monotonic uptime (not affected by system time changes). 
+SteadyClock::duration GetUptime(); void SetupEnvironment(); [[nodiscard]] bool SetupNetworking(); diff --git a/libbitcoinkernel-sys/bitcoin/src/consensus/tx_verify.cpp b/libbitcoinkernel-sys/bitcoin/src/consensus/tx_verify.cpp index 00022a33..4efed70f 100644 --- a/libbitcoinkernel-sys/bitcoin/src/consensus/tx_verify.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/consensus/tx_verify.cpp @@ -188,6 +188,10 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, } } + // `tx.GetValueOut()` won't throw in validation paths because output-range checks run first + // (`bad-txns-vout-negative`, `bad-txns-vout-toolarge`, `bad-txns-txouttotal-toolarge`): + // * `MemPoolAccept::PreChecks`: `CheckTransaction()` is called before this method; + // * `Chainstate::ConnectBlock`: `CheckTransaction()` is called via `CheckBlock()` before this method. const CAmount value_out = tx.GetValueOut(); if (nValueIn < value_out) { return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-in-belowout", @@ -197,6 +201,11 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, // Tally transaction fees const CAmount txfee_aux = nValueIn - value_out; if (!MoneyRange(txfee_aux)) { + // Unreachable, given the following preconditions: + // * `value_out` comes from `tx.GetValueOut()`, which throws unless `MoneyRange(value_out)` and asserts `MoneyRange(nValueOut)` on return. + // * `MoneyRange(nValueIn)` was enforced in the input loop. + // * `nValueIn < value_out` was handled above, so `nValueIn >= value_out` here (and `txfee_aux >= 0`). + // Therefore `0 <= txfee_aux = nValueIn - value_out <= nValueIn <= MAX_MONEY`. 
return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-fee-outofrange"); } diff --git a/libbitcoinkernel-sys/bitcoin/src/core_io.cpp b/libbitcoinkernel-sys/bitcoin/src/core_io.cpp index a4b726cd..7492e9ca 100644 --- a/libbitcoinkernel-sys/bitcoin/src/core_io.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/core_io.cpp @@ -174,7 +174,7 @@ static bool DecodeTx(CMutableTransaction& tx, const std::vector& // Try decoding with extended serialization support, and remember if the result successfully // consumes the entire input. if (try_witness) { - DataStream ssData(tx_data); + SpanReader ssData{tx_data}; try { ssData >> TX_WITH_WITNESS(tx_extended); if (ssData.empty()) ok_extended = true; @@ -192,7 +192,7 @@ static bool DecodeTx(CMutableTransaction& tx, const std::vector& // Try decoding with legacy serialization, and remember if the result successfully consumes the entire input. if (try_no_witness) { - DataStream ssData(tx_data); + SpanReader ssData{tx_data}; try { ssData >> TX_NO_WITNESS(tx_legacy); if (ssData.empty()) ok_legacy = true; @@ -239,9 +239,8 @@ bool DecodeHexBlockHeader(CBlockHeader& header, const std::string& hex_header) if (!IsHex(hex_header)) return false; const std::vector header_data{ParseHex(hex_header)}; - DataStream ser_header{header_data}; try { - ser_header >> header; + SpanReader{header_data} >> header; } catch (const std::exception&) { return false; } @@ -254,9 +253,8 @@ bool DecodeHexBlk(CBlock& block, const std::string& strHexBlk) return false; std::vector blockData(ParseHex(strHexBlk)); - DataStream ssBlock(blockData); try { - ssBlock >> TX_WITH_WITNESS(block); + SpanReader{blockData} >> TX_WITH_WITNESS(block); } catch (const std::exception&) { return false; @@ -439,7 +437,7 @@ void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry entry.pushKV("size", tx.ComputeTotalSize()); entry.pushKV("vsize", (GetTransactionWeight(tx) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR); entry.pushKV("weight", 
GetTransactionWeight(tx)); - entry.pushKV("locktime", (int64_t)tx.nLockTime); + entry.pushKV("locktime", tx.nLockTime); UniValue vin{UniValue::VARR}; vin.reserve(tx.vin.size()); @@ -457,7 +455,7 @@ void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry in.pushKV("coinbase", HexStr(txin.scriptSig)); } else { in.pushKV("txid", txin.prevout.hash.GetHex()); - in.pushKV("vout", (int64_t)txin.prevout.n); + in.pushKV("vout", txin.prevout.n); UniValue o(UniValue::VOBJ); o.pushKV("asm", ScriptToAsmStr(txin.scriptSig, true)); o.pushKV("hex", HexStr(txin.scriptSig)); @@ -482,14 +480,14 @@ void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry ScriptToUniv(prev_txout.scriptPubKey, /*out=*/o_script_pub_key, /*include_hex=*/true, /*include_address=*/true); UniValue p(UniValue::VOBJ); - p.pushKV("generated", bool(prev_coin.fCoinBase)); - p.pushKV("height", uint64_t(prev_coin.nHeight)); + p.pushKV("generated", static_cast(prev_coin.fCoinBase)); + p.pushKV("height", prev_coin.nHeight); p.pushKV("value", ValueFromAmount(prev_txout.nValue)); p.pushKV("scriptPubKey", std::move(o_script_pub_key)); in.pushKV("prevout", std::move(p)); } } - in.pushKV("sequence", (int64_t)txin.nSequence); + in.pushKV("sequence", txin.nSequence); vin.push_back(std::move(in)); } entry.pushKV("vin", std::move(vin)); @@ -502,7 +500,7 @@ void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry UniValue out(UniValue::VOBJ); out.pushKV("value", ValueFromAmount(txout.nValue)); - out.pushKV("n", (int64_t)i); + out.pushKV("n", i); UniValue o(UniValue::VOBJ); ScriptToUniv(txout.scriptPubKey, /*out=*/o, /*include_hex=*/true, /*include_address=*/true); diff --git a/libbitcoinkernel-sys/bitcoin/src/crypto/chacha20.h b/libbitcoinkernel-sys/bitcoin/src/crypto/chacha20.h index 15035621..3feef196 100644 --- a/libbitcoinkernel-sys/bitcoin/src/crypto/chacha20.h +++ b/libbitcoinkernel-sys/bitcoin/src/crypto/chacha20.h @@ -8,7 +8,6 @@ #include #include 
#include -#include #include #include diff --git a/libbitcoinkernel-sys/bitcoin/src/dbwrapper.cpp b/libbitcoinkernel-sys/bitcoin/src/dbwrapper.cpp index b3f08cb2..eb222078 100644 --- a/libbitcoinkernel-sys/bitcoin/src/dbwrapper.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/dbwrapper.cpp @@ -4,6 +4,16 @@ #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include @@ -11,6 +21,7 @@ #include #include #include +#include #include #include @@ -19,16 +30,6 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include @@ -57,7 +58,7 @@ class CBitcoinLevelDBLogger : public leveldb::Logger { // This code is adapted from posix_logger.h, which is why it is using vsprintf. // Please do not do this in normal code void Logv(const char * format, va_list ap) override { - if (!LogAcceptCategory(BCLog::LEVELDB, BCLog::Level::Debug)) { + if (!LogAcceptCategory(BCLog::LEVELDB, util::log::Level::Debug)) { return; } char buffer[500]; @@ -276,7 +277,7 @@ CDBWrapper::~CDBWrapper() void CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync) { - const bool log_memory = LogAcceptCategory(BCLog::LEVELDB, BCLog::Level::Debug); + const bool log_memory = LogAcceptCategory(BCLog::LEVELDB, util::log::Level::Debug); double mem_before = 0; if (log_memory) { mem_before = DynamicMemoryUsage() / 1024.0 / 1024; diff --git a/libbitcoinkernel-sys/bitcoin/src/dbwrapper.h b/libbitcoinkernel-sys/bitcoin/src/dbwrapper.h index b2ce67c7..2eee6c1c 100644 --- a/libbitcoinkernel-sys/bitcoin/src/dbwrapper.h +++ b/libbitcoinkernel-sys/bitcoin/src/dbwrapper.h @@ -214,9 +214,9 @@ class CDBWrapper return false; } try { - DataStream ssValue{MakeByteSpan(*strValue)}; + std::span ssValue{MakeWritableByteSpan(*strValue)}; m_obfuscation(ssValue); - ssValue >> value; + SpanReader{ssValue} >> value; } catch (const std::exception&) { return false; } diff --git 
a/libbitcoinkernel-sys/bitcoin/src/dummywallet.cpp b/libbitcoinkernel-sys/bitcoin/src/dummywallet.cpp index 85fb1ed1..24952fea 100644 --- a/libbitcoinkernel-sys/bitcoin/src/dummywallet.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/dummywallet.cpp @@ -38,7 +38,6 @@ void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const "-maxapsfee=", "-maxtxfee=", "-mintxfee=", - "-paytxfee=", "-signer=", "-spendzeroconfchange", "-txconfirmtarget=", diff --git a/libbitcoinkernel-sys/bitcoin/src/external_signer.cpp b/libbitcoinkernel-sys/bitcoin/src/external_signer.cpp index 84d98a19..3790f4d3 100644 --- a/libbitcoinkernel-sys/bitcoin/src/external_signer.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/external_signer.cpp @@ -9,24 +9,27 @@ #include #include #include +#include #include #include #include #include -ExternalSigner::ExternalSigner(std::string command, std::string chain, std::string fingerprint, std::string name) +ExternalSigner::ExternalSigner(std::vector command, std::string chain, std::string fingerprint, std::string name) : m_command{std::move(command)}, m_chain{std::move(chain)}, m_fingerprint{std::move(fingerprint)}, m_name{std::move(name)} {} -std::string ExternalSigner::NetworkArg() const +std::vector ExternalSigner::NetworkArg() const { - return " --chain " + m_chain; + return {"--chain", m_chain}; } bool ExternalSigner::Enumerate(const std::string& command, std::vector& signers, const std::string& chain) { // Call enumerate - const UniValue result = RunCommandParseJSON(command + " enumerate"); + std::vector cmd_args = Cat(subprocess::util::split(command), {"enumerate"}); + + const UniValue result = RunCommandParseJSON(cmd_args, ""); if (!result.isArray()) { throw std::runtime_error(strprintf("'%s' received invalid response, expected array of signers", command)); } @@ -56,19 +59,19 @@ bool ExternalSigner::Enumerate(const std::string& command, std::vector command = Cat(m_command, Cat({"--stdin", "--fingerprint", m_fingerprint}, NetworkArg())); const std::string 
stdinStr = "signtx " + EncodeBase64(ssTx.str()); const UniValue signer_result = RunCommandParseJSON(command, stdinStr); diff --git a/libbitcoinkernel-sys/bitcoin/src/external_signer.h b/libbitcoinkernel-sys/bitcoin/src/external_signer.h index 1b36d496..5ba37c06 100644 --- a/libbitcoinkernel-sys/bitcoin/src/external_signer.h +++ b/libbitcoinkernel-sys/bitcoin/src/external_signer.h @@ -19,19 +19,19 @@ class ExternalSigner { private: //! The command which handles interaction with the external signer. - std::string m_command; + std::vector m_command; //! Bitcoin mainnet, testnet, etc std::string m_chain; - std::string NetworkArg() const; + std::vector NetworkArg() const; public: //! @param[in] command the command which handles interaction with the external signer //! @param[in] fingerprint master key fingerprint of the signer //! @param[in] chain "main", "test", "regtest" or "signet" //! @param[in] name device name - ExternalSigner(std::string command, std::string chain, std::string fingerprint, std::string name); + ExternalSigner(std::vector command, std::string chain, std::string fingerprint, std::string name); //! Master key fingerprint of the signer std::string m_fingerprint; diff --git a/libbitcoinkernel-sys/bitcoin/src/flatfile.cpp b/libbitcoinkernel-sys/bitcoin/src/flatfile.cpp index 140217b0..056fb9c1 100644 --- a/libbitcoinkernel-sys/bitcoin/src/flatfile.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/flatfile.cpp @@ -3,12 +3,13 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-#include - #include -#include + #include #include +#include + +#include FlatFileSeq::FlatFileSeq(fs::path dir, const char* prefix, size_t chunk_size) : m_dir(std::move(dir)), diff --git a/libbitcoinkernel-sys/bitcoin/src/httpserver.cpp b/libbitcoinkernel-sys/bitcoin/src/httpserver.cpp index 71c6f5b1..b84f0da0 100644 --- a/libbitcoinkernel-sys/bitcoin/src/httpserver.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/httpserver.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -49,83 +50,6 @@ using common::InvalidPortErrMsg; /** Maximum size of http request (request line + headers) */ static const size_t MAX_HEADERS_SIZE = 8192; -/** HTTP request work item */ -class HTTPWorkItem final : public HTTPClosure -{ -public: - HTTPWorkItem(std::unique_ptr _req, const std::string &_path, const HTTPRequestHandler& _func): - req(std::move(_req)), path(_path), func(_func) - { - } - void operator()() override - { - func(req.get(), path); - } - - std::unique_ptr req; - -private: - std::string path; - HTTPRequestHandler func; -}; - -/** Simple work queue for distributing work over multiple threads. - * Work items are simply callable objects. - */ -template -class WorkQueue -{ -private: - Mutex cs; - std::condition_variable cond GUARDED_BY(cs); - std::deque> queue GUARDED_BY(cs); - bool running GUARDED_BY(cs){true}; - const size_t maxDepth; - -public: - explicit WorkQueue(size_t _maxDepth) : maxDepth(_maxDepth) - { - } - /** Precondition: worker threads have all stopped (they have been joined). 
- */ - ~WorkQueue() = default; - /** Enqueue a work item */ - bool Enqueue(WorkItem* item) EXCLUSIVE_LOCKS_REQUIRED(!cs) - { - LOCK(cs); - if (!running || queue.size() >= maxDepth) { - return false; - } - queue.emplace_back(std::unique_ptr(item)); - cond.notify_one(); - return true; - } - /** Thread function */ - void Run() EXCLUSIVE_LOCKS_REQUIRED(!cs) - { - while (true) { - std::unique_ptr i; - { - WAIT_LOCK(cs, lock); - while (running && queue.empty()) - cond.wait(lock); - if (!running && queue.empty()) - break; - i = std::move(queue.front()); - queue.pop_front(); - } - (*i)(); - } - } - /** Interrupt and exit loops */ - void Interrupt() EXCLUSIVE_LOCKS_REQUIRED(!cs) - { - LOCK(cs); - running = false; - cond.notify_all(); - } -}; - struct HTTPPathHandler { HTTPPathHandler(std::string _prefix, bool _exactMatch, HTTPRequestHandler _handler): @@ -145,13 +69,14 @@ static struct event_base* eventBase = nullptr; static struct evhttp* eventHTTP = nullptr; //! List of subnets to allow RPC connections from static std::vector rpc_allow_subnets; -//! Work queue for handling longer requests off the event loop thread -static std::unique_ptr> g_work_queue{nullptr}; //! Handlers for (sub)paths static GlobalMutex g_httppathhandlers_mutex; static std::vector pathHandlers GUARDED_BY(g_httppathhandlers_mutex); //! Bound listening sockets static std::vector boundSockets; +//! Http thread pool - future: encapsulate in HttpContext +static ThreadPool g_threadpool_http("http"); +static int g_max_queue_depth{100}; /** * @brief Helps keep track of open `evhttp_connection`s with active `evhttp_requests` @@ -230,7 +155,7 @@ static bool InitHTTPAllowList() if (!subnet.IsValid()) { uiInterface.ThreadSafeMessageBox( Untranslated(strprintf("Invalid -rpcallowip subnet specification: %s. Valid values are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0), a network/CIDR (e.g. 1.2.3.4/24), all ipv4 (0.0.0.0/0), or all ipv6 (::/0). 
RFC4193 is allowed only if -cjdnsreachable=0.", strAllow)), - "", CClientUIInterface::MSG_ERROR); + CClientUIInterface::MSG_ERROR); return false; } rpc_allow_subnets.push_back(subnet); @@ -286,7 +211,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg) } } } - auto hreq{std::make_unique(req, *static_cast(arg))}; + auto hreq{std::make_shared(req, *static_cast(arg))}; // Early address-based allow check if (!ClientAllowed(hreq->GetPeer())) { @@ -327,13 +252,36 @@ static void http_request_cb(struct evhttp_request* req, void* arg) // Dispatch to worker thread if (i != iend) { - std::unique_ptr item(new HTTPWorkItem(std::move(hreq), path, i->handler)); - assert(g_work_queue); - if (g_work_queue->Enqueue(item.get())) { - (void)item.release(); /* if true, queue took ownership */ - } else { + if (static_cast(g_threadpool_http.WorkQueueSize()) >= g_max_queue_depth) { LogWarning("Request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting"); - item->req->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Work queue depth exceeded"); + hreq->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Work queue depth exceeded"); + return; + } + + auto item = [req = hreq, in_path = std::move(path), fn = i->handler]() { + std::string err_msg; + try { + fn(req.get(), in_path); + return; + } catch (const std::exception& e) { + LogWarning("Unexpected error while processing request for '%s'. Error msg: '%s'", req->GetURI(), e.what()); + err_msg = e.what(); + } catch (...) { + LogWarning("Unknown error while processing request for '%s'", req->GetURI()); + err_msg = "unknown error"; + } + // Reply so the client doesn't hang waiting for the response. + req->WriteHeader("Connection", "close"); + // TODO: Implement specific error formatting for the REST and JSON-RPC servers responses. 
+ req->WriteReply(HTTP_INTERNAL_SERVER_ERROR, err_msg); + }; + + if (auto res = g_threadpool_http.Submit(std::move(item)); !res.has_value()) { + Assume(hreq.use_count() == 1); // ensure request will be deleted + // Both SubmitError::Inactive and SubmitError::Interrupted mean shutdown + LogWarning("HTTP request rejected during server shutdown: '%s'", SubmitErrorString(res.error())); + hreq->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Request rejected during server shutdown"); + return; } } else { hreq->WriteReply(HTTP_NOT_FOUND); @@ -412,13 +360,6 @@ static bool HTTPBindAddresses(struct evhttp* http) return !boundSockets.empty(); } -/** Simple wrapper to set thread name and run work queue */ -static void HTTPWorkQueueRun(WorkQueue* queue, int worker_num) -{ - util::ThreadRename(strprintf("httpworker.%i", worker_num)); - queue->Run(); -} - /** libevent event log callback */ static void libevent_log_cb(int severity, const char *msg) { @@ -475,10 +416,9 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt) } LogDebug(BCLog::HTTP, "Initialized HTTP server\n"); - int workQueueDepth = std::max((long)gArgs.GetIntArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L); - LogDebug(BCLog::HTTP, "creating work queue of depth %d\n", workQueueDepth); + g_max_queue_depth = std::max(gArgs.GetArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1); + LogDebug(BCLog::HTTP, "set work queue of depth %d", g_max_queue_depth); - g_work_queue = std::make_unique>(workQueueDepth); // transfer ownership to eventBase/HTTP via .release() eventBase = base_ctr.release(); eventHTTP = http_ctr.release(); @@ -494,17 +434,13 @@ void UpdateHTTPServerLogging(bool enable) { } static std::thread g_thread_http; -static std::vector g_thread_http_workers; void StartHTTPServer() { - int rpcThreads = std::max((long)gArgs.GetIntArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L); - LogInfo("Starting HTTP server with %d worker threads\n", rpcThreads); + int rpcThreads = std::max(gArgs.GetArg("-rpcthreads", 
DEFAULT_HTTP_THREADS), 1); + LogInfo("Starting HTTP server with %d worker threads", rpcThreads); + g_threadpool_http.Start(rpcThreads); g_thread_http = std::thread(ThreadHTTP, eventBase); - - for (int i = 0; i < rpcThreads; i++) { - g_thread_http_workers.emplace_back(HTTPWorkQueueRun, g_work_queue.get(), i); - } } void InterruptHTTPServer() @@ -514,21 +450,17 @@ void InterruptHTTPServer() // Reject requests on current connections evhttp_set_gencb(eventHTTP, http_reject_request_cb, nullptr); } - if (g_work_queue) { - g_work_queue->Interrupt(); - } + // Interrupt pool after disabling requests + g_threadpool_http.Interrupt(); } void StopHTTPServer() { LogDebug(BCLog::HTTP, "Stopping HTTP server\n"); - if (g_work_queue) { - LogDebug(BCLog::HTTP, "Waiting for HTTP worker threads to exit\n"); - for (auto& thread : g_thread_http_workers) { - thread.join(); - } - g_thread_http_workers.clear(); - } + + LogDebug(BCLog::HTTP, "Waiting for HTTP worker threads to exit\n"); + g_threadpool_http.Stop(); + // Unlisten sockets, these are what make the event loop running, which means // that after this and all connections are closed the event loop will quit. for (evhttp_bound_socket *socket : boundSockets) { @@ -556,7 +488,6 @@ void StopHTTPServer() event_base_free(eventBase); eventBase = nullptr; } - g_work_queue.reset(); LogDebug(BCLog::HTTP, "Stopped HTTP server\n"); } diff --git a/libbitcoinkernel-sys/bitcoin/src/httpserver.h b/libbitcoinkernel-sys/bitcoin/src/httpserver.h index 1ef3aaeb..5461480d 100644 --- a/libbitcoinkernel-sys/bitcoin/src/httpserver.h +++ b/libbitcoinkernel-sys/bitcoin/src/httpserver.h @@ -159,15 +159,6 @@ class HTTPRequest */ std::optional GetQueryParameterFromUri(const char* uri, const std::string& key); -/** Event handler closure. - */ -class HTTPClosure -{ -public: - virtual void operator()() = 0; - virtual ~HTTPClosure() = default; -}; - /** Event class. This can be used either as a cross-thread trigger or as a timer. 
*/ class HTTPEvent diff --git a/libbitcoinkernel-sys/bitcoin/src/index/base.cpp b/libbitcoinkernel-sys/bitcoin/src/index/base.cpp index 7f77d13a..fba2f4f6 100644 --- a/libbitcoinkernel-sys/bitcoin/src/index/base.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/index/base.cpp @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -22,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/libbitcoinkernel-sys/bitcoin/src/index/base.h b/libbitcoinkernel-sys/bitcoin/src/index/base.h index 8cb8ad8e..d8fd8566 100644 --- a/libbitcoinkernel-sys/bitcoin/src/index/base.h +++ b/libbitcoinkernel-sys/bitcoin/src/index/base.h @@ -122,9 +122,6 @@ class BaseIndex : public CValidationInterface void ChainStateFlushed(const kernel::ChainstateRole& role, const CBlockLocator& locator) override; - /// Return custom notification options for index. - [[nodiscard]] virtual interfaces::Chain::NotifyOptions CustomOptions() { return {}; } - /// Initialize internal state from the database and block index. [[nodiscard]] virtual bool CustomInit(const std::optional& block) { return true; } @@ -151,6 +148,9 @@ class BaseIndex : public CValidationInterface /// Get the name of the index for display in logs. const std::string& GetName() const LIFETIMEBOUND { return m_name; } + /// Return custom notification options for index. + [[nodiscard]] virtual interfaces::Chain::NotifyOptions CustomOptions() { return {}; } + /// Blocks the current thread until the index is caught up to the current /// state of the block chain. 
This only blocks if the index has gotten in /// sync once and only needs to process blocks in the ValidationInterface diff --git a/libbitcoinkernel-sys/bitcoin/src/index/blockfilterindex.cpp b/libbitcoinkernel-sys/bitcoin/src/index/blockfilterindex.cpp index caf6d592..67fa1fab 100644 --- a/libbitcoinkernel-sys/bitcoin/src/index/blockfilterindex.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/index/blockfilterindex.cpp @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -22,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/libbitcoinkernel-sys/bitcoin/src/index/blockfilterindex.h b/libbitcoinkernel-sys/bitcoin/src/index/blockfilterindex.h index 85cc8f18..0bb4a74e 100644 --- a/libbitcoinkernel-sys/bitcoin/src/index/blockfilterindex.h +++ b/libbitcoinkernel-sys/bitcoin/src/index/blockfilterindex.h @@ -51,7 +51,7 @@ class BlockFilterIndex final : public BaseIndex Mutex m_cs_headers_cache; /** cache of block hash to filter header, to avoid disk access when responding to getcfcheckpt. */ - std::unordered_map m_headers_cache GUARDED_BY(m_cs_headers_cache); + std::unordered_map m_headers_cache GUARDED_BY(m_cs_headers_cache); // Last computed header to avoid disk reads on every new block. uint256 m_last_header{}; @@ -63,8 +63,6 @@ class BlockFilterIndex final : public BaseIndex std::optional ReadFilterHeader(int height, const uint256& expected_block_hash); protected: - interfaces::Chain::NotifyOptions CustomOptions() override; - bool CustomInit(const std::optional& block) override; bool CustomCommit(CDBBatch& batch) override; @@ -80,6 +78,8 @@ class BlockFilterIndex final : public BaseIndex explicit BlockFilterIndex(std::unique_ptr chain, BlockFilterType filter_type, size_t n_cache_size, bool f_memory = false, bool f_wipe = false); + interfaces::Chain::NotifyOptions CustomOptions() override; + BlockFilterType GetFilterType() const { return m_filter_type; } /** Get a single filter by block. 
*/ diff --git a/libbitcoinkernel-sys/bitcoin/src/index/coinstatsindex.cpp b/libbitcoinkernel-sys/bitcoin/src/index/coinstatsindex.cpp index 6be3db9a..319ffcad 100644 --- a/libbitcoinkernel-sys/bitcoin/src/index/coinstatsindex.cpp +++ b/libbitcoinkernel-sys/bitcoin/src/index/coinstatsindex.cpp @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include