diff --git a/.github/ci-test-each-commit-exec.py b/.github/ci-test-each-commit-exec.py index cdbf4ff438d1..aed1526b6436 100755 --- a/.github/ci-test-each-commit-exec.py +++ b/.github/ci-test-each-commit-exec.py @@ -62,6 +62,7 @@ def main(): f"./{build_dir}/test/functional/test_runner.py", "-j", str(num_procs * 2), + "--failfast", "--combinedlogslen=99999999", ]) diff --git a/.github/ci-windows-cross.py b/.github/ci-windows-cross.py index 13ca3b49456a..90cd59c7fe7d 100755 --- a/.github/ci-windows-cross.py +++ b/.github/ci-windows-cross.py @@ -134,13 +134,13 @@ def run_unit_tests(): def main(): parser = argparse.ArgumentParser(description="Utility to run Windows CI steps.") - steps = [ - "print_version", - "check_manifests", - "prepare_tests", - "run_unit_tests", - "run_functional_tests", - ] + steps = list(map(lambda f: f.__name__, [ + print_version, + check_manifests, + prepare_tests, + run_unit_tests, + run_functional_tests, + ])) parser.add_argument("step", choices=steps, help="CI step to perform.") args = parser.parse_args() @@ -149,16 +149,7 @@ def main(): str(Path.cwd() / "previous_releases"), ) - if args.step == "print_version": - print_version() - elif args.step == "check_manifests": - check_manifests() - elif args.step == "prepare_tests": - prepare_tests() - elif args.step == "run_unit_tests": - run_unit_tests() - elif args.step == "run_functional_tests": - run_functional_tests() + exec(f'{args.step}()') if __name__ == "__main__": diff --git a/.github/ci-windows.py b/.github/ci-windows.py index caa2d52c7754..16d7db7a2ed2 100755 --- a/.github/ci-windows.py +++ b/.github/ci-windows.py @@ -38,6 +38,29 @@ def run(cmd, **kwargs): } +def github_import_vs_env(_ci_type): + vswhere_path = Path(os.environ["ProgramFiles(x86)"]) / "Microsoft Visual Studio" / "Installer" / "vswhere.exe" + installation_path = run( + [str(vswhere_path), "-latest", "-property", "installationPath"], + capture_output=True, + text=True, + ).stdout.strip() + vsdevcmd = Path(installation_path) 
/ "Common7" / "Tools" / "vsdevcmd.bat" + comspec = os.environ["COMSPEC"] + output = run( + f'"{comspec}" /s /c ""{vsdevcmd}" -arch=x64 -no_logo && set"', + capture_output=True, + text=True, + ).stdout + github_env = os.environ["GITHUB_ENV"] + with open(github_env, "a") as env_file: + for line in output.splitlines(): + if "=" not in line: + continue + name, value = line.split("=", 1) + env_file.write(f"{name}={value}\n") + + def generate(ci_type): command = [ "cmake", @@ -50,7 +73,7 @@ def generate(ci_type): run(command) -def build(): +def build(_ci_type): command = [ "cmake", "--build", @@ -180,26 +203,18 @@ def run_tests(ci_type): def main(): parser = argparse.ArgumentParser(description="Utility to run Windows CI steps.") parser.add_argument("ci_type", choices=GENERATE_OPTIONS, help="CI type to run.") - steps = [ - "generate", - "build", - "check_manifests", - "prepare_tests", - "run_tests", - ] + steps = list(map(lambda f: f.__name__, [ + github_import_vs_env, + generate, + build, + check_manifests, + prepare_tests, + run_tests, + ])) parser.add_argument("step", choices=steps, help="CI step to perform.") args = parser.parse_args() - if args.step == "generate": - generate(args.ci_type) - elif args.step == "build": - build() - elif args.step == "check_manifests": - check_manifests(args.ci_type) - elif args.step == "prepare_tests": - prepare_tests(args.ci_type) - elif args.step == "run_tests": - run_tests(args.ci_type) + exec(f'{args.step}("{args.ci_type}")') if __name__ == "__main__": diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9bc798dc976c..1fedde674e21 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -56,12 +56,13 @@ jobs: fi test-each-commit: - name: 'test max 6 ancestor commits' - runs-on: ubuntu-24.04 - if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1 - timeout-minutes: 360 # Use maximum time, see 
https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. Assuming a worst case time of 1 hour per commit, this leads to a --max-count=6 below. + name: 'test ancestor commits' + needs: runners + runs-on: ${{ needs.runners.outputs.provider == 'cirrus' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' || 'ubuntu-24.04' }} env: - MAX_COUNT: 6 # Keep in sync with name above + TEST_RUNNER_PORT_MIN: "14000" # Use a larger port, to avoid colliding with CIRRUS_CACHE_HOST port 12321. + if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1 + timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. steps: - name: Determine fetch depth run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV" @@ -72,25 +73,35 @@ jobs: fetch-depth: ${{ env.FETCH_DEPTH }} - name: Determine commit range run: | - # Checkout HEAD~ and find the test base commit - # Checkout HEAD~ because it would be wasteful to rerun tests on the PR - # head commit that are already run by other jobs. + # Checkout HEAD~ and find the test base commit. + # Checkout HEAD~ because it would be wasteful to rerun + # tests on the PR head commit that are already run + # by other jobs. git checkout HEAD~ - # Figure out test base commit by listing ancestors of HEAD, excluding - # ancestors of the most recent merge commit, limiting the list to the - # newest MAX_COUNT ancestors, ordering it from oldest to newest, and - # taking the first one. + # Moreover, pull requests that contain a merge commit + # are generally draft pull requests that merge in other + # pull requests, so only check the relevant commits + # after the last merge commit. A merge commit could + # also be a subtree merge commit, which may be + # worthwhile to check. 
However, it is rare that the + # subtree merge commit is not the top commit (which + # would be skipped anyway by this task, because it is + # run by all other tasks). Also, `git rebase --exec` + # does not work on merge commits, so if this was + # important to check, the logic would have to be + # rewritten. # - # If the branch contains up to MAX_COUNT ancestor commits after the - # most recent merge commit, all of those commits will be tested. If it - # contains more, only the most recent MAX_COUNT commits will be - # tested. + # Figure out test base commit by listing ancestors of + # HEAD, excluding ancestors of the most recent merge + # commit, ordering them from oldest to newest, and + # taking the first one. # - # In the command below, the ^@ suffix is used to refer to all parents - # of the merge commit as described in: + # In the command below, the ^@ suffix is used to refer + # to all parents of the merge commit as described in: # https://git-scm.com/docs/git-rev-parse#_other_rev_parent_shorthand_notations - # and the ^ prefix is used to exclude these parents and all their - # ancestors from the rev-list output as described in: + # and the ^ prefix is used to exclude these parents + # and all their ancestors from the rev-list output + # as described in: # https://git-scm.com/docs/git-rev-list MERGE_BASE=$(git rev-list -n1 --merges HEAD) EXCLUDE_MERGE_BASE_ANCESTORS= @@ -98,7 +109,7 @@ jobs: if test -n "$MERGE_BASE"; then EXCLUDE_MERGE_BASE_ANCESTORS=^${MERGE_BASE}^@ fi - echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD $EXCLUDE_MERGE_BASE_ANCESTORS | head -1)" >> "$GITHUB_ENV" + echo "TEST_BASE=$(git rev-list -n${{ github.event.pull_request.commits }} --reverse HEAD $EXCLUDE_MERGE_BASE_ANCESTORS | head -1)" >> "$GITHUB_ENV" - run: | git fetch origin "${GITHUB_BASE_REF}" git config user.email "ci@example.com" @@ -227,26 +238,19 @@ jobs: - *CHECKOUT - - &SET_UP_VS - name: Set up VS Developer Prompt - shell: pwsh -Command 
"$PSVersionTable; $PSNativeCommandUseErrorActionPreference = $true; $ErrorActionPreference = 'Stop'; & '{0}'" - run: | - $vswherePath = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" - $installationPath = & $vswherePath -latest -property installationPath - & "${env:COMSPEC}" /s /c "`"$installationPath\Common7\Tools\vsdevcmd.bat`" -arch=x64 -no_logo && set" | foreach-object { - $name, $value = $_ -split '=', 2 - echo "$name=$value" >> $env:GITHUB_ENV - } + - &IMPORT_VS_ENV + name: Import Visual Studio env vars + run: py -3 .github/ci-windows.py "standard" github_import_vs_env - name: Get tool information - shell: pwsh run: | - cmake -version | Tee-Object -FilePath "cmake_version" - Write-Output "---" - msbuild -version | Tee-Object -FilePath "msbuild_version" - $env:VCToolsVersion | Tee-Object -FilePath "toolset_version" + set -o errexit -o pipefail -o xtrace -o nounset + + cmake -version | tee cmake_version + echo '---' + msbuild.exe -version | tee msbuild_version + echo "${VCToolsVersion-}" | tee toolset_version py -3 --version - Write-Host "PowerShell version $($PSVersionTable.PSVersion.ToString())" bash --version - name: Using vcpkg with MSBuild @@ -287,7 +291,8 @@ jobs: - name: Save vcpkg tools cache uses: actions/cache/save@v5 - if: github.event_name != 'pull_request' && github.ref_name == github.event.repository.default_branch && steps.vcpkg-tools-cache.outputs.cache-hit != 'true' + # Only save cache from one job as they share tools. If the matrix is expanded to jobs with unique tools, this may need amending. 
+ if: github.event_name != 'pull_request' && github.ref_name == github.event.repository.default_branch && steps.vcpkg-tools-cache.outputs.cache-hit != 'true' && matrix.job-type == 'standard' with: path: C:/vcpkg/downloads/tools key: ${{ github.job }}-vcpkg-tools-${{ github.run_id }} @@ -418,7 +423,7 @@ jobs: - name: Run bitcoind.exe run: py -3 .github/ci-windows-cross.py print_version - - *SET_UP_VS + - *IMPORT_VS_ENV - name: Check executable manifests run: py -3 .github/ci-windows-cross.py check_manifests diff --git a/CMakeLists.txt b/CMakeLists.txt index f992a8d6af4e..f0f101b2098b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -119,6 +119,7 @@ endif() cmake_dependent_option(BUILD_WALLET_TOOL "Build bitcoin-wallet tool." ${BUILD_TESTS} "ENABLE_WALLET" OFF) option(REDUCE_EXPORTS "Attempt to reduce exported symbols in the resulting executables." OFF) +option(CMAKE_COMPILE_WARNING_AS_ERROR "Treat compiler warnings as errors." OFF) option(WITH_CCACHE "Attempt to use ccache for compiling." ON) option(WITH_ZMQ "Enable ZMQ notifications." OFF) @@ -126,6 +127,8 @@ if(WITH_ZMQ) find_package(ZeroMQ 4.0.0 MODULE REQUIRED) endif() +option(WITH_EMBEDDED_ASMAP "Embed default ASMap data." ON) + option(WITH_USDT "Enable tracepoints for Userspace, Statically Defined Tracing." OFF) if(WITH_USDT) find_package(USDT MODULE REQUIRED) @@ -215,6 +218,7 @@ if(BUILD_FOR_FUZZING) set(BUILD_GUI OFF) set(ENABLE_EXTERNAL_SIGNER OFF) set(WITH_ZMQ OFF) + set(WITH_EMBEDDED_ASMAP OFF) set(BUILD_TESTS OFF) set(BUILD_GUI_TESTS OFF) set(BUILD_BENCH OFF) @@ -653,6 +657,7 @@ else() set(ipc_status OFF) endif() message(" IPC ................................. ${ipc_status}") +message(" Embedded ASMap ...................... ${WITH_EMBEDDED_ASMAP}") message(" USDT tracing ........................ ${WITH_USDT}") message(" QR code (GUI) ....................... ${WITH_QRENCODE}") message(" DBus (GUI) .......................... 
${WITH_DBUS}") diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index 0e732240cce1..2465c70bb487 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -19,7 +19,7 @@ else fi export CONTAINER_NAME=ci_native_asan -export APT_LLVM_V="21" +export APT_LLVM_V="22" export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev mold python3-zmq qt6-base-dev qt6-tools-dev qt6-l10n-tools libevent-dev libboost-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE} libcapnp-dev capnproto python3-pip" export PIP_PACKAGES="--break-system-packages pycapnp" export NO_DEPENDS=1 diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index b3ad36a8bab7..cdfe2a0d35b3 100755 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_fuzz -export APT_LLVM_V="21" +export APT_LLVM_V="22" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libevent-dev libboost-dev libsqlite3-dev libcapnp-dev capnproto" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh index 7529bbc69f78..c6923896c562 100755 --- a/ci/test/00_setup_env_native_fuzz_with_msan.sh +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -export APT_LLVM_V="21" +export APT_LLVM_V="22" LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" # -lstdc++ to resolve link issues due to upstream packaging diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index 
c0559fc69cc8..4a17fb496d60 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -export APT_LLVM_V="21" +export APT_LLVM_V="22" LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh index 28446a705dc3..0b21ab226a91 100755 --- a/ci/test/00_setup_env_native_nowallet.sh +++ b/ci/test/00_setup_env_native_nowallet.sh @@ -17,4 +17,5 @@ export BITCOIN_CONFIG="\ --preset=dev-mode \ -DREDUCE_EXPORTS=ON \ -DENABLE_WALLET=OFF \ + -DWITH_EMBEDDED_ASMAP=OFF \ " diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index b0429ce8fe79..29dd06bf0d9b 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -export APT_LLVM_V="21" +export APT_LLVM_V="22" LIBCXX_DIR="/cxx_build/" LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq python3-pip" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index d62221a7b77f..f8d337f6c902 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -22,11 +22,6 @@ if [ -n "$DPKG_ADD_ARCH" ]; then fi if [ -n "${APT_LLVM_V}" ]; then - 
# Temporarily work around Sequoia PGP policy deadline for legacy repositories. - # See https://github.com/llvm/llvm-project/issues/153385. - if [ -f /usr/share/apt/default-sequoia.config ]; then - sed -i 's/\(sha1\.second_preimage_resistance =\).*/\1 9999-01-01/' /usr/share/apt/default-sequoia.config - fi ${CI_RETRY_EXE} apt-get update ${CI_RETRY_EXE} apt-get install curl -y curl "https://apt.llvm.org/llvm-snapshot.gpg.key" | tee "/etc/apt/trusted.gpg.d/apt.llvm.org.asc" @@ -62,7 +57,7 @@ if [ -n "$PIP_PACKAGES" ]; then fi if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.5" /llvm-project + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-22.1.0" /llvm-project cmake -G Ninja -B /cxx_build/ \ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ diff --git a/ci/test/wrap-valgrind.sh b/ci/test/wrap-valgrind.sh index e351cf24839b..4ed3f2d66c54 100755 --- a/ci/test/wrap-valgrind.sh +++ b/ci/test/wrap-valgrind.sh @@ -12,7 +12,7 @@ for b_name in "${BASE_OUTDIR}/bin"/*; do echo "Wrap $b ..." mv "$b" "${b}_orig" echo '#!/usr/bin/env bash' > "$b" - echo "exec valgrind --gen-suppressions=all --quiet --error-exitcode=1 --suppressions=${BASE_ROOT_DIR}/contrib/valgrind.supp \"${b}_orig\" \"\$@\"" >> "$b" + echo "exec valgrind --gen-suppressions=all --quiet --error-exitcode=1 --suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/valgrind.supp \"${b}_orig\" \"\$@\"" >> "$b" chmod +x "$b" done done diff --git a/cmake/module/AddBoostIfNeeded.cmake b/cmake/module/AddBoostIfNeeded.cmake index b3f248009d8c..80a6d2e89117 100644 --- a/cmake/module/AddBoostIfNeeded.cmake +++ b/cmake/module/AddBoostIfNeeded.cmake @@ -32,12 +32,14 @@ function(add_boost_if_needed) find_package(Boost 1.74.0 REQUIRED CONFIG) mark_as_advanced(Boost_INCLUDE_DIR boost_headers_DIR) # Workaround for a bug in NetBSD pkgsrc. - # See: https://github.com/NetBSD/pkgsrc/issues/167. 
+ # See https://gnats.netbsd.org/59856. if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD") get_filename_component(_boost_include_dir "${boost_headers_DIR}/../../../include/" ABSOLUTE) - set_target_properties(Boost::headers PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES ${_boost_include_dir} - ) + if(_boost_include_dir MATCHES "^/usr/pkg/") + set_target_properties(Boost::headers PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES ${_boost_include_dir} + ) + endif() unset(_boost_include_dir) endif() set_target_properties(Boost::headers PROPERTIES IMPORTED_GLOBAL TRUE) diff --git a/cmake/script/GenerateHeaderFromRaw.cmake b/cmake/script/GenerateHeaderFromRaw.cmake index d373d1c4f870..2c40e419f60b 100644 --- a/cmake/script/GenerateHeaderFromRaw.cmake +++ b/cmake/script/GenerateHeaderFromRaw.cmake @@ -18,6 +18,5 @@ ${formatted_bytes} }; inline constexpr std::span ${raw_source_basename}{detail_${raw_source_basename}_raw}; -} -") +}") file(WRITE ${HEADER_PATH} "${header_content}") diff --git a/contrib/README.md b/contrib/README.md index f23d7ac557bb..037ea2f0690d 100644 --- a/contrib/README.md +++ b/contrib/README.md @@ -18,6 +18,9 @@ A Linux bash script that will set up traffic control (tc) to limit the outgoing ### [Seeds](/contrib/seeds) ### Utility to generate the pnSeed[] array that is compiled into the client. +### [ASMap](/contrib/asmap) ### +Utilities to analyze and process asmap files. + Build Tools and Keys --------------------- diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md index 833d54037f11..a157acb4b75c 100644 --- a/contrib/guix/INSTALL.md +++ b/contrib/guix/INSTALL.md @@ -69,13 +69,13 @@ distros, please see: https://repology.org/project/guix/versions ### Debian / Ubuntu -Guix is available as a distribution package in various versions of [Debian -](https://packages.debian.org/search?keywords=guix) and [Ubuntu -](https://packages.ubuntu.com/search?keywords=guix). +Currently, the `guix` package is no longer present in recent Debian or Ubuntu +repositories. 
Any other installation option mentioned in this document may be +used. -To install: +If you previously installed `guix` via `apt`, you can remove it with: ```sh -sudo apt install guix +sudo apt purge guix ``` ### Arch Linux diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign index ec8fbc0cf96b..791b75c540bc 100755 --- a/contrib/guix/guix-codesign +++ b/contrib/guix/guix-codesign @@ -289,7 +289,7 @@ INFO: Codesigning ${VERSION:?not set} for platform triple ${HOST:?not set}: EOF - # Run the build script 'contrib/guix/libexec/build.sh' in the build + # Run the build script 'contrib/guix/libexec/codesign.sh' in the build # container specified by 'contrib/guix/manifest.scm'. # # Explanation of `guix shell` flags: diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 1eaa86aeb3ee..072e5b91ba61 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -4,6 +4,9 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C set -e -o pipefail + +# Environment variables for determinism +export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" export TZ=UTC # Although Guix _does_ set umask when building its own packages (in our case, @@ -157,10 +160,6 @@ case "$HOST" in ;; esac -# Environment variables for determinism -export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" -export TZ="UTC" - #################### # Depends Building # #################### @@ -401,12 +400,14 @@ mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) ( + tmp="$(mktemp)" cd /outdir-base { echo "$GIT_ARCHIVE" find "$ACTUAL_OUTDIR" -type f } | xargs realpath --relative-base="$PWD" \ - | xargs sha256sum \ - | sort -k2 \ - | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part + | xargs sha256sum \ + | sort -k2 \ + > "$tmp"; + mv "$tmp" "$ACTUAL_OUTDIR"/SHA256SUMS.part ) diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh index 9ea683b9fa9c..9b7f085d3ab9 100755 --- a/contrib/guix/libexec/codesign.sh +++ b/contrib/guix/libexec/codesign.sh @@ -141,6 +141,7 @@ mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) ( + tmp="$(mktemp)" cd /outdir-base { echo "$CODESIGNING_TARBALL" @@ -149,5 +150,6 @@ mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ } | xargs realpath --relative-base="$PWD" \ | xargs sha256sum \ | sort -k2 \ - | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part + > "$tmp"; + mv "$tmp" "$ACTUAL_OUTDIR"/SHA256SUMS.part ) diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index ea1ffe5d983e..9cadd410537c 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -521,36 +521,6 @@ inspecting signatures in Mach-O binaries.") (("^install-others =.*$") (string-append "install-others = " out "/etc/rpc\n"))))))))))))) -;; The sponge tool from moreutils. 
-(define-public sponge - (package - (name "sponge") - (version "0.69") - (source (origin - (method url-fetch) - (uri (string-append - "https://git.joeyh.name/index.cgi/moreutils.git/snapshot/ - moreutils-" version ".tar.gz")) - (file-name (string-append "moreutils-" version ".tar.gz")) - (sha256 - (base32 - "1l859qnzccslvxlh5ghn863bkq2vgmqgnik6jr21b9kc6ljmsy8g")))) - (build-system gnu-build-system) - (arguments - (list #:phases - #~(modify-phases %standard-phases - (delete 'configure) - (replace 'install - (lambda* (#:key outputs #:allow-other-keys) - (let ((bin (string-append (assoc-ref outputs "out") "/bin"))) - (install-file "sponge" bin))))) - #:make-flags - #~(list "sponge" (string-append "CC=" #$(cc-for-target))))) - (home-page "https://joeyh.name/code/moreutils/") - (synopsis "Miscellaneous general-purpose command-line tools") - (description "Just sponge") - (license license:gpl2+))) - (packages->manifest (append (list ;; The Basics @@ -565,7 +535,6 @@ inspecting signatures in Mach-O binaries.") patch gawk sed - sponge ;; Compression and archiving tar gzip diff --git a/doc/asmap-data.md b/doc/asmap-data.md new file mode 100644 index 000000000000..09e2f95c9749 --- /dev/null +++ b/doc/asmap-data.md @@ -0,0 +1,59 @@ +# Embedded ASMap data + +## Background + +The ASMap feature (available via `-asmap`) makes it possible to use a peer's AS Number (ASN), an ISP/hoster identifier, +in netgroup bucketing in order to ensure a higher diversity in the peer +set. When not using this, the default behavior is to have the buckets formed +based on IP prefixes but this does not +prevent having connections dominated by peers at the same large-scale hoster, +for example, since such companies usually control many diverse IP ranges. +In order to use ASMap, the mapping between IP prefixes and AS Numbers needs +to be available. 
This mapping data can be provided through an external file +but Bitcoin Core also embeds a default map in its builds to make the feature +available to users when they are unable to provide a file. + +## Data sourcing and tools + +ASMap is a mapping of IP prefix to ASN, essentially a snapshot of the +internet routing table at some point in time. Due to the high volatility +of parts of this routing table and the known vulnerabilities in the BGP +protocol it is challenging to collect this data and prove its consistency. +Sourcing the data from a single trusted source is problematic as well. + +The [Kartograf](https://github.com/asmap/kartograf) tool was created to +deal with these uncertainties as good as possible. The mapping data is sourced from RPKI, IRR and +Routeviews. The former two are themselves used as security mechanisms to +protect against BGP security issues, which is why they are considered more secure and +their data takes precedence. The latter is a trusted collector of BGP traffic +and only used for IP space that is not covered by RPKI and IRR. + +The process in which the Kartograf project parses, processes and merges these +data sources is deterministic. Given the raw download files from these +different sources, anyone can build their own map file and verify the content +matches with other users' results. Before the map is usable by Bitcoin Core +it needs to be encoded as well. This is done using `asmap-tool.py` in `contrib/asmap` +and this step is deterministic as well. + +When it comes to obtaining the initial input data, the high volatility remains +a challenge if users don't want to trust a single creator of the used ASMap file. +To overcome this, multiple users can start the download process at the exact +same time which leads to a high likelihood that their downloaded data will be +similar enough that they receive the same output at the end of the process. 
+This process is regularly coordinated at the [asmap-data](https://github.com/asmap/asmap-data) +project. If enough participants have joined the effort (5 or more is recommended) and a majority of the +participants have received the same result, the resulting ASMap file is added +to the repository for public use. Files will not be merged to the repository +without at least two additional reviewers confirming that the process described +above was followed as expected and that the encoding step yielded the same +file hash. New files are created on an ongoing basis but without any central planning +or an explicit schedule. + +## Release process + +As an upcoming release approaches the embedded ASMap data should be updated +by replacing the `ip_asn.dat` with a newer ASMap file from the asmap-data +repository so that its data is embedded in the release. Ideally, there may be a file +already created recently that can be selected for an upcoming release. Alternatively, +a new creation process can be initiated with the goal of obtaining a fresh map +for use in the upcoming release. diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 97ffc0cd1b69..31ffd370c2f9 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -384,13 +384,13 @@ other input. Valgrind is a programming tool for memory debugging, memory leak detection, and profiling. The repo contains a Valgrind suppressions file -([`valgrind.supp`](https://github.com/bitcoin/bitcoin/blob/master/contrib/valgrind.supp)) +([`valgrind.supp`](/test/sanitizer_suppressions/valgrind.supp)) which includes known Valgrind warnings in our dependencies that cannot be fixed in-tree. 
Example use: ```shell -$ valgrind --suppressions=contrib/valgrind.supp build/bin/test_bitcoin -$ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \ +$ valgrind --suppressions=test/sanitizer_suppressions/valgrind.supp build/bin/test_bitcoin +$ valgrind --suppressions=test/sanitizer_suppressions/valgrind.supp --leak-check=full \ --show-leak-kinds=all build/bin/test_bitcoin --log_level=test_suite $ valgrind -v --leak-check=full build/bin/bitcoind -printtoconsole $ ./build/test/functional/test_runner.py --valgrind diff --git a/doc/files.md b/doc/files.md index 12c6cefbc6d6..27fe5615dddc 100644 --- a/doc/files.md +++ b/doc/files.md @@ -55,6 +55,7 @@ Subdirectory | File(s) | Description `blocks/` | `xor.dat` | Rolling XOR pattern for block and undo data files `chainstate/` | LevelDB database | Blockchain state (a compact representation of all currently unspent transaction outputs (UTXOs) and metadata about the transactions they are from) `indexes/txindex/` | LevelDB database | Transaction index; *optional*, used if `-txindex=1` +`indexes/txospenderindex/` | LevelDB database | Transaction spender index; *optional*, used if `-txospenderindex=1` `indexes/blockfilter/basic/db/` | LevelDB database | Blockfilter index LevelDB database for the basic filtertype; *optional*, used if `-blockfilterindex=basic` `indexes/blockfilter/basic/` | `fltrNNNNN.dat`[\[2\]](#note2) | Blockfilter index filters for the basic filtertype; *optional*, used if `-blockfilterindex=basic` `indexes/coinstatsindex/db/` | LevelDB database | Coinstats index; *optional*, used if `-coinstatsindex=1` diff --git a/doc/productivity.md b/doc/productivity.md index 4509e23cde1f..c436811a0a86 100644 --- a/doc/productivity.md +++ b/doc/productivity.md @@ -191,7 +191,12 @@ Then a simple `git pr 12345` will fetch and check out that pr from upstream. 
### Diff the diffs with `git range-diff` -It is very common for contributors to rebase their pull requests, or make changes to commits (perhaps in response to review) that are not at the head of their branch. This poses a problem for reviewers as when the contributor force pushes, the reviewer is no longer sure that his previous reviews of commits are still valid (as the commit hashes can now be different even though the diff is semantically the same). [git range-diff](https://git-scm.com/docs/git-range-diff) (Git >= 2.19) can help solve this problem by diffing the diffs. +It is very common for contributors to rebase their pull requests, or make changes to commits (perhaps in response to review) that are not at the head of their branch. This poses a problem for reviewers as when the contributor force pushes, the reviewer is no longer sure that their previous reviews of commits are still valid (as the commit hashes can now be different even though the diff is semantically the same). [git range-diff](https://git-scm.com/docs/git-range-diff) (Git >= 2.19) can help solve this problem by diffing the diffs. + +> [!NOTE] +> If `git range-diff` cannot match a commit in the old range to a commit in the new range, it will show it as "removed" (`<`) and "added" (`>`), without showing the patch contents. +> This does not mean there were no code changes. +> It means the commit was considered unrelated, and should be reviewed in full like a new commit. 
For example, to identify the differences between your previously reviewed diffs P1-5, and the new diffs P1-2,N3-4 as illustrated below: ``` @@ -207,6 +212,12 @@ You can do: git range-diff master previously-reviewed-head new-head ``` +If you expected `git range-diff` to match a commit, but it shows it as a deletion and an addition, try re-running with a higher creation factor: + +```sh +git range-diff --creation-factor=95 +``` + Note that `git range-diff` also works for rebases: ``` diff --git a/doc/release-notes-24539.md b/doc/release-notes-24539.md new file mode 100644 index 000000000000..63b4d70eafd7 --- /dev/null +++ b/doc/release-notes-24539.md @@ -0,0 +1,14 @@ +New settings +------------ +- `-txospenderindex` enables the creation of a transaction output spender + index that, if present, will be scanned by `gettxspendingprevout` if a + spending transaction was not found in the mempool. + (#24539) + +Updated RPCs +------------ +- `gettxspendingprevout` has 2 new optional arguments: `mempool_only` and `return_spending_tx`. + If `mempool_only` is true it will limit scans to the mempool even if `txospenderindex` is available. + If `return_spending_tx` is true, the full spending tx will be returned. + In addition if `txospenderindex` is available and a confirmed spending transaction is found, + its block hash will be returned. (#24539) diff --git a/doc/release-notes-29415.md b/doc/release-notes-29415.md index d5040a3193d8..c0e0f3dc8881 100644 --- a/doc/release-notes-29415.md +++ b/doc/release-notes-29415.md @@ -12,3 +12,8 @@ P2P and network changes 2. If the originator sends two otherwise unrelated transactions, they will not be linkable. This is because a separate connection is used for broadcasting each transaction. 
(#29415) + +- New RPCs have been added to introspect and control private broadcast: + `getprivatebroadcastinfo` reports transactions currently being privately + broadcast, and `abortprivatebroadcast` removes matching + transactions from the private broadcast queue. diff --git a/doc/release-notes-32138.md b/doc/release-notes-32138.md new file mode 100644 index 000000000000..566998508806 --- /dev/null +++ b/doc/release-notes-32138.md @@ -0,0 +1,3 @@ +RPC and Startup Option +--- +The `-paytxfee` startup option and the `settxfee` RPC are now removed after being deprecated in Bitcoin Core 30.0. They used to allow the user to set a static fee rate for wallet transactions, which could potentially lead to overpaying or underpaying. Users should instead rely on fee estimation or specify a fee rate per transaction using the `fee_rate` argument in RPCs such as `fundrawtransaction`, `sendtoaddress`, `send`, `sendall`, and `sendmany`. (#32138) diff --git a/doc/release-notes-33199.md b/doc/release-notes-33199.md new file mode 100644 index 000000000000..90246d782730 --- /dev/null +++ b/doc/release-notes-33199.md @@ -0,0 +1,9 @@ +Fee Estimation +======================== + +- The Bitcoin Core fee estimator minimum fee rate bucket was updated from **1 sat/vB** to **0.1 sat/vB**, + which matches the node’s default `minrelaytxfee`. + This means that for a given confirmation target, if a sub-1 sat/vB fee rate bucket is the minimum tracked + with sufficient data, its average value will be returned as the fee rate estimate. + +- Note: Restarting a node with this change invalidates previously saved estimates in `fee_estimates.dat`; the fee estimator will start tracking fresh stats. diff --git a/doc/release-notes-33819.md b/doc/release-notes-33819.md deleted file mode 100644 index 79ed1f707492..000000000000 --- a/doc/release-notes-33819.md +++ /dev/null @@ -1,8 +0,0 @@ -Mining IPC ----------- - -- The `getCoinbaseTx()` method is renamed to `getCoinbaseRawTx()` and deprecated.
- IPC clients do not use the function name, so they're not affected. (#33819) -- Adds `getCoinbaseTx()` which clients should use instead of `getCoinbaseRawTx()`. It - contains all fields required to construct a coinbase transaction, and omits the - dummy output which Bitcoin Core uses internally. (#33819) diff --git a/doc/release-notes-34184.md b/doc/release-notes-34184.md new file mode 100644 index 000000000000..c582023eea2a --- /dev/null +++ b/doc/release-notes-34184.md @@ -0,0 +1,8 @@ +Mining IPC +---------- + +- `Mining.createNewBlock` now has a `cooldown` behavior (enabled by default) + that waits for IBD to finish and for the tip to catch up. This usually + prevents a flood of templates during startup, but is not guaranteed. (#34184) +- `Mining.interrupt()` can be used to interrupt `Mining.waitTipChanged` and + `Mining.createNewBlock`. (#34184) diff --git a/doc/release-notes-34512.md b/doc/release-notes-34512.md new file mode 100644 index 000000000000..b863448853f6 --- /dev/null +++ b/doc/release-notes-34512.md @@ -0,0 +1,8 @@ +Updated RPCs +------------ + +- The `getblock` RPC now returns a `coinbase_tx` object at verbosity levels 1, 2, + and 3. It contains `version`, `locktime`, `sequence`, `coinbase` and + `witness`. This allows for efficiently querying coinbase + transaction properties without fetching the full transaction data at + verbosity 2+. (#34512) diff --git a/doc/release-notes-34568.md b/doc/release-notes-34568.md new file mode 100644 index 000000000000..e48772330c1e --- /dev/null +++ b/doc/release-notes-34568.md @@ -0,0 +1,11 @@ +Mining IPC +---------- + +The IPC mining interface now requires mining clients to use the latest `mining.capnp` schema. Clients built against older schemas will fail when calling `Init.makeMining` and receive an RPC error indicating the old mining interface is no longer supported. Mining clients must update to the latest schema and regenerate bindings to continue working. 
(#34568) + +Notable IPC mining interface changes since the last release: +- `Mining.createNewBlock` and `Mining.checkBlock` now require a `context` parameter. +- `Mining.waitTipChanged` now has a default `timeout` (effectively infinite / `maxDouble`) if the client omits it. +- `BlockTemplate.getCoinbaseTx()` now returns a structured `CoinbaseTx` instead of raw bytes. +- Removed `BlockTemplate.getCoinbaseCommitment()` and `BlockTemplate.getWitnessCommitmentIndex()`. +- Cap’n Proto default values were updated to match the corresponding C++ defaults for mining-related option structs (e.g. `BlockCreateOptions`, `BlockWaitOptions`, `BlockCheckOptions`). diff --git a/doc/release-process.md b/doc/release-process.md index 272f36eadcfc..90ffd852e1a2 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -30,6 +30,7 @@ Release Process * Update translations see [translation_process.md](/doc/translation_process.md#synchronising-translations). * Update hardcoded [seeds](/contrib/seeds/README.md), see [this pull request](https://github.com/bitcoin/bitcoin/pull/27488) for an example. +* Update embedded asmap data at `/src/node/data/ip_asn.dat`, see [asmap data documentation](./asmap-data.md). * Update the following variables in [`src/kernel/chainparams.cpp`](/src/kernel/chainparams.cpp) for mainnet, testnet, and signet: - `m_assumed_blockchain_size` and `m_assumed_chain_state_size` with the current size plus some overhead (see [this](#how-to-calculate-assumed-blockchain-and-chain-state-size) for information on how to calculate them). 
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index cf1f26c9f24c..ad18115bbc5f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -193,6 +193,7 @@ add_library(bitcoin_node STATIC EXCLUDE_FROM_ALL index/blockfilterindex.cpp index/coinstatsindex.cpp index/txindex.cpp + index/txospenderindex.cpp init.cpp kernel/chain.cpp kernel/checks.cpp @@ -286,6 +287,13 @@ target_link_libraries(bitcoin_node $ $ ) +if(WITH_EMBEDDED_ASMAP) + target_compile_definitions(bitcoin_node PRIVATE ENABLE_EMBEDDED_ASMAP=1) + include(TargetDataSources) + target_raw_data_sources(bitcoin_node NAMESPACE node::data + node/data/ip_asn.dat + ) +endif() # Bitcoin wrapper executable that can call other executables. if(BUILD_BITCOIN_BIN) diff --git a/src/bench/cluster_linearize.cpp b/src/bench/cluster_linearize.cpp index d345a7cad1d9..0799cc2c2e9d 100644 --- a/src/bench/cluster_linearize.cpp +++ b/src/bench/cluster_linearize.cpp @@ -55,7 +55,7 @@ void BenchLinearizeOptimallyTotal(benchmark::Bench& bench, const std::string& na // Benchmark the total time to optimal. uint64_t rng_seed = 0; bench.name(bench_name).run([&] { - auto [_lin, optimal, _cost] = Linearize(depgraph, /*max_iterations=*/10000000, rng_seed++, IndexTxOrder{}); + auto [_lin, optimal, _cost] = Linearize(depgraph, /*max_cost=*/10000000, rng_seed++, IndexTxOrder{}); assert(optimal); }); } @@ -72,7 +72,7 @@ void BenchLinearizeOptimallyPerCost(benchmark::Bench& bench, const std::string& // Determine the cost of 100 rng_seeds. 
uint64_t total_cost = 0; for (uint64_t iter = 0; iter < 100; ++iter) { - auto [_lin, optimal, cost] = Linearize(depgraph, /*max_iterations=*/10000000, /*rng_seed=*/iter, IndexTxOrder{}); + auto [_lin, optimal, cost] = Linearize(depgraph, /*max_cost=*/10000000, /*rng_seed=*/iter, IndexTxOrder{}); total_cost += cost; } @@ -80,7 +80,7 @@ void BenchLinearizeOptimallyPerCost(benchmark::Bench& bench, const std::string& bench.name(bench_name).unit("cost").batch(total_cost).run([&] { uint64_t recompute_cost = 0; for (uint64_t iter = 0; iter < 100; ++iter) { - auto [_lin, optimal, cost] = Linearize(depgraph, /*max_iterations=*/10000000, /*rng_seed=*/iter, IndexTxOrder{}); + auto [_lin, optimal, cost] = Linearize(depgraph, /*max_cost=*/10000000, /*rng_seed=*/iter, IndexTxOrder{}); assert(optimal); recompute_cost += cost; } diff --git a/src/bench/txgraph.cpp b/src/bench/txgraph.cpp index 00074db658fb..c56a284aa25d 100644 --- a/src/bench/txgraph.cpp +++ b/src/bench/txgraph.cpp @@ -51,9 +51,9 @@ void BenchTxGraphTrim(benchmark::Bench& bench) static constexpr int NUM_DEPS_PER_BOTTOM_TX = 100; /** Set a very large cluster size limit so that only the count limit is triggered. */ static constexpr int32_t MAX_CLUSTER_SIZE = 100'000 * 100; - /** Set a very high number for acceptable iterations, so that we certainly benchmark optimal + /** Set a very high number for acceptable cost, so that we certainly benchmark optimal * linearization. */ - static constexpr uint64_t NUM_ACCEPTABLE_ITERS = 100'000'000; + static constexpr uint64_t HIGH_ACCEPTABLE_COST = 100'000'000; /** Refs to all top transactions. */ std::vector top_refs; @@ -65,7 +65,7 @@ void BenchTxGraphTrim(benchmark::Bench& bench) std::vector top_components; InsecureRandomContext rng(11); - auto graph = MakeTxGraph(MAX_CLUSTER_COUNT, MAX_CLUSTER_SIZE, NUM_ACCEPTABLE_ITERS, PointerComparator); + auto graph = MakeTxGraph(MAX_CLUSTER_COUNT, MAX_CLUSTER_SIZE, HIGH_ACCEPTABLE_COST, PointerComparator); // Construct the top chains. 
for (int chain = 0; chain < NUM_TOP_CHAINS; ++chain) { diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 724620aa7306..909ed09faaca 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -367,7 +367,6 @@ struct GetinfoRequestHandler : BaseRequestHandler { if (!batch[ID_WALLETINFO]["result"]["unlocked_until"].isNull()) { result.pushKV("unlocked_until", batch[ID_WALLETINFO]["result"]["unlocked_until"]); } - result.pushKV("paytxfee", batch[ID_WALLETINFO]["result"]["paytxfee"]); } if (!batch[ID_BALANCES]["result"].isNull()) { result.pushKV("balance", batch[ID_BALANCES]["result"]["mine"]["trusted"]); @@ -1152,7 +1151,6 @@ static void ParseGetInfoResult(UniValue& result) if (!result["unlocked_until"].isNull()) { result_string += strprintf("Unlocked until: %s\n", result["unlocked_until"].getValStr()); } - result_string += strprintf("Transaction fee rate (-paytxfee) (%s/kvB): %s\n\n", CURRENCY_UNIT, result["paytxfee"].getValStr()); } if (!result["balance"].isNull()) { result_string += strprintf("%sBalance:%s %s\n\n", CYAN, RESET, result["balance"].getValStr()); diff --git a/src/chain.h b/src/chain.h index 7b65c76d7b46..c27829208c4a 100644 --- a/src/chain.h +++ b/src/chain.h @@ -77,8 +77,7 @@ enum BlockStatus : uint32_t { BLOCK_HAVE_MASK = BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO, BLOCK_FAILED_VALID = 32, //!< stage after last reached validness failed - BLOCK_FAILED_CHILD = 64, //!< descends from failed block - BLOCK_FAILED_MASK = BLOCK_FAILED_VALID | BLOCK_FAILED_CHILD, + BLOCK_FAILED_CHILD = 64, //!< Unused flag that was previously set when descending from failed block BLOCK_OPT_WITNESS = 128, //!< block data in blk*.dat was received with a witness-enforcing client @@ -253,7 +252,7 @@ class CBlockIndex { AssertLockHeld(::cs_main); assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed. 
- if (nStatus & BLOCK_FAILED_MASK) + if (nStatus & BLOCK_FAILED_VALID) return false; return ((nStatus & BLOCK_VALID_MASK) >= nUpTo); } @@ -264,7 +263,7 @@ class CBlockIndex { AssertLockHeld(::cs_main); assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed. - if (nStatus & BLOCK_FAILED_MASK) return false; + if (nStatus & BLOCK_FAILED_VALID) return false; if ((nStatus & BLOCK_VALID_MASK) < nUpTo) { nStatus = (nStatus & ~BLOCK_VALID_MASK) | nUpTo; diff --git a/src/cluster_linearize.h b/src/cluster_linearize.h index 00627d6f1672..774bc61734f0 100644 --- a/src/cluster_linearize.h +++ b/src/cluster_linearize.h @@ -472,6 +472,77 @@ concept StrongComparator = * Linearize(), which just sorts by DepGraphIndex. */ using IndexTxOrder = std::compare_three_way; +/** A default cost model for SFL for SetType=BitSet<64>, based on benchmarks. + * + * The numbers here were obtained in February 2026 by: + * - For a variety of machines: + * - Running a fixed collection of ~385000 clusters found through random generation and fuzzing, + * optimizing for difficulty of linearization. + * - Linearize each ~3000 times, with different random seeds. Sometimes without input + * linearization, sometimes with a bad one. + * - Gather cycle counts for each of the operations included in this cost model, + * broken down by their parameters. + * - Correct the data by subtracting the runtime of obtaining the cycle count. + * - Drop the 5% top and bottom samples from each cycle count dataset, and compute the average + * of the remaining samples. + * - For each operation, fit a least-squares linear function approximation through the samples. + * - Rescale all machine expressions to make their total time match, as we only care about + * relative cost of each operation. + * - Take the per-operation average of operation expressions across all machines, to construct + * expressions for an average machine. + * - Approximate the result with integer coefficients. 
Each cost unit corresponds to somewhere + * between 0.5 ns and 2.5 ns, depending on the hardware. + */ +class SFLDefaultCostModel +{ + uint64_t m_cost{0}; + +public: + inline void InitializeBegin() noexcept {} + inline void InitializeEnd(int num_txns, int num_deps) noexcept + { + // Cost of initialization. + m_cost += 39 * num_txns; + // Cost of producing linearization at the end. + m_cost += 48 * num_txns + 4 * num_deps; + } + inline void GetLinearizationBegin() noexcept {} + inline void GetLinearizationEnd(int num_txns, int num_deps) noexcept + { + // Note that we account for the cost of the final linearization at the beginning (see + // InitializeEnd), because the cost budget decision needs to be made before calling + // GetLinearization. + // This function exists here to allow overriding it easily for benchmark purposes. + } + inline void MakeTopologicalBegin() noexcept {} + inline void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept + { + m_cost += 20 * num_chunks + 28 * num_steps; + } + inline void StartOptimizingBegin() noexcept {} + inline void StartOptimizingEnd(int num_chunks) noexcept { m_cost += 13 * num_chunks; } + inline void ActivateBegin() noexcept {} + inline void ActivateEnd(int num_deps) noexcept { m_cost += 10 * num_deps + 1; } + inline void DeactivateBegin() noexcept {} + inline void DeactivateEnd(int num_deps) noexcept { m_cost += 11 * num_deps + 8; } + inline void MergeChunksBegin() noexcept {} + inline void MergeChunksMid(int num_txns) noexcept { m_cost += 2 * num_txns; } + inline void MergeChunksEnd(int num_steps) noexcept { m_cost += 3 * num_steps + 5; } + inline void PickMergeCandidateBegin() noexcept {} + inline void PickMergeCandidateEnd(int num_steps) noexcept { m_cost += 8 * num_steps; } + inline void PickChunkToOptimizeBegin() noexcept {} + inline void PickChunkToOptimizeEnd(int num_steps) noexcept { m_cost += num_steps + 4; } + inline void PickDependencyToSplitBegin() noexcept {} + inline void PickDependencyToSplitEnd(int 
num_txns) noexcept { m_cost += 8 * num_txns + 9; } + inline void StartMinimizingBegin() noexcept {} + inline void StartMinimizingEnd(int num_chunks) noexcept { m_cost += 18 * num_chunks; } + inline void MinimizeStepBegin() noexcept {} + inline void MinimizeStepMid(int num_txns) noexcept { m_cost += 11 * num_txns + 11; } + inline void MinimizeStepEnd(bool split) noexcept { m_cost += 17 * split + 7; } + + inline uint64_t GetCost() const noexcept { return m_cost; } +}; + /** Class to represent the internal state of the spanning-forest linearization (SFL) algorithm. * * At all times, each dependency is marked as either "active" or "inactive". The subset of active @@ -631,6 +702,11 @@ using IndexTxOrder = std::compare_three_way; * - Inside the selected chunk (see above), among the dependencies whose top feerate is strictly * higher than its bottom feerate in the selected chunk, if any, a uniformly random dependency * is deactivated. + * - After every split, it is possible that the top and the bottom chunk merge with each other + * again in the merge sequence (through a top->bottom dependency, not through the deactivated + * one, which was bottom->top). Call this a self-merge. If a self-merge does not occur after + * a split, the resulting linearization is strictly improved (the area under the convexified + * feerate diagram increases by at least gain/2), while self-merges do not change it. * * - How to decide the exact output linearization: * - When there are multiple equal-feerate chunks with no dependencies between them, output a @@ -638,7 +714,7 @@ using IndexTxOrder = std::compare_three_way; * - Within chunks, repeatedly pick a uniformly random transaction among those with no missing * dependencies. */ -template +template class SpanningForestState { private: @@ -647,60 +723,64 @@ class SpanningForestState /** Data type to represent indexing into m_tx_data. */ using TxIdx = DepGraphIndex; - /** Data type to represent indexing into m_dep_data. 
*/ - using DepIdx = uint32_t; - - /** Structure with information about a single transaction. For transactions that are the - * representative for the chunk they are in, this also stores chunk information. */ + /** Data type to represent indexing into m_set_info. Use the smallest type possible to improve + * cache locality. */ + using SetIdx = std::conditional_t<(SetType::Size() <= 0xff), + uint8_t, + std::conditional_t<(SetType::Size() <= 0xffff), + uint16_t, + uint32_t>>; + /** An invalid SetIdx. */ + static constexpr SetIdx INVALID_SET_IDX = SetIdx(-1); + + /** Structure with information about a single transaction. */ struct TxData { - /** The dependencies to children of this transaction. Immutable after construction. */ - std::vector child_deps; + /** The top set for every active child dependency this transaction has, indexed by child + * TxIdx. Only defined for indexes in active_children. */ + std::array dep_top_idx; /** The set of parent transactions of this transaction. Immutable after construction. */ SetType parents; /** The set of child transactions of this transaction. Immutable after construction. */ SetType children; - /** Which transaction holds the chunk_setinfo for the chunk this transaction is in - * (the representative for the chunk). */ - TxIdx chunk_rep; - /** (Only if this transaction is the representative for the chunk it is in) the total - * chunk set and feerate. */ - SetInfo chunk_setinfo; - }; - - /** Structure with information about a single dependency. */ - struct DepData { - /** Whether this dependency is active. */ - bool active; - /** What the parent and child transactions are. Immutable after construction. */ - TxIdx parent, child; - /** (Only if this dependency is active) the would-be top chunk and its feerate that would - * be formed if this dependency were to be deactivated. */ - SetInfo top_setinfo; + /** The set of child transactions reachable through an active dependency. 
*/ + SetType active_children; + /** Which chunk this transaction belongs to. */ + SetIdx chunk_idx; }; /** The set of all TxIdx's of transactions in the cluster indexing into m_tx_data. */ SetType m_transaction_idxs; + /** The set of all chunk SetIdx's. This excludes the SetIdxs that refer to active + * dependencies' tops. */ + SetType m_chunk_idxs; + /** The set of all SetIdx's that appear in m_suboptimal_chunks. Note that they do not need to + * be chunks: some of these sets may have been converted to a dependency's top set since being + * added to m_suboptimal_chunks. */ + SetType m_suboptimal_idxs; /** Information about each transaction (and chunks). Keeps the "holes" from DepGraph during * construction. Indexed by TxIdx. */ std::vector m_tx_data; - /** Information about each dependency. Indexed by DepIdx. */ - std::vector m_dep_data; - /** A FIFO of chunk representatives of chunks that may be improved still. */ - VecDeque m_suboptimal_chunks; - /** A FIFO of chunk representatives with a pivot transaction in them, and a flag to indicate - * their status: + /** Information about each set (chunk, or active dependency top set). Indexed by SetIdx. */ + std::vector> m_set_info; + /** For each chunk, indexed by SetIdx, the set of out-of-chunk reachable transactions, in the + * upwards (.first) and downwards (.second) direction. */ + std::vector> m_reachable; + /** A FIFO of chunk SetIdxs for chunks that may be improved still. */ + VecDeque m_suboptimal_chunks; + /** A FIFO of chunk indexes with a pivot transaction in them, and a flag to indicate their + * status: * - bit 1: currently attempting to move the pivot down, rather than up. * - bit 2: this is the second stage, so we have already tried moving the pivot in the other * direction. */ - VecDeque> m_nonminimal_chunks; - - /** The number of updated transactions in activations/deactivations. */ - uint64_t m_cost{0}; + VecDeque> m_nonminimal_chunks; /** The DepGraph we are trying to linearize. 
*/ const DepGraph& m_depgraph; + /** Accounting for the cost of this computation. */ + CostModel m_cost; + /** Pick a random transaction within a set (which must be non-empty). */ TxIdx PickRandomTx(const SetType& tx_idxs) noexcept { @@ -714,58 +794,40 @@ class SpanningForestState return TxIdx(-1); } - /** Update a chunk: - * - All transactions have their chunk representative set to `chunk_rep`. - * - All dependencies which have `query` in their top_setinfo get `dep_change` added to it - * (if `!Subtract`) or removed from it (if `Subtract`). - */ - template - void UpdateChunk(const SetType& chunk, TxIdx query, TxIdx chunk_rep, const SetInfo& dep_change) noexcept + /** Find the set of out-of-chunk transactions reachable from tx_idxs, both in upwards and + * downwards direction. Only used by SanityCheck to verify the precomputed reachable sets in + * m_reachable that are maintained by Activate/Deactivate. */ + std::pair GetReachable(const SetType& tx_idxs) const noexcept { - // Iterate over all the chunk's transactions. - for (auto tx_idx : chunk) { - auto& tx_data = m_tx_data[tx_idx]; - // Update the chunk representative. - tx_data.chunk_rep = chunk_rep; - // Iterate over all active dependencies with tx_idx as parent. Combined with the outer - // loop this iterates over all internal active dependencies of the chunk. - auto child_deps = std::span{tx_data.child_deps}; - for (auto dep_idx : child_deps) { - auto& dep_entry = m_dep_data[dep_idx]; - Assume(dep_entry.parent == tx_idx); - // Skip inactive dependencies. - if (!dep_entry.active) continue; - // If this dependency's top_setinfo contains query, update it to add/remove - // dep_change. 
- if (dep_entry.top_setinfo.transactions[query]) { - if constexpr (Subtract) { - dep_entry.top_setinfo -= dep_change; - } else { - dep_entry.top_setinfo |= dep_change; - } - } - } + SetType parents, children; + for (auto tx_idx : tx_idxs) { + const auto& tx_data = m_tx_data[tx_idx]; + parents |= tx_data.parents; + children |= tx_data.children; } + return {parents - tx_idxs, children - tx_idxs}; } - /** Make a specified inactive dependency active. Returns the merged chunk representative. */ - TxIdx Activate(DepIdx dep_idx) noexcept + /** Make the inactive dependency from child to parent, which must not be in the same chunk + * already, active. Returns the merged chunk idx. */ + SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept { - auto& dep_data = m_dep_data[dep_idx]; - Assume(!dep_data.active); - auto& child_tx_data = m_tx_data[dep_data.child]; - auto& parent_tx_data = m_tx_data[dep_data.parent]; - - // Gather information about the parent and child chunks. - Assume(parent_tx_data.chunk_rep != child_tx_data.chunk_rep); - auto& par_chunk_data = m_tx_data[parent_tx_data.chunk_rep]; - auto& chl_chunk_data = m_tx_data[child_tx_data.chunk_rep]; - TxIdx top_rep = parent_tx_data.chunk_rep; - auto top_part = par_chunk_data.chunk_setinfo; - auto bottom_part = chl_chunk_data.chunk_setinfo; - // Update the parent chunk to also contain the child. - par_chunk_data.chunk_setinfo |= bottom_part; - m_cost += par_chunk_data.chunk_setinfo.transactions.Count(); + m_cost.ActivateBegin(); + // Gather and check information about the parent and child transactions. + auto& parent_data = m_tx_data[parent_idx]; + auto& child_data = m_tx_data[child_idx]; + Assume(parent_data.children[child_idx]); + Assume(!parent_data.active_children[child_idx]); + // Get the set index of the chunks the parent and child are currently in. The parent chunk + // will become the top set of the newly activated dependency, while the child chunk will be + // grown to become the merged chunk. 
+ auto parent_chunk_idx = parent_data.chunk_idx; + auto child_chunk_idx = child_data.chunk_idx; + Assume(parent_chunk_idx != child_chunk_idx); + Assume(m_chunk_idxs[parent_chunk_idx]); + Assume(m_chunk_idxs[child_chunk_idx]); + auto& top_info = m_set_info[parent_chunk_idx]; + auto& bottom_info = m_set_info[child_chunk_idx]; // Consider the following example: // @@ -782,234 +844,364 @@ class SpanningForestState // dependency being activated (E->C here) in its top set, will have the opposite part added // to it. This is true for B->A and F->E, but not for C->A and F->D. // - // Let UpdateChunk traverse the old parent chunk top_part (ABC in example), and add - // bottom_part (DEF) to every dependency's top_set which has the parent (C) in it. The - // representative of each of these transactions was already top_rep, so that is not being - // changed here. - UpdateChunk(/*chunk=*/top_part.transactions, /*query=*/dep_data.parent, - /*chunk_rep=*/top_rep, /*dep_change=*/bottom_part); - // Let UpdateChunk traverse the old child chunk bottom_part (DEF in example), and add - // top_part (ABC) to every dependency's top_set which has the child (E) in it. At the same - // time, change the representative of each of these transactions to be top_rep, which - // becomes the representative for the merged chunk. - UpdateChunk(/*chunk=*/bottom_part.transactions, /*query=*/dep_data.child, - /*chunk_rep=*/top_rep, /*dep_change=*/top_part); - // Make active. - dep_data.active = true; - dep_data.top_setinfo = top_part; - return top_rep; + // Traverse the old parent chunk top_info (ABC in example), and add bottom_info (DEF) to + // every dependency's top set which has the parent (C) in it. At the same time, change the + // chunk_idx for each to be child_chunk_idx, which becomes the set for the merged chunk. 
+ for (auto tx_idx : top_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + tx_data.chunk_idx = child_chunk_idx; + for (auto dep_child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]]; + if (dep_top_info.transactions[parent_idx]) dep_top_info |= bottom_info; + } + } + // Traverse the old child chunk bottom_info (DEF in example), and add top_info (ABC) to + // every dependency's top set which has the child (E) in it. + for (auto tx_idx : bottom_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + for (auto dep_child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]]; + if (dep_top_info.transactions[child_idx]) dep_top_info |= top_info; + } + } + // Merge top_info into bottom_info, which becomes the merged chunk. + bottom_info |= top_info; + // Compute merged sets of reachable transactions from the new chunk, based on the input + // chunks' reachable sets. + m_reachable[child_chunk_idx].first |= m_reachable[parent_chunk_idx].first; + m_reachable[child_chunk_idx].second |= m_reachable[parent_chunk_idx].second; + m_reachable[child_chunk_idx].first -= bottom_info.transactions; + m_reachable[child_chunk_idx].second -= bottom_info.transactions; + // Make parent chunk the set for the new active dependency. + parent_data.dep_top_idx[child_idx] = parent_chunk_idx; + parent_data.active_children.Set(child_idx); + m_chunk_idxs.Reset(parent_chunk_idx); + // Return the newly merged chunk. + m_cost.ActivateEnd(/*num_deps=*/bottom_info.transactions.Count() - 1); + return child_chunk_idx; } - /** Make a specified active dependency inactive. */ - void Deactivate(DepIdx dep_idx) noexcept + /** Make a specified active dependency inactive. Returns the created parent and child chunk + * indexes. 
*/ + std::pair Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept { - auto& dep_data = m_dep_data[dep_idx]; - Assume(dep_data.active); - auto& parent_tx_data = m_tx_data[dep_data.parent]; - // Make inactive. - dep_data.active = false; - // Update representatives. - auto& chunk_data = m_tx_data[parent_tx_data.chunk_rep]; - m_cost += chunk_data.chunk_setinfo.transactions.Count(); - auto top_part = dep_data.top_setinfo; - auto bottom_part = chunk_data.chunk_setinfo - top_part; - TxIdx bottom_rep = dep_data.child; - auto& bottom_chunk_data = m_tx_data[bottom_rep]; - bottom_chunk_data.chunk_setinfo = bottom_part; - TxIdx top_rep = dep_data.parent; - auto& top_chunk_data = m_tx_data[top_rep]; - top_chunk_data.chunk_setinfo = top_part; - - // See the comment above in Activate(). We perform the opposite operations here, - // removing instead of adding. - // - // Let UpdateChunk traverse the old parent chunk top_part, and remove bottom_part from - // every dependency's top_set which has the parent in it. At the same time, change the - // representative of each of these transactions to be top_rep. - UpdateChunk(/*chunk=*/top_part.transactions, /*query=*/dep_data.parent, - /*chunk_rep=*/top_rep, /*dep_change=*/bottom_part); - // Let UpdateChunk traverse the old child chunk bottom_part, and remove top_part from every - // dependency's top_set which has the child in it. At the same time, change the - // representative of each of these transactions to be bottom_rep. - UpdateChunk(/*chunk=*/bottom_part.transactions, /*query=*/dep_data.child, - /*chunk_rep=*/bottom_rep, /*dep_change=*/top_part); + m_cost.DeactivateBegin(); + // Gather and check information about the parent transactions. 
+ auto& parent_data = m_tx_data[parent_idx]; + Assume(parent_data.children[child_idx]); + Assume(parent_data.active_children[child_idx]); + // Get the top set of the active dependency (which will become the parent chunk) and the + // chunk set the transactions are currently in (which will become the bottom chunk). + auto parent_chunk_idx = parent_data.dep_top_idx[child_idx]; + auto child_chunk_idx = parent_data.chunk_idx; + Assume(parent_chunk_idx != child_chunk_idx); + Assume(m_chunk_idxs[child_chunk_idx]); + Assume(!m_chunk_idxs[parent_chunk_idx]); // top set, not a chunk + auto& top_info = m_set_info[parent_chunk_idx]; + auto& bottom_info = m_set_info[child_chunk_idx]; + + // Remove the active dependency. + parent_data.active_children.Reset(child_idx); + m_chunk_idxs.Set(parent_chunk_idx); + auto ntx = bottom_info.transactions.Count(); + // Subtract the top_info from the bottom_info, as it will become the child chunk. + bottom_info -= top_info; + // See the comment above in Activate(). We perform the opposite operations here, removing + // instead of adding. Simultaneously, aggregate the top/bottom's union of parents/children. 
+ SetType top_parents, top_children; + for (auto tx_idx : top_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + tx_data.chunk_idx = parent_chunk_idx; + top_parents |= tx_data.parents; + top_children |= tx_data.children; + for (auto dep_child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]]; + if (dep_top_info.transactions[parent_idx]) dep_top_info -= bottom_info; + } + } + SetType bottom_parents, bottom_children; + for (auto tx_idx : bottom_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + bottom_parents |= tx_data.parents; + bottom_children |= tx_data.children; + for (auto dep_child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]]; + if (dep_top_info.transactions[child_idx]) dep_top_info -= top_info; + } + } + // Compute the new sets of reachable transactions for each new chunk, based on the + // top/bottom parents and children computed above. + m_reachable[parent_chunk_idx].first = top_parents - top_info.transactions; + m_reachable[parent_chunk_idx].second = top_children - top_info.transactions; + m_reachable[child_chunk_idx].first = bottom_parents - bottom_info.transactions; + m_reachable[child_chunk_idx].second = bottom_children - bottom_info.transactions; + // Return the two new set idxs. + m_cost.DeactivateEnd(/*num_deps=*/ntx - 1); + return {parent_chunk_idx, child_chunk_idx}; } - /** Activate a dependency from the chunk represented by bottom_idx to the chunk represented by - * top_idx. Return the representative of the merged chunk, or TxIdx(-1) if no merge is - * possible. */ - TxIdx MergeChunks(TxIdx top_rep, TxIdx bottom_rep) noexcept + /** Activate a dependency from the bottom set to the top set, which must exist. Return the + * index of the merged chunk. 
*/ + SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept { - auto& top_chunk = m_tx_data[top_rep]; - Assume(top_chunk.chunk_rep == top_rep); - auto& bottom_chunk = m_tx_data[bottom_rep]; - Assume(bottom_chunk.chunk_rep == bottom_rep); + m_cost.MergeChunksBegin(); + Assume(m_chunk_idxs[top_idx]); + Assume(m_chunk_idxs[bottom_idx]); + auto& top_chunk_info = m_set_info[top_idx]; + auto& bottom_chunk_info = m_set_info[bottom_idx]; // Count the number of dependencies between bottom_chunk and top_chunk. - TxIdx num_deps{0}; - for (auto tx : top_chunk.chunk_setinfo.transactions) { - auto& tx_data = m_tx_data[tx]; - num_deps += (tx_data.children & bottom_chunk.chunk_setinfo.transactions).Count(); + unsigned num_deps{0}; + for (auto tx_idx : top_chunk_info.transactions) { + auto& tx_data = m_tx_data[tx_idx]; + num_deps += (tx_data.children & bottom_chunk_info.transactions).Count(); } - if (num_deps == 0) return TxIdx(-1); + m_cost.MergeChunksMid(/*num_txns=*/top_chunk_info.transactions.Count()); + Assume(num_deps > 0); // Uniformly randomly pick one of them and activate it. 
- TxIdx pick = m_rng.randrange(num_deps); - for (auto tx : top_chunk.chunk_setinfo.transactions) { - auto& tx_data = m_tx_data[tx]; - auto intersect = tx_data.children & bottom_chunk.chunk_setinfo.transactions; + unsigned pick = m_rng.randrange(num_deps); + unsigned num_steps = 0; + for (auto tx_idx : top_chunk_info.transactions) { + ++num_steps; + auto& tx_data = m_tx_data[tx_idx]; + auto intersect = tx_data.children & bottom_chunk_info.transactions; auto count = intersect.Count(); if (pick < count) { - for (auto dep : tx_data.child_deps) { - auto& dep_data = m_dep_data[dep]; - if (bottom_chunk.chunk_setinfo.transactions[dep_data.child]) { - if (pick == 0) return Activate(dep); - --pick; + for (auto child_idx : intersect) { + if (pick == 0) { + m_cost.MergeChunksEnd(/*num_steps=*/num_steps); + return Activate(tx_idx, child_idx); } + --pick; } + Assume(false); break; } pick -= count; } Assume(false); - return TxIdx(-1); + return INVALID_SET_IDX; } - /** Perform an upward or downward merge step, on the specified chunk representative. Returns - * the representative of the merged chunk, or TxIdx(-1) if no merge took place. */ + /** Activate a dependency from chunk_idx to merge_chunk_idx (if !DownWard), or a dependency + * from merge_chunk_idx to chunk_idx (if DownWard). Return the index of the merged chunk. */ template - TxIdx MergeStep(TxIdx chunk_rep) noexcept + SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept { - /** Information about the chunk that tx_idx is currently in. */ - auto& chunk_data = m_tx_data[chunk_rep]; - SetType chunk_txn = chunk_data.chunk_setinfo.transactions; - // Iterate over all transactions in the chunk, figuring out which other chunk each - // depends on, but only testing each other chunk once. 
For those depended-on chunks, + if constexpr (DownWard) { + return MergeChunks(chunk_idx, merge_chunk_idx); + } else { + return MergeChunks(merge_chunk_idx, chunk_idx); + } + } + + /** Determine which chunk to merge chunk_idx with, or INVALID_SET_IDX if none. */ + template + SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept + { + m_cost.PickMergeCandidateBegin(); + /** Information about the chunk. */ + Assume(m_chunk_idxs[chunk_idx]); + auto& chunk_info = m_set_info[chunk_idx]; + // Iterate over all chunks reachable from this one. For those depended-on chunks, // remember the highest-feerate (if DownWard) or lowest-feerate (if !DownWard) one. // If multiple equal-feerate candidate chunks to merge with exist, pick a random one // among them. - /** Which transactions have been reached from this chunk already. Initialize with the - * chunk itself, so internal dependencies within the chunk are ignored. */ - SetType explored = chunk_txn; /** The minimum feerate (if downward) or maximum feerate (if upward) to consider when * looking for candidate chunks to merge with. Initially, this is the original chunk's * feerate, but is updated to be the current best candidate whenever one is found. */ - FeeFrac best_other_chunk_feerate = chunk_data.chunk_setinfo.feerate; - /** The representative for the best candidate chunk to merge with. -1 if none. */ - TxIdx best_other_chunk_rep = TxIdx(-1); + FeeFrac best_other_chunk_feerate = chunk_info.feerate; + /** The chunk index for the best candidate chunk to merge with. INVALID_SET_IDX if none. */ + SetIdx best_other_chunk_idx = INVALID_SET_IDX; /** We generate random tiebreak values to pick between equal-feerate candidate chunks. * This variable stores the tiebreak of the current best candidate. */ uint64_t best_other_chunk_tiebreak{0}; - for (auto tx : chunk_txn) { - auto& tx_data = m_tx_data[tx]; - /** The transactions reached by following dependencies from tx that have not been - * explored before. 
*/ - auto newly_reached = (DownWard ? tx_data.children : tx_data.parents) - explored; - explored |= newly_reached; - while (newly_reached.Any()) { - // Find a chunk inside newly_reached, and remove it from newly_reached. - auto reached_chunk_rep = m_tx_data[newly_reached.First()].chunk_rep; - auto& reached_chunk = m_tx_data[reached_chunk_rep].chunk_setinfo; - newly_reached -= reached_chunk.transactions; - // See if it has an acceptable feerate. - auto cmp = DownWard ? FeeRateCompare(best_other_chunk_feerate, reached_chunk.feerate) - : FeeRateCompare(reached_chunk.feerate, best_other_chunk_feerate); - if (cmp > 0) continue; - uint64_t tiebreak = m_rng.rand64(); - if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) { - best_other_chunk_feerate = reached_chunk.feerate; - best_other_chunk_rep = reached_chunk_rep; - best_other_chunk_tiebreak = tiebreak; - } + + /** Which parent/child transactions we still need to process the chunks for. */ + auto todo = DownWard ? m_reachable[chunk_idx].second : m_reachable[chunk_idx].first; + unsigned steps = 0; + while (todo.Any()) { + ++steps; + // Find a chunk for a transaction in todo, and remove all its transactions from todo. + auto reached_chunk_idx = m_tx_data[todo.First()].chunk_idx; + auto& reached_chunk_info = m_set_info[reached_chunk_idx]; + todo -= reached_chunk_info.transactions; + // See if it has an acceptable feerate. + auto cmp = DownWard ? FeeRateCompare(best_other_chunk_feerate, reached_chunk_info.feerate) + : FeeRateCompare(reached_chunk_info.feerate, best_other_chunk_feerate); + if (cmp > 0) continue; + uint64_t tiebreak = m_rng.rand64(); + if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) { + best_other_chunk_feerate = reached_chunk_info.feerate; + best_other_chunk_idx = reached_chunk_idx; + best_other_chunk_tiebreak = tiebreak; } } - // Stop if there are no candidate chunks to merge with. 
- if (best_other_chunk_rep == TxIdx(-1)) return TxIdx(-1); - if constexpr (DownWard) { - chunk_rep = MergeChunks(chunk_rep, best_other_chunk_rep); - } else { - chunk_rep = MergeChunks(best_other_chunk_rep, chunk_rep); - } - Assume(chunk_rep != TxIdx(-1)); - return chunk_rep; + Assume(steps <= m_set_info.size()); + + m_cost.PickMergeCandidateEnd(/*num_steps=*/steps); + return best_other_chunk_idx; } + /** Perform an upward or downward merge step, on the specified chunk. Returns the merged chunk, + * or INVALID_SET_IDX if no merge took place. */ + template + SetIdx MergeStep(SetIdx chunk_idx) noexcept + { + auto merge_chunk_idx = PickMergeCandidate(chunk_idx); + if (merge_chunk_idx == INVALID_SET_IDX) return INVALID_SET_IDX; + chunk_idx = MergeChunksDirected(chunk_idx, merge_chunk_idx); + Assume(chunk_idx != INVALID_SET_IDX); + return chunk_idx; + } - /** Perform an upward or downward merge sequence on the specified transaction. */ + /** Perform an upward or downward merge sequence on the specified chunk. */ template - void MergeSequence(TxIdx tx_idx) noexcept + void MergeSequence(SetIdx chunk_idx) noexcept { - auto chunk_rep = m_tx_data[tx_idx].chunk_rep; + Assume(m_chunk_idxs[chunk_idx]); while (true) { - auto merged_rep = MergeStep(chunk_rep); - if (merged_rep == TxIdx(-1)) break; - chunk_rep = merged_rep; + auto merged_chunk_idx = MergeStep(chunk_idx); + if (merged_chunk_idx == INVALID_SET_IDX) break; + chunk_idx = merged_chunk_idx; + } + // Add the chunk to the queue of improvable chunks, if it wasn't already there. + if (!m_suboptimal_idxs[chunk_idx]) { + m_suboptimal_idxs.Set(chunk_idx); + m_suboptimal_chunks.push_back(chunk_idx); } - // Add the chunk to the queue of improvable chunks. - m_suboptimal_chunks.push_back(chunk_rep); } /** Split a chunk, and then merge the resulting two chunks to make the graph topological * again. 
*/ - void Improve(DepIdx dep_idx) noexcept + void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept { - auto& dep_data = m_dep_data[dep_idx]; - Assume(dep_data.active); // Deactivate the specified dependency, splitting it into two new chunks: a top containing // the parent, and a bottom containing the child. The top should have a higher feerate. - Deactivate(dep_idx); + auto [parent_chunk_idx, child_chunk_idx] = Deactivate(parent_idx, child_idx); // At this point we have exactly two chunks which may violate topology constraints (the - // parent chunk and child chunk that were produced by deactivating dep_idx). We can fix + // parent chunk and child chunk that were produced by deactivation). We can fix // these using just merge sequences, one upwards and one downwards, avoiding the need for a // full MakeTopological. + const auto& parent_reachable = m_reachable[parent_chunk_idx].first; + const auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions; + if (parent_reachable.Overlaps(child_chunk_txn)) { + // The parent chunk has a dependency on a transaction in the child chunk. In this case, + // the parent needs to merge back with the child chunk (a self-merge), and no other + // merges are needed. Special-case this, so the overhead of PickMergeCandidate and + // MergeSequence can be avoided. + + // In the self-merge, the roles reverse: the parent chunk (from the split) depends + // on the child chunk, so child_chunk_idx is the "top" and parent_chunk_idx is the + // "bottom" for MergeChunks. + auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx); + if (!m_suboptimal_idxs[merged_chunk_idx]) { + m_suboptimal_idxs.Set(merged_chunk_idx); + m_suboptimal_chunks.push_back(merged_chunk_idx); + } + } else { + // Merge the top chunk with lower-feerate chunks it depends on. + MergeSequence(parent_chunk_idx); + // Merge the bottom chunk with higher-feerate chunks that depend on it. 
+ MergeSequence(child_chunk_idx); + } + } - // Merge the top chunk with lower-feerate chunks it depends on (which may be the bottom it - // was just split from, or other pre-existing chunks). - MergeSequence(dep_data.parent); - // Merge the bottom chunk with higher-feerate chunks that depend on it. - MergeSequence(dep_data.child); + /** Determine the next chunk to optimize, or INVALID_SET_IDX if none. */ + SetIdx PickChunkToOptimize() noexcept + { + m_cost.PickChunkToOptimizeBegin(); + unsigned steps{0}; + while (!m_suboptimal_chunks.empty()) { + ++steps; + // Pop an entry from the potentially-suboptimal chunk queue. + SetIdx chunk_idx = m_suboptimal_chunks.front(); + Assume(m_suboptimal_idxs[chunk_idx]); + m_suboptimal_idxs.Reset(chunk_idx); + m_suboptimal_chunks.pop_front(); + if (m_chunk_idxs[chunk_idx]) { + m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps); + return chunk_idx; + } + // If what was popped is not currently a chunk, continue. This may + // happen when a split chunk merges in Improve() with one or more existing chunks that + // are themselves on the suboptimal queue already. + } + m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps); + return INVALID_SET_IDX; + } + + /** Find a (parent, child) dependency to deactivate in chunk_idx, or (-1, -1) if none. */ + std::pair PickDependencyToSplit(SetIdx chunk_idx) noexcept + { + m_cost.PickDependencyToSplitBegin(); + Assume(m_chunk_idxs[chunk_idx]); + auto& chunk_info = m_set_info[chunk_idx]; + + // Remember the best dependency {par, chl} seen so far. + std::pair candidate_dep = {TxIdx(-1), TxIdx(-1)}; + uint64_t candidate_tiebreak = 0; + // Iterate over all transactions. + for (auto tx_idx : chunk_info.transactions) { + const auto& tx_data = m_tx_data[tx_idx]; + // Iterate over all active child dependencies of the transaction. 
+ for (auto child_idx : tx_data.active_children) { + auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]]; + // Skip if this dependency is ineligible (the top chunk that would be created + // does not have higher feerate than the chunk it is currently part of). + auto cmp = FeeRateCompare(dep_top_info.feerate, chunk_info.feerate); + if (cmp <= 0) continue; + // Generate a random tiebreak for this dependency, and reject it if its tiebreak + // is worse than the best so far. This means that among all eligible + // dependencies, a uniformly random one will be chosen. + uint64_t tiebreak = m_rng.rand64(); + if (tiebreak < candidate_tiebreak) continue; + // Remember this as our (new) candidate dependency. + candidate_dep = {tx_idx, child_idx}; + candidate_tiebreak = tiebreak; + } + } + m_cost.PickDependencyToSplitEnd(/*num_txns=*/chunk_info.transactions.Count()); + return candidate_dep; } public: /** Construct a spanning forest for the given DepGraph, with every transaction in its own chunk * (not topological). */ - explicit SpanningForestState(const DepGraph& depgraph LIFETIMEBOUND, uint64_t rng_seed) noexcept : - m_rng(rng_seed), m_depgraph(depgraph) + explicit SpanningForestState(const DepGraph& depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel& cost = CostModel{}) noexcept : + m_rng(rng_seed), m_depgraph(depgraph), m_cost(cost) { + m_cost.InitializeBegin(); m_transaction_idxs = depgraph.Positions(); auto num_transactions = m_transaction_idxs.Count(); m_tx_data.resize(depgraph.PositionRange()); - // Reserve the maximum number of (reserved) dependencies the cluster can have, so - // m_dep_data won't need any reallocations during construction. For a cluster with N - // transactions, the worst case consists of two sets of transactions, the parents and the - // children, where each child depends on each parent and nothing else. For even N, both - // sets can be sized N/2, which means N^2/4 dependencies. 
For odd N, one can be (N + 1)/2 - // and the other can be (N - 1)/2, meaning (N^2 - 1)/4 dependencies. Because N^2 is odd in - // this case, N^2/4 (with rounding-down division) is the correct value in both cases. - m_dep_data.reserve((num_transactions * num_transactions) / 4); - for (auto tx : m_transaction_idxs) { + m_set_info.resize(num_transactions); + m_reachable.resize(num_transactions); + size_t num_chunks = 0; + size_t num_deps = 0; + for (auto tx_idx : m_transaction_idxs) { // Fill in transaction data. - auto& tx_data = m_tx_data[tx]; - tx_data.chunk_rep = tx; - tx_data.chunk_setinfo.transactions = SetType::Singleton(tx); - tx_data.chunk_setinfo.feerate = depgraph.FeeRate(tx); - // Add its dependencies. - SetType parents = depgraph.GetReducedParents(tx); - for (auto par : parents) { - auto& par_tx_data = m_tx_data[par]; - auto dep_idx = m_dep_data.size(); - // Construct new dependency. - auto& dep = m_dep_data.emplace_back(); - dep.active = false; - dep.parent = par; - dep.child = tx; - // Add it as parent of the child. - tx_data.parents.Set(par); - // Add it as child of the parent. - par_tx_data.child_deps.push_back(dep_idx); - par_tx_data.children.Set(tx); + auto& tx_data = m_tx_data[tx_idx]; + tx_data.parents = depgraph.GetReducedParents(tx_idx); + for (auto parent_idx : tx_data.parents) { + m_tx_data[parent_idx].children.Set(tx_idx); } + num_deps += tx_data.parents.Count(); + // Create a singleton chunk for it. + tx_data.chunk_idx = num_chunks; + m_set_info[num_chunks++] = SetInfo(depgraph, tx_idx); + } + // Set the reachable transactions for each chunk to the transactions' parents and children. + for (SetIdx chunk_idx = 0; chunk_idx < num_transactions; ++chunk_idx) { + auto& tx_data = m_tx_data[m_set_info[chunk_idx].transactions.First()]; + m_reachable[chunk_idx].first = tx_data.parents; + m_reachable[chunk_idx].second = tx_data.children; } + Assume(num_chunks == num_transactions); + // Mark all chunk sets as chunks. 
+ m_chunk_idxs = SetType::Fill(num_chunks); + m_cost.InitializeEnd(/*num_txns=*/num_chunks, /*num_deps=*/num_deps); } /** Load an existing linearization. Must be called immediately after constructor. The result is @@ -1018,12 +1210,12 @@ class SpanningForestState void LoadLinearization(std::span old_linearization) noexcept { // Add transactions one by one, in order of existing linearization. - for (DepGraphIndex tx : old_linearization) { - auto chunk_rep = m_tx_data[tx].chunk_rep; + for (DepGraphIndex tx_idx : old_linearization) { + auto chunk_idx = m_tx_data[tx_idx].chunk_idx; // Merge the chunk upwards, as long as merging succeeds. while (true) { - chunk_rep = MergeStep(chunk_rep); - if (chunk_rep == TxIdx(-1)) break; + chunk_idx = MergeStep(chunk_idx); + if (chunk_idx == INVALID_SET_IDX) break; } } } @@ -1031,131 +1223,129 @@ class SpanningForestState /** Make state topological. Can be called after constructing, or after LoadLinearization. */ void MakeTopological() noexcept { - for (auto tx : m_transaction_idxs) { - auto& tx_data = m_tx_data[tx]; - if (tx_data.chunk_rep == tx) { - m_suboptimal_chunks.emplace_back(tx); - // Randomize the initial order of suboptimal chunks in the queue. - TxIdx j = m_rng.randrange(m_suboptimal_chunks.size()); - if (j != m_suboptimal_chunks.size() - 1) { - std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]); - } + m_cost.MakeTopologicalBegin(); + Assume(m_suboptimal_chunks.empty()); + /** What direction to initially merge chunks in; one of the two directions is enough. This + * is sufficient because if a non-topological inactive dependency exists between two + * chunks, at least one of the two chunks will eventually be processed in a direction that + * discovers it - either the lower chunk tries upward, or the upper chunk tries downward. + * Chunks that are the result of the merging are always tried in both directions. 
*/ + unsigned init_dir = m_rng.randbool(); + /** Which chunks are the result of merging, and thus need merge attempts in both + * directions. */ + SetType merged_chunks; + // Mark chunks as suboptimal. + m_suboptimal_idxs = m_chunk_idxs; + for (auto chunk_idx : m_chunk_idxs) { + m_suboptimal_chunks.emplace_back(chunk_idx); + // Randomize the initial order of suboptimal chunks in the queue. + SetIdx j = m_rng.randrange(m_suboptimal_chunks.size()); + if (j != m_suboptimal_chunks.size() - 1) { + std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]); } } + unsigned chunks = m_chunk_idxs.Count(); + unsigned steps = 0; while (!m_suboptimal_chunks.empty()) { + ++steps; // Pop an entry from the potentially-suboptimal chunk queue. - TxIdx chunk = m_suboptimal_chunks.front(); + SetIdx chunk_idx = m_suboptimal_chunks.front(); m_suboptimal_chunks.pop_front(); - auto& chunk_data = m_tx_data[chunk]; - // If what was popped is not currently a chunk representative, continue. This may + Assume(m_suboptimal_idxs[chunk_idx]); + m_suboptimal_idxs.Reset(chunk_idx); + // If what was popped is not currently a chunk, continue. This may // happen when it was merged with something else since being added. - if (chunk_data.chunk_rep != chunk) continue; + if (!m_chunk_idxs[chunk_idx]) continue; + /** What direction(s) to attempt merging in. 1=up, 2=down, 3=both. */ + unsigned direction = merged_chunks[chunk_idx] ? 3 : init_dir + 1; int flip = m_rng.randbool(); for (int i = 0; i < 2; ++i) { if (i ^ flip) { + if (!(direction & 1)) continue; // Attempt to merge the chunk upwards. 
- auto result_up = MergeStep(chunk); - if (result_up != TxIdx(-1)) { - m_suboptimal_chunks.push_back(result_up); + auto result_up = MergeStep(chunk_idx); + if (result_up != INVALID_SET_IDX) { + if (!m_suboptimal_idxs[result_up]) { + m_suboptimal_idxs.Set(result_up); + m_suboptimal_chunks.push_back(result_up); + } + merged_chunks.Set(result_up); break; } } else { + if (!(direction & 2)) continue; // Attempt to merge the chunk downwards. - auto result_down = MergeStep(chunk); - if (result_down != TxIdx(-1)) { - m_suboptimal_chunks.push_back(result_down); + auto result_down = MergeStep(chunk_idx); + if (result_down != INVALID_SET_IDX) { + if (!m_suboptimal_idxs[result_down]) { + m_suboptimal_idxs.Set(result_down); + m_suboptimal_chunks.push_back(result_down); + } + merged_chunks.Set(result_down); break; } } } } + m_cost.MakeTopologicalEnd(/*num_chunks=*/chunks, /*num_steps=*/steps); } /** Initialize the data structure for optimization. It must be topological already. */ void StartOptimizing() noexcept { + m_cost.StartOptimizingBegin(); + Assume(m_suboptimal_chunks.empty()); // Mark chunks suboptimal. - for (auto tx : m_transaction_idxs) { - auto& tx_data = m_tx_data[tx]; - if (tx_data.chunk_rep == tx) { - m_suboptimal_chunks.push_back(tx); - // Randomize the initial order of suboptimal chunks in the queue. - TxIdx j = m_rng.randrange(m_suboptimal_chunks.size()); - if (j != m_suboptimal_chunks.size() - 1) { - std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]); - } + m_suboptimal_idxs = m_chunk_idxs; + for (auto chunk_idx : m_chunk_idxs) { + m_suboptimal_chunks.push_back(chunk_idx); + // Randomize the initial order of suboptimal chunks in the queue. + SetIdx j = m_rng.randrange(m_suboptimal_chunks.size()); + if (j != m_suboptimal_chunks.size() - 1) { + std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]); } } + m_cost.StartOptimizingEnd(/*num_chunks=*/m_suboptimal_chunks.size()); } /** Try to improve the forest. 
Returns false if it is optimal, true otherwise. */ bool OptimizeStep() noexcept { - while (!m_suboptimal_chunks.empty()) { - // Pop an entry from the potentially-suboptimal chunk queue. - TxIdx chunk = m_suboptimal_chunks.front(); - m_suboptimal_chunks.pop_front(); - auto& chunk_data = m_tx_data[chunk]; - // If what was popped is not currently a chunk representative, continue. This may - // happen when a split chunk merges in Improve() with one or more existing chunks that - // are themselves on the suboptimal queue already. - if (chunk_data.chunk_rep != chunk) continue; - // Remember the best dependency seen so far. - DepIdx candidate_dep = DepIdx(-1); - uint64_t candidate_tiebreak = 0; - // Iterate over all transactions. - for (auto tx : chunk_data.chunk_setinfo.transactions) { - const auto& tx_data = m_tx_data[tx]; - // Iterate over all active child dependencies of the transaction. - const auto children = std::span{tx_data.child_deps}; - for (DepIdx dep_idx : children) { - const auto& dep_data = m_dep_data[dep_idx]; - if (!dep_data.active) continue; - // Skip if this dependency is ineligible (the top chunk that would be created - // does not have higher feerate than the chunk it is currently part of). - auto cmp = FeeRateCompare(dep_data.top_setinfo.feerate, chunk_data.chunk_setinfo.feerate); - if (cmp <= 0) continue; - // Generate a random tiebreak for this dependency, and reject it if its tiebreak - // is worse than the best so far. This means that among all eligible - // dependencies, a uniformly random one will be chosen. - uint64_t tiebreak = m_rng.rand64(); - if (tiebreak < candidate_tiebreak) continue; - // Remember this as our (new) candidate dependency. - candidate_dep = dep_idx; - candidate_tiebreak = tiebreak; - } - } - // If a candidate with positive gain was found, deactivate it and then make the state - // topological again with a sequence of merges. 
- if (candidate_dep != DepIdx(-1)) Improve(candidate_dep); - // Stop processing for now, even if nothing was activated, as the loop above may have - // had a nontrivial cost. + auto chunk_idx = PickChunkToOptimize(); + if (chunk_idx == INVALID_SET_IDX) { + // No improvable chunk was found, we are done. + return false; + } + auto [parent_idx, child_idx] = PickDependencyToSplit(chunk_idx); + if (parent_idx == TxIdx(-1)) { + // Nothing to improve in chunk_idx. Need to continue with other chunks, if any. return !m_suboptimal_chunks.empty(); } - // No improvable chunk was found, we are done. - return false; + // Deactivate the found dependency and then make the state topological again with a + // sequence of merges. + Improve(parent_idx, child_idx); + return true; } /** Initialize data structure for minimizing the chunks. Can only be called if state is known * to be optimal. OptimizeStep() cannot be called anymore afterwards. */ void StartMinimizing() noexcept { + m_cost.StartMinimizingBegin(); m_nonminimal_chunks.clear(); m_nonminimal_chunks.reserve(m_transaction_idxs.Count()); // Gather all chunks, and for each, add it with a random pivot in it, and a random initial // direction, to m_nonminimal_chunks. - for (auto tx : m_transaction_idxs) { - auto& tx_data = m_tx_data[tx]; - if (tx_data.chunk_rep == tx) { - TxIdx pivot_idx = PickRandomTx(tx_data.chunk_setinfo.transactions); - m_nonminimal_chunks.emplace_back(tx, pivot_idx, m_rng.randbits<1>()); - // Randomize the initial order of nonminimal chunks in the queue. - TxIdx j = m_rng.randrange(m_nonminimal_chunks.size()); - if (j != m_nonminimal_chunks.size() - 1) { - std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[j]); - } + for (auto chunk_idx : m_chunk_idxs) { + TxIdx pivot_idx = PickRandomTx(m_set_info[chunk_idx].transactions); + m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, m_rng.randbits<1>()); + // Randomize the initial order of nonminimal chunks in the queue. 
+ SetIdx j = m_rng.randrange(m_nonminimal_chunks.size()); + if (j != m_nonminimal_chunks.size() - 1) { + std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[j]); } } + m_cost.StartMinimizingEnd(/*num_chunks=*/m_nonminimal_chunks.size()); } /** Try to reduce a chunk's size. Returns false if all chunks are minimal, true otherwise. */ @@ -1163,11 +1353,11 @@ class SpanningForestState { // If the queue of potentially-non-minimal chunks is empty, we are done. if (m_nonminimal_chunks.empty()) return false; + m_cost.MinimizeStepBegin(); // Pop an entry from the potentially-non-minimal chunk queue. - auto [chunk_rep, pivot_idx, flags] = m_nonminimal_chunks.front(); + auto [chunk_idx, pivot_idx, flags] = m_nonminimal_chunks.front(); m_nonminimal_chunks.pop_front(); - auto& chunk_data = m_tx_data[chunk_rep]; - Assume(chunk_data.chunk_rep == chunk_rep); + auto& chunk_info = m_set_info[chunk_idx]; /** Whether to move the pivot down rather than up. */ bool move_pivot_down = flags & 1; /** Whether this is already the second stage. */ @@ -1175,32 +1365,31 @@ class SpanningForestState // Find a random dependency whose top and bottom set feerates are equal, and which has // pivot in bottom set (if move_pivot_down) or in top set (if !move_pivot_down). - DepIdx candidate_dep = DepIdx(-1); + std::pair candidate_dep; uint64_t candidate_tiebreak{0}; bool have_any = false; // Iterate over all transactions. - for (auto tx_idx : chunk_data.chunk_setinfo.transactions) { + for (auto tx_idx : chunk_info.transactions) { const auto& tx_data = m_tx_data[tx_idx]; // Iterate over all active child dependencies of the transaction. - for (auto dep_idx : tx_data.child_deps) { - auto& dep_data = m_dep_data[dep_idx]; - // Skip inactive child dependencies. - if (!dep_data.active) continue; + for (auto child_idx : tx_data.active_children) { + const auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]]; // Skip if this dependency does not have equal top and bottom set feerates. 
Note // that the top cannot have higher feerate than the bottom, or OptimizeSteps would // have dealt with it. - if (dep_data.top_setinfo.feerate << chunk_data.chunk_setinfo.feerate) continue; + if (dep_top_info.feerate << chunk_info.feerate) continue; have_any = true; // Skip if this dependency does not have pivot in the right place. - if (move_pivot_down == dep_data.top_setinfo.transactions[pivot_idx]) continue; + if (move_pivot_down == dep_top_info.transactions[pivot_idx]) continue; // Remember this as our chosen dependency if it has a better tiebreak. uint64_t tiebreak = m_rng.rand64() | 1; if (tiebreak > candidate_tiebreak) { candidate_tiebreak = tiebreak; - candidate_dep = dep_idx; + candidate_dep = {tx_idx, child_idx}; } } } + m_cost.MinimizeStepMid(/*num_txns=*/chunk_info.transactions.Count()); // If no dependencies have equal top and bottom set feerate, this chunk is minimal. if (!have_any) return true; // If all found dependencies have the pivot in the wrong place, try moving it in the other @@ -1208,23 +1397,25 @@ class SpanningForestState if (candidate_tiebreak == 0) { // Switch to other direction, and to second phase. flags ^= 3; - if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_rep, pivot_idx, flags); + if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, flags); return true; } // Otherwise, deactivate the dependency that was found. - Deactivate(candidate_dep); - auto& dep_data = m_dep_data[candidate_dep]; - auto parent_chunk_rep = m_tx_data[dep_data.parent].chunk_rep; - auto child_chunk_rep = m_tx_data[dep_data.child].chunk_rep; - // Try to activate a dependency between the new bottom and the new top (opposite from the + auto [parent_chunk_idx, child_chunk_idx] = Deactivate(candidate_dep.first, candidate_dep.second); + // Determine if there is a dependency from the new bottom to the new top (opposite from the // dependency that was just deactivated). 
- auto merged_chunk_rep = MergeChunks(child_chunk_rep, parent_chunk_rep); - if (merged_chunk_rep != TxIdx(-1)) { - // A self-merge happened. - // Re-insert the chunk into the queue, in the same direction. Note that the chunk_rep + auto& parent_reachable = m_reachable[parent_chunk_idx].first; + auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions; + if (parent_reachable.Overlaps(child_chunk_txn)) { + // A self-merge is needed. Note that the child_chunk_idx is the top, and + // parent_chunk_idx is the bottom, because we activate a dependency in the reverse + // direction compared to the deactivation above. + auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx); + // Re-insert the chunk into the queue, in the same direction. Note that the chunk_idx // will have changed. - m_nonminimal_chunks.emplace_back(merged_chunk_rep, pivot_idx, flags); + m_nonminimal_chunks.emplace_back(merged_chunk_idx, pivot_idx, flags); + m_cost.MinimizeStepEnd(/*split=*/false); } else { // No self-merge happens, and thus we have found a way to split the chunk. Create two // smaller chunks, and add them to the queue. The one that contains the current pivot @@ -1234,17 +1425,18 @@ class SpanningForestState // possible already. The new chunk without the current pivot gets a new randomly-chosen // one. 
if (move_pivot_down) { - auto parent_pivot_idx = PickRandomTx(m_tx_data[parent_chunk_rep].chunk_setinfo.transactions); - m_nonminimal_chunks.emplace_back(parent_chunk_rep, parent_pivot_idx, m_rng.randbits<1>()); - m_nonminimal_chunks.emplace_back(child_chunk_rep, pivot_idx, flags); + auto parent_pivot_idx = PickRandomTx(m_set_info[parent_chunk_idx].transactions); + m_nonminimal_chunks.emplace_back(parent_chunk_idx, parent_pivot_idx, m_rng.randbits<1>()); + m_nonminimal_chunks.emplace_back(child_chunk_idx, pivot_idx, flags); } else { - auto child_pivot_idx = PickRandomTx(m_tx_data[child_chunk_rep].chunk_setinfo.transactions); - m_nonminimal_chunks.emplace_back(parent_chunk_rep, pivot_idx, flags); - m_nonminimal_chunks.emplace_back(child_chunk_rep, child_pivot_idx, m_rng.randbits<1>()); + auto child_pivot_idx = PickRandomTx(m_set_info[child_chunk_idx].transactions); + m_nonminimal_chunks.emplace_back(parent_chunk_idx, pivot_idx, flags); + m_nonminimal_chunks.emplace_back(child_chunk_idx, child_pivot_idx, m_rng.randbits<1>()); } if (m_rng.randbool()) { std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[m_nonminimal_chunks.size() - 2]); } + m_cost.MinimizeStepEnd(/*split=*/true); } return true; } @@ -1265,42 +1457,38 @@ class SpanningForestState * - smallest tx size first * - the lowest transaction, by fallback_order, first */ - std::vector GetLinearization(const StrongComparator auto& fallback_order) const noexcept + std::vector GetLinearization(const StrongComparator auto& fallback_order) noexcept { + m_cost.GetLinearizationBegin(); /** The output linearization. 
*/ std::vector ret; - ret.reserve(m_transaction_idxs.Count()); - /** A heap with all chunks (by representative) that can currently be included, sorted by + ret.reserve(m_set_info.size()); + /** A heap with all chunks (by set index) that can currently be included, sorted by * chunk feerate (high to low), chunk size (small to large), and by least maximum element * according to the fallback order (which is the second pair element). */ - std::vector> ready_chunks; - /** Information about chunks: - * - The first value is only used for chunk representatives, and counts the number of - * unmet dependencies this chunk has on other chunks (not including dependencies within - * the chunk itself). - * - The second value is the number of unmet dependencies overall. - */ - std::vector> chunk_deps(m_tx_data.size(), {0, 0}); - /** The set of all chunk representatives. */ - SetType chunk_reps; + std::vector> ready_chunks; + /** For every chunk, indexed by SetIdx, the number of unmet dependencies the chunk has on + * other chunks (not including dependencies within the chunk itself). */ + std::vector chunk_deps(m_set_info.size(), 0); + /** For every transaction, indexed by TxIdx, the number of unmet dependencies the + * transaction has. */ + std::vector tx_deps(m_tx_data.size(), 0); /** A heap with all transactions within the current chunk that can be included, sorted by * tx feerate (high to low), tx size (small to large), and fallback order. */ std::vector ready_tx; - // Populate chunk_deps[c] with the number of {out-of-chunk dependencies, dependencies} the - // child has. + // Populate chunk_deps and tx_deps. 
+ unsigned num_deps{0}; for (TxIdx chl_idx : m_transaction_idxs) { const auto& chl_data = m_tx_data[chl_idx]; - chunk_deps[chl_idx].second = chl_data.parents.Count(); - auto chl_chunk_rep = chl_data.chunk_rep; - chunk_reps.Set(chl_chunk_rep); - for (auto par_idx : chl_data.parents) { - auto par_chunk_rep = m_tx_data[par_idx].chunk_rep; - chunk_deps[chl_chunk_rep].first += (par_chunk_rep != chl_chunk_rep); - } + tx_deps[chl_idx] = chl_data.parents.Count(); + num_deps += tx_deps[chl_idx]; + auto chl_chunk_idx = chl_data.chunk_idx; + auto& chl_chunk_info = m_set_info[chl_chunk_idx]; + chunk_deps[chl_chunk_idx] += (chl_data.parents - chl_chunk_info.transactions).Count(); } /** Function to compute the highest element of a chunk, by fallback_order. */ - auto max_fallback_fn = [&](TxIdx chunk_rep) noexcept { - auto& chunk = m_tx_data[chunk_rep].chunk_setinfo.transactions; + auto max_fallback_fn = [&](SetIdx chunk_idx) noexcept { + auto& chunk = m_set_info[chunk_idx].transactions; auto it = chunk.begin(); DepGraphIndex ret = *it; ++it; @@ -1338,8 +1526,8 @@ class SpanningForestState // Bail out for identical chunks. if (a.first == b.first) return false; // First sort by increasing chunk feerate. - auto& chunk_feerate_a = m_tx_data[a.first].chunk_setinfo.feerate; - auto& chunk_feerate_b = m_tx_data[b.first].chunk_setinfo.feerate; + auto& chunk_feerate_a = m_set_info[a.first].feerate; + auto& chunk_feerate_b = m_set_info[b.first].feerate; auto feerate_cmp = FeeRateCompare(chunk_feerate_a, chunk_feerate_b); if (feerate_cmp != 0) return feerate_cmp < 0; // Then by decreasing chunk size. @@ -1354,24 +1542,23 @@ class SpanningForestState return a.second < b.second; }; // Construct a heap with all chunks that have no out-of-chunk dependencies. 
- for (TxIdx chunk_rep : chunk_reps) { - if (chunk_deps[chunk_rep].first == 0) { - ready_chunks.emplace_back(chunk_rep, max_fallback_fn(chunk_rep)); + for (SetIdx chunk_idx : m_chunk_idxs) { + if (chunk_deps[chunk_idx] == 0) { + ready_chunks.emplace_back(chunk_idx, max_fallback_fn(chunk_idx)); } } std::make_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn); // Pop chunks off the heap. while (!ready_chunks.empty()) { - auto [chunk_rep, _rnd] = ready_chunks.front(); + auto [chunk_idx, _rnd] = ready_chunks.front(); std::pop_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn); ready_chunks.pop_back(); - Assume(m_tx_data[chunk_rep].chunk_rep == chunk_rep); - Assume(chunk_deps[chunk_rep].first == 0); - const auto& chunk_txn = m_tx_data[chunk_rep].chunk_setinfo.transactions; + Assume(chunk_deps[chunk_idx] == 0); + const auto& chunk_txn = m_set_info[chunk_idx].transactions; // Build heap of all includable transactions in chunk. Assume(ready_tx.empty()); for (TxIdx tx_idx : chunk_txn) { - if (chunk_deps[tx_idx].second == 0) ready_tx.push_back(tx_idx); + if (tx_deps[tx_idx] == 0) ready_tx.push_back(tx_idx); } Assume(!ready_tx.empty()); std::make_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn); @@ -1389,25 +1576,26 @@ class SpanningForestState for (TxIdx chl_idx : tx_data.children) { auto& chl_data = m_tx_data[chl_idx]; // Decrement tx dependency count. - Assume(chunk_deps[chl_idx].second > 0); - if (--chunk_deps[chl_idx].second == 0 && chunk_txn[chl_idx]) { + Assume(tx_deps[chl_idx] > 0); + if (--tx_deps[chl_idx] == 0 && chunk_txn[chl_idx]) { // Child tx has no dependencies left, and is in this chunk. Add it to the tx heap. ready_tx.push_back(chl_idx); std::push_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn); } // Decrement chunk dependency count if this is out-of-chunk dependency. 
- if (chl_data.chunk_rep != chunk_rep) { - Assume(chunk_deps[chl_data.chunk_rep].first > 0); - if (--chunk_deps[chl_data.chunk_rep].first == 0) { + if (chl_data.chunk_idx != chunk_idx) { + Assume(chunk_deps[chl_data.chunk_idx] > 0); + if (--chunk_deps[chl_data.chunk_idx] == 0) { // Child chunk has no dependencies left. Add it to the chunk heap. - ready_chunks.emplace_back(chl_data.chunk_rep, max_fallback_fn(chl_data.chunk_rep)); + ready_chunks.emplace_back(chl_data.chunk_idx, max_fallback_fn(chl_data.chunk_idx)); std::push_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn); } } } } } - Assume(ret.size() == m_transaction_idxs.Count()); + Assume(ret.size() == m_set_info.size()); + m_cost.GetLinearizationEnd(/*num_txns=*/m_set_info.size(), /*num_deps=*/num_deps); return ret; } @@ -1427,168 +1615,170 @@ class SpanningForestState std::vector GetDiagram() const noexcept { std::vector ret; - for (auto tx : m_transaction_idxs) { - if (m_tx_data[tx].chunk_rep == tx) { - ret.push_back(m_tx_data[tx].chunk_setinfo.feerate); - } + for (auto chunk_idx : m_chunk_idxs) { + ret.push_back(m_set_info[chunk_idx].feerate); } std::sort(ret.begin(), ret.end(), std::greater{}); return ret; } /** Determine how much work was performed so far. */ - uint64_t GetCost() const noexcept { return m_cost; } + uint64_t GetCost() const noexcept { return m_cost.GetCost(); } /** Verify internal consistency of the data structure. */ - void SanityCheck(const DepGraph& depgraph) const + void SanityCheck() const { // // Verify dependency parent/child information, and build list of (active) dependencies. 
// std::vector> expected_dependencies; - std::vector> all_dependencies; - std::vector> active_dependencies; - for (auto parent_idx : depgraph.Positions()) { - for (auto child_idx : depgraph.GetReducedChildren(parent_idx)) { + std::vector> all_dependencies; + std::vector> active_dependencies; + for (auto parent_idx : m_depgraph.Positions()) { + for (auto child_idx : m_depgraph.GetReducedChildren(parent_idx)) { expected_dependencies.emplace_back(parent_idx, child_idx); } } - for (DepIdx dep_idx = 0; dep_idx < m_dep_data.size(); ++dep_idx) { - const auto& dep_data = m_dep_data[dep_idx]; - all_dependencies.emplace_back(dep_data.parent, dep_data.child, dep_idx); - // Also add to active_dependencies if it is active. - if (m_dep_data[dep_idx].active) { - active_dependencies.emplace_back(dep_data.parent, dep_data.child, dep_idx); + for (auto tx_idx : m_transaction_idxs) { + for (auto child_idx : m_tx_data[tx_idx].children) { + all_dependencies.emplace_back(tx_idx, child_idx); + if (m_tx_data[tx_idx].active_children[child_idx]) { + active_dependencies.emplace_back(tx_idx, child_idx); + } } } std::sort(expected_dependencies.begin(), expected_dependencies.end()); std::sort(all_dependencies.begin(), all_dependencies.end()); - assert(expected_dependencies.size() == all_dependencies.size()); - for (size_t i = 0; i < expected_dependencies.size(); ++i) { - assert(expected_dependencies[i] == - std::make_pair(std::get<0>(all_dependencies[i]), - std::get<1>(all_dependencies[i]))); - } + assert(expected_dependencies == all_dependencies); // // Verify the chunks against the list of active dependencies // - for (auto tx_idx: depgraph.Positions()) { - // Only process chunks for now. - if (m_tx_data[tx_idx].chunk_rep == tx_idx) { - const auto& chunk_data = m_tx_data[tx_idx]; - // Verify that transactions in the chunk point back to it. This guarantees - // that chunks are non-overlapping. 
- for (auto chunk_tx : chunk_data.chunk_setinfo.transactions) { - assert(m_tx_data[chunk_tx].chunk_rep == tx_idx); - } - // Verify the chunk's transaction set: it must contain the representative, and for - // every active dependency, if it contains the parent or child, it must contain - // both. It must have exactly N-1 active dependencies in it, guaranteeing it is - // acyclic. - SetType expected_chunk = SetType::Singleton(tx_idx); - while (true) { - auto old = expected_chunk; - size_t active_dep_count{0}; - for (const auto& [par, chl, _dep] : active_dependencies) { - if (expected_chunk[par] || expected_chunk[chl]) { - expected_chunk.Set(par); - expected_chunk.Set(chl); - ++active_dep_count; - } - } - if (old == expected_chunk) { - assert(expected_chunk.Count() == active_dep_count + 1); - break; + SetType chunk_cover; + for (auto chunk_idx : m_chunk_idxs) { + const auto& chunk_info = m_set_info[chunk_idx]; + // Verify that transactions in the chunk point back to it. This guarantees + // that chunks are non-overlapping. + for (auto tx_idx : chunk_info.transactions) { + assert(m_tx_data[tx_idx].chunk_idx == chunk_idx); + } + assert(!chunk_cover.Overlaps(chunk_info.transactions)); + chunk_cover |= chunk_info.transactions; + // Verify the chunk's transaction set: start from an arbitrary chunk transaction, + // and for every active dependency, if it contains the parent or child, add the + // other. It must have exactly N-1 active dependencies in it, guaranteeing it is + // acyclic. + assert(chunk_info.transactions.Any()); + SetType expected_chunk = SetType::Singleton(chunk_info.transactions.First()); + while (true) { + auto old = expected_chunk; + size_t active_dep_count{0}; + for (const auto& [par, chl] : active_dependencies) { + if (expected_chunk[par] || expected_chunk[chl]) { + expected_chunk.Set(par); + expected_chunk.Set(chl); + ++active_dep_count; } } - assert(chunk_data.chunk_setinfo.transactions == expected_chunk); - // Verify the chunk's feerate. 
- assert(chunk_data.chunk_setinfo.feerate == - depgraph.FeeRate(chunk_data.chunk_setinfo.transactions)); + if (old == expected_chunk) { + assert(expected_chunk.Count() == active_dep_count + 1); + break; + } } + assert(chunk_info.transactions == expected_chunk); + // Verify the chunk's feerate. + assert(chunk_info.feerate == m_depgraph.FeeRate(chunk_info.transactions)); + // Verify the chunk's reachable transactions. + assert(m_reachable[chunk_idx] == GetReachable(expected_chunk)); + // Verify that the chunk's reachable transactions don't include its own transactions. + assert(!m_reachable[chunk_idx].first.Overlaps(chunk_info.transactions)); + assert(!m_reachable[chunk_idx].second.Overlaps(chunk_info.transactions)); } + // Verify that together, the chunks cover all transactions. + assert(chunk_cover == m_depgraph.Positions()); // - // Verify other transaction data. + // Verify transaction data. // - assert(m_transaction_idxs == depgraph.Positions()); + assert(m_transaction_idxs == m_depgraph.Positions()); for (auto tx_idx : m_transaction_idxs) { const auto& tx_data = m_tx_data[tx_idx]; - // Verify it has a valid chunk representative, and that chunk includes this - // transaction. - assert(m_tx_data[tx_data.chunk_rep].chunk_rep == tx_data.chunk_rep); - assert(m_tx_data[tx_data.chunk_rep].chunk_setinfo.transactions[tx_idx]); + // Verify it has a valid chunk index, and that chunk includes this transaction. + assert(m_chunk_idxs[tx_data.chunk_idx]); + assert(m_set_info[tx_data.chunk_idx].transactions[tx_idx]); // Verify parents/children. - assert(tx_data.parents == depgraph.GetReducedParents(tx_idx)); - assert(tx_data.children == depgraph.GetReducedChildren(tx_idx)); - // Verify list of child dependencies. 
- std::vector expected_child_deps; - for (const auto& [par_idx, chl_idx, dep_idx] : all_dependencies) { - if (tx_idx == par_idx) { - assert(tx_data.children[chl_idx]); - expected_child_deps.push_back(dep_idx); - } + assert(tx_data.parents == m_depgraph.GetReducedParents(tx_idx)); + assert(tx_data.children == m_depgraph.GetReducedChildren(tx_idx)); + // Verify active_children is a subset of children. + assert(tx_data.active_children.IsSubsetOf(tx_data.children)); + // Verify each active child's dep_top_idx points to a valid non-chunk set. + for (auto child_idx : tx_data.active_children) { + assert(tx_data.dep_top_idx[child_idx] < m_set_info.size()); + assert(!m_chunk_idxs[tx_data.dep_top_idx[child_idx]]); } - std::sort(expected_child_deps.begin(), expected_child_deps.end()); - auto child_deps_copy = tx_data.child_deps; - std::sort(child_deps_copy.begin(), child_deps_copy.end()); - assert(expected_child_deps == child_deps_copy); } // - // Verify active dependencies' top_setinfo. + // Verify active dependencies' top sets. // - for (const auto& [par_idx, chl_idx, dep_idx] : active_dependencies) { - const auto& dep_data = m_dep_data[dep_idx]; - // Verify the top_info's transactions: it must contain the parent, and for every - // active dependency, except dep_idx itself, if it contains the parent or child, it - // must contain both. + for (const auto& [par_idx, chl_idx] : active_dependencies) { + // Verify the top set's transactions: it must contain the parent, and for every + // active dependency, except the chl_idx->par_idx dependency itself, if it contains the + // parent or child, it must contain both. It must have exactly N-1 active dependencies + // in it, guaranteeing it is acyclic. 
SetType expected_top = SetType::Singleton(par_idx); while (true) { auto old = expected_top; - for (const auto& [par2_idx, chl2_idx, dep2_idx] : active_dependencies) { - if (dep2_idx != dep_idx && (expected_top[par2_idx] || expected_top[chl2_idx])) { + size_t active_dep_count{0}; + for (const auto& [par2_idx, chl2_idx] : active_dependencies) { + if (par_idx == par2_idx && chl_idx == chl2_idx) continue; + if (expected_top[par2_idx] || expected_top[chl2_idx]) { expected_top.Set(par2_idx); expected_top.Set(chl2_idx); + ++active_dep_count; } } - if (old == expected_top) break; + if (old == expected_top) { + assert(expected_top.Count() == active_dep_count + 1); + break; + } } assert(!expected_top[chl_idx]); - assert(dep_data.top_setinfo.transactions == expected_top); - // Verify the top_info's feerate. - assert(dep_data.top_setinfo.feerate == - depgraph.FeeRate(dep_data.top_setinfo.transactions)); + auto& dep_top_info = m_set_info[m_tx_data[par_idx].dep_top_idx[chl_idx]]; + assert(dep_top_info.transactions == expected_top); + // Verify the top set's feerate. + assert(dep_top_info.feerate == m_depgraph.FeeRate(dep_top_info.transactions)); } // // Verify m_suboptimal_chunks. // + SetType suboptimal_idxs; for (size_t i = 0; i < m_suboptimal_chunks.size(); ++i) { - auto tx_idx = m_suboptimal_chunks[i]; - assert(m_transaction_idxs[tx_idx]); + auto chunk_idx = m_suboptimal_chunks[i]; + assert(!suboptimal_idxs[chunk_idx]); + suboptimal_idxs.Set(chunk_idx); } + assert(m_suboptimal_idxs == suboptimal_idxs); // // Verify m_nonminimal_chunks. 
// - SetType nonminimal_reps; + SetType nonminimal_idxs; for (size_t i = 0; i < m_nonminimal_chunks.size(); ++i) { - auto [chunk_rep, pivot, flags] = m_nonminimal_chunks[i]; - assert(m_tx_data[chunk_rep].chunk_rep == chunk_rep); - assert(m_tx_data[pivot].chunk_rep == chunk_rep); - assert(!nonminimal_reps[chunk_rep]); - nonminimal_reps.Set(chunk_rep); + auto [chunk_idx, pivot, flags] = m_nonminimal_chunks[i]; + assert(m_tx_data[pivot].chunk_idx == chunk_idx); + assert(!nonminimal_idxs[chunk_idx]); + nonminimal_idxs.Set(chunk_idx); } - assert(nonminimal_reps.IsSubsetOf(m_transaction_idxs)); + assert(nonminimal_idxs.IsSubsetOf(m_chunk_idxs)); } }; /** Find or improve a linearization for a cluster. * * @param[in] depgraph Dependency graph of the cluster to be linearized. - * @param[in] max_iterations Upper bound on the amount of work that will be done. + * @param[in] max_cost Upper bound on the amount of work that will be done. * @param[in] rng_seed A random number seed to control search order. This prevents peers * from predicting exactly which clusters would be hard for us to * linearize. @@ -1608,7 +1798,7 @@ class SpanningForestState template std::tuple, bool, uint64_t> Linearize( const DepGraph& depgraph, - uint64_t max_iterations, + uint64_t max_cost, uint64_t rng_seed, const StrongComparator auto& fallback_order, std::span old_linearization = {}, @@ -1624,23 +1814,23 @@ std::tuple, bool, uint64_t> Linearize( } // Make improvement steps to it until we hit the max_iterations limit, or an optimal result // is found. - if (forest.GetCost() < max_iterations) { + if (forest.GetCost() < max_cost) { forest.StartOptimizing(); do { if (!forest.OptimizeStep()) break; - } while (forest.GetCost() < max_iterations); + } while (forest.GetCost() < max_cost); } // Make chunk minimization steps until we hit the max_iterations limit, or all chunks are // minimal. 
bool optimal = false; - if (forest.GetCost() < max_iterations) { + if (forest.GetCost() < max_cost) { forest.StartMinimizing(); do { if (!forest.MinimizeStep()) { optimal = true; break; } - } while (forest.GetCost() < max_iterations); + } while (forest.GetCost() < max_cost); } return {forest.GetLinearization(fallback_order), optimal, forest.GetCost()}; } diff --git a/src/coins.cpp b/src/coins.cpp index fc33c521617c..25b1ead0c1dd 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -15,6 +15,7 @@ TRACEPOINT_SEMAPHORE(utxocache, spent); TRACEPOINT_SEMAPHORE(utxocache, uncache); std::optional CCoinsView::GetCoin(const COutPoint& outpoint) const { return std::nullopt; } +std::optional CCoinsView::PeekCoin(const COutPoint& outpoint) const { return GetCoin(outpoint); } uint256 CCoinsView::GetBestBlock() const { return uint256(); } std::vector CCoinsView::GetHeadBlocks() const { return std::vector(); } void CCoinsView::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& hashBlock) @@ -31,6 +32,7 @@ bool CCoinsView::HaveCoin(const COutPoint &outpoint) const CCoinsViewBacked::CCoinsViewBacked(CCoinsView *viewIn) : base(viewIn) { } std::optional CCoinsViewBacked::GetCoin(const COutPoint& outpoint) const { return base->GetCoin(outpoint); } +std::optional CCoinsViewBacked::PeekCoin(const COutPoint& outpoint) const { return base->PeekCoin(outpoint); } bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base->HaveCoin(outpoint); } uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); } std::vector CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); } @@ -39,6 +41,14 @@ void CCoinsViewBacked::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& h std::unique_ptr CCoinsViewBacked::Cursor() const { return base->Cursor(); } size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); } +std::optional CCoinsViewCache::PeekCoin(const COutPoint& outpoint) const +{ + if (auto 
it{cacheCoins.find(outpoint)}; it != cacheCoins.end()) { + return it->second.coin.IsSpent() ? std::nullopt : std::optional{it->second.coin}; + } + return base->PeekCoin(outpoint); +} + CCoinsViewCache::CCoinsViewCache(CCoinsView* baseIn, bool deterministic) : CCoinsViewBacked(baseIn), m_deterministic(deterministic), cacheCoins(0, SaltedOutpointHasher(/*deterministic=*/deterministic), CCoinsMap::key_equal{}, &m_cache_coins_memory_resource) @@ -50,10 +60,15 @@ size_t CCoinsViewCache::DynamicMemoryUsage() const { return memusage::DynamicUsage(cacheCoins) + cachedCoinsUsage; } +std::optional CCoinsViewCache::FetchCoinFromBase(const COutPoint& outpoint) const +{ + return base->GetCoin(outpoint); +} + CCoinsMap::iterator CCoinsViewCache::FetchCoin(const COutPoint &outpoint) const { const auto [ret, inserted] = cacheCoins.try_emplace(outpoint); if (inserted) { - if (auto coin{base->GetCoin(outpoint)}) { + if (auto coin{FetchCoinFromBase(outpoint)}) { ret->second.coin = std::move(*coin); cachedCoinsUsage += ret->second.coin.DynamicMemoryUsage(); Assert(!ret->second.coin.IsSpent()); @@ -98,10 +113,12 @@ void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possi fresh = !it->second.IsDirty(); } if (!inserted) { - cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); + Assume(TrySub(m_dirty_count, it->second.IsDirty())); + Assume(TrySub(cachedCoinsUsage, it->second.coin.DynamicMemoryUsage())); } it->second.coin = std::move(coin); CCoinsCacheEntry::SetDirty(*it, m_sentinel); + ++m_dirty_count; if (fresh) CCoinsCacheEntry::SetFresh(*it, m_sentinel); cachedCoinsUsage += it->second.coin.DynamicMemoryUsage(); TRACEPOINT(utxocache, add, @@ -117,6 +134,7 @@ void CCoinsViewCache::EmplaceCoinInternalDANGER(COutPoint&& outpoint, Coin&& coi auto [it, inserted] = cacheCoins.try_emplace(std::move(outpoint), std::move(coin)); if (inserted) { CCoinsCacheEntry::SetDirty(*it, m_sentinel); + ++m_dirty_count; cachedCoinsUsage += mem_usage; } } @@ -135,7 +153,8 @@ void 
AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, bool bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) { CCoinsMap::iterator it = FetchCoin(outpoint); if (it == cacheCoins.end()) return false; - cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); + Assume(TrySub(m_dirty_count, it->second.IsDirty())); + Assume(TrySub(cachedCoinsUsage, it->second.coin.DynamicMemoryUsage())); TRACEPOINT(utxocache, spent, outpoint.hash.data(), (uint32_t)outpoint.n, @@ -149,6 +168,7 @@ bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) { cacheCoins.erase(it); } else { CCoinsCacheEntry::SetDirty(*it, m_sentinel); + ++m_dirty_count; it->second.coin.Clear(); } return true; @@ -207,8 +227,9 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha } else { entry.coin = it->second.coin; } - cachedCoinsUsage += entry.coin.DynamicMemoryUsage(); CCoinsCacheEntry::SetDirty(*itUs, m_sentinel); + ++m_dirty_count; + cachedCoinsUsage += entry.coin.DynamicMemoryUsage(); // We can mark it FRESH in the parent if it was FRESH in the child // Otherwise it might have just been flushed from the parent's cache // and already exist in the grandparent @@ -227,11 +248,12 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha if (itUs->second.IsFresh() && it->second.coin.IsSpent()) { // The grandparent cache does not have an entry, and the coin // has been spent. We can just delete it from the parent cache. - cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage(); + Assume(TrySub(m_dirty_count, itUs->second.IsDirty())); + Assume(TrySub(cachedCoinsUsage, itUs->second.coin.DynamicMemoryUsage())); cacheCoins.erase(itUs); } else { // A normal modification. 
- cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage(); + Assume(TrySub(cachedCoinsUsage, itUs->second.coin.DynamicMemoryUsage())); if (cursor.WillErase(*it)) { // Since this entry will be erased, // we can move the coin into us instead of copying it @@ -240,7 +262,10 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha itUs->second.coin = it->second.coin; } cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage(); - CCoinsCacheEntry::SetDirty(*itUs, m_sentinel); + if (!itUs->second.IsDirty()) { + CCoinsCacheEntry::SetDirty(*itUs, m_sentinel); + ++m_dirty_count; + } // NOTE: It isn't safe to mark the coin as FRESH in the parent // cache. If it already existed and was spent in the parent // cache then marking it FRESH would prevent that spentness @@ -253,8 +278,9 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha void CCoinsViewCache::Flush(bool reallocate_cache) { - auto cursor{CoinsViewCacheCursor(m_sentinel, cacheCoins, /*will_erase=*/true)}; + auto cursor{CoinsViewCacheCursor(m_dirty_count, m_sentinel, cacheCoins, /*will_erase=*/true)}; base->BatchWrite(cursor, hashBlock); + Assume(m_dirty_count == 0); cacheCoins.clear(); if (reallocate_cache) { ReallocateCache(); @@ -264,8 +290,9 @@ void CCoinsViewCache::Flush(bool reallocate_cache) void CCoinsViewCache::Sync() { - auto cursor{CoinsViewCacheCursor(m_sentinel, cacheCoins, /*will_erase=*/false)}; + auto cursor{CoinsViewCacheCursor(m_dirty_count, m_sentinel, cacheCoins, /*will_erase=*/false)}; base->BatchWrite(cursor, hashBlock); + Assume(m_dirty_count == 0); if (m_sentinel.second.Next() != &m_sentinel) { /* BatchWrite must clear flags of all entries */ throw std::logic_error("Not all unspent flagged entries were cleared"); @@ -276,6 +303,7 @@ void CCoinsViewCache::Reset() noexcept { cacheCoins.clear(); cachedCoinsUsage = 0; + m_dirty_count = 0; SetBestBlock(uint256::ZERO); } @@ -283,7 +311,7 @@ void CCoinsViewCache::Uncache(const COutPoint& 
hash) { CCoinsMap::iterator it = cacheCoins.find(hash); if (it != cacheCoins.end() && !it->second.IsDirty()) { - cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); + Assume(TrySub(cachedCoinsUsage, it->second.coin.DynamicMemoryUsage())); TRACEPOINT(utxocache, uncache, hash.hash.data(), (uint32_t)hash.n, @@ -348,7 +376,7 @@ void CCoinsViewCache::SanityCheck() const // Count the number of entries actually in the list. ++count_linked; } - assert(count_linked == count_dirty); + assert(count_dirty == count_linked && count_dirty == m_dirty_count); assert(recomputed_usage == cachedCoinsUsage); } @@ -393,3 +421,8 @@ bool CCoinsViewErrorCatcher::HaveCoin(const COutPoint& outpoint) const { return ExecuteBackedWrapper([&]() { return CCoinsViewBacked::HaveCoin(outpoint); }, m_err_callbacks); } + +std::optional CCoinsViewErrorCatcher::PeekCoin(const COutPoint& outpoint) const +{ + return ExecuteBackedWrapper>([&]() { return CCoinsViewBacked::PeekCoin(outpoint); }, m_err_callbacks); +} diff --git a/src/coins.h b/src/coins.h index 4b39c0bacd28..08c1886f9307 100644 --- a/src/coins.h +++ b/src/coins.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -265,10 +266,11 @@ struct CoinsViewCacheCursor //! This is an optimization compared to erasing all entries as the cursor iterates them when will_erase is set. //! Calling CCoinsMap::clear() afterwards is faster because a CoinsCachePair cannot be coerced back into a //! CCoinsMap::iterator to be erased, and must therefore be looked up again by key in the CCoinsMap before being erased. 
- CoinsViewCacheCursor(CoinsCachePair& sentinel LIFETIMEBOUND, + CoinsViewCacheCursor(size_t& dirty_count LIFETIMEBOUND, + CoinsCachePair& sentinel LIFETIMEBOUND, CCoinsMap& map LIFETIMEBOUND, bool will_erase) noexcept - : m_sentinel(sentinel), m_map(map), m_will_erase(will_erase) {} + : m_dirty_count(dirty_count), m_sentinel(sentinel), m_map(map), m_will_erase(will_erase) {} inline CoinsCachePair* Begin() const noexcept { return m_sentinel.second.Next(); } inline CoinsCachePair* End() const noexcept { return &m_sentinel; } @@ -277,6 +279,7 @@ struct CoinsViewCacheCursor inline CoinsCachePair* NextAndMaybeErase(CoinsCachePair& current) noexcept { const auto next_entry{current.second.Next()}; + Assume(TrySub(m_dirty_count, current.second.IsDirty())); // If we are not going to erase the cache, we must still erase spent entries. // Otherwise, clear the state of the entry. if (!m_will_erase) { @@ -291,7 +294,10 @@ struct CoinsViewCacheCursor } inline bool WillErase(CoinsCachePair& current) const noexcept { return m_will_erase || current.second.coin.IsSpent(); } + size_t GetDirtyCount() const noexcept { return m_dirty_count; } + size_t GetTotalCount() const noexcept { return m_map.size(); } private: + size_t& m_dirty_count; CoinsCachePair& m_sentinel; CCoinsMap& m_map; bool m_will_erase; @@ -302,9 +308,15 @@ class CCoinsView { public: //! Retrieve the Coin (unspent transaction output) for a given outpoint. + //! May populate the cache. Use PeekCoin() to perform a non-caching lookup. virtual std::optional GetCoin(const COutPoint& outpoint) const; + //! Retrieve the Coin (unspent transaction output) for a given outpoint, without caching results. + //! Does not populate the cache. Use GetCoin() to cache the result. + virtual std::optional PeekCoin(const COutPoint& outpoint) const; + //! Just check whether a given outpoint is unspent. + //! May populate the cache. Use PeekCoin() to perform a non-caching lookup. virtual bool HaveCoin(const COutPoint &outpoint) const; //! 
Retrieve the block hash whose state this CCoinsView currently represents @@ -340,6 +352,7 @@ class CCoinsViewBacked : public CCoinsView public: CCoinsViewBacked(CCoinsView *viewIn); std::optional GetCoin(const COutPoint& outpoint) const override; + std::optional PeekCoin(const COutPoint& outpoint) const override; bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; std::vector GetHeadBlocks() const override; @@ -369,6 +382,8 @@ class CCoinsViewCache : public CCoinsViewBacked /* Cached dynamic memory usage for the inner Coin objects. */ mutable size_t cachedCoinsUsage{0}; + /* Running count of dirty Coin cache entries. */ + mutable size_t m_dirty_count{0}; /** * Discard all modifications made to this cache without flushing to the base view. @@ -376,6 +391,9 @@ class CCoinsViewCache : public CCoinsViewBacked */ void Reset() noexcept; + /* Fetch the coin from base. Used for cache misses in FetchCoin. */ + virtual std::optional FetchCoinFromBase(const COutPoint& outpoint) const; + public: CCoinsViewCache(CCoinsView *baseIn, bool deterministic = false); @@ -386,6 +404,7 @@ class CCoinsViewCache : public CCoinsViewBacked // Standard CCoinsView methods std::optional GetCoin(const COutPoint& outpoint) const override; + std::optional PeekCoin(const COutPoint& outpoint) const override; bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; void SetBestBlock(const uint256 &hashBlock); @@ -458,9 +477,12 @@ class CCoinsViewCache : public CCoinsViewBacked */ void Uncache(const COutPoint &outpoint); - //! Calculate the size of the cache (in number of transaction outputs) + //! Size of the cache (in number of transaction outputs) unsigned int GetCacheSize() const; + //! Number of dirty cache entries (transaction outputs) + size_t GetDirtyCount() const noexcept { return m_dirty_count; } + //! 
Calculate the size of the cache (in bytes) size_t DynamicMemoryUsage() const; @@ -504,6 +526,27 @@ class CCoinsViewCache : public CCoinsViewBacked CCoinsMap::iterator FetchCoin(const COutPoint &outpoint) const; }; +/** + * CCoinsViewCache overlay that avoids populating/mutating parent cache layers on cache misses. + * + * This is achieved by fetching coins from the base view using PeekCoin() instead of GetCoin(), + * so intermediate CCoinsViewCache layers are not filled. + * + * Used during ConnectBlock() as an ephemeral, resettable top-level view that is flushed only + * on success, so invalid blocks don't pollute the underlying cache. + */ +class CoinsViewOverlay : public CCoinsViewCache +{ +private: + std::optional FetchCoinFromBase(const COutPoint& outpoint) const override + { + return base->PeekCoin(outpoint); + } + +public: + using CCoinsViewCache::CCoinsViewCache; +}; + //! Utility function to add all of a transaction's outputs to a cache. //! When check is false, this assumes that overwrites are only possible for coinbase transactions. //! When check is true, the underlying view may be queried to determine whether an addition is @@ -536,6 +579,7 @@ class CCoinsViewErrorCatcher final : public CCoinsViewBacked std::optional GetCoin(const COutPoint& outpoint) const override; bool HaveCoin(const COutPoint &outpoint) const override; + std::optional PeekCoin(const COutPoint& outpoint) const override; private: /** A list of callbacks to execute upon leveldb read error. 
*/ diff --git a/src/common/args.cpp b/src/common/args.cpp index 3ffa4d3f105e..5c8589cf4402 100644 --- a/src/common/args.cpp +++ b/src/common/args.cpp @@ -483,29 +483,33 @@ std::string SettingToString(const common::SettingsValue& value, const std::strin return SettingToString(value).value_or(strDefault); } -int64_t ArgsManager::GetIntArg(const std::string& strArg, int64_t nDefault) const +template +Int ArgsManager::GetArg(const std::string& strArg, Int nDefault) const { - return GetIntArg(strArg).value_or(nDefault); + return GetArg(strArg).value_or(nDefault); } -std::optional ArgsManager::GetIntArg(const std::string& strArg) const +template +std::optional ArgsManager::GetArg(const std::string& strArg) const { const common::SettingsValue value = GetSetting(strArg); - return SettingToInt(value); + return SettingTo(value); } -std::optional SettingToInt(const common::SettingsValue& value) +template +std::optional SettingTo(const common::SettingsValue& value) { if (value.isNull()) return std::nullopt; if (value.isFalse()) return 0; if (value.isTrue()) return 1; - if (value.isNum()) return value.getInt(); - return LocaleIndependentAtoi(value.get_str()); + if (value.isNum()) return value.getInt(); + return LocaleIndependentAtoi(value.get_str()); } -int64_t SettingToInt(const common::SettingsValue& value, int64_t nDefault) +template +Int SettingTo(const common::SettingsValue& value, Int nDefault) { - return SettingToInt(value).value_or(nDefault); + return SettingTo(value).value_or(nDefault); } bool ArgsManager::GetBoolArg(const std::string& strArg, bool fDefault) const @@ -531,6 +535,23 @@ bool SettingToBool(const common::SettingsValue& value, bool fDefault) return SettingToBool(value).value_or(fDefault); } +#define INSTANTIATE_INT_TYPE(Type) \ + template Type ArgsManager::GetArg(const std::string&, Type) const; \ + template std::optional ArgsManager::GetArg(const std::string&) const; \ + template Type SettingTo(const common::SettingsValue&, Type); \ + template 
std::optional SettingTo(const common::SettingsValue&) + +INSTANTIATE_INT_TYPE(int8_t); +INSTANTIATE_INT_TYPE(uint8_t); +INSTANTIATE_INT_TYPE(int16_t); +INSTANTIATE_INT_TYPE(uint16_t); +INSTANTIATE_INT_TYPE(int32_t); +INSTANTIATE_INT_TYPE(uint32_t); +INSTANTIATE_INT_TYPE(int64_t); +INSTANTIATE_INT_TYPE(uint64_t); + +#undef INSTANTIATE_INT_TYPE + bool ArgsManager::SoftSetArg(const std::string& strArg, const std::string& strValue) { LOCK(cs_args); diff --git a/src/common/args.h b/src/common/args.h index 1b9233ec75c1..ea4e173bf72a 100644 --- a/src/common/args.h +++ b/src/common/args.h @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -89,8 +90,11 @@ struct SectionInfo { std::string SettingToString(const common::SettingsValue&, const std::string&); std::optional SettingToString(const common::SettingsValue&); -int64_t SettingToInt(const common::SettingsValue&, int64_t); -std::optional SettingToInt(const common::SettingsValue&); +template +Int SettingTo(const common::SettingsValue&, Int); + +template +std::optional SettingTo(const common::SettingsValue&); bool SettingToBool(const common::SettingsValue&, bool); std::optional SettingToBool(const common::SettingsValue&); @@ -293,8 +297,14 @@ class ArgsManager * @param nDefault (e.g. 
1) * @return command-line argument (0 if invalid number) or default value */ - int64_t GetIntArg(const std::string& strArg, int64_t nDefault) const; - std::optional GetIntArg(const std::string& strArg) const; + template + Int GetArg(const std::string& strArg, Int nDefault) const; + + template + std::optional GetArg(const std::string& strArg) const; + + int64_t GetIntArg(const std::string& strArg, int64_t nDefault) const { return GetArg(strArg, nDefault); } + std::optional GetIntArg(const std::string& strArg) const { return GetArg(strArg); } /** * Return boolean argument or default value diff --git a/src/common/messages.cpp b/src/common/messages.cpp index 123db93cf61c..637ec62af895 100644 --- a/src/common/messages.cpp +++ b/src/common/messages.cpp @@ -4,11 +4,11 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include - #include -#include #include +#include #include +#include #include #include #include @@ -33,7 +33,6 @@ std::string StringForFeeReason(FeeReason reason) {FeeReason::DOUBLE_ESTIMATE, "Double Target 95% Threshold"}, {FeeReason::CONSERVATIVE, "Conservative Double Target longer horizon"}, {FeeReason::MEMPOOL_MIN, "Mempool Min Fee"}, - {FeeReason::PAYTXFEE, "PayTxFee set"}, {FeeReason::FALLBACK, "Fallback fee"}, {FeeReason::REQUIRED, "Minimum Required Fee"}, }; @@ -68,7 +67,6 @@ std::string FeeModeInfo(const std::pair& mode, std "less responsive to short-term drops in the prevailing fee market. This mode\n" "potentially returns a higher fee rate estimate.\n", mode.first); default: - // Other modes apart from the ones handled are fee rate units; they should not be clarified. assert(false); } } diff --git a/src/common/netif.cpp b/src/common/netif.cpp index cba4d4e74736..8c0c4fa0832c 100644 --- a/src/common/netif.cpp +++ b/src/common/netif.cpp @@ -34,6 +34,8 @@ #include #endif +#include + namespace { //! Return CNetAddr for the specified OS-level network address. 
@@ -134,7 +136,9 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family) return std::nullopt; } - for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, recv_result); hdr = NLMSG_NEXT(hdr, recv_result)) { + using recv_result_t = std::conditional_t, int64_t, decltype(NLMSG_HDRLEN)>; + + for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, static_cast(recv_result)); hdr = NLMSG_NEXT(hdr, recv_result)) { if (!(hdr->nlmsg_flags & NLM_F_MULTI)) { done = true; } diff --git a/src/common/pcp.cpp b/src/common/pcp.cpp index 4864136b8405..a640b823f9c7 100644 --- a/src/common/pcp.cpp +++ b/src/common/pcp.cpp @@ -4,6 +4,7 @@ #include +#include #include #include #include @@ -81,6 +82,8 @@ constexpr size_t NATPMP_MAP_RESPONSE_LIFETIME_OFS = 12; constexpr uint8_t NATPMP_RESULT_SUCCESS = 0; //! Result code representing unsupported version. constexpr uint8_t NATPMP_RESULT_UNSUPP_VERSION = 1; +//! Result code representing not authorized (router doesn't support port mapping). +constexpr uint8_t NATPMP_RESULT_NOT_AUTHORIZED = 2; //! Result code representing lack of resources. constexpr uint8_t NATPMP_RESULT_NO_RESOURCES = 4; @@ -144,6 +147,8 @@ constexpr size_t PCP_MAP_EXTERNAL_IP_OFS = 20; //! Result code representing success (RFC6887 7.4), shared with NAT-PMP. constexpr uint8_t PCP_RESULT_SUCCESS = NATPMP_RESULT_SUCCESS; +//! Result code representing not authorized (RFC6887 7.4), shared with NAT-PMP. +constexpr uint8_t PCP_RESULT_NOT_AUTHORIZED = NATPMP_RESULT_NOT_AUTHORIZED; //! Result code representing lack of resources (RFC6887 7.4). 
constexpr uint8_t PCP_RESULT_NO_RESOURCES = 8; @@ -374,7 +379,16 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g Assume(response.size() >= NATPMP_MAP_RESPONSE_SIZE); uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS); if (result_code != NATPMP_RESULT_SUCCESS) { - LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + if (result_code == NATPMP_RESULT_NOT_AUTHORIZED) { + static std::atomic warned{false}; + if (!warned.exchange(true)) { + LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + } else { + LogDebug(BCLog::NET, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + } + } else { + LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + } if (result_code == NATPMP_RESULT_NO_RESOURCES) { return MappingError::NO_RESOURCES; } @@ -508,7 +522,16 @@ std::variant PCPRequestPortMap(const PCPMappingNonc uint16_t external_port = ReadBE16(response.data() + PCP_HDR_SIZE + PCP_MAP_EXTERNAL_PORT_OFS); CNetAddr external_addr{PCPUnwrapAddress(response.subspan(PCP_HDR_SIZE + PCP_MAP_EXTERNAL_IP_OFS, ADDR_IPV6_SIZE))}; if (result_code != PCP_RESULT_SUCCESS) { - LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + if (result_code == PCP_RESULT_NOT_AUTHORIZED) { + static std::atomic warned{false}; + if (!warned.exchange(true)) { + LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + } else { + LogDebug(BCLog::NET, "pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + } + } else { + LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + } if (result_code == PCP_RESULT_NO_RESOURCES) { return MappingError::NO_RESOURCES; } diff --git a/src/common/run_command.cpp b/src/common/run_command.cpp index 57683e03467e..86f89e17f23e 100644 --- a/src/common/run_command.cpp +++ 
b/src/common/run_command.cpp @@ -8,12 +8,13 @@ #include #include +#include #ifdef ENABLE_EXTERNAL_SIGNER #include #endif // ENABLE_EXTERNAL_SIGNER -UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in) +UniValue RunCommandParseJSON(const std::vector& cmd_args, const std::string& str_std_in) { #ifdef ENABLE_EXTERNAL_SIGNER namespace sp = subprocess; @@ -22,9 +23,9 @@ UniValue RunCommandParseJSON(const std::string& str_command, const std::string& std::istringstream stdout_stream; std::istringstream stderr_stream; - if (str_command.empty()) return UniValue::VNULL; + if (cmd_args.empty()) return UniValue::VNULL; - auto c = sp::Popen(str_command, sp::input{sp::PIPE}, sp::output{sp::PIPE}, sp::error{sp::PIPE}); + auto c = sp::Popen(cmd_args, sp::input{sp::PIPE}, sp::output{sp::PIPE}, sp::error{sp::PIPE}); if (!str_std_in.empty()) { c.send(str_std_in); } @@ -38,7 +39,7 @@ UniValue RunCommandParseJSON(const std::string& str_command, const std::string& std::getline(stderr_stream, error); const int n_error = c.retcode(); - if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error)); + if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", util::Join(cmd_args, " "), n_error, error)); if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result); return result_json; diff --git a/src/common/run_command.h b/src/common/run_command.h index 56c94f83bd9b..9162c704561b 100644 --- a/src/common/run_command.h +++ b/src/common/run_command.h @@ -6,16 +6,17 @@ #define BITCOIN_COMMON_RUN_COMMAND_H #include +#include class UniValue; /** * Execute a command which returns JSON, and parse the result. 
* - * @param str_command The command to execute, including any arguments + * @param cmd_args The command and arguments * @param str_std_in string to pass to stdin * @return parsed JSON */ -UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in=""); +UniValue RunCommandParseJSON(const std::vector& cmd_args, const std::string& str_std_in = ""); #endif // BITCOIN_COMMON_RUN_COMMAND_H diff --git a/src/compat/stdin.cpp b/src/compat/stdin.cpp index 20540f2ad61e..10c811ad3807 100644 --- a/src/compat/stdin.cpp +++ b/src/compat/stdin.cpp @@ -18,25 +18,38 @@ // https://stackoverflow.com/questions/1413445/reading-a-password-from-stdcin void SetStdinEcho(bool enable) { + if (!StdinTerminal()) { + return; + } #ifdef WIN32 HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); DWORD mode; - GetConsoleMode(hStdin, &mode); + if (!GetConsoleMode(hStdin, &mode)) { + fputs("GetConsoleMode failed\n", stderr); + return; + } if (!enable) { mode &= ~ENABLE_ECHO_INPUT; } else { mode |= ENABLE_ECHO_INPUT; } - SetConsoleMode(hStdin, mode); + if (!SetConsoleMode(hStdin, mode)) { + fputs("SetConsoleMode failed\n", stderr); + } #else struct termios tty; - tcgetattr(STDIN_FILENO, &tty); + if (tcgetattr(STDIN_FILENO, &tty) != 0) { + fputs("tcgetattr failed\n", stderr); + return; + } if (!enable) { - tty.c_lflag &= ~ECHO; + tty.c_lflag &= static_cast(~ECHO); } else { tty.c_lflag |= ECHO; } - (void)tcsetattr(STDIN_FILENO, TCSANOW, &tty); + if (tcsetattr(STDIN_FILENO, TCSANOW, &tty) != 0) { + fputs("tcsetattr failed\n", stderr); + } #endif } diff --git a/src/dummywallet.cpp b/src/dummywallet.cpp index 85fb1ed1e52f..24952fea197c 100644 --- a/src/dummywallet.cpp +++ b/src/dummywallet.cpp @@ -38,7 +38,6 @@ void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const "-maxapsfee=", "-maxtxfee=", "-mintxfee=", - "-paytxfee=", "-signer=", "-spendzeroconfchange", "-txconfirmtarget=", diff --git a/src/external_signer.cpp b/src/external_signer.cpp index 
84d98a199062..3790f4d36f98 100644 --- a/src/external_signer.cpp +++ b/src/external_signer.cpp @@ -9,24 +9,27 @@ #include #include #include +#include #include #include #include #include -ExternalSigner::ExternalSigner(std::string command, std::string chain, std::string fingerprint, std::string name) +ExternalSigner::ExternalSigner(std::vector command, std::string chain, std::string fingerprint, std::string name) : m_command{std::move(command)}, m_chain{std::move(chain)}, m_fingerprint{std::move(fingerprint)}, m_name{std::move(name)} {} -std::string ExternalSigner::NetworkArg() const +std::vector ExternalSigner::NetworkArg() const { - return " --chain " + m_chain; + return {"--chain", m_chain}; } bool ExternalSigner::Enumerate(const std::string& command, std::vector& signers, const std::string& chain) { // Call enumerate - const UniValue result = RunCommandParseJSON(command + " enumerate"); + std::vector cmd_args = Cat(subprocess::util::split(command), {"enumerate"}); + + const UniValue result = RunCommandParseJSON(cmd_args, ""); if (!result.isArray()) { throw std::runtime_error(strprintf("'%s' received invalid response, expected array of signers", command)); } @@ -56,19 +59,19 @@ bool ExternalSigner::Enumerate(const std::string& command, std::vector command = Cat(m_command, Cat({"--stdin", "--fingerprint", m_fingerprint}, NetworkArg())); const std::string stdinStr = "signtx " + EncodeBase64(ssTx.str()); const UniValue signer_result = RunCommandParseJSON(command, stdinStr); diff --git a/src/external_signer.h b/src/external_signer.h index 1b36d49622e1..5ba37c0626b9 100644 --- a/src/external_signer.h +++ b/src/external_signer.h @@ -19,19 +19,19 @@ class ExternalSigner { private: //! The command which handles interaction with the external signer. - std::string m_command; + std::vector m_command; //! Bitcoin mainnet, testnet, etc std::string m_chain; - std::string NetworkArg() const; + std::vector NetworkArg() const; public: //! 
@param[in] command the command which handles interaction with the external signer //! @param[in] fingerprint master key fingerprint of the signer //! @param[in] chain "main", "test", "regtest" or "signet" //! @param[in] name device name - ExternalSigner(std::string command, std::string chain, std::string fingerprint, std::string name); + ExternalSigner(std::vector command, std::string chain, std::string fingerprint, std::string name); //! Master key fingerprint of the signer std::string m_fingerprint; diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 671e119642f2..b84f0da08fda 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -211,7 +211,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg) } } } - auto hreq{std::make_unique(req, *static_cast(arg))}; + auto hreq{std::make_shared(req, *static_cast(arg))}; // Early address-based allow check if (!ClientAllowed(hreq->GetPeer())) { @@ -258,7 +258,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg) return; } - auto item = [req = std::move(hreq), in_path = std::move(path), fn = i->handler]() { + auto item = [req = hreq, in_path = std::move(path), fn = i->handler]() { std::string err_msg; try { fn(req.get(), in_path); @@ -276,7 +276,13 @@ static void http_request_cb(struct evhttp_request* req, void* arg) req->WriteReply(HTTP_INTERNAL_SERVER_ERROR, err_msg); }; - [[maybe_unused]] auto _{g_threadpool_http.Submit(std::move(item))}; + if (auto res = g_threadpool_http.Submit(std::move(item)); !res.has_value()) { + Assume(hreq.use_count() == 1); // ensure request will be deleted + // Both SubmitError::Inactive and SubmitError::Interrupted mean shutdown + LogWarning("HTTP request rejected during server shutdown: '%s'", SubmitErrorString(res.error())); + hreq->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Request rejected during server shutdown"); + return; + } } else { hreq->WriteReply(HTTP_NOT_FOUND); } @@ -410,8 +416,8 @@ bool InitHTTPServer(const util::SignalInterrupt& 
interrupt) } LogDebug(BCLog::HTTP, "Initialized HTTP server\n"); - g_max_queue_depth = std::max((long)gArgs.GetIntArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L); - LogDebug(BCLog::HTTP, "set work queue of depth %d\n", g_max_queue_depth); + g_max_queue_depth = std::max(gArgs.GetArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1); + LogDebug(BCLog::HTTP, "set work queue of depth %d", g_max_queue_depth); // transfer ownership to eventBase/HTTP via .release() eventBase = base_ctr.release(); @@ -431,8 +437,8 @@ static std::thread g_thread_http; void StartHTTPServer() { - int rpcThreads = std::max((long)gArgs.GetIntArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L); - LogInfo("Starting HTTP server with %d worker threads\n", rpcThreads); + int rpcThreads = std::max(gArgs.GetArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1); + LogInfo("Starting HTTP server with %d worker threads", rpcThreads); g_threadpool_http.Start(rpcThreads); g_thread_http = std::thread(ThreadHTTP, eventBase); } diff --git a/src/index/base.h b/src/index/base.h index 8cb8ad8effea..d8fd85669335 100644 --- a/src/index/base.h +++ b/src/index/base.h @@ -122,9 +122,6 @@ class BaseIndex : public CValidationInterface void ChainStateFlushed(const kernel::ChainstateRole& role, const CBlockLocator& locator) override; - /// Return custom notification options for index. - [[nodiscard]] virtual interfaces::Chain::NotifyOptions CustomOptions() { return {}; } - /// Initialize internal state from the database and block index. [[nodiscard]] virtual bool CustomInit(const std::optional& block) { return true; } @@ -151,6 +148,9 @@ class BaseIndex : public CValidationInterface /// Get the name of the index for display in logs. const std::string& GetName() const LIFETIMEBOUND { return m_name; } + /// Return custom notification options for index. 
+ [[nodiscard]] virtual interfaces::Chain::NotifyOptions CustomOptions() { return {}; } + /// Blocks the current thread until the index is caught up to the current /// state of the block chain. This only blocks if the index has gotten in /// sync once and only needs to process blocks in the ValidationInterface diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h index 96d393a3a975..0bb4a74e1256 100644 --- a/src/index/blockfilterindex.h +++ b/src/index/blockfilterindex.h @@ -63,8 +63,6 @@ class BlockFilterIndex final : public BaseIndex std::optional ReadFilterHeader(int height, const uint256& expected_block_hash); protected: - interfaces::Chain::NotifyOptions CustomOptions() override; - bool CustomInit(const std::optional& block) override; bool CustomCommit(CDBBatch& batch) override; @@ -80,6 +78,8 @@ class BlockFilterIndex final : public BaseIndex explicit BlockFilterIndex(std::unique_ptr chain, BlockFilterType filter_type, size_t n_cache_size, bool f_memory = false, bool f_wipe = false); + interfaces::Chain::NotifyOptions CustomOptions() override; + BlockFilterType GetFilterType() const { return m_filter_type; } /** Get a single filter by block. */ diff --git a/src/index/coinstatsindex.h b/src/index/coinstatsindex.h index 041c0b896a62..0e26fba56d95 100644 --- a/src/index/coinstatsindex.h +++ b/src/index/coinstatsindex.h @@ -52,8 +52,6 @@ class CoinStatsIndex final : public BaseIndex bool AllowPrune() const override { return true; } protected: - interfaces::Chain::NotifyOptions CustomOptions() override; - bool CustomInit(const std::optional& block) override; bool CustomCommit(CDBBatch& batch) override; @@ -68,6 +66,8 @@ class CoinStatsIndex final : public BaseIndex // Constructs the index, which becomes available to be queried. 
explicit CoinStatsIndex(std::unique_ptr chain, size_t n_cache_size, bool f_memory = false, bool f_wipe = false); + interfaces::Chain::NotifyOptions CustomOptions() override; + // Look up stats for a specific block using CBlockIndex std::optional LookUpStats(const CBlockIndex& block_index) const; }; diff --git a/src/index/txospenderindex.cpp b/src/index/txospenderindex.cpp new file mode 100644 index 000000000000..d451bb1e0a49 --- /dev/null +++ b/src/index/txospenderindex.cpp @@ -0,0 +1,184 @@ +// Copyright (c) The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* The database is used to find the spending transaction of a given utxo. + * For every input of every transaction it stores a key that is a pair(siphash(input outpoint), transaction location on disk) and an empty value. + * To find the spending transaction of an outpoint, we perform a range query on siphash(outpoint), and for each returned key load the transaction + * and return it if it does spend the provided outpoint. + */ + +// LevelDB key prefix. We only have one key for now but it will make it easier to add others if needed. 
+constexpr uint8_t DB_TXOSPENDERINDEX{'s'}; + +std::unique_ptr g_txospenderindex; + +struct DBKey { + uint64_t hash; + CDiskTxPos pos; + + explicit DBKey(const uint64_t& hash_in, const CDiskTxPos& pos_in) : hash(hash_in), pos(pos_in) {} + + SERIALIZE_METHODS(DBKey, obj) + { + uint8_t prefix{DB_TXOSPENDERINDEX}; + READWRITE(prefix); + if (prefix != DB_TXOSPENDERINDEX) { + throw std::ios_base::failure("Invalid format for spender index DB key"); + } + READWRITE(obj.hash); + READWRITE(obj.pos); + } +}; + +TxoSpenderIndex::TxoSpenderIndex(std::unique_ptr chain, size_t n_cache_size, bool f_memory, bool f_wipe) + : BaseIndex(std::move(chain), "txospenderindex"), m_db{std::make_unique(gArgs.GetDataDirNet() / "indexes" / "txospenderindex" / "db", n_cache_size, f_memory, f_wipe)} +{ + if (!m_db->Read("siphash_key", m_siphash_key)) { + FastRandomContext rng(false); + m_siphash_key = {rng.rand64(), rng.rand64()}; + m_db->Write("siphash_key", m_siphash_key, /*fSync=*/ true); + } +} + +interfaces::Chain::NotifyOptions TxoSpenderIndex::CustomOptions() +{ + interfaces::Chain::NotifyOptions options; + options.disconnect_data = true; + return options; +} + +static uint64_t CreateKeyPrefix(std::pair siphash_key, const COutPoint& vout) +{ + return PresaltedSipHasher(siphash_key.first, siphash_key.second)(vout.hash.ToUint256(), vout.n); +} + +static DBKey CreateKey(std::pair siphash_key, const COutPoint& vout, const CDiskTxPos& pos) +{ + return DBKey(CreateKeyPrefix(siphash_key, vout), pos); +} + +void TxoSpenderIndex::WriteSpenderInfos(const std::vector>& items) +{ + CDBBatch batch(*m_db); + for (const auto& [outpoint, pos] : items) { + DBKey key(CreateKey(m_siphash_key, outpoint, pos)); + // key is hash(spent outpoint) | disk pos, value is empty + batch.Write(key, ""); + } + m_db->WriteBatch(batch); +} + + +void TxoSpenderIndex::EraseSpenderInfos(const std::vector>& items) +{ + CDBBatch batch(*m_db); + for (const auto& [outpoint, pos] : items) { + batch.Erase(CreateKey(m_siphash_key, 
outpoint, pos)); + } + m_db->WriteBatch(batch); +} + +static std::vector> BuildSpenderPositions(const interfaces::BlockInfo& block) +{ + std::vector> items; + items.reserve(block.data->vtx.size()); + + CDiskTxPos pos({block.file_number, block.data_pos}, GetSizeOfCompactSize(block.data->vtx.size())); + for (const auto& tx : block.data->vtx) { + if (!tx->IsCoinBase()) { + for (const auto& input : tx->vin) { + items.emplace_back(input.prevout, pos); + } + } + pos.nTxOffset += ::GetSerializeSize(TX_WITH_WITNESS(*tx)); + } + + return items; +} + + +bool TxoSpenderIndex::CustomAppend(const interfaces::BlockInfo& block) +{ + WriteSpenderInfos(BuildSpenderPositions(block)); + return true; +} + +bool TxoSpenderIndex::CustomRemove(const interfaces::BlockInfo& block) +{ + EraseSpenderInfos(BuildSpenderPositions(block)); + return true; +} + +util::Expected TxoSpenderIndex::ReadTransaction(const CDiskTxPos& tx_pos) const +{ + AutoFile file{m_chainstate->m_blockman.OpenBlockFile(tx_pos, /*fReadOnly=*/true)}; + if (file.IsNull()) { + return util::Unexpected("cannot open block"); + } + CBlockHeader header; + TxoSpender spender; + try { + file >> header; + file.seek(tx_pos.nTxOffset, SEEK_CUR); + file >> TX_WITH_WITNESS(spender.tx); + spender.block_hash = header.GetHash(); + return spender; + } catch (const std::exception& e) { + return util::Unexpected(e.what()); + } +} + +util::Expected, std::string> TxoSpenderIndex::FindSpender(const COutPoint& txo) const +{ + const uint64_t prefix{CreateKeyPrefix(m_siphash_key, txo)}; + std::unique_ptr it(m_db->NewIterator()); + DBKey key(prefix, CDiskTxPos()); + + // find all keys that start with the outpoint hash, load the transaction at the location specified in the key + // and return it if it does spend the provided outpoint + for (it->Seek(std::pair{DB_TXOSPENDERINDEX, prefix}); it->Valid() && it->GetKey(key) && key.hash == prefix; it->Next()) { + if (const auto spender{ReadTransaction(key.pos)}) { + for (const auto& input : 
spender->tx->vin) { + if (input.prevout == txo) { + return std::optional{*spender}; + } + } + } else { + LogError("Deserialize or I/O error - %s", spender.error()); + return util::Unexpected{strprintf("IO error finding spending tx for outpoint %s:%d.", txo.hash.GetHex(), txo.n)}; + } + } + return util::Expected, std::string>(std::nullopt); +} + +BaseIndex::DB& TxoSpenderIndex::GetDB() const { return *m_db; } diff --git a/src/index/txospenderindex.h b/src/index/txospenderindex.h new file mode 100644 index 000000000000..dce1cec385d2 --- /dev/null +++ b/src/index/txospenderindex.h @@ -0,0 +1,66 @@ + +// Copyright (c) The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_INDEX_TXOSPENDERINDEX_H +#define BITCOIN_INDEX_TXOSPENDERINDEX_H + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +struct CDiskTxPos; + +static constexpr bool DEFAULT_TXOSPENDERINDEX{false}; + +struct TxoSpender { + CTransactionRef tx; + uint256 block_hash; +}; + +/** + * TxoSpenderIndex is used to look up which transaction spent a given output. + * The index is written to a LevelDB database and, for each input of each transaction in a block, + * records the outpoint that is spent and the hash of the spending transaction. 
+ */ +class TxoSpenderIndex final : public BaseIndex +{ +private: + std::unique_ptr m_db; + std::pair m_siphash_key; + bool AllowPrune() const override { return false; } + void WriteSpenderInfos(const std::vector>& items); + void EraseSpenderInfos(const std::vector>& items); + util::Expected ReadTransaction(const CDiskTxPos& pos) const; + +protected: + interfaces::Chain::NotifyOptions CustomOptions() override; + + bool CustomAppend(const interfaces::BlockInfo& block) override; + + bool CustomRemove(const interfaces::BlockInfo& block) override; + + BaseIndex::DB& GetDB() const override; + +public: + explicit TxoSpenderIndex(std::unique_ptr chain, size_t n_cache_size, bool f_memory = false, bool f_wipe = false); + + util::Expected, std::string> FindSpender(const COutPoint& txo) const; +}; + +/// The global txo spender index. May be null. +extern std::unique_ptr g_txospenderindex; + + +#endif // BITCOIN_INDEX_TXOSPENDERINDEX_H diff --git a/src/init.cpp b/src/init.cpp index e6cc2045b4a9..6a6e7a925b20 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -119,6 +120,10 @@ #include #endif +#ifdef ENABLE_EMBEDDED_ASMAP +#include +#endif + using common::AmountErrMsg; using common::InvalidPortErrMsg; using common::ResolveErrMsg; @@ -360,6 +365,7 @@ void Shutdown(NodeContext& node) // Stop and delete all indexes only after flushing background callbacks. for (auto* index : node.indexes) index->Stop(); if (g_txindex) g_txindex.reset(); + if (g_txospenderindex) g_txospenderindex.reset(); if (g_coin_stats_index) g_coin_stats_index.reset(); DestroyAllBlockFilterIndexes(); node.indexes.clear(); // all instances are nullptr now @@ -524,13 +530,20 @@ void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc) argsman.AddArg("-shutdownnotify=", "Execute command immediately before beginning shutdown. 
The need for shutdown may be urgent, so be careful not to delay it long (if the command doesn't require interaction with the server, consider having it fork into the background).", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #endif argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-txospenderindex", strprintf("Maintain a transaction output spender index, used by the gettxspendingprevout rpc call (default: %u)", DEFAULT_TXOSPENDERINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-blockfilterindex=", strprintf("Maintain an index of compact filters by block (default: %s, values: %s).", DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) + " If is not supplied or if = 1, indexes for all known types are enabled.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-addnode=", strprintf("Add a node to connect to and attempt to keep the connection open (see the addnode RPC help for more info). This option can be specified multiple times to add multiple nodes; connections are limited to %u at a time and are counted separately from the -maxconnections limit.", MAX_ADDNODE_CONNECTIONS), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); - argsman.AddArg("-asmap=", "Specify asn mapping used for bucketing of the peers. Relative paths will be prefixed by the net-specific datadir location.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-asmap=", strprintf("Specify asn mapping used for bucketing of the peers. Relative paths will be prefixed by the net-specific datadir location.%s", + #ifdef ENABLE_EMBEDDED_ASMAP + " If a bool arg is given (-asmap or -asmap=1), the embedded mapping data in the binary will be used." 
+ #else + "" + #endif + ), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-bantime=", strprintf("Default duration (in seconds) of manually configured bans (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-bind=[:][=onion]", strprintf("Bind to given address and always listen on it (default: 0.0.0.0). Use [host]:port notation for IPv6. Append =onion to tag any incoming connections to that address and port as incoming Tor connections (default: 127.0.0.1:%u=onion, testnet3: 127.0.0.1:%u=onion, testnet4: 127.0.0.1:%u=onion, signet: 127.0.0.1:%u=onion, regtest: 127.0.0.1:%u=onion)", defaultChainParams->GetDefaultPort() + 1, testnetChainParams->GetDefaultPort() + 1, testnet4ChainParams->GetDefaultPort() + 1, signetChainParams->GetDefaultPort() + 1, regtestChainParams->GetDefaultPort() + 1), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); argsman.AddArg("-cjdnsreachable", "If set, then this host is configured for CJDNS (connecting to fc00::/8 addresses would lead us to the CJDNS network, see doc/cjdns.md) (default: 0)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); @@ -988,6 +1001,8 @@ bool AppInitParameterInteraction(const ArgsManager& args) if (args.GetIntArg("-prune", 0)) { if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) return InitError(_("Prune mode is incompatible with -txindex.")); + if (args.GetBoolArg("-txospenderindex", DEFAULT_TXOSPENDERINDEX)) + return InitError(_("Prune mode is incompatible with -txospenderindex.")); if (args.GetBoolArg("-reindex-chainstate", false)) { return InitError(_("Prune mode is incompatible with -reindex-chainstate. 
Use full -reindex instead.")); } @@ -1191,8 +1206,10 @@ bool AppInitLockDirectories() bool AppInitInterfaces(NodeContext& node) { - node.chain = node.init->makeChain(); - node.mining = node.init->makeMining(); + node.chain = interfaces::MakeChain(node); + // Specify wait_loaded=false so internal mining interface can be initialized + // on early startup and does not need to be tied to chainstate loading. + node.mining = interfaces::MakeMining(node, /*wait_loaded=*/false); return true; } @@ -1289,16 +1306,12 @@ static ChainstateLoadResult InitAndLoadChainstate( const ArgsManager& args) { // This function may be called twice, so any dirty state must be reset. - node.notifications.reset(); // Drop state, such as a cached tip block + node.notifications->setChainstateLoaded(false); // Drop state, such as a cached tip block node.mempool.reset(); node.chainman.reset(); // Drop state, such as an initialized m_block_tree_db const CChainParams& chainparams = Params(); - Assert(!node.notifications); // Was reset above - node.notifications = std::make_unique(Assert(node.shutdown_request), node.exit_status, *Assert(node.warnings)); - ReadNotificationArgs(args, *node.notifications); - CTxMemPool::Options mempool_opts{ .check_ratio = chainparams.DefaultConsistencyChecks() ? 1 : 0, .signals = node.validation_signals.get(), @@ -1399,6 +1412,7 @@ static ChainstateLoadResult InitAndLoadChainstate( std::tie(status, error) = catch_exceptions([&] { return VerifyLoadedChainstate(chainman, options); }); if (status == node::ChainstateLoadStatus::SUCCESS) { LogInfo("Block index and chainstate loaded"); + node.notifications->setChainstateLoaded(true); } } return {status, error}; @@ -1471,6 +1485,13 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node.validation_signals = std::make_unique(std::make_unique(scheduler)); auto& validation_signals = *node.validation_signals; + // Create KernelNotifications object. 
Important to do this early before + // calling ipc->listenAddress() below so makeMining and other IPC methods + // can use this. + assert(!node.notifications); + node.notifications = std::make_unique(Assert(node.shutdown_request), node.exit_status, *Assert(node.warnings)); + ReadNotificationArgs(args, *node.notifications); + // Create client interfaces for wallets that are supposed to be loaded // according to -wallet and -disablewallet options. This only constructs // the interfaces, it doesn't load wallet data. Wallets actually get loaded @@ -1560,29 +1581,50 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) ApplyArgsManOptions(args, peerman_opts); { - // Read asmap file if configured and initialize + // Read asmap file if configured or embedded asmap data and initialize // Netgroupman with or without it assert(!node.netgroupman); if (args.IsArgSet("-asmap") && !args.IsArgNegated("-asmap")) { - fs::path asmap_path = args.GetPathArg("-asmap"); - if (asmap_path.empty()) { - InitError(_("-asmap requires a file path. 
Use -asmap=.")); - return false; - } - if (!asmap_path.is_absolute()) { - asmap_path = args.GetDataDirNet() / asmap_path; - } - if (!fs::exists(asmap_path)) { - InitError(strprintf(_("Could not find asmap file %s"), fs::quoted(fs::PathToString(asmap_path)))); - return false; - } - std::vector asmap{DecodeAsmap(asmap_path)}; - if (asmap.size() == 0) { - InitError(strprintf(_("Could not parse asmap file %s"), fs::quoted(fs::PathToString(asmap_path)))); - return false; + uint256 asmap_version{}; + if (!args.GetBoolArg("-asmap", false)) { + fs::path asmap_path = args.GetPathArg("-asmap"); + if (!asmap_path.is_absolute()) { + asmap_path = args.GetDataDirNet() / asmap_path; + } + + // If a specific path was passed with the asmap argument check if + // the file actually exists in that location + if (!fs::exists(asmap_path)) { + InitError(strprintf(_("Could not find asmap file %s"), fs::quoted(fs::PathToString(asmap_path)))); + return false; + } + + // If a file exists at the path, try to read the file + std::vector asmap{DecodeAsmap(asmap_path)}; + if (asmap.empty()) { + InitError(strprintf(_("Could not parse asmap file %s"), fs::quoted(fs::PathToString(asmap_path)))); + return false; + } + asmap_version = AsmapVersion(asmap); + node.netgroupman = std::make_unique(NetGroupManager::WithLoadedAsmap(std::move(asmap))); + } else { + #ifdef ENABLE_EMBEDDED_ASMAP + // Use the embedded asmap data + std::span asmap{node::data::ip_asn}; + if (asmap.empty() || !CheckStandardAsmap(asmap)) { + InitError(strprintf(_("Could not read embedded asmap data"))); + return false; + } + node.netgroupman = std::make_unique(NetGroupManager::WithEmbeddedAsmap(asmap)); + asmap_version = AsmapVersion(asmap); + LogInfo("Opened asmap data (%zu bytes) from embedded byte array\n", asmap.size()); + #else + // If there is no embedded data, fail and report it since + // the user tried to use it + InitError(strprintf(_("Embedded asmap data not available"))); + return false; + #endif } - const uint256 
asmap_version = AsmapVersion(asmap); - node.netgroupman = std::make_unique(NetGroupManager::WithLoadedAsmap(std::move(asmap))); LogInfo("Using asmap version %s for IP bucketing", asmap_version.ToString()); } else { node.netgroupman = std::make_unique(NetGroupManager::NoAsmap()); @@ -1793,6 +1835,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) { LogInfo("* Using %.1f MiB for transaction index database", index_cache_sizes.tx_index * (1.0 / 1024 / 1024)); } + if (args.GetBoolArg("-txospenderindex", DEFAULT_TXOSPENDERINDEX)) { + LogInfo("* Using %.1f MiB for transaction output spender index database", index_cache_sizes.txospender_index * (1.0 / 1024 / 1024)); + } for (BlockFilterType filter_type : g_enabled_filter_types) { LogInfo("* Using %.1f MiB for %s block filter index database", index_cache_sizes.filter_index * (1.0 / 1024 / 1024), BlockFilterTypeName(filter_type)); @@ -1861,6 +1906,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node.indexes.emplace_back(g_txindex.get()); } + if (args.GetBoolArg("-txospenderindex", DEFAULT_TXOSPENDERINDEX)) { + g_txospenderindex = std::make_unique(interfaces::MakeChain(node), index_cache_sizes.txospender_index, false, do_reindex); + node.indexes.emplace_back(g_txospenderindex.get()); + } + for (const auto& filter_type : g_enabled_filter_types) { InitBlockFilterIndex([&]{ return interfaces::MakeChain(node); }, filter_type, index_cache_sizes.filter_index, false, do_reindex); node.indexes.emplace_back(GetBlockFilterIndex(filter_type)); @@ -2264,41 +2314,70 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) bool StartIndexBackgroundSync(NodeContext& node) { - // Find the oldest block among all indexes. - // This block is used to verify that we have the required blocks' data stored on disk, - // starting from that point up to the current tip. 
- // indexes_start_block='nullptr' means "start from height 0". - std::optional indexes_start_block; - std::string older_index_name; ChainstateManager& chainman = *Assert(node.chainman); const Chainstate& chainstate = WITH_LOCK(::cs_main, return chainman.ValidatedChainstate()); const CChain& index_chain = chainstate.m_chain; + const int current_height = WITH_LOCK(::cs_main, return index_chain.Height()); + + // Skip checking data availability if we have not synced any blocks yet + if (current_height > 0) { + // Before starting index sync, verify that all required block data is available + // on disk from each index's current sync position up to the chain tip. + // + // This is done separately for undo and block data: First we verify block + undo + // data existence from tip down to the lowest height required by any index that + // needs undo data (e.g., coinstatsindex, blockfilterindex). Then, if any + // block-only index needs to sync from a lower height than previously covered, + // verify block data existence down to that lower height. + // + // This avoids checking undo data for blocks where no index requires it, + // though currently block and undo data availability are synchronized on disk + // under normal circumstances. 
+ std::optional block_start; + std::string block_start_name; + std::optional undo_start; + std::string undo_start_name; + + for (const auto& index : node.indexes) { + const IndexSummary& summary = index->GetSummary(); + if (summary.synced) continue; + + // Get the last common block between the index best block and the active chain + const CBlockIndex* pindex = nullptr; + { + LOCK(::cs_main); + pindex = chainman.m_blockman.LookupBlockIndex(summary.best_block_hash); + if (!index_chain.Contains(pindex)) { + pindex = index_chain.FindFork(pindex); + } + } + if (!pindex) { + pindex = index_chain.Genesis(); + } - for (auto index : node.indexes) { - const IndexSummary& summary = index->GetSummary(); - if (summary.synced) continue; + bool need_undo = index->CustomOptions().connect_undo_data; + auto& op_start_index = need_undo ? undo_start : block_start; + auto& name_index = need_undo ? undo_start_name : block_start_name; - // Get the last common block between the index best block and the active chain - LOCK(::cs_main); - const CBlockIndex* pindex = chainman.m_blockman.LookupBlockIndex(summary.best_block_hash); - if (!index_chain.Contains(pindex)) { - pindex = index_chain.FindFork(pindex); + if (op_start_index && pindex->nHeight >= op_start_index.value()->nHeight) continue; + op_start_index = pindex; + name_index = summary.name; } - if (!indexes_start_block || !pindex || pindex->nHeight < indexes_start_block.value()->nHeight) { - indexes_start_block = pindex; - older_index_name = summary.name; - if (!pindex) break; // Starting from genesis so no need to look for earlier block. + // Verify all blocks needed to sync to current tip are present including undo data. + if (undo_start) { + LOCK(::cs_main); + if (!chainman.m_blockman.CheckBlockDataAvailability(*index_chain.Tip(), *Assert(undo_start.value()), BlockStatus{BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO})) { + return InitError(Untranslated(strprintf("%s best block of the index goes beyond pruned data (including undo data). 
Please disable the index or reindex (which will download the whole blockchain again)", undo_start_name))); + } } - }; - // Verify all blocks needed to sync to current tip are present. - if (indexes_start_block) { - LOCK(::cs_main); - const CBlockIndex* start_block = *indexes_start_block; - if (!start_block) start_block = chainman.ActiveChain().Genesis(); - if (!chainman.m_blockman.CheckBlockDataAvailability(*index_chain.Tip(), *Assert(start_block))) { - return InitError(Untranslated(strprintf("%s best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)", older_index_name))); + // Verify all blocks needed to sync to current tip are present unless we already checked all of them above. + if (block_start && !(undo_start && undo_start.value()->nHeight <= block_start.value()->nHeight)) { + LOCK(::cs_main); + if (!chainman.m_blockman.CheckBlockDataAvailability(*index_chain.Tip(), *Assert(block_start.value()), BlockStatus{BLOCK_HAVE_DATA})) { + return InitError(Untranslated(strprintf("%s best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)", block_start_name))); + } } } diff --git a/src/interfaces/init.h b/src/interfaces/init.h index b909c9e6f6b0..463d43e7c2ea 100644 --- a/src/interfaces/init.h +++ b/src/interfaces/init.h @@ -39,6 +39,7 @@ class Init virtual Ipc* ipc() { return nullptr; } virtual bool canListenIpc() { return false; } virtual const char* exeName() { return nullptr; } + virtual void makeMiningOld2() { throw std::runtime_error("Old mining interface (@2) not supported. Please update your client!"); } }; //! Return implementation of Init interface for the node process. 
If the argv diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h index 993f70bd7aaf..f4c42e204a7c 100644 --- a/src/interfaces/mining.h +++ b/src/interfaces/mining.h @@ -42,31 +42,9 @@ class BlockTemplate // Sigop cost per transaction, not including coinbase transaction. virtual std::vector getTxSigops() = 0; - /** - * Return serialized dummy coinbase transaction. - * - * @note deprecated: use getCoinbaseTx() - */ - virtual CTransactionRef getCoinbaseRawTx() = 0; - /** Return fields needed to construct a coinbase transaction */ virtual node::CoinbaseTx getCoinbaseTx() = 0; - /** - * Return scriptPubKey with SegWit OP_RETURN. - * - * @note deprecated: use getCoinbaseTx() - */ - virtual std::vector getCoinbaseCommitment() = 0; - - /** - * Return which output in the dummy coinbase contains the SegWit OP_RETURN. - * - * @note deprecated. Scan outputs from getCoinbaseTx() outputs field for the - * SegWit marker. - */ - virtual int getWitnessCommitmentIndex() = 0; - /** * Compute merkle path to the coinbase transaction * @@ -138,20 +116,28 @@ class Mining * @param[in] timeout how long to wait for a new tip (default is forever) * * @retval BlockRef hash and height of the current chain tip after this call. - * @retval std::nullopt if the node is shut down. + * @retval std::nullopt if the node is shut down or interrupt() is called. */ virtual std::optional waitTipChanged(uint256 current_tip, MillisecondsDouble timeout = MillisecondsDouble::max()) = 0; /** * Construct a new block template. * - * During node initialization, this will wait until the tip is connected. - * * @param[in] options options for creating the block + * @param[in] cooldown wait for tip to be connected and IBD to complete. + * If the best header is ahead of the tip, wait for the + * tip to catch up. It's recommended to disable this on + * regtest and signets with only one miner, as these + * could stall. * @retval BlockTemplate a block template. - * @retval std::nullptr if the node is shut down. 
+ * @retval std::nullptr if the node is shut down or interrupt() is called. + */ + virtual std::unique_ptr createNewBlock(const node::BlockCreateOptions& options = {}, bool cooldown = true) = 0; + + /** + * Interrupts createNewBlock and waitTipChanged. */ - virtual std::unique_ptr createNewBlock(const node::BlockCreateOptions& options = {}) = 0; + virtual void interrupt() = 0; /** * Checks if a given block is valid. @@ -174,7 +160,11 @@ class Mining }; //! Return implementation of Mining interface. -std::unique_ptr MakeMining(node::NodeContext& node); +//! +//! @param[in] wait_loaded waits for chainstate data to be loaded before +//! returning. Used to prevent external clients from +//! being able to crash the node during startup. +std::unique_ptr MakeMining(node::NodeContext& node, bool wait_loaded=true); } // namespace interfaces diff --git a/src/ipc/capnp/init.capnp b/src/ipc/capnp/init.capnp index 64a7bf9b2b4a..a20ef2fcaf36 100644 --- a/src/ipc/capnp/init.capnp +++ b/src/ipc/capnp/init.capnp @@ -19,5 +19,8 @@ using Mining = import "mining.capnp"; interface Init $Proxy.wrap("interfaces::Init") { construct @0 (threadMap: Proxy.ThreadMap) -> (threadMap :Proxy.ThreadMap); makeEcho @1 (context :Proxy.Context) -> (result :Echo.Echo); - makeMining @2 (context :Proxy.Context) -> (result :Mining.Mining); + makeMining @3 (context :Proxy.Context) -> (result :Mining.Mining); + + # DEPRECATED: no longer supported; server returns an error. 
+ makeMiningOld2 @2 () -> (); } diff --git a/src/ipc/capnp/mining.capnp b/src/ipc/capnp/mining.capnp index 25df11863315..64cad4d49f28 100644 --- a/src/ipc/capnp/mining.capnp +++ b/src/ipc/capnp/mining.capnp @@ -12,13 +12,19 @@ using Proxy = import "/mp/proxy.capnp"; $Proxy.include("interfaces/mining.h"); $Proxy.includeTypes("ipc/capnp/mining-types.h"); +const maxMoney :Int64 = 2100000000000000; +const maxDouble :Float64 = 1.7976931348623157e308; +const defaultBlockReservedWeight :UInt32 = 8000; +const defaultCoinbaseOutputMaxAdditionalSigops :UInt32 = 400; + interface Mining $Proxy.wrap("interfaces::Mining") { isTestChain @0 (context :Proxy.Context) -> (result: Bool); isInitialBlockDownload @1 (context :Proxy.Context) -> (result: Bool); getTip @2 (context :Proxy.Context) -> (result: Common.BlockRef, hasResult: Bool); - waitTipChanged @3 (context :Proxy.Context, currentTip: Data, timeout: Float64) -> (result: Common.BlockRef); - createNewBlock @4 (options: BlockCreateOptions) -> (result: BlockTemplate); - checkBlock @5 (block: Data, options: BlockCheckOptions) -> (reason: Text, debug: Text, result: Bool); + waitTipChanged @3 (context :Proxy.Context, currentTip: Data, timeout: Float64 = .maxDouble) -> (result: Common.BlockRef); + createNewBlock @4 (context :Proxy.Context, options: BlockCreateOptions, cooldown: Bool = true) -> (result: BlockTemplate); + checkBlock @5 (context :Proxy.Context, block: Data, options: BlockCheckOptions) -> (reason: Text, debug: Text, result: Bool); + interrupt @6 () -> (); } interface BlockTemplate $Proxy.wrap("interfaces::BlockTemplate") { @@ -27,30 +33,27 @@ interface BlockTemplate $Proxy.wrap("interfaces::BlockTemplate") { getBlock @2 (context: Proxy.Context) -> (result: Data); getTxFees @3 (context: Proxy.Context) -> (result: List(Int64)); getTxSigops @4 (context: Proxy.Context) -> (result: List(Int64)); - getCoinbaseRawTx @5 (context: Proxy.Context) -> (result: Data); - getCoinbaseTx @12 (context: Proxy.Context) -> (result: 
CoinbaseTx); - getCoinbaseCommitment @6 (context: Proxy.Context) -> (result: Data); - getWitnessCommitmentIndex @7 (context: Proxy.Context) -> (result: Int32); - getCoinbaseMerklePath @8 (context: Proxy.Context) -> (result: List(Data)); - submitSolution @9 (context: Proxy.Context, version: UInt32, timestamp: UInt32, nonce: UInt32, coinbase :Data) -> (result: Bool); - waitNext @10 (context: Proxy.Context, options: BlockWaitOptions) -> (result: BlockTemplate); - interruptWait @11() -> (); + getCoinbaseTx @5 (context: Proxy.Context) -> (result: CoinbaseTx); + getCoinbaseMerklePath @6 (context: Proxy.Context) -> (result: List(Data)); + submitSolution @7 (context: Proxy.Context, version: UInt32, timestamp: UInt32, nonce: UInt32, coinbase :Data) -> (result: Bool); + waitNext @8 (context: Proxy.Context, options: BlockWaitOptions) -> (result: BlockTemplate); + interruptWait @9() -> (); } struct BlockCreateOptions $Proxy.wrap("node::BlockCreateOptions") { - useMempool @0 :Bool $Proxy.name("use_mempool"); - blockReservedWeight @1 :UInt64 $Proxy.name("block_reserved_weight"); - coinbaseOutputMaxAdditionalSigops @2 :UInt64 $Proxy.name("coinbase_output_max_additional_sigops"); + useMempool @0 :Bool = true $Proxy.name("use_mempool"); + blockReservedWeight @1 :UInt64 = .defaultBlockReservedWeight $Proxy.name("block_reserved_weight"); + coinbaseOutputMaxAdditionalSigops @2 :UInt64 = .defaultCoinbaseOutputMaxAdditionalSigops $Proxy.name("coinbase_output_max_additional_sigops"); } struct BlockWaitOptions $Proxy.wrap("node::BlockWaitOptions") { - timeout @0 : Float64 $Proxy.name("timeout"); - feeThreshold @1 : Int64 $Proxy.name("fee_threshold"); + timeout @0 : Float64 = .maxDouble $Proxy.name("timeout"); + feeThreshold @1 : Int64 = .maxMoney $Proxy.name("fee_threshold"); } struct BlockCheckOptions $Proxy.wrap("node::BlockCheckOptions") { - checkMerkleRoot @0 :Bool $Proxy.name("check_merkle_root"); - checkPow @1 :Bool $Proxy.name("check_pow"); + checkMerkleRoot @0 :Bool = true 
$Proxy.name("check_merkle_root"); + checkPow @1 :Bool = true $Proxy.name("check_pow"); } struct CoinbaseTx $Proxy.wrap("node::CoinbaseTx") { diff --git a/src/ipc/test/ipc_test.cpp b/src/ipc/test/ipc_test.cpp index bec288813ad3..31607299ae72 100644 --- a/src/ipc/test/ipc_test.cpp +++ b/src/ipc/test/ipc_test.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -23,6 +24,11 @@ #include +static_assert(ipc::capnp::messages::MAX_MONEY == MAX_MONEY); +static_assert(ipc::capnp::messages::MAX_DOUBLE == std::numeric_limits::max()); +static_assert(ipc::capnp::messages::DEFAULT_BLOCK_RESERVED_WEIGHT == DEFAULT_BLOCK_RESERVED_WEIGHT); +static_assert(ipc::capnp::messages::DEFAULT_COINBASE_OUTPUT_MAX_ADDITIONAL_SIGOPS == DEFAULT_COINBASE_OUTPUT_MAX_ADDITIONAL_SIGOPS); + //! Remote init class. class TestInit : public interfaces::Init { diff --git a/src/ipc/test/ipc_tests.cpp b/src/ipc/test/ipc_tests.cpp index cc03904e28bc..ebe4b397afa1 100644 --- a/src/ipc/test/ipc_tests.cpp +++ b/src/ipc/test/ipc_tests.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include diff --git a/src/net.cpp b/src/net.cpp index c967de7094ee..4f88aa8aab99 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -390,8 +390,9 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, } } - LogDebug(BCLog::NET, "trying %s connection %s lastseen=%.1fhrs\n", + LogDebug(BCLog::NET, "trying %s connection (%s) to %s, lastseen=%.1fhrs\n", use_v2transport ? "v2" : "v1", + ConnectionTypeAsString(conn_type), pszDest ? pszDest : addrConnect.ToStringAddrPort(), Ticks(pszDest ? 
0h : Now() - addrConnect.nTime)); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index e5b4bc7772df..4450ff23f55e 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -542,6 +542,8 @@ class PeerManagerImpl final : public PeerManager bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); std::vector GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + std::vector GetPrivateBroadcastInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + std::vector AbortPrivateBroadcast(const uint256& id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); void InitiateTxBroadcastToAll(const Txid& txid, const Wtxid& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); void InitiateTxBroadcastPrivate(const CTransactionRef& tx) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); @@ -1855,6 +1857,31 @@ PeerManagerInfo PeerManagerImpl::GetInfo() const }; } +std::vector PeerManagerImpl::GetPrivateBroadcastInfo() const +{ + return m_tx_for_private_broadcast.GetBroadcastInfo(); +} + +std::vector PeerManagerImpl::AbortPrivateBroadcast(const uint256& id) +{ + const auto snapshot{m_tx_for_private_broadcast.GetBroadcastInfo()}; + std::vector removed_txs; + + size_t connections_cancelled{0}; + for (const auto& [tx, _] : snapshot) { + if (tx->GetHash().ToUint256() != id && tx->GetWitnessHash().ToUint256() != id) continue; + if (const auto peer_acks{m_tx_for_private_broadcast.Remove(tx)}) { + removed_txs.push_back(tx); + if (NUM_PRIVATE_BROADCAST_PER_TX > *peer_acks) { + connections_cancelled += (NUM_PRIVATE_BROADCAST_PER_TX - *peer_acks); + } + } + } + m_connman.m_private_broadcast.NumToOpenSub(connections_cancelled); + + return removed_txs; +} + void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& 
tx) { if (m_opts.max_extra_txs <= 0) @@ -2057,12 +2084,12 @@ void PeerManagerImpl::BlockConnected( } // The following task can be skipped since we don't maintain a mempool for - // the historical chainstate. - if (role.historical) { - return; + // the historical chainstate, or during ibd since we don't receive incoming + // transactions from peers into the mempool. + if (!role.historical && !m_chainman.IsInitialBlockDownload()) { + LOCK(m_tx_download_mutex); + m_txdownloadman.BlockConnected(pblock); } - LOCK(m_tx_download_mutex); - m_txdownloadman.BlockConnected(pblock); } void PeerManagerImpl::BlockDisconnected(const std::shared_ptr &block, const CBlockIndex* pindex) @@ -3531,7 +3558,7 @@ void PeerManagerImpl::PushPrivateBroadcastTx(CNode& node) { Assume(node.IsPrivateBroadcastConn()); - const auto opt_tx{m_tx_for_private_broadcast.PickTxForSend(node.GetId())}; + const auto opt_tx{m_tx_for_private_broadcast.PickTxForSend(node.GetId(), CService{node.addr})}; if (!opt_tx) { LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: no more transactions for private broadcast (connected in vain), peer=%d%s", node.GetId(), node.LogIP(fLogIPs)); node.fDisconnect = true; diff --git a/src/net_processing.h b/src/net_processing.h index 504e708d702b..36ae021f679d 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -9,8 +9,10 @@ #include #include #include +#include #include #include +#include #include #include @@ -118,6 +120,21 @@ class PeerManager : public CValidationInterface, public NetEventsInterface /** Get peer manager info. */ virtual PeerManagerInfo GetInfo() const = 0; + /** Get info about transactions currently being privately broadcast. */ + virtual std::vector GetPrivateBroadcastInfo() const = 0; + + /** + * Abort private broadcast attempts for transactions currently being privately broadcast. + * + * @param[in] id A transaction identifier. It will be matched against both txid and wtxid for + * all transactions in the private broadcast queue. 
+ * + * @return Transactions removed from the private broadcast queue. If the provided id matches a + * txid that corresponds to multiple transactions with different wtxids, multiple + * transactions may be returned. + */ + virtual std::vector AbortPrivateBroadcast(const uint256& id) = 0; + /** * Initiate a transaction broadcast to eligible peers. * Queue the witness transaction id to `Peer::TxRelay::m_tx_inventory_to_send` diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index ddaaad19c767..ab6d23ec074d 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -487,10 +487,18 @@ bool BlockManager::LoadBlockIndex(const std::optional& snapshot_blockha pindex->m_chain_tx_count = pindex->nTx; } } - if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) { - pindex->nStatus |= BLOCK_FAILED_CHILD; + + if (pindex->nStatus & BLOCK_FAILED_CHILD) { + // BLOCK_FAILED_CHILD is deprecated, but may still exist on disk. Replace it with BLOCK_FAILED_VALID. + pindex->nStatus = (pindex->nStatus & ~BLOCK_FAILED_CHILD) | BLOCK_FAILED_VALID; + m_dirty_blockindex.insert(pindex); + } + if (!(pindex->nStatus & BLOCK_FAILED_VALID) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_VALID)) { + // All descendants of invalid blocks are invalid too. 
+ pindex->nStatus |= BLOCK_FAILED_VALID; m_dirty_blockindex.insert(pindex); } + if (pindex->pprev) { pindex->BuildSkip(); } @@ -623,10 +631,18 @@ const CBlockIndex& BlockManager::GetFirstBlock(const CBlockIndex& upper_block, u return *last_block; } -bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block) +bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block, BlockStatus block_status) { - if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false; - return &GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block; + if (!(upper_block.nStatus & block_status)) return false; + const auto& first_block = GetFirstBlock(upper_block, block_status, &lower_block); + // Special case: the genesis block has no undo data + if (block_status & BLOCK_HAVE_UNDO && lower_block.nHeight == 0 && first_block.nHeight == 1) { + // This might indicate missing data, or it could simply reflect the expected absence of undo data for the genesis block. + // To distinguish between the two, check if all required block data *except* undo is available up to the genesis block. + BlockStatus flags{block_status & ~BLOCK_HAVE_UNDO}; + return first_block.pprev && first_block.pprev->nStatus & flags; + } + return &first_block == &lower_block; } // If we're using -prune with -reindex, then delete block files that will be ignored by the diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 8c8a6d2c743f..4fefa86a61fe 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -412,10 +412,11 @@ class BlockManager /** Calculate the amount of disk space the block & undo files currently use */ uint64_t CalculateCurrentUsage(); - //! Check if all blocks in the [upper_block, lower_block] range have data available. + //! Check if all blocks in the [upper_block, lower_block] range have data available as + //! defined by the status mask. //! 
The caller is responsible for ensuring that lower_block is an ancestor of upper_block //! (part of the same chain). - bool CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + bool CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block, BlockStatus block_status = BLOCK_HAVE_DATA) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** * @brief Returns the earliest block with specified `status_mask` flags set after diff --git a/src/node/caches.cpp b/src/node/caches.cpp index ecff3c628369..5e285e8907bc 100644 --- a/src/node/caches.cpp +++ b/src/node/caches.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,8 @@ static constexpr size_t MAX_TX_INDEX_CACHE{1024_MiB}; //! Max memory allocated to all block filter index caches combined in bytes. static constexpr size_t MAX_FILTER_INDEX_CACHE{1024_MiB}; +//! Max memory allocated to tx spenderindex DB specific cache in bytes. +static constexpr size_t MAX_TXOSPENDER_INDEX_CACHE{1024_MiB}; //! Maximum dbcache size on 32-bit systems. static constexpr size_t MAX_32BIT_DBCACHE{1024_MiB}; @@ -44,6 +47,8 @@ CacheSizes CalculateCacheSizes(const ArgsManager& args, size_t n_indexes) IndexCacheSizes index_sizes; index_sizes.tx_index = std::min(total_cache / 8, args.GetBoolArg("-txindex", DEFAULT_TXINDEX) ? MAX_TX_INDEX_CACHE : 0); total_cache -= index_sizes.tx_index; + index_sizes.txospender_index = std::min(total_cache / 8, args.GetBoolArg("-txospenderindex", DEFAULT_TXOSPENDERINDEX) ? 
MAX_TXOSPENDER_INDEX_CACHE : 0); + total_cache -= index_sizes.txospender_index; if (n_indexes > 0) { size_t max_cache = std::min(total_cache / 8, MAX_FILTER_INDEX_CACHE); index_sizes.filter_index = max_cache / n_indexes; diff --git a/src/node/caches.h b/src/node/caches.h index 2cf526b298e3..8ea2fbffc0b4 100644 --- a/src/node/caches.h +++ b/src/node/caches.h @@ -21,6 +21,7 @@ namespace node { struct IndexCacheSizes { size_t tx_index{0}; size_t filter_index{0}; + size_t txospender_index{0}; }; struct CacheSizes { IndexCacheSizes index; diff --git a/src/node/data/ip_asn.dat b/src/node/data/ip_asn.dat new file mode 100644 index 000000000000..9fad606c3b19 Binary files /dev/null and b/src/node/data/ip_asn.dat differ diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index af9388d4b9f1..6c61b2105d9b 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -890,26 +890,11 @@ class BlockTemplateImpl : public BlockTemplate return m_block_template->vTxSigOpsCost; } - CTransactionRef getCoinbaseRawTx() override - { - return m_block_template->block.vtx[0]; - } - CoinbaseTx getCoinbaseTx() override { return m_block_template->m_coinbase_tx; } - std::vector getCoinbaseCommitment() override - { - return m_block_template->vchCoinbaseCommitment; - } - - int getWitnessCommitmentIndex() override - { - return GetWitnessCommitmentIndex(m_block_template->block); - } - std::vector getCoinbaseMerklePath() override { return TransactionMerklePath(m_block_template->block, 0); @@ -965,10 +950,10 @@ class MinerImpl : public Mining std::optional waitTipChanged(uint256 current_tip, MillisecondsDouble timeout) override { - return WaitTipChanged(chainman(), notifications(), current_tip, timeout); + return WaitTipChanged(chainman(), notifications(), current_tip, timeout, m_interrupt_mining); } - std::unique_ptr createNewBlock(const BlockCreateOptions& options) override + std::unique_ptr createNewBlock(const BlockCreateOptions& options, bool cooldown) override { // Reject 
too-small values instead of clamping so callers don't silently // end up mining with different options than requested. This matches the @@ -981,13 +966,35 @@ class MinerImpl : public Mining } // Ensure m_tip_block is set so consumers of BlockTemplate can rely on that. - if (!waitTipChanged(uint256::ZERO, MillisecondsDouble::max())) return {}; + std::optional maybe_tip{waitTipChanged(uint256::ZERO, MillisecondsDouble::max())}; + + if (!maybe_tip) return {}; + + if (cooldown) { + // Do not return a template during IBD, because it can have long + // pauses and sometimes takes a while to get started. Although this + // is useful in general, it's gated behind the cooldown argument, + // because on regtest and single miner signets this would wait + // forever if no block was mined in the past day. + while (chainman().IsInitialBlockDownload()) { + maybe_tip = waitTipChanged(maybe_tip->hash, MillisecondsDouble{1000}); + if (!maybe_tip || chainman().m_interrupt || WITH_LOCK(notifications().m_tip_block_mutex, return m_interrupt_mining)) return {}; + } + + // Also wait during the final catch-up moments after IBD. 
+ if (!CooldownIfHeadersAhead(chainman(), notifications(), *maybe_tip, m_interrupt_mining)) return {}; + } BlockAssembler::Options assemble_options{options}; ApplyArgsManOptions(*Assert(m_node.args), assemble_options); return std::make_unique(assemble_options, BlockAssembler{chainman().ActiveChainstate(), context()->mempool.get(), assemble_options}.CreateNewBlock(), m_node); } + void interrupt() override + { + InterruptWait(notifications(), m_interrupt_mining); + } + bool checkBlock(const CBlock& block, const node::BlockCheckOptions& options, std::string& reason, std::string& debug) override { LOCK(chainman().GetMutex()); @@ -1000,6 +1007,8 @@ class MinerImpl : public Mining NodeContext* context() override { return &m_node; } ChainstateManager& chainman() { return *Assert(m_node.chainman); } KernelNotifications& notifications() { return *Assert(m_node.notifications); } + // Treat as if guarded by notifications().m_tip_block_mutex + bool m_interrupt_mining{false}; NodeContext& m_node; }; } // namespace @@ -1008,5 +1017,17 @@ class MinerImpl : public Mining namespace interfaces { std::unique_ptr MakeNode(node::NodeContext& context) { return std::make_unique(context); } std::unique_ptr MakeChain(node::NodeContext& context) { return std::make_unique(context); } -std::unique_ptr MakeMining(node::NodeContext& context) { return std::make_unique(context); } +std::unique_ptr MakeMining(node::NodeContext& context, bool wait_loaded) +{ + if (wait_loaded) { + node::KernelNotifications& kernel_notifications(*Assert(context.notifications)); + util::SignalInterrupt& interrupt(*Assert(context.shutdown_signal)); + WAIT_LOCK(kernel_notifications.m_tip_block_mutex, lock); + kernel_notifications.m_tip_block_cv.wait(lock, [&]() EXCLUSIVE_LOCKS_REQUIRED(kernel_notifications.m_tip_block_mutex) { + return kernel_notifications.m_state.chainstate_loaded || interrupt; + }); + if (interrupt) return nullptr; + } + return std::make_unique(context); +} } // namespace interfaces diff --git 
a/src/node/kernel_notifications.cpp b/src/node/kernel_notifications.cpp index f207b49a96b0..ab0e5ccb69e6 100644 --- a/src/node/kernel_notifications.cpp +++ b/src/node/kernel_notifications.cpp @@ -53,7 +53,7 @@ kernel::InterruptResult KernelNotifications::blockTip(SynchronizationState state { LOCK(m_tip_block_mutex); Assume(index.GetBlockHash() != uint256::ZERO); - m_tip_block = index.GetBlockHash(); + m_state.tip_block = index.GetBlockHash(); m_tip_block_cv.notify_all(); } @@ -103,7 +103,7 @@ void KernelNotifications::fatalError(const bilingual_str& message) std::optional KernelNotifications::TipBlock() { AssertLockHeld(m_tip_block_mutex); - return m_tip_block; + return m_state.tip_block; }; diff --git a/src/node/kernel_notifications.h b/src/node/kernel_notifications.h index e9f1e8f1889b..b90248bf0ab0 100644 --- a/src/node/kernel_notifications.h +++ b/src/node/kernel_notifications.h @@ -29,6 +29,18 @@ namespace node { class Warnings; static constexpr int DEFAULT_STOPATHEIGHT{0}; +//! State tracked by the KernelNotifications interface meant to be used by +//! mining code, index code, RPCs, and other code sitting above the validation +//! layer. +//! +//! Currently just tracks the chain tip, but could be used to hold other +//! information in the future, like the last flushed block, pruning +//! information, etc. +struct KernelState { + bool chainstate_loaded{false}; + std::optional tip_block; +}; + class KernelNotifications : public kernel::Notifications { public: @@ -49,6 +61,13 @@ class KernelNotifications : public kernel::Notifications void fatalError(const bilingual_str& message) override; + void setChainstateLoaded(bool chainstate_loaded) EXCLUSIVE_LOCKS_REQUIRED(!m_tip_block_mutex) { + LOCK(m_tip_block_mutex); + if (!chainstate_loaded) m_state = {}; + m_state.chainstate_loaded = chainstate_loaded; + m_tip_block_cv.notify_all(); + } + //! Block height after which blockTip notification will return Interrupted{}, if >0. 
int m_stop_at_height{DEFAULT_STOPATHEIGHT}; //! Useful for tests, can be set to false to avoid shutdown on fatal error. @@ -56,6 +75,7 @@ class KernelNotifications : public kernel::Notifications Mutex m_tip_block_mutex; std::condition_variable m_tip_block_cv GUARDED_BY(m_tip_block_mutex); + KernelState m_state GUARDED_BY(m_tip_block_mutex); //! The block for which the last blockTip notification was received. //! It's first set when the tip is connected during node initialization. //! Might be unset during an early shutdown. @@ -65,8 +85,6 @@ class KernelNotifications : public kernel::Notifications const std::function& m_shutdown_request; std::atomic& m_exit_status; node::Warnings& m_warnings; - - std::optional m_tip_block GUARDED_BY(m_tip_block_mutex); }; void ReadNotificationArgs(const ArgsManager& args, KernelNotifications& notifications); diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 7bdd8b690d7c..7ea8d10b7306 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -197,7 +197,7 @@ std::unique_ptr BlockAssembler::CreateNewBlock() coinbase_tx.lock_time = coinbaseTx.nLockTime; pblock->vtx[0] = MakeTransactionRef(std::move(coinbaseTx)); - pblocktemplate->vchCoinbaseCommitment = m_chainstate.m_chainman.GenerateCoinbaseCommitment(*pblock, pindexPrev); + m_chainstate.m_chainman.GenerateCoinbaseCommitment(*pblock, pindexPrev); const CTransactionRef& final_coinbase{pblock->vtx[0]}; if (final_coinbase->HasWitness()) { @@ -455,7 +455,41 @@ std::optional GetTip(ChainstateManager& chainman) return BlockRef{tip->GetBlockHash(), tip->nHeight}; } -std::optional WaitTipChanged(ChainstateManager& chainman, KernelNotifications& kernel_notifications, const uint256& current_tip, MillisecondsDouble& timeout) +bool CooldownIfHeadersAhead(ChainstateManager& chainman, KernelNotifications& kernel_notifications, const BlockRef& last_tip, bool& interrupt_mining) +{ + uint256 last_tip_hash{last_tip.hash}; + + while (const std::optional remaining = 
chainman.BlocksAheadOfTip()) { + const int cooldown_seconds = std::clamp(*remaining, 3, 20); + const auto cooldown_deadline{MockableSteadyClock::now() + std::chrono::seconds{cooldown_seconds}}; + + { + WAIT_LOCK(kernel_notifications.m_tip_block_mutex, lock); + kernel_notifications.m_tip_block_cv.wait_until(lock, cooldown_deadline, [&]() EXCLUSIVE_LOCKS_REQUIRED(kernel_notifications.m_tip_block_mutex) { + const auto tip_block = kernel_notifications.TipBlock(); + return chainman.m_interrupt || interrupt_mining || (tip_block && *tip_block != last_tip_hash); + }); + if (chainman.m_interrupt || interrupt_mining) { + interrupt_mining = false; + return false; + } + + // If the tip changed during the wait, extend the deadline + const auto tip_block = kernel_notifications.TipBlock(); + if (tip_block && *tip_block != last_tip_hash) { + last_tip_hash = *tip_block; + continue; + } + } + + // No tip change and the cooldown window has expired. + if (MockableSteadyClock::now() >= cooldown_deadline) break; + } + + return true; +} + +std::optional WaitTipChanged(ChainstateManager& chainman, KernelNotifications& kernel_notifications, const uint256& current_tip, MillisecondsDouble& timeout, bool& interrupt) { Assume(timeout >= 0ms); // No internal callers should use a negative timeout if (timeout < 0ms) timeout = 0ms; @@ -468,16 +502,22 @@ std::optional WaitTipChanged(ChainstateManager& chainman, KernelNotifi // always returns valid tip information when possible and only // returns null when shutting down, not when timing out. 
kernel_notifications.m_tip_block_cv.wait(lock, [&]() EXCLUSIVE_LOCKS_REQUIRED(kernel_notifications.m_tip_block_mutex) { - return kernel_notifications.TipBlock() || chainman.m_interrupt; + return kernel_notifications.TipBlock() || chainman.m_interrupt || interrupt; }); - if (chainman.m_interrupt) return {}; + if (chainman.m_interrupt || interrupt) { + interrupt = false; + return {}; + } // At this point TipBlock is set, so continue to wait until it is // different then `current_tip` provided by caller. kernel_notifications.m_tip_block_cv.wait_until(lock, deadline, [&]() EXCLUSIVE_LOCKS_REQUIRED(kernel_notifications.m_tip_block_mutex) { - return Assume(kernel_notifications.TipBlock()) != current_tip || chainman.m_interrupt; + return Assume(kernel_notifications.TipBlock()) != current_tip || chainman.m_interrupt || interrupt; }); + if (chainman.m_interrupt || interrupt) { + interrupt = false; + return {}; + } } - if (chainman.m_interrupt) return {}; // Must release m_tip_block_mutex before getTip() locks cs_main, to // avoid deadlocks. diff --git a/src/node/miner.h b/src/node/miner.h index 0c268f1826d5..5c8668771f59 100644 --- a/src/node/miner.h +++ b/src/node/miner.h @@ -46,7 +46,6 @@ struct CBlockTemplate std::vector vTxFees; // Sigops per transaction, not including coinbase transaction (unlike CBlock::vtx). std::vector vTxSigOpsCost; - std::vector vchCoinbaseCommitment; /* A vector of package fee rates, ordered by the sequence in which * packages are selected for inclusion in the block template.*/ std::vector m_package_feerates; @@ -142,7 +141,7 @@ void ApplyArgsManOptions(const ArgsManager& gArgs, BlockAssembler::Options& opti void AddMerkleRootAndCoinbase(CBlock& block, CTransactionRef coinbase, uint32_t version, uint32_t timestamp, uint32_t nonce); -/* Interrupt the current wait for the next block template. */ +/* Interrupt a blocking call. 
*/ void InterruptWait(KernelNotifications& kernel_notifications, bool& interrupt_wait); /** * Return a new block template when fees rise to a certain threshold or after a @@ -160,9 +159,31 @@ std::unique_ptr WaitAndCreateNewBlock(ChainstateManager& chainma std::optional GetTip(ChainstateManager& chainman); /* Waits for the connected tip to change until timeout has elapsed. During node initialization, this will wait until the tip is connected (regardless of `timeout`). - * Returns the current tip, or nullopt if the node is shutting down. */ -std::optional WaitTipChanged(ChainstateManager& chainman, KernelNotifications& kernel_notifications, const uint256& current_tip, MillisecondsDouble& timeout); + * Returns the current tip, or nullopt if the node is shutting down or interrupt() + * is called. + */ +std::optional WaitTipChanged(ChainstateManager& chainman, KernelNotifications& kernel_notifications, const uint256& current_tip, MillisecondsDouble& timeout, bool& interrupt); +/** + * Wait while the best known header extends the current chain tip AND at least + * one block is being added to the tip every 3 seconds. If the tip is + * sufficiently far behind, allow up to 20 seconds for the next tip update. + * + * It’s not safe to keep waiting, because a malicious miner could announce a + * header and delay revealing the block, causing all other miners using this + * software to stall. At the same time, we need to balance between the default + * waiting time being brief, but not ending the cooldown prematurely when a + * random block is slow to download (or process). + * + * The cooldown only applies to createNewBlock(), which is typically called + * once per connected client. Subsequent templates are provided by waitNext(). + * + * @param last_tip tip at the start of the cooldown window. + * @param interrupt_mining set to true to interrupt the cooldown. + * + * @returns false if interrupted. 
+ */ +bool CooldownIfHeadersAhead(ChainstateManager& chainman, KernelNotifications& kernel_notifications, const BlockRef& last_tip, bool& interrupt_mining); } // namespace node #endif // BITCOIN_NODE_MINER_H diff --git a/src/node/types.h b/src/node/types.h index e3ee05dd0d94..01e74528e06f 100644 --- a/src/node/types.h +++ b/src/node/types.h @@ -55,7 +55,7 @@ struct BlockCreateOptions { * The maximum additional sigops which the pool will add in coinbase * transaction outputs. */ - size_t coinbase_output_max_additional_sigops{400}; + size_t coinbase_output_max_additional_sigops{DEFAULT_COINBASE_OUTPUT_MAX_ADDITIONAL_SIGOPS}; /** * Script to put in the coinbase transaction. The default is an * anyone-can-spend dummy. diff --git a/src/policy/ephemeral_policy.h b/src/policy/ephemeral_policy.h index 33876b49e78a..8ac060ee6780 100644 --- a/src/policy/ephemeral_policy.h +++ b/src/policy/ephemeral_policy.h @@ -41,13 +41,13 @@ class TxValidationState; /* All the following checks are only called if standardness rules are being applied. */ -/** Must be called for each transaction once transaction fees are known. +/** Called for each transaction once transaction fees are known. * Does context-less checks about a single transaction. * @returns false if the fee is non-zero and dust exists, populating state. True otherwise. */ bool PreCheckEphemeralTx(const CTransaction& tx, CFeeRate dust_relay_rate, CAmount base_fee, CAmount mod_fee, TxValidationState& state); -/** Must be called for each transaction(package) if any dust is in the package. +/** Called for each transaction(package) if any dust is in the package. * Checks that each transaction's parents have their dust spent by the child, * where parents are either in the mempool or in the package itself. * Sets out_child_state and out_child_wtxid on failure. 
diff --git a/src/policy/feerate.cpp b/src/policy/feerate.cpp index f74da8a7e228..f62835acbcf2 100644 --- a/src/policy/feerate.cpp +++ b/src/policy/feerate.cpp @@ -26,11 +26,12 @@ CAmount CFeeRate::GetFee(int32_t virtual_bytes) const return nFee; } -std::string CFeeRate::ToString(const FeeEstimateMode& fee_estimate_mode) const +std::string CFeeRate::ToString(FeeRateFormat fee_rate_format) const { - const CAmount feerate_per_kvb = GetFeePerK(); - switch (fee_estimate_mode) { - case FeeEstimateMode::SAT_VB: return strprintf("%d.%03d %s/vB", feerate_per_kvb / 1000, feerate_per_kvb % 1000, CURRENCY_ATOM); - default: return strprintf("%d.%08d %s/kvB", feerate_per_kvb / COIN, feerate_per_kvb % COIN, CURRENCY_UNIT); - } + const CAmount feerate_per_kvb{GetFeePerK()}; + switch (fee_rate_format) { + case FeeRateFormat::BTC_KVB: return strprintf("%d.%08d %s/kvB", feerate_per_kvb / COIN, feerate_per_kvb % COIN, CURRENCY_UNIT); + case FeeRateFormat::SAT_VB: return strprintf("%d.%03d %s/vB", feerate_per_kvb / 1000, feerate_per_kvb % 1000, CURRENCY_ATOM); + } // no default case, so the compiler can warn about missing cases + assert(false); } diff --git a/src/policy/feerate.h b/src/policy/feerate.h index 5994fe9962bf..f6b49a1465b3 100644 --- a/src/policy/feerate.h +++ b/src/policy/feerate.h @@ -9,6 +9,7 @@ #include #include #include +#include #include @@ -18,13 +19,9 @@ const std::string CURRENCY_UNIT = "BTC"; // One formatted unit const std::string CURRENCY_ATOM = "sat"; // One indivisible minimum value unit -/* Used to determine type of fee estimation requested */ -enum class FeeEstimateMode { - UNSET, //!< Use default settings based on other criteria - ECONOMICAL, //!< Force estimateSmartFee to use non-conservative estimates - CONSERVATIVE, //!< Force estimateSmartFee to use conservative estimates - BTC_KVB, //!< Use BTC/kvB fee rate unit - SAT_VB, //!< Use sat/vB fee rate unit +enum class FeeRateFormat { + BTC_KVB, //!< Use BTC/kvB fee rate unit + SAT_VB, //!< Use sat/vB fee 
rate unit }; /** @@ -75,7 +72,7 @@ class CFeeRate m_feerate = FeePerVSize(GetFeePerK() + a.GetFeePerK(), 1000); return *this; } - std::string ToString(const FeeEstimateMode& fee_estimate_mode = FeeEstimateMode::BTC_KVB) const; + std::string ToString(FeeRateFormat fee_rate_format = FeeRateFormat::BTC_KVB) const; friend CFeeRate operator*(const CFeeRate& f, int a) { return CFeeRate(a * f.m_feerate.fee, f.m_feerate.size); } friend CFeeRate operator*(int a, const CFeeRate& f) { return CFeeRate(a * f.m_feerate.fee, f.m_feerate.size); } diff --git a/src/policy/fees/block_policy_estimator.cpp b/src/policy/fees/block_policy_estimator.cpp index 423daba1c8f4..84941b6edfd4 100644 --- a/src/policy/fees/block_policy_estimator.cpp +++ b/src/policy/fees/block_policy_estimator.cpp @@ -33,8 +33,8 @@ #include // The current format written, and the version required to read. Must be -// increased to at least 289900+1 on the next breaking change. -constexpr int CURRENT_FEES_FILE_VERSION{149900}; +// increased to at least 309900+1 on the next breaking change. +constexpr int CURRENT_FEES_FILE_VERSION{309900}; static constexpr double INF_FEERATE = 1e99; @@ -980,7 +980,6 @@ bool CBlockPolicyEstimator::Write(AutoFile& fileout) const try { LOCK(m_cs_fee_estimator); fileout << CURRENT_FEES_FILE_VERSION; - fileout << int{0}; // Unused dummy field. 
Written files may contain any value in [0, 289900] fileout << nBestSeenHeight; if (BlockSpan() > HistoricalBlockSpan()/2) { fileout << firstRecordedHeight << nBestSeenHeight; @@ -1004,8 +1003,8 @@ bool CBlockPolicyEstimator::Read(AutoFile& filein) { try { LOCK(m_cs_fee_estimator); - int nVersionRequired, dummy; - filein >> nVersionRequired >> dummy; + int nVersionRequired; + filein >> nVersionRequired; if (nVersionRequired > CURRENT_FEES_FILE_VERSION) { throw std::runtime_error{strprintf("File version (%d) too high to be read.", nVersionRequired)}; } diff --git a/src/policy/fees/block_policy_estimator.h b/src/policy/fees/block_policy_estimator.h index 505eed0867d3..026bf1eb8420 100644 --- a/src/policy/fees/block_policy_estimator.h +++ b/src/policy/fees/block_policy_estimator.h @@ -64,7 +64,6 @@ enum class FeeReason { DOUBLE_ESTIMATE, CONSERVATIVE, MEMPOOL_MIN, - PAYTXFEE, FALLBACK, REQUIRED, }; @@ -181,13 +180,15 @@ class CBlockPolicyEstimator : public CValidationInterface static constexpr double SUFFICIENT_TXS_SHORT = 0.5; /** Minimum and Maximum values for tracking feerates - * The MIN_BUCKET_FEERATE should just be set to the lowest reasonable feerate we - * might ever want to track. Historically this has been 1000 since it was - * inheriting DEFAULT_MIN_RELAY_TX_FEE and changing it is disruptive as it - * invalidates old estimates files. So leave it at 1000 unless it becomes - * necessary to lower it, and then lower it substantially. + * The MIN_BUCKET_FEERATE should just be set to the lowest reasonable feerate. + * MIN_BUCKET_FEERATE has historically inherited DEFAULT_MIN_RELAY_TX_FEE. + * It is hardcoded because changing it is disruptive, as it invalidates existing fee + * estimate files. + * + * Whenever DEFAULT_MIN_RELAY_TX_FEE changes, this value should be updated + * accordingly. At the same time CURRENT_FEES_FILE_VERSION should be bumped. 
*/ - static constexpr double MIN_BUCKET_FEERATE = 1000; + static constexpr double MIN_BUCKET_FEERATE = 100; static constexpr double MAX_BUCKET_FEERATE = 1e7; /** Spacing of FeeRate buckets diff --git a/src/policy/policy.h b/src/policy/policy.h index 35189d71335a..0bc7b3277083 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -24,6 +24,8 @@ class CScript; static constexpr unsigned int DEFAULT_BLOCK_MAX_WEIGHT{MAX_BLOCK_WEIGHT}; /** Default for -blockreservedweight **/ static constexpr unsigned int DEFAULT_BLOCK_RESERVED_WEIGHT{8000}; +/** Default sigops cost to reserve for coinbase transaction outputs when creating block templates. */ +static constexpr unsigned int DEFAULT_COINBASE_OUTPUT_MAX_ADDITIONAL_SIGOPS{400}; /** This accounts for the block header, var_int encoding of the transaction count and a minimally viable * coinbase transaction. It adds an additional safety margin, because even with a thorough understanding * of block serialization, it's easy to make a costly mistake when trying to squeeze every last byte. 
diff --git a/src/private_broadcast.cpp b/src/private_broadcast.cpp index c7c311c0e893..3900f10a9324 100644 --- a/src/private_broadcast.cpp +++ b/src/private_broadcast.cpp @@ -31,7 +31,7 @@ std::optional PrivateBroadcast::Remove(const CTransactionRef& tx) return std::nullopt; } -std::optional PrivateBroadcast::PickTxForSend(const NodeId& will_send_to_nodeid) +std::optional PrivateBroadcast::PickTxForSend(const NodeId& will_send_to_nodeid, const CService& will_send_to_address) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) { LOCK(m_mutex); @@ -43,7 +43,7 @@ std::optional PrivateBroadcast::PickTxForSend(const NodeId& wil if (it != m_transactions.end()) { auto& [tx, sent_to]{*it}; - sent_to.emplace_back(will_send_to_nodeid, NodeClock::now()); + sent_to.emplace_back(will_send_to_nodeid, will_send_to_address, NodeClock::now()); return tx; } @@ -104,6 +104,25 @@ std::vector PrivateBroadcast::GetStale() const return stale; } +std::vector PrivateBroadcast::GetBroadcastInfo() const + EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) +{ + LOCK(m_mutex); + std::vector entries; + entries.reserve(m_transactions.size()); + + for (const auto& [tx, sent_to] : m_transactions) { + std::vector peers; + peers.reserve(sent_to.size()); + for (const auto& status : sent_to) { + peers.emplace_back(PeerSendInfo{.address = status.address, .sent = status.picked, .received = status.confirmed}); + } + entries.emplace_back(TxBroadcastInfo{.tx = tx, .peers = std::move(peers)}); + } + + return entries; +} + PrivateBroadcast::Priority PrivateBroadcast::DerivePriority(const std::vector& sent_to) { Priority p; diff --git a/src/private_broadcast.h b/src/private_broadcast.h index e88db6bbb7be..286344248d9c 100644 --- a/src/private_broadcast.h +++ b/src/private_broadcast.h @@ -30,6 +30,17 @@ class PrivateBroadcast { public: + struct PeerSendInfo { + CService address; + NodeClock::time_point sent; + std::optional received; + }; + + struct TxBroadcastInfo { + CTransactionRef tx; + std::vector peers; + }; + /** * Add a transaction to 
the storage. * @param[in] tx The transaction to add. @@ -54,9 +65,11 @@ class PrivateBroadcast * and oldest send/confirm times. * @param[in] will_send_to_nodeid Will remember that the returned transaction * was picked for sending to this node. + * @param[in] will_send_to_address Address of the peer to which this transaction + * will be sent. * @return Most urgent transaction or nullopt if there are no transactions. */ - std::optional PickTxForSend(const NodeId& will_send_to_nodeid) + std::optional PickTxForSend(const NodeId& will_send_to_nodeid, const CService& will_send_to_address) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); /** @@ -95,14 +108,21 @@ class PrivateBroadcast std::vector GetStale() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); + /** + * Get stats about all transactions currently being privately broadcast. + */ + std::vector GetBroadcastInfo() const + EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); + private: /// Status of a transaction sent to a given node. struct SendStatus { const NodeId nodeid; /// Node to which the transaction will be sent (or was sent). + const CService address; /// Address of the node. const NodeClock::time_point picked; ///< When was the transaction picked for sending to the node. std::optional confirmed; ///< When was the transaction reception confirmed by the node (by PONG). - SendStatus(const NodeId& nodeid, const NodeClock::time_point& picked) : nodeid{nodeid}, picked{picked} {} + SendStatus(const NodeId& nodeid, const CService& address, const NodeClock::time_point& picked) : nodeid{nodeid}, address{address}, picked{picked} {} }; /// Cumulative stats from all the send attempts for a transaction. Used to prioritize transactions. 
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index a426e6150436..3100a055bb7f 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -496,7 +496,7 @@ void BitcoinGUI::createActions() action->setEnabled(false); } m_migrate_wallet_menu->addSeparator(); - QAction* restore_migrate_file_action = m_migrate_wallet_menu->addAction(tr("Restore and Migrate Wallet File...")); + QAction* restore_migrate_file_action = m_migrate_wallet_menu->addAction(tr("Restore and Migrate Wallet File…")); restore_migrate_file_action->setEnabled(true); connect(restore_migrate_file_action, &QAction::triggered, [this] { diff --git a/src/qt/bitcoinstrings.cpp b/src/qt/bitcoinstrings.cpp index 8878600c0545..6a7af280a9b2 100644 --- a/src/qt/bitcoinstrings.cpp +++ b/src/qt/bitcoinstrings.cpp @@ -13,9 +13,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", "%s failed to validate the -assumeutxo snapsho QT_TRANSLATE_NOOP("bitcoin-core", "%s is set very high!"), QT_TRANSLATE_NOOP("bitcoin-core", "%s is set very high! Fees this large could be paid on a single transaction."), QT_TRANSLATE_NOOP("bitcoin-core", "%s request to listen on port %u. This port is considered \"bad\" and thus it is unlikely that any peer will connect to it. See doc/p2p-bad-ports.md for details and a full list."), -QT_TRANSLATE_NOOP("bitcoin-core", "-asmap requires a file path. 
Use -asmap=."), QT_TRANSLATE_NOOP("bitcoin-core", "-maxmempool must be at least %d MB"), -QT_TRANSLATE_NOOP("bitcoin-core", "-paytxfee is deprecated and will be fully removed in v31.0."), QT_TRANSLATE_NOOP("bitcoin-core", "A %zu MiB dbcache may be too large for a system memory of only %zu MiB."), QT_TRANSLATE_NOOP("bitcoin-core", "A fatal internal error occurred, see debug.log for details: "), QT_TRANSLATE_NOOP("bitcoin-core", "Assumeutxo data not found for the given blockhash '%s'."), @@ -35,6 +33,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Corrupted block database detected"), QT_TRANSLATE_NOOP("bitcoin-core", "Could not find asmap file %s"), QT_TRANSLATE_NOOP("bitcoin-core", "Could not generate scriptPubKeys (cache is empty)"), QT_TRANSLATE_NOOP("bitcoin-core", "Could not parse asmap file %s"), +QT_TRANSLATE_NOOP("bitcoin-core", "Could not read embedded asmap data"), QT_TRANSLATE_NOOP("bitcoin-core", "Could not top up scriptPubKeys"), QT_TRANSLATE_NOOP("bitcoin-core", "Creating wallet…"), QT_TRANSLATE_NOOP("bitcoin-core", "Disk space for %s may not accommodate the block files. Approximately %u GB of data will be stored in this directory."), @@ -45,6 +44,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Done loading"), QT_TRANSLATE_NOOP("bitcoin-core", "Dump file %s does not exist."), QT_TRANSLATE_NOOP("bitcoin-core", "Duplicate binding configuration for address %s. Please check your -bind, -bind=...=onion and -whitebind settings."), QT_TRANSLATE_NOOP("bitcoin-core", "Elliptic curve cryptography sanity check failure. 
%s is shutting down."), +QT_TRANSLATE_NOOP("bitcoin-core", "Embedded asmap data not available"), QT_TRANSLATE_NOOP("bitcoin-core", "Error creating %s: Could not write version metadata."), QT_TRANSLATE_NOOP("bitcoin-core", "Error initializing block database"), QT_TRANSLATE_NOOP("bitcoin-core", "Error loading %s"), @@ -133,7 +133,6 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Invalid -proxy address or hostname, ends with QT_TRANSLATE_NOOP("bitcoin-core", "Invalid -proxy address or hostname: '%s'"), QT_TRANSLATE_NOOP("bitcoin-core", "Invalid P2P permission: '%s'"), QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount for %s=: '%s'"), -QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount for %s=: '%s' (must be at least %s)"), QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount for %s=: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)"), QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount for -%s=: '%s'"), QT_TRANSLATE_NOOP("bitcoin-core", "Invalid netmask specified in -whitelist: '%s'"), @@ -174,6 +173,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Prune cannot be configured with a negative va QT_TRANSLATE_NOOP("bitcoin-core", "Prune configured below the minimum of %d MiB. Please use a higher number."), QT_TRANSLATE_NOOP("bitcoin-core", "Prune mode is incompatible with -reindex-chainstate. Use full -reindex instead."), QT_TRANSLATE_NOOP("bitcoin-core", "Prune mode is incompatible with -txindex."), +QT_TRANSLATE_NOOP("bitcoin-core", "Prune mode is incompatible with -txospenderindex."), QT_TRANSLATE_NOOP("bitcoin-core", "Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of a pruned node)"), QT_TRANSLATE_NOOP("bitcoin-core", "Pruning blockstore…"), QT_TRANSLATE_NOOP("bitcoin-core", "Reducing -maxconnections from %d to %d, because of system limitations."), @@ -213,6 +213,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", "The inputs size exceeds the maximum weight. 
P QT_TRANSLATE_NOOP("bitcoin-core", "The preselected coins total amount does not cover the transaction target. Please allow other inputs to be automatically selected or include more coins manually"), QT_TRANSLATE_NOOP("bitcoin-core", "The source code is available from %s."), QT_TRANSLATE_NOOP("bitcoin-core", "The specified config file %s does not exist"), +QT_TRANSLATE_NOOP("bitcoin-core", "The total exceeds your balance when the %s transaction fee is included."), QT_TRANSLATE_NOOP("bitcoin-core", "The transaction amount is too small to pay the fee"), QT_TRANSLATE_NOOP("bitcoin-core", "The transaction amount is too small to send after the fee has been deducted"), QT_TRANSLATE_NOOP("bitcoin-core", "The transactions removal process can only be executed within a db txn"), @@ -224,7 +225,6 @@ QT_TRANSLATE_NOOP("bitcoin-core", "This is the maximum transaction fee you pay ( QT_TRANSLATE_NOOP("bitcoin-core", "This is the minimum transaction fee you pay on every transaction."), QT_TRANSLATE_NOOP("bitcoin-core", "This is the transaction fee you may discard if change is smaller than dust at this level"), QT_TRANSLATE_NOOP("bitcoin-core", "This is the transaction fee you may pay when fee estimates are not available."), -QT_TRANSLATE_NOOP("bitcoin-core", "This is the transaction fee you will pay if you send a transaction."), QT_TRANSLATE_NOOP("bitcoin-core", "Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments."), QT_TRANSLATE_NOOP("bitcoin-core", "Transaction %s does not belong to this wallet"), QT_TRANSLATE_NOOP("bitcoin-core", "Transaction amount too small"), diff --git a/src/qt/locale/bitcoin_en.ts b/src/qt/locale/bitcoin_en.ts index 61013e0821f7..ba3d54bf8ecf 100644 --- a/src/qt/locale/bitcoin_en.ts +++ b/src/qt/locale/bitcoin_en.ts @@ -544,7 +544,12 @@ Signing is only possible with addresses of the type 'legacy'. 
- + + Restore and Migrate Wallet File… + + + + &File &File @@ -801,12 +806,7 @@ Signing is only possible with addresses of the type 'legacy'. - - Restore and Migrate Wallet File... - - - - + Restore and Migrate Wallet Backup @@ -2314,10 +2314,13 @@ The migration process will create a backup of the wallet before migrating. This Could not sign any more inputs. - + - Signed %1 inputs, but more signatures are still required. - + Signed %n input(s), but more signatures are still required. + + Signed %n input, but more signatures are still required. + Signed %n inputs, but more signatures are still required. + @@ -2390,10 +2393,13 @@ The migration process will create a backup of the wallet before migrating. This or - + - Transaction has %1 unsigned inputs. - + Transaction has %n unsigned input(s). + + Transaction has %n unsigned input. + Transaction has %n unsigned inputs. + @@ -3863,7 +3869,7 @@ For more information on using this console, type %6. SendCoinsDialog - + Send Coins Send Coins @@ -4045,7 +4051,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 S&end - + Copy quantity @@ -4075,7 +4081,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + %1 (%2 blocks) @@ -4258,11 +4264,6 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 The amount exceeds your balance. - - - The total exceeds your balance when the %1 transaction fee is included. - - Duplicate address found: addresses should only be used once each. @@ -5263,8 +5264,8 @@ Go to File > Open Wallet to load a wallet. WalletModel - - + + Send Coins Send Coins @@ -5415,7 +5416,7 @@ Go to File > Open Wallet to load a wallet. - + Disk space for %s may not accommodate the block files. Approximately %u GB of data will be stored in this directory. @@ -5425,7 +5426,7 @@ Go to File > Open Wallet to load a wallet. - + Error loading wallet. 
Wallet requires blocks to be downloaded, and software does not currently support loading wallets while blocks are being downloaded out of order when using assumeutxo snapshots. Wallet should be able to load successfully after node sync reaches height %s @@ -5465,7 +5466,7 @@ Go to File > Open Wallet to load a wallet. - + Invalid or corrupt peers.dat (%s). If you believe this is a bug, please report it to %s. As a workaround, you can move the file (%s) out of the way (rename, move, or delete) to have a new one created on the next start. @@ -5505,7 +5506,7 @@ Go to File > Open Wallet to load a wallet. - + Rename of '%s' -> '%s' failed. You should resolve this by manually moving or deleting the invalid snapshot directory %s, otherwise you will encounter the same error again on the next startup. @@ -5520,7 +5521,7 @@ Go to File > Open Wallet to load a wallet. - + The transaction amount is too small to send after the fee has been deducted @@ -5545,7 +5546,7 @@ Go to File > Open Wallet to load a wallet. - + Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments. @@ -5585,12 +5586,12 @@ Go to File > Open Wallet to load a wallet. - + -maxmempool must be at least %d MB - + Cannot obtain a lock on directory %s. %s is probably already running. @@ -5610,17 +5611,17 @@ Go to File > Open Wallet to load a wallet. - + %s is set very high! Fees this large could be paid on a single transaction. - + Cannot provide specific connections and have addrman find outgoing connections at the same time. - + Error loading %s: External signer wallet being loaded without external signer support compiled @@ -5676,7 +5677,7 @@ Go to File > Open Wallet to load a wallet. - + Invalid amount for %s=<amount>: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions) @@ -5716,7 +5717,7 @@ Go to File > Open Wallet to load a wallet. - + Prune: last wallet synchronisation goes beyond pruned data. 
You need to -reindex (download the whole blockchain again in case of a pruned node) @@ -5802,11 +5803,6 @@ Unable to restore backup of wallet. - -paytxfee is deprecated and will be fully removed in v31.0. - - - - A fatal internal error occurred, see debug.log for details: @@ -5870,6 +5866,11 @@ Unable to restore backup of wallet. Could not parse asmap file %s + + + Could not read embedded asmap data + + Could not top up scriptPubKeys @@ -5895,6 +5896,11 @@ Unable to restore backup of wallet. Elliptic curve cryptography sanity check failure. %s is shutting down. + + + Embedded asmap data not available + + Error initializing block database @@ -6186,27 +6192,17 @@ Unable to restore backup of wallet. - - Invalid amount for %s=<amount>: '%s' (must be at least %s) - - - - + Invalid amount for %s=<amount>: '%s' - - -asmap requires a file path. Use -asmap=<file>. - - - - + A %zu MiB dbcache may be too large for a system memory of only %zu MiB. - + Creating wallet… @@ -6216,12 +6212,12 @@ Unable to restore backup of wallet. - + Error creating %s: Could not write version metadata. - + Invalid amount for -%s=<amount>: '%s' @@ -6335,6 +6331,11 @@ Unable to restore backup of wallet. Prune mode is incompatible with -txindex. + + + Prune mode is incompatible with -txospenderindex. + + Pruning blockstore… @@ -6460,6 +6461,11 @@ Unable to restore backup of wallet. The specified config file %s does not exist + + + The total exceeds your balance when the %s transaction fee is included. + + The transaction amount is too small to pay the fee @@ -6491,12 +6497,7 @@ Unable to restore backup of wallet. - - This is the transaction fee you will pay if you send a transaction. - - - - + Transaction %s does not belong to this wallet @@ -6650,12 +6651,12 @@ Please try running the latest software version. - + Do you want to rebuild the databases now? - + Error: Could not add watchonly tx %s to watchonly wallet @@ -6685,12 +6686,12 @@ Please try running the latest software version. 
- + Not enough file descriptors available. %d available, %d required. - + Unrecognized network in -proxy='%s': '%s' diff --git a/src/qt/locale/bitcoin_en.xlf b/src/qt/locale/bitcoin_en.xlf index a9c042a8dc70..4a67456fb7a2 100644 --- a/src/qt/locale/bitcoin_en.xlf +++ b/src/qt/locale/bitcoin_en.xlf @@ -474,219 +474,219 @@ Signing is only possible with addresses of the type 'legacy'. 363 + Restore and Migrate Wallet File… + 499 + + &File 539 - + &Settings 560 - + &Help 621 - + Tabs toolbar 632 - + Syncing Headers (%1%)… 1114 - + Synchronizing with network… 1172 - + Indexing blocks on disk… 1177 - + Processing blocks on disk… 1179 - + Connecting to peers… 1186 - + Request payments (generates QR codes and bitcoin: URIs) 273 - + Show the list of used sending addresses and labels 338 - + Show the list of used receiving addresses and labels 340 - + &Command-line options 371 1195 - + Processed %n block(s) of transaction history. - + Processed %n block(s) of transaction history. - + %1 behind 1218 - + Catching up… 1223 - + Last received block was generated %1 ago. 1242 - + Transactions after this will not yet be visible. 1244 - + Error 1279 - + Warning 1283 - + Information 1287 - + Up to date 1199 - + Ctrl+Q 301 - + Load Partially Signed Bitcoin Transaction 327 - + Load PSBT from &clipboard… 328 - + Load Partially Signed Bitcoin Transaction from clipboard 329 - + Node window 331 - + Open node debugging and diagnostic console 332 - + &Sending addresses 337 - + &Receiving addresses 339 - + Open a bitcoin: URI 343 - + Open Wallet 345 - + Open a wallet 347 - + Close wallet 351 - + Restore Wallet… 358 Name of the menu item that restores wallet from a backup file. 
- + Restore a wallet from a backup file 361 Status tip for Restore Wallet menu item - + Close all wallets 364 - + Migrate Wallet 366 - + Migrate a wallet 368 - + Show the %1 help message to get a list with possible Bitcoin command-line options 373 - + &Mask values 375 - + Mask the values in the Overview tab 377 - + No wallets available 432 495 - + Wallet Data 438 503 Name of the wallet data file format. - + Load Wallet Backup 441 The title for Restore Wallet File Windows - + Restore Wallet 449 Title of pop-up window shown when the user is attempting to restore a wallet. - + Wallet Name 451 514 Label of the input field where the name of the wallet is entered. - + Invalid Wallet Name 455 - + Wallet name cannot be empty 455 - - Restore and Migrate Wallet File... - 499 - Restore and Migrate Wallet Backup 504 @@ -2056,10 +2056,15 @@ The migration process will create a backup of the wallet before migrating. This Could not sign any more inputs. 99 - - Signed %1 inputs, but more signatures are still required. + 101 - + + Signed %n input(s), but more signatures are still required. + + + Signed %n input(s), but more signatures are still required. + + Signed transaction successfully. Transaction is ready to broadcast. 104 @@ -2117,10 +2122,15 @@ The migration process will create a backup of the wallet before migrating. This or 216 - - Transaction has %1 unsigned inputs. + 222 - + + Transaction has %n unsigned input(s). + + + Transaction has %n unsigned input(s). + + Transaction is missing some information about inputs. 268 @@ -3315,7 +3325,7 @@ For more information on using this console, type %6. 
Send Coins 14 - ../sendcoinsdialog.cpp764 + ../sendcoinsdialog.cpp758 Coin Control Features @@ -3465,294 +3475,290 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 Copy quantity - 99 + 98 Copy amount - 100 + 99 Copy fee - 101 + 100 Copy after fee - 102 + 101 Copy bytes - 103 + 102 Copy change - 104 + 103 %1 (%2 blocks) - 176 + 173 Sign on device - 206 + 203 "device" usually means a hardware wallet. Connect your hardware wallet first. - 209 + 206 Set external signer script path in Options -> Wallet - 213 + 210 "External signer" means using devices such as hardware wallets. Cr&eate Unsigned - 216 + 213 Creates a Partially Signed Bitcoin Transaction (PSBT) for use with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet. - 217 + 214 %1 to '%2' - 320 + 317 %1 to %2 - 325 + 322 To review recipient list click "Show Details…" - 392 + 389 Sign failed - 455 + 452 External signer not found - 460 + 457 "External signer" means using devices such as hardware wallets. External signer failure - 466 + 463 "External signer" means using devices such as hardware wallets. Save Transaction Data - 430 + 427 Partially Signed Transaction (Binary) - 432 + 429 Expanded name of the binary PSBT file format. See: BIP 174. PSBT saved - 440 + 437 Popup message when a PSBT has been saved to a file External balance: - 713 + 710 or - 388 + 385 You can increase the fee later (signals Replace-By-Fee, BIP-125). - 370 + 367 Please, review your transaction proposal. This will produce a Partially Signed Bitcoin Transaction (PSBT) which you can save or copy and then sign with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet. - 339 + 336 Text to inform a user attempting to create a transaction of their current options. At this stage, a user can only create a PSBT. This string is displayed when private keys are disabled and an external signer is not available. %1 from wallet '%2' - 309 + 306 Do you want to create this transaction? 
- 333 + 330 Message displayed when attempting to create a transaction. Cautionary text to prompt the user to verify that the displayed transaction details represent the transaction the user intends to create. Please, review your transaction. You can create and send this transaction or create a Partially Signed Bitcoin Transaction (PSBT), which you can save or copy and then sign with, e.g., an offline %1 wallet, or a PSBT-compatible hardware wallet. - 344 + 341 Text to inform a user attempting to create a transaction of their current options. At this stage, a user can send their transaction or create a PSBT. This string is displayed when both private keys and PSBT controls are enabled. Please, review your transaction. - 347 + 344 Text to prompt a user to review the details of the transaction they are attempting to send. Transaction fee - 355 + 352 %1 kvB - 360 + 357 PSBT transaction creation When reviewing a newly created PSBT (via Send flow), the transaction fee is shown, with "virtual size" of the transaction displayed for context Not signalling Replace-By-Fee, BIP-125. - 372 + 369 Total Amount - 385 + 382 Unsigned Transaction - 409 + 406 PSBT copied Caption of "PSBT has been copied" messagebox The PSBT has been copied to the clipboard. You can also save it. - 410 + 407 PSBT saved to disk - 440 + 437 Confirm send coins - 489 + 486 The recipient address is not valid. Please recheck. - 737 + 734 The amount to pay must be larger than 0. - 740 + 737 The amount exceeds your balance. - 743 + 740 - The total exceeds your balance when the %1 transaction fee is included. - 746 - - Duplicate address found: addresses should only be used once each. - 749 + 743 - + Transaction creation failed! - 752 + 746 - + A fee higher than %1 is considered an absurdly high fee. - 756 + 750 - + %1/kvB - 832 - 869 + 826 + 863 - 883 - + 877 + Estimated to begin confirmation within %n block(s). - + Estimated to begin confirmation within %n block(s). 
- + Warning: Invalid Bitcoin address - 982 + 976 - + Warning: Unknown change address - 987 + 981 - + Confirm custom change address - 990 + 984 - + The address you selected for change is not part of this wallet. Any or all funds in your wallet may be sent to this address. Are you sure? - 990 + 984 - + (no label) - 1011 + 1005 - + A&mount: 151 - + Pay &To: 35 - + &Label: 128 - + Choose previously used address 60 - + The Bitcoin address to send the payment to 53 - + Alt+A 76 - + Paste address from clipboard 83 - + Alt+P 99 - + Remove this entry 106 - + The amount to send in the selected unit 166 - + The fee will be deducted from the amount being sent. The recipient will receive less bitcoins than you enter in the amount field. If multiple recipients are selected, the fee is split equally. 173 - + S&ubtract fee from amount 176 - + Use available balance 183 - + Message: 192 - + Enter a label for this address to add it to the list of used addresses 141 144 - + A message that was attached to the bitcoin: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the Bitcoin network. 202 @@ -3760,11 +3766,11 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + Send 151 - + Create Unsigned 153 @@ -3772,105 +3778,105 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + Signatures - Sign / Verify a Message 14 - + &Sign Message 27 - + You can sign messages/agreements with your legacy (P2PKH) addresses to prove you can receive bitcoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to. 
33 - + The Bitcoin address to sign the message with 51 - + Choose previously used address 58 274 - + Alt+A 68 284 - + Paste address from clipboard 78 - + Alt+P 88 - + Enter the message you want to sign here 100 103 - + Signature 110 - + Copy the current signature to the clipboard 140 - + Sign the message to prove you own this Bitcoin address 161 - + Sign &Message 164 - + Reset all sign message fields 178 - + Clear &All 181 338 - + &Verify Message 240 - + Enter the receiver's address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack. Note that this only proves the signing party receives with the address, it cannot prove sendership of any transaction! 246 - + The Bitcoin address the message was signed with 267 - + The signed message to verify 296 299 - + The signature given when the message was signed 306 309 - + Verify the message to ensure it was signed with the specified Bitcoin address 318 - + Verify &Message 321 - + Reset all verify message fields 335 - + Click "Sign Message" to generate signature 125 @@ -3878,59 +3884,59 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + The entered address is invalid. 122 221 - + Please check the address and try again. 122 222 - + The entered address does not refer to a legacy (P2PKH) key. Message signing for SegWit and other non-P2PKH address types is not supported in this version of %1. Please check the address and try again. 129 227 - + Wallet unlock was cancelled. 137 - + No error 148 - + Private key for the entered address is not available. 151 - + Message signing failed. 154 - + Message signed. 166 - + The signature could not be decoded. 232 - + Please check the signature and try again. 233 240 - + The signature did not match the message digest. 239 - + Message verification failed. 
245 - + Message verified. 216 @@ -3938,11 +3944,11 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + (press q to shutdown and continue later) 175 - + press q to shutdown 176 @@ -3950,7 +3956,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + kB/s 74 @@ -3958,77 +3964,77 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + conflicted with a transaction with %1 confirmations 34 Text explaining the current status of a transaction, shown in the status field of the details window for this transaction. This status represents an unconfirmed transaction that conflicts with a confirmed transaction. - + 0/unconfirmed, in memory pool 41 Text explaining the current status of a transaction, shown in the status field of the details window for this transaction. This status represents an unconfirmed transaction that is in the memory pool. - + 0/unconfirmed, not in memory pool 46 Text explaining the current status of a transaction, shown in the status field of the details window for this transaction. This status represents an unconfirmed transaction that is not in the memory pool. - + abandoned 52 Text explaining the current status of a transaction, shown in the status field of the details window for this transaction. This status represents an abandoned transaction. - + %1/unconfirmed 60 Text explaining the current status of a transaction, shown in the status field of the details window for this transaction. This status represents a transaction confirmed in at least one block, but less than 6 blocks. - + %1 confirmations 65 Text explaining the current status of a transaction, shown in the status field of the details window for this transaction. This status represents a transaction confirmed in 6 or more blocks. 
- + Status 115 - + Date 118 - + Source 125 - + Generated 125 - + From 130 143 - + unknown 143 - + To 144 164 221 - + own address 146 228 - + label 148 - + Credit 184 196 @@ -4038,97 +4044,97 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 186 - + matures in %n more block(s) - + matures in %n more block(s) - + not accepted 188 - + Debit 233 259 322 - + Total debit 243 - + Total credit 244 - + Transaction fee 249 - + Net amount 271 - + Message 277 289 - + Comment 279 - + Transaction ID 281 - + Transaction total size 282 - + Transaction virtual size 283 - + Output index 284 - + %1 (Certificate was not verified) 300 - + Merchant 303 - + Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours. 311 - + Debug information 319 - + Transaction 327 - + Inputs 330 - + Amount 349 - + true 350 - + false 350 @@ -4136,7 +4142,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + This pane shows a detailed description of the transaction 20 @@ -4144,7 +4150,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + Details for %1 18 @@ -4152,87 +4158,87 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + Date 257 - + Type 257 - + Label 257 - + Unconfirmed 316 - + Abandoned 319 - + Confirming (%1 of %2 recommended confirmations) 322 - + Confirmed (%1 confirmations) 325 - + Conflicted 328 - + Immature (%1 confirmations, will be available after %2) 331 - + Generated but not accepted 334 - + Received with 373 - + Received from 375 - + Sent to 378 - + Mined 380 - + (n/a) 416 - + (no label) 604 - + Transaction status. Hover over this field to show number of confirmations. 
643 - + Date and time that the transaction was received. 645 - + Type of transaction. 647 - + User-defined intent/purpose of the transaction. 649 - + Amount removed from or added to balance. 651 @@ -4240,158 +4246,158 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + All 66 82 - + Today 67 - + This week 68 - + This month 69 - + Last month 70 - + This year 71 - + Received with 83 - + Sent to 85 - + Mined 87 - + Other 88 - + Enter address, transaction id, or label to search 93 - + Min amount 97 - + Range… 72 - + &Copy address 160 - + Copy &label 161 - + Copy &amount 162 - + Copy transaction &ID 163 - + Copy &raw transaction 164 - + Copy full transaction &details 165 - + &Show transaction details 166 - + Increase transaction &fee 168 - + A&bandon transaction 171 - + &Edit address label 172 - + Show in %1 231 Transactions table context menu action to show the selected transaction in a third-party block explorer. %1 is a stand-in argument for the URL of the explorer. - + Export Transaction History 327 - + Comma separated file 330 Expanded name of the CSV file format. See: https://en.wikipedia.org/wiki/Comma-separated_values. - + Confirmed 339 - + Date 340 - + Type 341 - + Label 342 - + Address 343 - + ID 345 - + Exporting Failed 348 - + There was an error trying to save the transaction history to %1. 348 - + Exporting Successful 352 - + The transaction history was successfully saved to %1. 352 - + Range: 527 - + to 535 @@ -4399,39 +4405,39 @@ Note: Since the fee is calculated on a per-byte basis, a fee rate of "100 - + No wallet has been loaded. Go to File > Open Wallet to load a wallet. - OR - 45 - + Create a new wallet 50 - + Error 201 211 229 - + Unable to decode PSBT from clipboard (invalid base64) 201 - + Load Transaction Data 207 - + Partially Signed Transaction (*.psbt) 208 - + PSBT file must be smaller than 100 MiB 211 - + Unable to decode PSBT 229 @@ -4439,113 +4445,113 @@ Go to File > Open Wallet to load a wallet. 
- + Send Coins - 218 - 231 + 208 + 228 - + Fee bump error - 476 - 525 - 545 - 550 + 473 + 522 + 542 + 547 - + Increasing transaction fee failed - 476 + 473 - + Do you want to increase the fee? - 483 + 480 Asks a user if they would like to manually increase the fee of a transaction that has already been created. - + Current fee: - 487 + 484 - + Increase: - 491 + 488 - + New fee: - 495 + 492 - + Warning: This may pay the additional fee by reducing change outputs or adding inputs, when necessary. It may add a new change output if one does not already exist. These changes may potentially leak privacy. - 503 + 500 - + Confirm fee bump - 508 + 505 - + Can't draft transaction. - 525 + 522 - + PSBT copied - 532 + 529 - + Fee-bump PSBT copied to clipboard - 532 + 529 - + Can't sign transaction. - 545 + 542 - + Could not commit transaction - 550 + 547 - + Signer error - 563 + 560 - + Can't display address - 566 + 563 - + &Export 50 - + Export the data in the current tab to a file 51 - + Backup Wallet 214 - + Wallet Data 216 Name of the wallet data file format. - + Backup Failed 222 - + There was an error trying to save the wallet data to %1. 222 - + Backup Successful 226 - + The wallet data was successfully saved to %1. 226 - + Cancel 263 @@ -4553,402 +4559,406 @@ Go to File > Open Wallet to load a wallet. - + The %s developers 208 - + %s failed to validate the -assumeutxo snapshot state. This indicates a hardware problem, or a bug in the software, or a bad software modification that allowed an invalid snapshot to be loaded. As a result of this, the node will shut down and stop using any state that was built on the snapshot, resetting the chain height from %d to %d. On the next restart, the node will resume syncing from %d without using any snapshot data. Please report this incident to %s, including how you obtained the snapshot. The invalid snapshot chainstate will be left on disk in case it is helpful in diagnosing the issue that caused this error. 
12 - + %s request to listen on port %u. This port is considered "bad" and thus it is unlikely that any peer will connect to it. See doc/p2p-bad-ports.md for details and a full list. 15 - + Disk space for %s may not accommodate the block files. Approximately %u GB of data will be stored in this directory. - 40 + 39 - + Distributed under the MIT software license, see the accompanying file %s or %s - 42 + 41 - + Error loading wallet. Wallet requires blocks to be downloaded, and software does not currently support loading wallets while blocks are being downloaded out of order when using assumeutxo snapshots. Wallet should be able to load successfully after node sync reaches height %s 57 - + Error reading %s! Transaction data may be missing or incorrect. Rescanning wallet. 61 - + Error starting/committing db txn for wallet transactions removal process 65 - + Error: Dumpfile format record is incorrect. Got "%s", expected "format". 73 - + Error: Dumpfile identifier record is incorrect. Got "%s", expected "%s". 74 - + Error: Dumpfile version is not supported. This version of bitcoin-wallet only supports version 1 dumpfiles. Got dumpfile with version %s 76 - + Error: Unable to produce descriptors for this legacy wallet. Make sure to provide the wallet's passphrase if it is encrypted. 92 - + File %s already exists. If you are sure this is what you want, move it out of the way first. 120 - + Invalid or corrupt peers.dat (%s). If you believe this is a bug, please report it to %s. As a workaround, you can move the file (%s) out of the way (rename, move, or delete) to have a new one created on the next start. - 140 + 139 - + Invalid value detected for '-wallet' or '-nowallet'. '-wallet' requires a string value, while '-nowallet' accepts only '1' to disable all wallets - 142 + 141 - + More than one onion bind address is provided. Using %s for the automatically created Tor onion service. - 153 + 152 - + No dump file provided. 
To use createfromdump, -dumpfile=<filename> must be provided. + 155 + + + No dump file provided. To use dump, -dumpfile=<filename> must be provided. 156 - No dump file provided. To use dump, -dumpfile=<filename> must be provided. - 157 + Please contribute if you find %s useful. Visit %s for further information about the software. + 168 - Please contribute if you find %s useful. Visit %s for further information about the software. - 169 + Prune configured below the minimum of %d MiB. Please use a higher number. + 173 - Prune configured below the minimum of %d MiB. Please use a higher number. + Prune mode is incompatible with -reindex-chainstate. Use full -reindex instead. 174 - Prune mode is incompatible with -reindex-chainstate. Use full -reindex instead. - 175 - - Rename of '%s' -> '%s' failed. You should resolve this by manually moving or deleting the invalid snapshot directory %s, otherwise you will encounter the same error again on the next startup. 181 - + SQLiteDatabase: Unknown sqlite wallet schema version %d. Only version %d is supported 188 - + The block database contains a block which appears to be from the future. This may be due to your computer's date and time being set incorrectly. Only rebuild the block database if you are sure that your computer's date and time are correct 210 - + The transaction amount is too small to send after the fee has been deducted - 217 + 218 - + This is a pre-release test build - use at your own risk - do not use for mining or merchant applications - 221 + 222 - + This is the maximum transaction fee you pay (in addition to the normal fee) to prioritize partial spend avoidance over regular coin selection. - 223 + 224 - + This is the transaction fee you may discard if change is smaller than dust at this level - 225 + 226 - + This is the transaction fee you may pay when fee estimates are not available. - 226 + 227 - + Total length of network version string (%i) exceeds maximum length (%i). 
Reduce the number or size of uacomments. 228 - + Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate. 244 - + Unsupported category-specific logging level %1$s=%2$s. Expected %1$s=<category>:<loglevel>. Valid categories: %3$s. Valid loglevels: %4$s. 256 - + Unsupported chainstate database format found. Please restart with -reindex-chainstate. This will rebuild the chainstate database. 257 - + Warning: Private keys detected in wallet {%s} with disabled private keys 266 - + Witness data for blocks after height %d requires validation. Please restart with -reindex. 267 - + You need to rebuild the database using -reindex to go back to unpruned mode. This will redownload the entire blockchain 268 - + %s is set very high! 13 - + -maxmempool must be at least %d MB - 17 + 16 - + Cannot obtain a lock on directory %s. %s is probably already running. - 25 + 23 - + Cannot resolve -%s address: '%s' - 27 + 25 - + Cannot set -forcednsseed to true when setting -dnsseed to false. - 28 + 26 - + Cannot set -peerblockfilters without -blockfilterindex. - 29 + 27 - + %s is set very high! Fees this large could be paid on a single transaction. 14 - + Cannot provide specific connections and have addrman find outgoing connections at the same time. - 26 + 24 - + Error loading %s: External signer wallet being loaded without external signer support compiled 51 - + Error reading %s! All keys read correctly, but transaction data or address metadata may be missing or incorrect. 60 - + Error: Address book data in wallet cannot be identified to belong to migrated wallets 66 - + Error: Duplicate descriptors created during migration. Your wallet may be corrupted. 77 - + Error: Transaction %s in wallet cannot be identified to belong to migrated wallets 88 - + Failed to remove snapshot chainstate dir (%s). Manually remove it before restarting. 110 - + Failed to rename invalid peers.dat file. Please move or delete it and try again. 111 - + Fee estimation failed. 
Fallbackfee is disabled. Wait a few blocks or enable %s. 118 - + Flushing block file to disk failed. This is likely the result of an I/O error. 121 - + Flushing undo file to disk failed. This is likely the result of an I/O error. 122 - + Incompatible options: -dnsseed=1 was explicitly specified, but -onlynet forbids connections to IPv4/IPv6 124 - + Invalid amount for %s=<amount>: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions) - 137 + 136 - + Maximum transaction weight is less than transaction weight without inputs + 147 + + + Maximum transaction weight is too low, can not accommodate change output 148 - Maximum transaction weight is too low, can not accommodate change output - 149 + Option '-checkpoints' is set but checkpoints were removed. This option has no effect. + 161 - Option '-checkpoints' is set but checkpoints were removed. This option has no effect. - 162 + Outbound connections restricted to CJDNS (-onlynet=cjdns) but -cjdnsreachable is not provided + 164 - Outbound connections restricted to CJDNS (-onlynet=cjdns) but -cjdnsreachable is not provided + Outbound connections restricted to Tor (-onlynet=onion) but the proxy for reaching the Tor network is explicitly forbidden: -onion=0 165 - Outbound connections restricted to Tor (-onlynet=onion) but the proxy for reaching the Tor network is explicitly forbidden: -onion=0 + Outbound connections restricted to Tor (-onlynet=onion) but the proxy for reaching the Tor network is not provided: none of -proxy, -onion or -listenonion is given 166 - Outbound connections restricted to Tor (-onlynet=onion) but the proxy for reaching the Tor network is not provided: none of -proxy, -onion or -listenonion is given + Outbound connections restricted to i2p (-onlynet=i2p) but -i2psam is not provided 167 - Outbound connections restricted to i2p (-onlynet=i2p) but -i2psam is not provided - 168 - - Prune: last wallet synchronisation goes beyond pruned data. 
You need to -reindex (download the whole blockchain again in case of a pruned node) 177 - + Rename of '%s' -> '%s' failed. Cannot clean up the background chainstate leveldb directory. 180 - + Specified -blockmaxweight (%d) exceeds consensus maximum block weight (%d) 196 - + Specified -blockreservedweight (%d) exceeds consensus maximum block weight (%d) 197 - + Specified -blockreservedweight (%d) is lower than minimum safety value of (%d) 198 - + The combination of the pre-selected inputs and the wallet automatic inputs selection exceeds the transaction maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs 211 - + The inputs size exceeds the maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs 212 - + The preselected coins total amount does not cover the transaction target. Please allow other inputs to be automatically selected or include more coins manually 213 - + UTXO snapshot failed to validate. Restart to resume normal initial block download, or try loading a different snapshot. 237 - + Unconfirmed UTXOs are available, but spending them creates a chain of transactions that will be rejected by the mempool 246 - + Unexpected legacy entry in descriptor wallet found. Loading wallet %s The wallet might have been tampered with or created with malicious intent. 247 - + Your computer's date and time appear to be more than %d minutes out of sync with the network, this may lead to consensus failure. After you've confirmed your computer's clock, this message should no longer appear when you restart your node. Without a restart, it should stop showing automatically after you've connected to a sufficient number of new outbound peers, which may take some time. You can inspect the `timeoffset` field of the `getpeerinfo` and `getnetworkinfo` RPC methods to get more info. 269 - + Unable to cleanup failed migration 270 - + Unable to restore backup of wallet. 
271 - + default wallet 273 - + whitebind may only be used for incoming connections ("out" was passed) 274 - - -paytxfee is deprecated and will be fully removed in v31.0. + + A fatal internal error occurred, see debug.log for details: 18 + + Assumeutxo data not found for the given blockhash '%s'. + 19 + - A fatal internal error occurred, see debug.log for details: + Block verification was interrupted 20 - Assumeutxo data not found for the given blockhash '%s'. + Can't spend unconfirmed version %d pre-selected input with a version 3 tx 21 - Block verification was interrupted + Can't spend unconfirmed version 3 pre-selected input with a version %d tx 22 - Can't spend unconfirmed version %d pre-selected input with a version 3 tx - 23 + Cannot write to directory '%s'; check permissions. + 28 - Can't spend unconfirmed version 3 pre-selected input with a version %d tx - 24 + Config setting for %s only applied on %s network when in [%s] section. + 29 - Cannot write to directory '%s'; check permissions. + Copyright (C) %i-%i 30 - Config setting for %s only applied on %s network when in [%s] section. + Corrupt block found indicating potential hardware failure. 31 - Copyright (C) %i-%i + Corrupted block database detected 32 - Corrupt block found indicating potential hardware failure. + Could not find asmap file %s 33 - Corrupted block database detected + Could not generate scriptPubKeys (cache is empty) 34 - Could not find asmap file %s + Could not parse asmap file %s 35 - Could not generate scriptPubKeys (cache is empty) + Could not read embedded asmap data 36 - Could not parse asmap file %s + Could not top up scriptPubKeys 37 - Could not top up scriptPubKeys - 38 + Disk space is too low! + 40 - Disk space is too low! - 41 + Done loading + 43 - Done loading + Dump file %s does not exist. 44 - Dump file %s does not exist. - 45 + Elliptic curve cryptography sanity check failure. %s is shutting down. + 46 - Elliptic curve cryptography sanity check failure. %s is shutting down. 
+ Embedded asmap data not available 47 @@ -5184,370 +5194,366 @@ Unable to restore backup of wallet. 134 - Invalid amount for %s=<amount>: '%s' (must be at least %s) - 136 - - Invalid amount for %s=<amount>: '%s' 135 - - -asmap requires a file path. Use -asmap=<file>. - 16 - - + A %zu MiB dbcache may be too large for a system memory of only %zu MiB. - 19 + 17 - + Creating wallet… - 39 + 38 - + Duplicate binding configuration for address %s. Please check your -bind, -bind=...=onion and -whitebind settings. - 46 + 45 - + Error creating %s: Could not write version metadata. 48 - + Invalid amount for -%s=<amount>: '%s' - 138 + 137 - + Invalid netmask specified in -whitelist: '%s' - 139 + 138 - + Invalid port specified in %s: '%s' - 141 + 140 - + Listening for incoming connections failed (listen returned error %s) - 143 + 142 - + Loading P2P addresses… - 144 + 143 - + Loading banlist… - 145 + 144 - + Loading block index… - 146 + 145 - + Loading wallet… - 147 + 146 - + Maximum transaction weight must be between %d and %d - 150 + 149 - + Missing amount - 151 + 150 - + Missing solving data for estimating transaction size - 152 + 151 - + Need to specify a port with -whitebind: '%s' - 154 + 153 - + No addresses available - 155 + 154 - + Not found pre-selected input %s - 159 + 158 - + Not solvable pre-selected input %s - 160 + 159 - + Only direction was set, no permissions: '%s' - 161 + 160 - + Option '-limitancestorsize' is given but ancestor size limits have been replaced with cluster size limits (see -limitclustersize). This option has no effect. - 163 + 162 - + Option '-limitdescendantsize' is given but descendant size limits have been replaced with cluster size limits (see -limitclustersize). This option has no effect. - 164 + 163 - + Private broadcast of own transactions requested (-privatebroadcast) and -proxyrandomize is disabled. Tor circuits for private broadcast connections may be correlated to other connections over Tor. For maximum privacy set -proxyrandomize=1. 
- 170 + 169 - + Private broadcast of own transactions requested (-privatebroadcast), but -connect is also configured. They are incompatible because the private broadcast needs to open new connections to randomly chosen Tor or I2P peers. Consider using -maxconnections=0 -addnode=... instead - 171 + 170 - + Private broadcast of own transactions requested (-privatebroadcast), but none of Tor or I2P networks is reachable - 172 + 171 - + Prune cannot be configured with a negative value. - 173 + 172 - + Prune mode is incompatible with -txindex. + 175 + + + Prune mode is incompatible with -txospenderindex. 176 - + Pruning blockstore… 178 - + Reducing -maxconnections from %d to %d, because of system limitations. 179 - + Replaying blocks… 182 - + Rescanning… 183 - + SQLiteDatabase: Failed to execute statement to verify database: %s 184 - + SQLiteDatabase: Failed to prepare statement to verify database: %s 185 - + SQLiteDatabase: Failed to read database verification error: %s 186 - + SQLiteDatabase: Unexpected application id. Expected %u, got %u 187 - + Section [%s] is not recognized. 189 - + Signer did not echo address 192 - + Signer echoed unexpected address %s 193 - + Signer returned error: %s 194 - + Signing transaction failed 195 - + Specified -walletdir "%s" does not exist 199 - + Specified -walletdir "%s" is a relative path 200 - + Specified -walletdir "%s" is not a directory 201 - + Specified blocks directory "%s" does not exist. 202 - + Specified data directory "%s" does not exist. 203 - + Starting network threads… 204 - + System error while flushing: %s 205 - + System error while loading external block file: %s 206 - + System error while saving block to disk: %s 207 - + The %s path uses exFAT, which is known to have intermittent corruption problems on macOS. Move this directory to a different filesystem to avoid data loss. 209 - + The source code is available from %s. 
214 - + The specified config file %s does not exist 215 + + The total exceeds your balance when the %s transaction fee is included. + 216 + The transaction amount is too small to pay the fee - 216 + 217 The transactions removal process can only be executed within a db txn - 218 + 219 The wallet will avoid paying less than the minimum relay fee. - 219 + 220 There is no ScriptPubKeyManager for this address - 220 + 221 This is experimental software. - 222 + 223 This is the minimum transaction fee you pay on every transaction. - 224 + 225 - This is the transaction fee you will pay if you send a transaction. - 227 - - Transaction %s does not belong to this wallet 229 - + Transaction amount too small 230 - + Transaction amounts must not be negative 231 - + Transaction change output index out of range 232 - + Transaction must have at least one recipient 233 - + Transaction needs a change address, but we can't generate it. 234 - + Transaction too large 236 - + Unable to bind to %s on this computer (bind returned error %s) 238 - + Unable to bind to %s on this computer. %s is probably already running. 239 - + Unable to create the PID file '%s': %s 240 - + Unable to find UTXO for external input 241 - + Unable to open %s for writing 242 - + Unable to parse -maxuploadtarget: '%s' 243 - + Unable to start HTTP server. See debug log for details. 245 - + Unknown -blockfilterindex value %s. 248 - + Unknown address type '%s' 249 - + Unknown change type '%s' 250 - + Unknown network specified in -onlynet: '%s' 251 - + Unknown new rules activated (versionbit %i) 252 - + Unrecognised option "%s" provided in -test=<option>. 253 - + Unsupported global logging level %s=%s. Valid values: %s. 258 - + Wallet file creation failed: %s 263 - + Warning: Found invalid chain more than 6 blocks longer than our best chain. This could be due to database corruption or consensus incompatibility with peers. 265 - + acceptstalefeeestimates is not supported on %s chain. 
272 - + Unsupported logging category %s=%s. 259 - + Error loading %s: Wallet is a legacy wallet. Please migrate to a descriptor wallet using the migration tool (migratewallet RPC). 53 - + Error: Dumpfile specifies an unsupported database format (%s). Only sqlite database dumps are supported 75 - + Failed to calculate bump fees, because unconfirmed UTXOs depend on an enormous cluster of unconfirmed transactions. 104 - + Transaction requires one destination of non-zero value, a non-zero feerate, or a pre-selected input 235 - + Unrecognized descriptor found. Loading wallet %s The wallet might have been created on a newer version. @@ -5555,63 +5561,63 @@ Please try running the latest software version. 254 - + Do you want to rebuild the databases now? - 43 + 42 - + Error: Could not add watchonly tx %s to watchonly wallet 68 - + Error: Could not delete watchonly transactions. 69 - + Error: Wallet does not exist 100 - + Error: cannot remove legacy wallet records 101 - + Failed to start indexes, shutting down… 113 - + Invalid -proxy address or hostname, ends with '=': '%s' 132 - + Not enough file descriptors available. %d available, %d required. - 158 + 157 - + Unrecognized network in -proxy='%s': '%s' 255 - + User Agent comment (%s) contains unsafe characters. 260 - + Verifying blocks… 261 - + Verifying wallet(s)… 262 - + Wallet needed to be rewritten: restart %s to complete 264 - + Settings file could not be read 190 - + Settings file could not be written 191 diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp index 3d876a8f63bb..cfbc350f0912 100644 --- a/src/qt/optionsmodel.cpp +++ b/src/qt/optionsmodel.cpp @@ -88,14 +88,14 @@ static common::SettingsValue PruneSetting(bool prune_enabled, int prune_size_gb) static bool PruneEnabled(const common::SettingsValue& prune_setting) { // -prune=1 setting is manual pruning mode, so disabled for purposes of the gui - return SettingToInt(prune_setting, 0) > 1; + return SettingTo(prune_setting, 0) > 1; } //! 
Get pruning size value to show in GUI from bitcoin -prune setting. If //! pruning is not enabled, just show default recommended pruning size (2GB). static int PruneSizeGB(const common::SettingsValue& prune_setting) { - int value = SettingToInt(prune_setting, 0); + int value = SettingTo(prune_setting, 0); return value > 1 ? PruneMiBtoGB(value) : DEFAULT_PRUNE_TARGET_GB; } @@ -469,9 +469,9 @@ QVariant OptionsModel::getOption(OptionID option, const std::string& suffix) con suffix.empty() ? getOption(option, "-prev") : DEFAULT_PRUNE_TARGET_GB; case DatabaseCache: - return qlonglong(SettingToInt(setting(), DEFAULT_DB_CACHE >> 20)); + return qlonglong(SettingTo(setting(), DEFAULT_DB_CACHE >> 20)); case ThreadsScriptVerif: - return qlonglong(SettingToInt(setting(), DEFAULT_SCRIPTCHECK_THREADS)); + return qlonglong(SettingTo(setting(), DEFAULT_SCRIPTCHECK_THREADS)); case Listen: return SettingToBool(setting(), DEFAULT_LISTEN); case Server: diff --git a/src/qt/psbtoperationsdialog.cpp b/src/qt/psbtoperationsdialog.cpp index 7661e59fa7af..f5cb077386eb 100644 --- a/src/qt/psbtoperationsdialog.cpp +++ b/src/qt/psbtoperationsdialog.cpp @@ -98,7 +98,7 @@ void PSBTOperationsDialog::signTransaction() } else if (!complete && n_signed < 1) { showStatus(tr("Could not sign any more inputs."), StatusLevel::WARN); } else if (!complete) { - showStatus(tr("Signed %1 inputs, but more signatures are still required.").arg(n_signed), + showStatus(tr("Signed %n input(s), but more signatures are still required.", "", n_signed), StatusLevel::INFO); } else { showStatus(tr("Signed transaction successfully. Transaction is ready to broadcast."), @@ -219,7 +219,7 @@ QString PSBTOperationsDialog::renderTransaction(const PartiallySignedTransaction size_t num_unsigned = CountPSBTUnsignedInputs(psbtx); if (num_unsigned > 0) { tx_description.append("

"); - tx_description.append(tr("Transaction has %1 unsigned inputs.").arg(QString::number(num_unsigned))); + tx_description.append(tr("Transaction has %n unsigned input(s).", "", num_unsigned)); } return tx_description; diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index 90e46c798158..d4a63987f47b 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -40,7 +40,6 @@ using common::PSBTError; using wallet::CCoinControl; -using wallet::DEFAULT_PAY_TX_FEE; static constexpr std::array confTargets{2, 4, 6, 12, 24, 48, 144, 504, 1008}; int getConfTargetForIndex(int index) { @@ -125,8 +124,6 @@ SendCoinsDialog::SendCoinsDialog(const PlatformStyle *_platformStyle, QWidget *p settings.setValue("nFeeRadio", 0); // recommended if (!settings.contains("nSmartFeeSliderPosition")) settings.setValue("nSmartFeeSliderPosition", 0); - if (!settings.contains("nTransactionFee")) - settings.setValue("nTransactionFee", (qint64)DEFAULT_PAY_TX_FEE); ui->groupFee->setId(ui->radioSmartFee, 0); ui->groupFee->setId(ui->radioCustomFee, 1); ui->groupFee->button((int)std::max(0, std::min(1, settings.value("nFeeRadio").toInt())))->setChecked(true); diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 262b6bab0e31..475c5ca41c90 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -181,6 +181,24 @@ UniValue blockheaderToJSON(const CBlockIndex& tip, const CBlockIndex& blockindex return result; } +/** Serialize coinbase transaction metadata */ +UniValue coinbaseTxToJSON(const CTransaction& coinbase_tx) +{ + CHECK_NONFATAL(!coinbase_tx.vin.empty()); + const CTxIn& vin_0{coinbase_tx.vin[0]}; + UniValue coinbase_tx_obj(UniValue::VOBJ); + coinbase_tx_obj.pushKV("version", coinbase_tx.version); + coinbase_tx_obj.pushKV("locktime", coinbase_tx.nLockTime); + coinbase_tx_obj.pushKV("sequence", vin_0.nSequence); + coinbase_tx_obj.pushKV("coinbase", HexStr(vin_0.scriptSig)); + const auto& witness_stack{vin_0.scriptWitness.stack}; + if 
(!witness_stack.empty()) { + CHECK_NONFATAL(witness_stack.size() == 1); + coinbase_tx_obj.pushKV("witness", HexStr(witness_stack[0])); + } + return coinbase_tx_obj; +} + UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIndex& tip, const CBlockIndex& blockindex, TxVerbosity verbosity, const uint256 pow_limit) { UniValue result = blockheaderToJSON(tip, blockindex, pow_limit); @@ -188,6 +206,10 @@ UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIn result.pushKV("strippedsize", ::GetSerializeSize(TX_NO_WITNESS(block))); result.pushKV("size", ::GetSerializeSize(TX_WITH_WITNESS(block))); result.pushKV("weight", ::GetBlockWeight(block)); + + CHECK_NONFATAL(!block.vtx.empty()); + result.pushKV("coinbase_tx", coinbaseTxToJSON(*block.vtx[0])); + UniValue txs(UniValue::VARR); txs.reserve(block.vtx.size()); @@ -760,6 +782,14 @@ static RPCHelpMan getblock() {RPCResult::Type::NUM, "size", "The block size"}, {RPCResult::Type::NUM, "strippedsize", "The block size excluding witness data"}, {RPCResult::Type::NUM, "weight", "The block weight as defined in BIP 141"}, + {RPCResult::Type::OBJ, "coinbase_tx", "Coinbase transaction metadata", + { + {RPCResult::Type::NUM, "version", "The coinbase transaction version"}, + {RPCResult::Type::NUM, "locktime", "The coinbase transaction's locktime (nLockTime)"}, + {RPCResult::Type::NUM, "sequence", "The coinbase input's sequence number (nSequence)"}, + {RPCResult::Type::STR_HEX, "coinbase", "The coinbase input's script"}, + {RPCResult::Type::STR_HEX, "witness", /*optional=*/true, "The coinbase input's first (and only) witness stack element, if present"}, + }}, {RPCResult::Type::NUM, "height", "The block height or index"}, {RPCResult::Type::NUM, "version", "The block version"}, {RPCResult::Type::STR_HEX, "versionHex", "The block version formatted in hexadecimal"}, @@ -863,7 +893,7 @@ std::optional GetPruneHeight(const BlockManager& blockman, const CChain& ch if (!first_block || 
!chain_tip) return std::nullopt; // If the chain tip is pruned, everything is pruned. - if (!((chain_tip->nStatus & BLOCK_HAVE_MASK) == BLOCK_HAVE_MASK)) return chain_tip->nHeight; + if ((chain_tip->nStatus & BLOCK_HAVE_MASK) != BLOCK_HAVE_MASK) return chain_tip->nHeight; const auto& first_unpruned{blockman.GetFirstBlock(*chain_tip, /*status_mask=*/BLOCK_HAVE_MASK, first_block)}; if (&first_unpruned == first_block) { @@ -1353,7 +1383,7 @@ RPCHelpMan getblockchaininfo() {RPCResult::Type::STR_HEX, "chainwork", "total amount of work in active chain, in hexadecimal"}, {RPCResult::Type::NUM, "size_on_disk", "the estimated size of the block and undo files on disk"}, {RPCResult::Type::BOOL, "pruned", "if the blocks are subject to pruning"}, - {RPCResult::Type::NUM, "pruneheight", /*optional=*/true, "height of the last block pruned, plus one (only present if pruning is enabled)"}, + {RPCResult::Type::NUM, "pruneheight", /*optional=*/true, "the first block unpruned, all previous blocks were pruned (only present if pruning is enabled)"}, {RPCResult::Type::BOOL, "automatic_pruning", /*optional=*/true, "whether automatic pruning is enabled (only present if pruning is enabled)"}, {RPCResult::Type::NUM, "prune_target_size", /*optional=*/true, "the target size used by pruning (only present if automatic pruning is enabled)"}, {RPCResult::Type::STR_HEX, "signet_challenge", /*optional=*/true, "the block challenge (aka. block script), in hexadecimal (only present if the current network is a signet)"}, @@ -1595,7 +1625,7 @@ static RPCHelpMan getchaintips() if (active_chain.Contains(block)) { // This block is part of the currently active chain. status = "active"; - } else if (block->nStatus & BLOCK_FAILED_MASK) { + } else if (block->nStatus & BLOCK_FAILED_VALID) { // This block or one of its ancestors is invalid. 
status = "invalid"; } else if (!block->HaveNumChainTxs()) { diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h index d14a43b22d06..efb06ac2d29b 100644 --- a/src/rpc/blockchain.h +++ b/src/rpc/blockchain.h @@ -9,15 +9,18 @@ #include #include #include +#include #include #include #include #include +#include #include class CBlock; class CBlockIndex; +class CChain; class Chainstate; class UniValue; namespace node { diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 77e5ec080522..8422af1541bf 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -79,7 +79,6 @@ static const CRPCConvertParam vRPCConvertParams[] = { "sendtoaddress", 8, "avoid_reuse" }, { "sendtoaddress", 9, "fee_rate"}, { "sendtoaddress", 10, "verbose"}, - { "settxfee", 0, "amount" }, { "getreceivedbyaddress", 1, "minconf" }, { "getreceivedbyaddress", 2, "include_immature_coinbase" }, { "getreceivedbylabel", 0, "label", ParamFormat::STRING }, @@ -313,6 +312,9 @@ static const CRPCConvertParam vRPCConvertParams[] = { "getmempoolancestors", 1, "verbose" }, { "getmempooldescendants", 1, "verbose" }, { "gettxspendingprevout", 0, "outputs" }, + { "gettxspendingprevout", 1, "options" }, + { "gettxspendingprevout", 1, "mempool_only" }, + { "gettxspendingprevout", 1, "return_spending_tx" }, { "bumpfee", 1, "options" }, { "bumpfee", 1, "conf_target"}, { "bumpfee", 1, "fee_rate"}, diff --git a/src/rpc/fees.cpp b/src/rpc/fees.cpp index 174217fc0c1a..9bad6f4a5977 100644 --- a/src/rpc/fees.cpp +++ b/src/rpc/fees.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 66ce1c615822..34abc3c08e42 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -30,6 +31,7 @@ #include #include +#include #include #include @@ -137,6 +139,126 @@ static RPCHelpMan sendrawtransaction() }; } +static RPCHelpMan getprivatebroadcastinfo() +{ + return 
RPCHelpMan{ + "getprivatebroadcastinfo", + "Returns information about transactions that are currently being privately broadcast.\n", + {}, + RPCResult{ + RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::ARR, "transactions", "", + { + {RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"}, + {RPCResult::Type::STR_HEX, "wtxid", "The transaction witness hash in hex"}, + {RPCResult::Type::STR_HEX, "hex", "The serialized, hex-encoded transaction data"}, + {RPCResult::Type::ARR, "peers", "Per-peer send and acknowledgment information for this transaction", + { + {RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::STR, "address", "The address of the peer to which the transaction was sent"}, + {RPCResult::Type::NUM_TIME, "sent", "The time this transaction was picked for sending to this peer via private broadcast (seconds since epoch)"}, + {RPCResult::Type::NUM_TIME, "received", /*optional=*/true, "The time this peer acknowledged reception of the transaction (seconds since epoch)"}, + }}, + }}, + }}, + }}, + }}, + RPCExamples{ + HelpExampleCli("getprivatebroadcastinfo", "") + + HelpExampleRpc("getprivatebroadcastinfo", "") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue + { + const NodeContext& node{EnsureAnyNodeContext(request.context)}; + const PeerManager& peerman{EnsurePeerman(node)}; + const auto txs{peerman.GetPrivateBroadcastInfo()}; + + UniValue transactions(UniValue::VARR); + for (const auto& tx_info : txs) { + UniValue o(UniValue::VOBJ); + o.pushKV("txid", tx_info.tx->GetHash().ToString()); + o.pushKV("wtxid", tx_info.tx->GetWitnessHash().ToString()); + o.pushKV("hex", EncodeHexTx(*tx_info.tx)); + UniValue peers(UniValue::VARR); + for (const auto& peer : tx_info.peers) { + UniValue p(UniValue::VOBJ); + p.pushKV("address", peer.address.ToStringAddrPort()); + p.pushKV("sent", TicksSinceEpoch(peer.sent)); + if (peer.received.has_value()) { + p.pushKV("received", 
TicksSinceEpoch(*peer.received)); + } + peers.push_back(std::move(p)); + } + o.pushKV("peers", std::move(peers)); + transactions.push_back(std::move(o)); + } + + UniValue ret(UniValue::VOBJ); + ret.pushKV("transactions", std::move(transactions)); + return ret; + }, + }; +} + +static RPCHelpMan abortprivatebroadcast() +{ + return RPCHelpMan{ + "abortprivatebroadcast", + "Abort private broadcast attempts for a transaction currently being privately broadcast.\n" + "The transaction will be removed from the private broadcast queue.\n", + { + {"id", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "A transaction identifier to abort. It will be matched against both txid and wtxid for all transactions in the private broadcast queue.\n" + "If the provided id matches a txid that corresponds to multiple transactions with different wtxids, multiple transactions will be removed and returned."}, + }, + RPCResult{ + RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::ARR, "removed_transactions", "Transactions removed from the private broadcast queue", + { + {RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"}, + {RPCResult::Type::STR_HEX, "wtxid", "The transaction witness hash in hex"}, + {RPCResult::Type::STR_HEX, "hex", "The serialized, hex-encoded transaction data"}, + }}, + }}, + } + }, + RPCExamples{ + HelpExampleCli("abortprivatebroadcast", "\"id\"") + + HelpExampleRpc("abortprivatebroadcast", "\"id\"") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue + { + const uint256 id{ParseHashV(self.Arg("id"), "id")}; + + const NodeContext& node{EnsureAnyNodeContext(request.context)}; + PeerManager& peerman{EnsurePeerman(node)}; + + const auto removed_txs{peerman.AbortPrivateBroadcast(id)}; + if (removed_txs.empty()) { + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in private broadcast queue. 
Check getprivatebroadcastinfo."); + } + + UniValue removed_transactions(UniValue::VARR); + for (const auto& tx : removed_txs) { + UniValue o(UniValue::VOBJ); + o.pushKV("txid", tx->GetHash().ToString()); + o.pushKV("wtxid", tx->GetWitnessHash().ToString()); + o.pushKV("hex", EncodeHexTx(*tx)); + removed_transactions.push_back(std::move(o)); + } + UniValue ret(UniValue::VOBJ); + ret.pushKV("removed_transactions", std::move(removed_transactions)); + return ret; + }, + }; +} + static RPCHelpMan testmempoolaccept() { return RPCHelpMan{ @@ -775,7 +897,7 @@ static RPCHelpMan getmempoolentry() static RPCHelpMan gettxspendingprevout() { return RPCHelpMan{"gettxspendingprevout", - "Scans the mempool to find transactions spending any of the given outputs", + "Scans the mempool (and the txospenderindex, if available) to find transactions spending any of the given outputs", { {"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The transaction outputs that we want to check, and within each, the txid (string) vout (numeric).", { @@ -787,6 +909,12 @@ static RPCHelpMan gettxspendingprevout() }, }, }, + {"options", RPCArg::Type::OBJ_NAMED_PARAMS, RPCArg::Optional::OMITTED, "", + { + {"mempool_only", RPCArg::Type::BOOL, RPCArg::DefaultHint{"true if txospenderindex unavailable, otherwise false"}, "If false and mempool lacks a relevant spend, use txospenderindex (throws an exception if not available)."}, + {"return_spending_tx", RPCArg::Type::BOOL, RPCArg::DefaultHint{"false"}, "If true, return the full spending tx."}, + }, + }, }, RPCResult{ RPCResult::Type::ARR, "", "", @@ -796,12 +924,15 @@ static RPCHelpMan gettxspendingprevout() {RPCResult::Type::STR_HEX, "txid", "the transaction id of the checked output"}, {RPCResult::Type::NUM, "vout", "the vout value of the checked output"}, {RPCResult::Type::STR_HEX, "spendingtxid", /*optional=*/true, "the transaction id of the mempool transaction spending this output (omitted if unspent)"}, + {RPCResult::Type::STR_HEX, "spendingtx", 
/*optional=*/true, "the transaction spending this output (only if return_spending_tx is set, omitted if unspent)"}, + {RPCResult::Type::STR_HEX, "blockhash", /*optional=*/true, "the hash of the spending block (omitted if unspent or the spending tx is not confirmed)"}, }}, } }, RPCExamples{ HelpExampleCli("gettxspendingprevout", "\"[{\\\"txid\\\":\\\"a08e6907dbbd3d809776dbfc5d82e371b764ed838b5655e72f463568df1aadf0\\\",\\\"vout\\\":3}]\"") + HelpExampleRpc("gettxspendingprevout", "\"[{\\\"txid\\\":\\\"a08e6907dbbd3d809776dbfc5d82e371b764ed838b5655e72f463568df1aadf0\\\",\\\"vout\\\":3}]\"") + + HelpExampleCliNamed("gettxspendingprevout", {{"outputs", "[{\"txid\":\"a08e6907dbbd3d809776dbfc5d82e371b764ed838b5655e72f463568df1aadf0\",\"vout\":3}]"}, {"return_spending_tx", true}}) }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { @@ -809,8 +940,22 @@ static RPCHelpMan gettxspendingprevout() if (output_params.empty()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, outputs are missing"); } - - std::vector prevouts; + const UniValue options{request.params[1].isNull() ? UniValue::VOBJ : request.params[1]};\ + RPCTypeCheckObj(options, + { + {"mempool_only", UniValueType(UniValue::VBOOL)}, + {"return_spending_tx", UniValueType(UniValue::VBOOL)}, + }, /*fAllowNull=*/true, /*fStrict=*/true); + + const bool mempool_only{options.exists("mempool_only") ? options["mempool_only"].get_bool() : !g_txospenderindex}; + const bool return_spending_tx{options.exists("return_spending_tx") ? 
options["return_spending_tx"].get_bool() : false}; + + struct Entry { + const COutPoint prevout; + const UniValue& input; + UniValue output; + }; + std::vector prevouts; prevouts.reserve(output_params.size()); for (unsigned int idx = 0; idx < output_params.size(); idx++) { @@ -827,25 +972,56 @@ static RPCHelpMan gettxspendingprevout() if (nOutput < 0) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, vout cannot be negative"); } - - prevouts.emplace_back(txid, nOutput); + prevouts.emplace_back(COutPoint{txid, uint32_t(nOutput)}, o, UniValue{}); } - const CTxMemPool& mempool = EnsureAnyMemPool(request.context); - LOCK(mempool.cs); - + // search the mempool first + bool missing_from_mempool{false}; + { + const CTxMemPool& mempool = EnsureAnyMemPool(request.context); + LOCK(mempool.cs); + for (auto& entry : prevouts) { + const CTransaction* spendingTx = mempool.GetConflictTx(entry.prevout); + if (spendingTx != nullptr) { + UniValue o{entry.input}; + o.pushKV("spendingtxid", spendingTx->GetHash().ToString()); + if (return_spending_tx) { + o.pushKV("spendingtx", EncodeHexTx(*spendingTx)); + } + entry.output = std::move(o); + } else { + missing_from_mempool = true; + } + } + } + // if search is not limited to the mempool and no spender was found for an outpoint, search the txospenderindex + // we call g_txospenderindex->BlockUntilSyncedToCurrentChain() only if g_txospenderindex is going to be used UniValue result{UniValue::VARR}; - - for (const COutPoint& prevout : prevouts) { - UniValue o(UniValue::VOBJ); - o.pushKV("txid", prevout.hash.ToString()); - o.pushKV("vout", prevout.n); - - const CTransaction* spendingTx = mempool.GetConflictTx(prevout); - if (spendingTx != nullptr) { - o.pushKV("spendingtxid", spendingTx->GetHash().ToString()); + bool txospenderindex_ready{mempool_only || !missing_from_mempool || (g_txospenderindex && g_txospenderindex->BlockUntilSyncedToCurrentChain())}; + for (auto& entry : prevouts) { + if (!entry.output.isNull()) { + 
result.push_back(std::move(entry.output)); + continue; + } + UniValue o{entry.input}; + if (mempool_only) { + // do nothing, caller has selected to only query the mempool + } else if (!txospenderindex_ready) { + throw JSONRPCError(RPC_MISC_ERROR, strprintf("No spending tx for the outpoint %s:%d in mempool, and txospenderindex is unavailable.", entry.prevout.hash.GetHex(), entry.prevout.n)); + } else { + // no spending tx in mempool, query txospender index + const auto spender{g_txospenderindex->FindSpender(entry.prevout)}; + if (!spender) { + throw JSONRPCError(RPC_MISC_ERROR, spender.error()); + } + if (spender.value()) { + o.pushKV("spendingtxid", spender.value()->tx->GetHash().GetHex()); + o.pushKV("blockhash", spender.value()->block_hash.GetHex()); + if (return_spending_tx) { + o.pushKV("spendingtx", EncodeHexTx(*spender.value()->tx)); + } + } } - result.push_back(std::move(o)); } @@ -874,6 +1050,7 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool) ret.pushKV("maxdatacarriersize", pool.m_opts.max_datacarrier_bytes.value_or(0)); ret.pushKV("limitclustercount", pool.m_opts.limits.cluster_count); ret.pushKV("limitclustersize", pool.m_opts.limits.cluster_size_vbytes); + ret.pushKV("optimal", pool.m_txgraph->DoWork(0)); // 0 work is a quick check for known optimality return ret; } @@ -900,6 +1077,7 @@ static RPCHelpMan getmempoolinfo() {RPCResult::Type::NUM, "maxdatacarriersize", "Maximum number of bytes that can be used by OP_RETURN outputs in the mempool"}, {RPCResult::Type::NUM, "limitclustercount", "Maximum number of transactions that can be in a cluster (configured by -limitclustercount)"}, {RPCResult::Type::NUM, "limitclustersize", "Maximum size of a cluster in virtual bytes (configured by -limitclustersize)"}, + {RPCResult::Type::BOOL, "optimal", "If the mempool is in a known-optimal transaction ordering"}, }}, RPCExamples{ HelpExampleCli("getmempoolinfo", "") @@ -1329,6 +1507,8 @@ void RegisterMempoolRPCCommands(CRPCTable& t) { static const CRPCCommand 
commands[]{ {"rawtransactions", &sendrawtransaction}, + {"rawtransactions", &getprivatebroadcastinfo}, + {"rawtransactions", &abortprivatebroadcast}, {"rawtransactions", &testmempoolaccept}, {"blockchain", &getmempoolancestors}, {"blockchain", &getmempooldescendants}, diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index ee80b903248d..043e11ff63ad 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -165,7 +165,7 @@ static UniValue generateBlocks(ChainstateManager& chainman, Mining& miner, const { UniValue blockHashes(UniValue::VARR); while (nGenerate > 0 && !chainman.m_interrupt) { - std::unique_ptr block_template(miner.createNewBlock({ .coinbase_output_script = coinbase_output_script, .include_dummy_extranonce = true })); + std::unique_ptr block_template(miner.createNewBlock({ .coinbase_output_script = coinbase_output_script, .include_dummy_extranonce = true }, /*cooldown=*/false)); CHECK_NONFATAL(block_template); std::shared_ptr block_out; @@ -376,7 +376,7 @@ static RPCHelpMan generateblock() { LOCK(chainman.GetMutex()); { - std::unique_ptr block_template{miner.createNewBlock({.use_mempool = false, .coinbase_output_script = coinbase_output_script, .include_dummy_extranonce = true})}; + std::unique_ptr block_template{miner.createNewBlock({.use_mempool = false, .coinbase_output_script = coinbase_output_script, .include_dummy_extranonce = true}, /*cooldown=*/false)}; CHECK_NONFATAL(block_template); block = block_template->getBlock(); @@ -742,7 +742,7 @@ static RPCHelpMan getblocktemplate() if (pindex) { if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) return "duplicate"; - if (pindex->nStatus & BLOCK_FAILED_MASK) + if (pindex->nStatus & BLOCK_FAILED_VALID) return "duplicate-invalid"; return "duplicate-inconclusive"; } @@ -870,8 +870,11 @@ static RPCHelpMan getblocktemplate() CBlockIndex* pindexPrevNew = chainman.m_blockman.LookupBlockIndex(tip); time_start = GetTime(); - // Create new block - block_template = miner.createNewBlock({.include_dummy_extranonce = 
true}); + // Create new block. Opt-out of cooldown mechanism, because it would add + // a delay to each getblocktemplate call. This differs from typical + // long-lived IPC usage, where the overhead is paid only when creating + // the initial template. + block_template = miner.createNewBlock({.include_dummy_extranonce = true}, /*cooldown=*/false); CHECK_NONFATAL(block_template); @@ -1015,8 +1018,9 @@ static RPCHelpMan getblocktemplate() result.pushKV("signet_challenge", HexStr(consensusParams.signet_challenge)); } - if (!block_template->getCoinbaseCommitment().empty()) { - result.pushKV("default_witness_commitment", HexStr(block_template->getCoinbaseCommitment())); + if (auto coinbase{block_template->getCoinbaseTx()}; coinbase.required_outputs.size() > 0) { + CHECK_NONFATAL(coinbase.required_outputs.size() == 1); // Only one output is currently expected + result.pushKV("default_witness_commitment", HexStr(coinbase.required_outputs[0].scriptPubKey)); } return result; diff --git a/src/rpc/node.cpp b/src/rpc/node.cpp index 93d860d4dd02..2379ae07e06b 100644 --- a/src/rpc/node.cpp +++ b/src/rpc/node.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -397,6 +398,10 @@ static RPCHelpMan getindexinfo() result.pushKVs(SummaryToJSON(g_coin_stats_index->GetSummary(), index_name)); } + if (g_txospenderindex) { + result.pushKVs(SummaryToJSON(g_txospenderindex->GetSummary(), index_name)); + } + ForEachBlockFilterIndex([&result, &index_name](const BlockFilterIndex& index) { result.pushKVs(SummaryToJSON(index.GetSummary(), index_name)); }); diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 8558c8e29d74..5bbf7517a901 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -161,12 +161,11 @@ typedef std::vector KeyPath; /** Interface for public key objects in descriptors. */ struct PubkeyProvider { -protected: +public: //! Index of this key expression in the descriptor //! E.g. 
If this PubkeyProvider is key1 in multi(2, key1, key2, key3), then m_expr_index = 0 - uint32_t m_expr_index; + const uint32_t m_expr_index; -public: explicit PubkeyProvider(uint32_t exp_index) : m_expr_index(exp_index) {} virtual ~PubkeyProvider() = default; @@ -229,6 +228,9 @@ struct PubkeyProvider /** Whether this PubkeyProvider is a BIP 32 extended key that can be derived from */ virtual bool IsBIP32() const = 0; + + /** Get the count of keys known by this PubkeyProvider. Usually one, but may be more for key aggregation schemes */ + virtual size_t GetKeyCount() const { return 1; } }; class OriginPubkeyProvider final : public PubkeyProvider @@ -366,9 +368,9 @@ class ConstPubkeyProvider final : public PubkeyProvider }; enum class DeriveType { - NO, - UNHARDENED, - HARDENED, + NON_RANGED, + UNHARDENED_RANGED, + HARDENED_RANGED, }; /** An object representing a parsed extended public key in a descriptor. */ @@ -408,7 +410,7 @@ class BIP32PubkeyProvider final : public PubkeyProvider bool IsHardened() const { - if (m_derive == DeriveType::HARDENED) return true; + if (m_derive == DeriveType::HARDENED_RANGED) return true; for (auto entry : m_path) { if (entry >> 31) return true; } @@ -417,7 +419,7 @@ class BIP32PubkeyProvider final : public PubkeyProvider public: BIP32PubkeyProvider(uint32_t exp_index, const CExtPubKey& extkey, KeyPath path, DeriveType derive, bool apostrophe) : PubkeyProvider(exp_index), m_root_extkey(extkey), m_path(std::move(path)), m_derive(derive), m_apostrophe(apostrophe) {} - bool IsRange() const override { return m_derive != DeriveType::NO; } + bool IsRange() const override { return m_derive != DeriveType::NON_RANGED; } size_t GetSize() const override { return 33; } bool IsBIP32() const override { return true; } std::optional GetPubKey(int pos, const SigningProvider& arg, FlatSigningProvider& out, const DescriptorCache* read_cache = nullptr, DescriptorCache* write_cache = nullptr) const override @@ -426,8 +428,8 @@ class BIP32PubkeyProvider final 
: public PubkeyProvider CKeyID keyid = m_root_extkey.pubkey.GetID(); std::copy(keyid.begin(), keyid.begin() + sizeof(info.fingerprint), info.fingerprint); info.path = m_path; - if (m_derive == DeriveType::UNHARDENED) info.path.push_back((uint32_t)pos); - if (m_derive == DeriveType::HARDENED) info.path.push_back(((uint32_t)pos) | 0x80000000L); + if (m_derive == DeriveType::UNHARDENED_RANGED) info.path.push_back((uint32_t)pos); + if (m_derive == DeriveType::HARDENED_RANGED) info.path.push_back(((uint32_t)pos) | 0x80000000L); // Derive keys or fetch them from cache CExtPubKey final_extkey = m_root_extkey; @@ -436,19 +438,19 @@ class BIP32PubkeyProvider final : public PubkeyProvider bool der = true; if (read_cache) { if (!read_cache->GetCachedDerivedExtPubKey(m_expr_index, pos, final_extkey)) { - if (m_derive == DeriveType::HARDENED) return std::nullopt; + if (m_derive == DeriveType::HARDENED_RANGED) return std::nullopt; // Try to get the derivation parent if (!read_cache->GetCachedParentExtPubKey(m_expr_index, parent_extkey)) return std::nullopt; final_extkey = parent_extkey; - if (m_derive == DeriveType::UNHARDENED) der = parent_extkey.Derive(final_extkey, pos); + if (m_derive == DeriveType::UNHARDENED_RANGED) der = parent_extkey.Derive(final_extkey, pos); } } else if (IsHardened()) { CExtKey xprv; CExtKey lh_xprv; if (!GetDerivedExtKey(arg, xprv, lh_xprv)) return std::nullopt; parent_extkey = xprv.Neuter(); - if (m_derive == DeriveType::UNHARDENED) der = xprv.Derive(xprv, pos); - if (m_derive == DeriveType::HARDENED) der = xprv.Derive(xprv, pos | 0x80000000UL); + if (m_derive == DeriveType::UNHARDENED_RANGED) der = xprv.Derive(xprv, pos); + if (m_derive == DeriveType::HARDENED_RANGED) der = xprv.Derive(xprv, pos | 0x80000000UL); final_extkey = xprv.Neuter(); if (lh_xprv.key.IsValid()) { last_hardened_extkey = lh_xprv.Neuter(); @@ -458,8 +460,8 @@ class BIP32PubkeyProvider final : public PubkeyProvider if (!parent_extkey.Derive(parent_extkey, entry)) return 
std::nullopt; } final_extkey = parent_extkey; - if (m_derive == DeriveType::UNHARDENED) der = parent_extkey.Derive(final_extkey, pos); - assert(m_derive != DeriveType::HARDENED); + if (m_derive == DeriveType::UNHARDENED_RANGED) der = parent_extkey.Derive(final_extkey, pos); + assert(m_derive != DeriveType::HARDENED_RANGED); } if (!der) return std::nullopt; @@ -468,7 +470,7 @@ class BIP32PubkeyProvider final : public PubkeyProvider if (write_cache) { // Only cache parent if there is any unhardened derivation - if (m_derive != DeriveType::HARDENED) { + if (m_derive != DeriveType::HARDENED_RANGED) { write_cache->CacheParentExtPubKey(m_expr_index, parent_extkey); // Cache last hardened xpub if we have it if (last_hardened_extkey.pubkey.IsValid()) { @@ -488,7 +490,7 @@ class BIP32PubkeyProvider final : public PubkeyProvider std::string ret = EncodeExtPubKey(m_root_extkey) + FormatHDKeypath(m_path, /*apostrophe=*/use_apostrophe); if (IsRange()) { ret += "/*"; - if (m_derive == DeriveType::HARDENED) ret += use_apostrophe ? '\'' : 'h'; + if (m_derive == DeriveType::HARDENED_RANGED) ret += use_apostrophe ? '\'' : 'h'; } return ret; } @@ -506,13 +508,13 @@ class BIP32PubkeyProvider final : public PubkeyProvider out = EncodeExtKey(key) + FormatHDKeypath(m_path, /*apostrophe=*/m_apostrophe); if (IsRange()) { out += "/*"; - if (m_derive == DeriveType::HARDENED) out += m_apostrophe ? '\'' : 'h'; + if (m_derive == DeriveType::HARDENED_RANGED) out += m_apostrophe ? 
'\'' : 'h'; } return true; } bool ToNormalizedString(const SigningProvider& arg, std::string& out, const DescriptorCache* cache) const override { - if (m_derive == DeriveType::HARDENED) { + if (m_derive == DeriveType::HARDENED_RANGED) { out = ToString(StringType::PUBLIC, /*normalized=*/true); return true; @@ -564,7 +566,7 @@ class BIP32PubkeyProvider final : public PubkeyProvider out = "[" + origin_str + "]" + EncodeExtPubKey(xpub) + FormatHDKeypath(end_path); if (IsRange()) { out += "/*"; - assert(m_derive == DeriveType::UNHARDENED); + assert(m_derive == DeriveType::UNHARDENED_RANGED); } return true; } @@ -573,8 +575,8 @@ class BIP32PubkeyProvider final : public PubkeyProvider CExtKey extkey; CExtKey dummy; if (!GetDerivedExtKey(arg, extkey, dummy)) return; - if (m_derive == DeriveType::UNHARDENED && !extkey.Derive(extkey, pos)) return; - if (m_derive == DeriveType::HARDENED && !extkey.Derive(extkey, pos | 0x80000000UL)) return; + if (m_derive == DeriveType::UNHARDENED_RANGED && !extkey.Derive(extkey, pos)) return; + if (m_derive == DeriveType::HARDENED_RANGED && !extkey.Derive(extkey, pos | 0x80000000UL)) return; out.keys.emplace(extkey.key.GetPubKey().GetID(), extkey.key); } std::optional GetRootPubKey() const override @@ -605,7 +607,7 @@ class MuSigPubkeyProvider final : public PubkeyProvider const DeriveType m_derive; const bool m_ranged_participants; - bool IsRangedDerivation() const { return m_derive != DeriveType::NO; } + bool IsRangedDerivation() const { return m_derive != DeriveType::NON_RANGED; } public: MuSigPubkeyProvider( @@ -623,7 +625,7 @@ class MuSigPubkeyProvider final : public PubkeyProvider if (!Assume(!(m_ranged_participants && IsRangedDerivation()))) { throw std::runtime_error("musig(): Cannot have both ranged participants and ranged derivation"); } - if (!Assume(m_derive != DeriveType::HARDENED)) { + if (!Assume(m_derive != DeriveType::HARDENED_RANGED)) { throw std::runtime_error("musig(): Cannot have hardened derivation"); } } @@ -788,6 
+790,10 @@ class MuSigPubkeyProvider final : public PubkeyProvider // musig() can only be a BIP 32 key if all participants are bip32 too return std::all_of(m_participants.begin(), m_participants.end(), [](const auto& pubkey) { return pubkey->IsBIP32(); }); } + size_t GetKeyCount() const override + { + return 1 + m_participants.size(); + } }; /** Base class for all Descriptor implementations. */ @@ -1041,6 +1047,40 @@ class DescriptorImpl : public Descriptor } return all; } + + uint32_t GetMaxKeyExpr() const final + { + uint32_t max_key_expr{0}; + std::vector todo = {this}; + while (!todo.empty()) { + const DescriptorImpl* desc = todo.back(); + todo.pop_back(); + for (const auto& p : desc->m_pubkey_args) { + max_key_expr = std::max(max_key_expr, p->m_expr_index); + } + for (const auto& s : desc->m_subdescriptor_args) { + todo.push_back(s.get()); + } + } + return max_key_expr; + } + + size_t GetKeyCount() const final + { + size_t count{0}; + std::vector todo = {this}; + while (!todo.empty()) { + const DescriptorImpl* desc = todo.back(); + todo.pop_back(); + for (const auto& p : desc->m_pubkey_args) { + count += p->GetKeyCount(); + } + for (const auto& s : desc->m_subdescriptor_args) { + todo.push_back(s.get()); + } + } + return count; + } }; /** A parsed addr(A) descriptor. 
*/ @@ -1820,20 +1860,20 @@ std::optional ParseKeyPathNum(std::span elem, bool& apostr static DeriveType ParseDeriveType(std::vector>& split, bool& apostrophe) { - DeriveType type = DeriveType::NO; + DeriveType type = DeriveType::NON_RANGED; if (std::ranges::equal(split.back(), std::span{"*"}.first(1))) { split.pop_back(); - type = DeriveType::UNHARDENED; + type = DeriveType::UNHARDENED_RANGED; } else if (std::ranges::equal(split.back(), std::span{"*'"}.first(2)) || std::ranges::equal(split.back(), std::span{"*h"}.first(2))) { apostrophe = std::ranges::equal(split.back(), std::span{"*'"}.first(2)); split.pop_back(); - type = DeriveType::HARDENED; + type = DeriveType::HARDENED_RANGED; } return type; } /** Parse a public key that excludes origin information. */ -std::vector> ParsePubkeyInner(uint32_t key_exp_index, const std::span& sp, ParseScriptContext ctx, FlatSigningProvider& out, bool& apostrophe, std::string& error) +std::vector> ParsePubkeyInner(uint32_t& key_exp_index, const std::span& sp, ParseScriptContext ctx, FlatSigningProvider& out, bool& apostrophe, std::string& error) { std::vector> ret; bool permit_uncompressed = ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH; @@ -1858,6 +1898,7 @@ std::vector> ParsePubkeyInner(uint32_t key_exp_i if (pubkey.IsFullyValid()) { if (permit_uncompressed || pubkey.IsCompressed()) { ret.emplace_back(std::make_unique(key_exp_index, pubkey, false)); + ++key_exp_index; return ret; } else { error = "Uncompressed keys are not allowed"; @@ -1869,6 +1910,7 @@ std::vector> ParsePubkeyInner(uint32_t key_exp_i pubkey.Set(std::begin(fullkey), std::end(fullkey)); if (pubkey.IsFullyValid()) { ret.emplace_back(std::make_unique(key_exp_index, pubkey, true)); + ++key_exp_index; return ret; } } @@ -1881,6 +1923,7 @@ std::vector> ParsePubkeyInner(uint32_t key_exp_i CPubKey pubkey = key.GetPubKey(); out.keys.emplace(pubkey.GetID(), key); ret.emplace_back(std::make_unique(key_exp_index, pubkey, ctx == 
ParseScriptContext::P2TR)); + ++key_exp_index; return ret; } else { error = "Uncompressed keys are not allowed"; @@ -1904,6 +1947,7 @@ std::vector> ParsePubkeyInner(uint32_t key_exp_i for (auto& path : paths) { ret.emplace_back(std::make_unique(key_exp_index, extpubkey, std::move(path), type, apostrophe)); } + ++key_exp_index; return ret; } @@ -1961,7 +2005,6 @@ std::vector> ParsePubkey(uint32_t& key_exp_index max_multipath_len = std::max(max_multipath_len, pk.size()); providers.emplace_back(std::move(pk)); - key_exp_index++; } if (!any_key_parsed) { error = "musig(): Must contain key expressions"; @@ -1969,7 +2012,7 @@ std::vector> ParsePubkey(uint32_t& key_exp_index } // Parse any derivation - DeriveType deriv_type = DeriveType::NO; + DeriveType deriv_type = DeriveType::NON_RANGED; std::vector derivation_multipaths; if (split.size() == 2 && Const("/", split.at(1), /*skip=*/false)) { if (!all_bip32) { @@ -1983,7 +2026,7 @@ std::vector> ParsePubkey(uint32_t& key_exp_index bool dummy = false; auto deriv_split = Split(split.at(1), '/'); deriv_type = ParseDeriveType(deriv_split, dummy); - if (deriv_type == DeriveType::HARDENED) { + if (deriv_type == DeriveType::HARDENED_RANGED) { error = "musig(): Cannot have hardened child derivation"; return {}; } @@ -2053,6 +2096,7 @@ std::vector> ParsePubkey(uint32_t& key_exp_index // No multipath derivation, MuSigPubkeyProvider uses the first (and only) participant pubkey providers, and the first (and only) path emplace_final_provider(0, 0); } + ++key_exp_index; // Increment key expression index for the MuSigPubkeyProvider too return ret; } @@ -2093,7 +2137,7 @@ std::vector> ParsePubkey(uint32_t& key_exp_index if (providers.empty()) return {}; ret.reserve(providers.size()); for (auto& prov : providers) { - ret.emplace_back(std::make_unique(key_exp_index, info, std::move(prov), apostrophe)); + ret.emplace_back(std::make_unique(prov->m_expr_index, info, std::move(prov), apostrophe)); } return ret; } @@ -2143,12 +2187,12 @@ struct 
KeyParser { mutable std::string m_key_parsing_error; //! The script context we're operating within (Tapscript or P2WSH). const miniscript::MiniscriptContext m_script_ctx; - //! The number of keys that were parsed before starting to parse this Miniscript descriptor. - uint32_t m_offset; + //! The current key expression index + uint32_t& m_expr_index; KeyParser(FlatSigningProvider* out LIFETIMEBOUND, const SigningProvider* in LIFETIMEBOUND, - miniscript::MiniscriptContext ctx, uint32_t offset = 0) - : m_out(out), m_in(in), m_script_ctx(ctx), m_offset(offset) {} + miniscript::MiniscriptContext ctx, uint32_t& key_exp_index LIFETIMEBOUND) + : m_out(out), m_in(in), m_script_ctx(ctx), m_expr_index(key_exp_index) {} bool KeyCompare(const Key& a, const Key& b) const { return *m_keys.at(a).at(0) < *m_keys.at(b).at(0); @@ -2162,12 +2206,11 @@ struct KeyParser { assert(false); } - template std::optional FromString(I begin, I end) const + std::optional FromString(std::span& in) const { assert(m_out); Key key = m_keys.size(); - uint32_t exp_index = m_offset + key; - auto pk = ParsePubkey(exp_index, {&*begin, &*end}, ParseContext(), *m_out, m_key_parsing_error); + auto pk = ParsePubkey(m_expr_index, in, ParseContext(), *m_out, m_key_parsing_error); if (pk.empty()) return {}; m_keys.emplace_back(std::move(pk)); return key; @@ -2239,7 +2282,6 @@ std::vector> ParseScript(uint32_t& key_exp_index error = strprintf("pk(): %s", error); return {}; } - ++key_exp_index; for (auto& pubkey : pubkeys) { ret.emplace_back(std::make_unique(std::move(pubkey), ctx == ParseScriptContext::P2TR)); } @@ -2251,7 +2293,6 @@ std::vector> ParseScript(uint32_t& key_exp_index error = strprintf("pkh(): %s", error); return {}; } - ++key_exp_index; for (auto& pubkey : pubkeys) { ret.emplace_back(std::make_unique(std::move(pubkey))); } @@ -2263,7 +2304,6 @@ std::vector> ParseScript(uint32_t& key_exp_index error = strprintf("combo(): %s", error); return {}; } - ++key_exp_index; for (auto& pubkey : pubkeys) { 
ret.emplace_back(std::make_unique(std::move(pubkey))); } @@ -2303,7 +2343,6 @@ std::vector> ParseScript(uint32_t& key_exp_index script_size += pks.at(0)->GetSize() + 1; max_providers_len = std::max(max_providers_len, pks.size()); providers.emplace_back(std::move(pks)); - key_exp_index++; } if ((multi || sortedmulti) && (providers.empty() || providers.size() > MAX_PUBKEYS_PER_MULTISIG)) { error = strprintf("Cannot have %u keys in multisig; must have between 1 and %d keys, inclusive", providers.size(), MAX_PUBKEYS_PER_MULTISIG); @@ -2373,7 +2412,6 @@ std::vector> ParseScript(uint32_t& key_exp_index error = strprintf("wpkh(): %s", error); return {}; } - key_exp_index++; for (auto& pubkey : pubkeys) { ret.emplace_back(std::make_unique(std::move(pubkey))); } @@ -2426,7 +2464,6 @@ std::vector> ParseScript(uint32_t& key_exp_index return {}; } size_t max_providers_len = internal_keys.size(); - ++key_exp_index; std::vector>> subscripts; //!< list of multipath expanded script subexpressions std::vector depths; //!< depth in the tree of each subexpression (same length subscripts) if (expr.size()) { @@ -2530,7 +2567,6 @@ std::vector> ParseScript(uint32_t& key_exp_index error = strprintf("rawtr(): %s", error); return {}; } - ++key_exp_index; for (auto& pubkey : output_keys) { ret.emplace_back(std::make_unique(std::move(pubkey))); } @@ -2594,7 +2630,6 @@ std::vector> ParseScript(uint32_t& key_exp_index // A signature check is required for a miniscript to be sane. Therefore no sane miniscript // may have an empty list of public keys. 
CHECK_NONFATAL(!parser.m_keys.empty()); - key_exp_index += parser.m_keys.size(); // Make sure all vecs are of the same length, or exactly length 1 // For length 1 vectors, clone subdescs until vector is the same length size_t num_multipath = std::max_element(parser.m_keys.begin(), parser.m_keys.end(), @@ -2769,7 +2804,8 @@ std::unique_ptr InferScript(const CScript& script, ParseScriptCo if (ctx == ParseScriptContext::P2WSH || ctx == ParseScriptContext::P2TR) { const auto script_ctx{ctx == ParseScriptContext::P2WSH ? miniscript::MiniscriptContext::P2WSH : miniscript::MiniscriptContext::TAPSCRIPT}; - KeyParser parser(/* out = */nullptr, /* in = */&provider, /* ctx = */script_ctx); + uint32_t key_exp_index = 0; + KeyParser parser(/* out = */nullptr, /* in = */&provider, /* ctx = */script_ctx, key_exp_index); auto node = miniscript::FromScript(script, parser); if (node && node->IsSane()) { std::vector> keys; diff --git a/src/script/descriptor.h b/src/script/descriptor.h index fe33476da52b..3ac748a1746e 100644 --- a/src/script/descriptor.h +++ b/src/script/descriptor.h @@ -181,6 +181,12 @@ struct Descriptor { /** Semantic/safety warnings (includes subdescriptors). */ virtual std::vector Warnings() const = 0; + + /** Get the maximum key expression index. Used only for tests */ + virtual uint32_t GetMaxKeyExpr() const = 0; + + /** Get the number of key expressions in this descriptor. Used only for tests */ + virtual size_t GetKeyCount() const = 0; }; /** Parse a `descriptor` string. Included private keys are put in `out`. diff --git a/src/script/miniscript.h b/src/script/miniscript.h index 1b7e84f471d0..8733d3471369 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -1806,29 +1806,27 @@ enum class ParseContext { int FindNextChar(std::span in, char m); -/** Parse a key string ending at the end of the fragment's text representation. 
*/ +/** Parse a key expression fully contained within a fragment with the name given by 'func' */ template -std::optional> ParseKeyEnd(std::span in, const Ctx& ctx) +std::optional ParseKey(const std::string& func, std::span& in, const Ctx& ctx) { - int key_size = FindNextChar(in, ')'); - if (key_size < 1) return {}; - auto key = ctx.FromString(in.begin(), in.begin() + key_size); - if (!key) return {}; - return {{std::move(*key), key_size}}; + std::span expr = script::Expr(in); + if (!script::Func(func, expr)) return {}; + return ctx.FromString(expr); } -/** Parse a hex string ending at the end of the fragment's text representation. */ +/** Parse a hex string fully contained within a fragment with the name given by 'func' */ template -std::optional, int>> ParseHexStrEnd(std::span in, const size_t expected_size, +std::optional> ParseHexStr(const std::string& func, std::span& in, const size_t expected_size, const Ctx& ctx) { - int hash_size = FindNextChar(in, ')'); - if (hash_size < 1) return {}; - std::string val = std::string(in.begin(), in.begin() + hash_size); + std::span expr = script::Expr(in); + if (!script::Func(func, expr)) return {}; + std::string val = std::string(expr.begin(), expr.end()); if (!IsHex(val)) return {}; auto hash = ParseHex(val); if (hash.size() != expected_size) return {}; - return {{std::move(hash), hash_size}}; + return hash; } /** BuildBack pops the last two elements off `constructed` and wraps them in the specified Fragment */ @@ -1891,7 +1889,8 @@ inline std::optional> Parse(std::span in, const Ctx& ctx) next_comma = FindNextChar(in, ','); int key_length = (next_comma == -1) ? 
FindNextChar(in, ')') : next_comma; if (key_length < 1) return false; - auto key = ctx.FromString(in.begin(), in.begin() + key_length); + std::span sp{in.begin(), in.begin() + key_length}; + auto key = ctx.FromString(sp); if (!key) return false; keys.push_back(std::move(*key)); in = in.subspan(key_length + 1); @@ -1978,77 +1977,59 @@ inline std::optional> Parse(std::span in, const Ctx& ctx) constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_0); } else if (Const("1", in)) { constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_1); - } else if (Const("pk(", in)) { - auto res = ParseKeyEnd(in, ctx); - if (!res) return {}; - auto& [key, key_size] = *res; - constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_C, Vector(Node(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_K, Vector(std::move(key))))); - in = in.subspan(key_size + 1); + } else if (Const("pk(", in, /*skip=*/false)) { + std::optional key = ParseKey("pk", in, ctx); + if (!key) return {}; + constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_C, Vector(Node(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_K, Vector(std::move(*key))))); script_size += IsTapscript(ctx.MsContext()) ? 
33 : 34; - } else if (Const("pkh(", in)) { - auto res = ParseKeyEnd(in, ctx); - if (!res) return {}; - auto& [key, key_size] = *res; - constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_C, Vector(Node(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_H, Vector(std::move(key))))); - in = in.subspan(key_size + 1); + } else if (Const("pkh(", in, /*skip=*/false)) { + std::optional key = ParseKey("pkh", in, ctx); + if (!key) return {}; + constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_C, Vector(Node(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_H, Vector(std::move(*key))))); script_size += 24; - } else if (Const("pk_k(", in)) { - auto res = ParseKeyEnd(in, ctx); - if (!res) return {}; - auto& [key, key_size] = *res; - constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_K, Vector(std::move(key))); - in = in.subspan(key_size + 1); + } else if (Const("pk_k(", in, /*skip=*/false)) { + std::optional key = ParseKey("pk_k", in, ctx); + if (!key) return {}; + constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_K, Vector(std::move(*key))); script_size += IsTapscript(ctx.MsContext()) ? 
32 : 33; - } else if (Const("pk_h(", in)) { - auto res = ParseKeyEnd(in, ctx); - if (!res) return {}; - auto& [key, key_size] = *res; - constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_H, Vector(std::move(key))); - in = in.subspan(key_size + 1); + } else if (Const("pk_h(", in, /*skip=*/false)) { + std::optional key = ParseKey("pk_h", in, ctx); + if (!key) return {}; + constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_H, Vector(std::move(*key))); script_size += 23; - } else if (Const("sha256(", in)) { - auto res = ParseHexStrEnd(in, 32, ctx); - if (!res) return {}; - auto& [hash, hash_size] = *res; - constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::SHA256, std::move(hash)); - in = in.subspan(hash_size + 1); + } else if (Const("sha256(", in, /*skip=*/false)) { + std::optional> hash = ParseHexStr("sha256", in, 32, ctx); + if (!hash) return {}; + constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::SHA256, std::move(*hash)); script_size += 38; - } else if (Const("ripemd160(", in)) { - auto res = ParseHexStrEnd(in, 20, ctx); - if (!res) return {}; - auto& [hash, hash_size] = *res; - constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::RIPEMD160, std::move(hash)); - in = in.subspan(hash_size + 1); + } else if (Const("ripemd160(", in, /*skip=*/false)) { + std::optional> hash = ParseHexStr("ripemd160", in, 20, ctx); + if (!hash) return {}; + constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::RIPEMD160, std::move(*hash)); script_size += 26; - } else if (Const("hash256(", in)) { - auto res = ParseHexStrEnd(in, 32, ctx); - if (!res) return {}; - auto& [hash, hash_size] = *res; - constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::HASH256, std::move(hash)); - in = in.subspan(hash_size + 1); + } else if (Const("hash256(", in, /*skip=*/false)) { + std::optional> hash = ParseHexStr("hash256", in, 32, 
ctx); + if (!hash) return {}; + constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::HASH256, std::move(*hash)); script_size += 38; - } else if (Const("hash160(", in)) { - auto res = ParseHexStrEnd(in, 20, ctx); - if (!res) return {}; - auto& [hash, hash_size] = *res; - constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::HASH160, std::move(hash)); - in = in.subspan(hash_size + 1); + } else if (Const("hash160(", in, /*skip=*/false)) { + std::optional> hash = ParseHexStr("hash160", in, 20, ctx); + if (!hash) return {}; + constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::HASH160, std::move(*hash)); script_size += 26; - } else if (Const("after(", in)) { - int arg_size = FindNextChar(in, ')'); - if (arg_size < 1) return {}; - const auto num{ToIntegral(std::string_view(in.data(), arg_size))}; + } else if (Const("after(", in, /*skip=*/false)) { + auto expr = Expr(in); + if (!Func("after", expr)) return {}; + const auto num{ToIntegral(std::string_view(expr.begin(), expr.end()))}; if (!num.has_value() || *num < 1 || *num >= 0x80000000L) return {}; constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::AFTER, *num); - in = in.subspan(arg_size + 1); script_size += 1 + (*num > 16) + (*num > 0x7f) + (*num > 0x7fff) + (*num > 0x7fffff); - } else if (Const("older(", in)) { - int arg_size = FindNextChar(in, ')'); - if (arg_size < 1) return {}; - const auto num{ToIntegral(std::string_view(in.data(), arg_size))}; + } else if (Const("older(", in, /*skip=*/false)) { + auto expr = Expr(in); + if (!Func("older", expr)) return {}; + const auto num{ToIntegral(std::string_view(expr.begin(), expr.end()))}; if (!num.has_value() || *num < 1 || *num >= 0x80000000L) return {}; constructed.emplace_back(internal::NoDupCheck{}, ctx.MsContext(), Fragment::OLDER, *num); - in = in.subspan(arg_size + 1); script_size += 1 + (*num > 16) + (*num > 0x7f) + (*num > 0x7fff) + (*num > 0x7fffff); } else if 
(Const("multi(", in)) { if (!parse_multi_exp(in, /* is_multi_a = */false)) return {}; diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt index d5f2776a4d8d..36e13a7be115 100644 --- a/src/test/CMakeLists.txt +++ b/src/test/CMakeLists.txt @@ -33,6 +33,7 @@ add_executable(test_bitcoin coins_tests.cpp coinscachepair_tests.cpp coinstatsindex_tests.cpp + coinsviewoverlay_tests.cpp common_url_tests.cpp compress_tests.cpp crypto_tests.cpp @@ -115,6 +116,7 @@ add_executable(test_bitcoin txdownload_tests.cpp txgraph_tests.cpp txindex_tests.cpp + txospenderindex_tests.cpp txpackage_tests.cpp txreconciliation_tests.cpp txrequest_tests.cpp diff --git a/src/test/amount_tests.cpp b/src/test/amount_tests.cpp index b500c9686d75..e1630b41948d 100644 --- a/src/test/amount_tests.cpp +++ b/src/test/amount_tests.cpp @@ -138,8 +138,8 @@ BOOST_AUTO_TEST_CASE(ToStringTest) CFeeRate feeRate; feeRate = CFeeRate(1); BOOST_CHECK_EQUAL(feeRate.ToString(), "0.00000001 BTC/kvB"); - BOOST_CHECK_EQUAL(feeRate.ToString(FeeEstimateMode::BTC_KVB), "0.00000001 BTC/kvB"); - BOOST_CHECK_EQUAL(feeRate.ToString(FeeEstimateMode::SAT_VB), "0.001 sat/vB"); + BOOST_CHECK_EQUAL(feeRate.ToString(FeeRateFormat::BTC_KVB), "0.00000001 BTC/kvB"); + BOOST_CHECK_EQUAL(feeRate.ToString(FeeRateFormat::SAT_VB), "0.001 sat/vB"); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/arith_uint256_tests.cpp b/src/test/arith_uint256_tests.cpp index 0a15458041ed..016b46587191 100644 --- a/src/test/arith_uint256_tests.cpp +++ b/src/test/arith_uint256_tests.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include -#include +#include #include #include diff --git a/src/test/blockchain_tests.cpp b/src/test/blockchain_tests.cpp index c6b35e32d8a3..15038f1c4791 100644 --- a/src/test/blockchain_tests.cpp +++ b/src/test/blockchain_tests.cpp @@ -80,7 +80,7 @@ BOOST_AUTO_TEST_CASE(get_difficulty_for_very_high_target) //! Prune chain from height down to genesis block and check that //! 
GetPruneHeight returns the correct value -static void CheckGetPruneHeight(node::BlockManager& blockman, CChain& chain, int height) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) +static void CheckGetPruneHeight(const node::BlockManager& blockman, const CChain& chain, int height) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); @@ -98,8 +98,8 @@ static void CheckGetPruneHeight(node::BlockManager& blockman, CChain& chain, int BOOST_FIXTURE_TEST_CASE(get_prune_height, TestChain100Setup) { LOCK(::cs_main); - auto& chain = m_node.chainman->ActiveChain(); - auto& blockman = m_node.chainman->m_blockman; + const auto& chain = m_node.chainman->ActiveChain(); + const auto& blockman = m_node.chainman->m_blockman; // Fresh chain of 100 blocks without any pruned blocks, so std::nullopt should be returned BOOST_CHECK(!GetPruneHeight(blockman, chain).has_value()); @@ -130,28 +130,21 @@ BOOST_FIXTURE_TEST_CASE(invalidate_block, TestChain100Setup) // tip_to_invalidate just got invalidated, so it's BLOCK_FAILED_VALID WITH_LOCK(::cs_main, assert(tip_to_invalidate->nStatus & BLOCK_FAILED_VALID)); - WITH_LOCK(::cs_main, assert((tip_to_invalidate->nStatus & BLOCK_FAILED_CHILD) == 0)); // check all ancestors of the invalidated block are validated up to BLOCK_VALID_TRANSACTIONS and are not invalid auto pindex = tip_to_invalidate->pprev; while (pindex) { WITH_LOCK(::cs_main, assert(pindex->IsValid(BLOCK_VALID_TRANSACTIONS))); - WITH_LOCK(::cs_main, assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0)); + WITH_LOCK(::cs_main, assert((pindex->nStatus & BLOCK_FAILED_VALID) == 0)); pindex = pindex->pprev; } - // check all descendants of the invalidated block are BLOCK_FAILED_CHILD + // check all descendants of the invalidated block are BLOCK_FAILED_VALID pindex = orig_tip; while (pindex && pindex != tip_to_invalidate) { - WITH_LOCK(::cs_main, assert((pindex->nStatus & BLOCK_FAILED_VALID) == 0)); - WITH_LOCK(::cs_main, assert(pindex->nStatus & BLOCK_FAILED_CHILD)); + WITH_LOCK(::cs_main, 
assert(pindex->nStatus & BLOCK_FAILED_VALID)); pindex = pindex->pprev; } - - // don't mark already invalidated block (orig_tip is BLOCK_FAILED_CHILD) with BLOCK_FAILED_VALID again - m_node.chainman->ActiveChainstate().InvalidateBlock(state, orig_tip); - WITH_LOCK(::cs_main, assert(orig_tip->nStatus & BLOCK_FAILED_CHILD)); - WITH_LOCK(::cs_main, assert((orig_tip->nStatus & BLOCK_FAILED_VALID) == 0)); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index 0fbe0f3c1347..e4200cace259 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp index d7d10dfb1ae7..25762e070db4 100644 --- a/src/test/blockfilter_index_tests.cpp +++ b/src/test/blockfilter_index_tests.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/src/test/blockfilter_tests.cpp b/src/test/blockfilter_tests.cpp index c8334dabe101..0fc7d5b29eee 100644 --- a/src/test/blockfilter_tests.cpp +++ b/src/test/blockfilter_tests.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include -#include +#include #include #include diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp index db59931ba787..68178dec3ebf 100644 --- a/src/test/blockmanager_tests.cpp +++ b/src/test/blockmanager_tests.cpp @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -126,6 +127,14 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_availability, TestChain100Setup) CBlockIndex* lower_block = chainman->ActiveChain()[tip.nHeight / 2]; BOOST_CHECK(blockman.CheckBlockDataAvailability(tip, *lower_block)); + // Ensure we don't fail due to the expected absence of undo data in the genesis block + CBlockIndex* upper_block = chainman->ActiveChain()[2]; + CBlockIndex* genesis = chainman->ActiveChain()[0]; + BOOST_CHECK(blockman.CheckBlockDataAvailability(*upper_block, *genesis, BlockStatus{BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO})); + // Ensure we detect absence of undo data in the first block + chainman->ActiveChain()[1]->nStatus &= ~BLOCK_HAVE_UNDO; + BOOST_CHECK(!blockman.CheckBlockDataAvailability(tip, *genesis, BlockStatus{BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO})); + // Prune half of the blocks int height_to_prune = tip.nHeight / 2; CBlockIndex* first_available_block = chainman->ActiveChain()[height_to_prune + 1]; @@ -136,6 +145,12 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_availability, TestChain100Setup) BOOST_CHECK_EQUAL(&blockman.GetFirstBlock(tip, BLOCK_HAVE_DATA), first_available_block); BOOST_CHECK(blockman.CheckBlockDataAvailability(tip, *first_available_block)); BOOST_CHECK(!blockman.CheckBlockDataAvailability(tip, *last_pruned_block)); + + // Simulate that the first available block is missing undo data and + // detect this by using a status mask. 
+ first_available_block->nStatus &= ~BLOCK_HAVE_UNDO; + BOOST_CHECK(!blockman.CheckBlockDataAvailability(tip, *first_available_block, BlockStatus{BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO})); + BOOST_CHECK(blockman.CheckBlockDataAvailability(tip, *first_available_block, BlockStatus{BLOCK_HAVE_DATA})); } BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_part, TestChain100Setup) diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp index 8e02cfd08c64..ed333c34c0d9 100644 --- a/src/test/bloom_tests.cpp +++ b/src/test/bloom_tests.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/src/test/cluster_linearize_tests.cpp b/src/test/cluster_linearize_tests.cpp index e14b5ebc94ac..4f851c1d5ff7 100644 --- a/src/test/cluster_linearize_tests.cpp +++ b/src/test/cluster_linearize_tests.cpp @@ -88,9 +88,15 @@ void TestOptimalLinearization(std::span enc, std::initializer_lis is_topological = false; break; } - std::tie(lin, opt, cost) = Linearize(depgraph, 1000000000000, rng.rand64(), IndexTxOrder{}, lin, is_topological); + std::tie(lin, opt, cost) = Linearize( + /*depgraph=*/depgraph, + /*max_cost=*/1000000000000, + /*rng_seed=*/rng.rand64(), + /*fallback_order=*/IndexTxOrder{}, + /*old_linearization=*/lin, + /*is_topological=*/is_topological); BOOST_CHECK(opt); - BOOST_CHECK(cost <= MaxOptimalLinearizationIters(depgraph.TxCount())); + BOOST_CHECK(cost <= MaxOptimalLinearizationCost(depgraph.TxCount())); SanityCheck(depgraph, lin); BOOST_CHECK(std::ranges::equal(lin, optimal_linearization)); } diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp index 8c0756d8528b..8321bf6a18cf 100644 --- a/src/test/coins_tests.cpp +++ b/src/test/coins_tests.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -95,6 +96,7 @@ class CCoinsViewCacheTest : public CCoinsViewCache CCoinsMap& map() const { return cacheCoins; } CoinsCachePair& sentinel() const { return m_sentinel; } size_t& usage() const { return 
cachedCoinsUsage; } + size_t& dirty() const { return m_dirty_count; } }; } // namespace @@ -189,8 +191,11 @@ void SimulationTest(CCoinsView* base, bool fake_best_block) (coin.IsSpent() ? added_an_entry : updated_an_entry) = true; coin = newcoin; } - bool is_overwrite = !coin.IsSpent() || m_rng.rand32() & 1; - stack.back()->AddCoin(COutPoint(txid, 0), std::move(newcoin), is_overwrite); + if (COutPoint op(txid, 0); !stack.back()->map().contains(op) && !newcoin.out.scriptPubKey.IsUnspendable() && m_rng.randbool()) { + stack.back()->EmplaceCoinInternalDANGER(std::move(op), std::move(newcoin)); + } else { + stack.back()->AddCoin(op, std::move(newcoin), /*possible_overwrite=*/!coin.IsSpent() || m_rng.randbool()); + } } else { // Spend the coin. removed_an_entry = true; @@ -653,8 +658,10 @@ static void WriteCoinsViewEntry(CCoinsView& view, const MaybeCoin& cache_coin) CCoinsMapMemoryResource resource; CCoinsMap map{0, CCoinsMap::hasher{}, CCoinsMap::key_equal{}, &resource}; if (cache_coin) InsertCoinsMapEntry(map, sentinel, *cache_coin); - auto cursor{CoinsViewCacheCursor(sentinel, map, /*will_erase=*/true)}; + size_t dirty_count{cache_coin && cache_coin->IsDirty()}; + auto cursor{CoinsViewCacheCursor(dirty_count, sentinel, map, /*will_erase=*/true)}; view.BatchWrite(cursor, {}); + BOOST_CHECK_EQUAL(dirty_count, 0U); } class SingleEntryCacheTest @@ -664,7 +671,10 @@ class SingleEntryCacheTest { auto base_cache_coin{base_value == ABSENT ? 
MISSING : CoinEntry{base_value, CoinEntry::State::DIRTY}}; WriteCoinsViewEntry(base, base_cache_coin); - if (cache_coin) cache.usage() += InsertCoinsMapEntry(cache.map(), cache.sentinel(), *cache_coin); + if (cache_coin) { + cache.usage() += InsertCoinsMapEntry(cache.map(), cache.sentinel(), *cache_coin); + cache.dirty() += cache_coin->IsDirty(); + } } CCoinsView root; @@ -1125,6 +1135,7 @@ BOOST_AUTO_TEST_CASE(ccoins_reset_guard) const Coin coin{CTxOut{m_rng.randrange(10), CScript{} << m_rng.randbytes(CScriptBase::STATIC_SIZE + 1)}, 1, false}; cache.EmplaceCoinInternalDANGER(COutPoint{outpoint}, Coin{coin}); + BOOST_CHECK_EQUAL(cache.GetDirtyCount(), 1U); uint256 cache_best_block{m_rng.rand256()}; cache.SetBestBlock(cache_best_block); @@ -1134,12 +1145,14 @@ BOOST_AUTO_TEST_CASE(ccoins_reset_guard) BOOST_CHECK(cache.AccessCoin(outpoint) == coin); BOOST_CHECK(!cache.AccessCoin(outpoint).IsSpent()); BOOST_CHECK_EQUAL(cache.GetCacheSize(), 1); + BOOST_CHECK_EQUAL(cache.GetDirtyCount(), 1); BOOST_CHECK_EQUAL(cache.GetBestBlock(), cache_best_block); BOOST_CHECK(!root_cache.HaveCoinInCache(outpoint)); } BOOST_CHECK(cache.AccessCoin(outpoint).IsSpent()); BOOST_CHECK_EQUAL(cache.GetCacheSize(), 0); + BOOST_CHECK_EQUAL(cache.GetDirtyCount(), 0); BOOST_CHECK_EQUAL(cache.GetBestBlock(), base_best_block); BOOST_CHECK(!root_cache.HaveCoinInCache(outpoint)); @@ -1150,8 +1163,34 @@ BOOST_AUTO_TEST_CASE(ccoins_reset_guard) BOOST_CHECK(cache.AccessCoin(outpoint).IsSpent()); BOOST_CHECK_EQUAL(cache.GetCacheSize(), 0); + BOOST_CHECK_EQUAL(cache.GetDirtyCount(), 0U); BOOST_CHECK_EQUAL(cache.GetBestBlock(), base_best_block); BOOST_CHECK(!root_cache.HaveCoinInCache(outpoint)); + + // Flush should be a no-op after reset. + cache.Flush(); + BOOST_CHECK_EQUAL(cache.GetDirtyCount(), 0U); +} + +BOOST_AUTO_TEST_CASE(ccoins_peekcoin) +{ + CCoinsViewTest base{m_rng}; + + // Populate the base view with a coin. 
+ const COutPoint outpoint{Txid::FromUint256(m_rng.rand256()), m_rng.rand32()}; + const Coin coin{CTxOut{m_rng.randrange(10), CScript{}}, 1, false}; + { + CCoinsViewCache cache{&base}; + cache.AddCoin(outpoint, Coin{coin}, /*possible_overwrite=*/false); + cache.Flush(); + } + + // Verify PeekCoin can read through the cache stack without mutating the intermediate cache. + CCoinsViewCacheTest main_cache{&base}; + const auto fetched{main_cache.PeekCoin(outpoint)}; + BOOST_CHECK(fetched.has_value()); + BOOST_CHECK(*fetched == coin); + BOOST_CHECK(!main_cache.HaveCoinInCache(outpoint)); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/coinsviewoverlay_tests.cpp b/src/test/coinsviewoverlay_tests.cpp new file mode 100644 index 000000000000..6b20b31211a6 --- /dev/null +++ b/src/test/coinsviewoverlay_tests.cpp @@ -0,0 +1,165 @@ +// Copyright (c) The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +BOOST_AUTO_TEST_SUITE(coinsviewoverlay_tests) + +namespace { + +CBlock CreateBlock() noexcept +{ + static constexpr auto NUM_TXS{100}; + CBlock block; + CMutableTransaction coinbase; + coinbase.vin.emplace_back(); + block.vtx.push_back(MakeTransactionRef(coinbase)); + + for (const auto i : std::views::iota(1, NUM_TXS)) { + CMutableTransaction tx; + Txid txid{Txid::FromUint256(uint256(i))}; + tx.vin.emplace_back(txid, 0); + block.vtx.push_back(MakeTransactionRef(tx)); + } + + return block; +} + +void PopulateView(const CBlock& block, CCoinsView& view, bool spent = false) +{ + CCoinsViewCache cache{&view}; + cache.SetBestBlock(uint256::ONE); + + for (const auto& tx : block.vtx | std::views::drop(1)) { + for (const auto& in : tx->vin) { + Coin coin{}; + if (!spent) coin.out.nValue = 1; + cache.EmplaceCoinInternalDANGER(COutPoint{in.prevout}, std::move(coin)); + } + } + + cache.Flush(); +} + +void CheckCache(const CBlock& block, const CCoinsViewCache& cache) +{ + uint32_t counter{0}; + + for (const auto& tx : block.vtx) { + if (tx->IsCoinBase()) { + BOOST_CHECK(!cache.HaveCoinInCache(tx->vin[0].prevout)); + } else { + for (const auto& in : tx->vin) { + const auto& outpoint{in.prevout}; + const auto& first{cache.AccessCoin(outpoint)}; + const auto& second{cache.AccessCoin(outpoint)}; + BOOST_CHECK_EQUAL(&first, &second); + ++counter; + BOOST_CHECK(cache.HaveCoinInCache(outpoint)); + } + } + } + BOOST_CHECK_EQUAL(cache.GetCacheSize(), counter); +} + +} // namespace + +BOOST_AUTO_TEST_CASE(fetch_inputs_from_db) +{ + const auto block{CreateBlock()}; + CCoinsViewDB db{{.path = "", .cache_bytes = 1_MiB, .memory_only = true}, {}}; + PopulateView(block, db); + CCoinsViewCache main_cache{&db}; + CoinsViewOverlay view{&main_cache}; + const auto& outpoint{block.vtx[1]->vin[0].prevout}; + + BOOST_CHECK(view.HaveCoin(outpoint)); + 
BOOST_CHECK(view.GetCoin(outpoint).has_value()); + BOOST_CHECK(!main_cache.HaveCoinInCache(outpoint)); + + CheckCache(block, view); + // Check that no coins have been moved up to main cache from db + for (const auto& tx : block.vtx) { + for (const auto& in : tx->vin) { + BOOST_CHECK(!main_cache.HaveCoinInCache(in.prevout)); + } + } + + view.SetBestBlock(uint256::ONE); + BOOST_CHECK(view.SpendCoin(outpoint)); + view.Flush(); + BOOST_CHECK(!main_cache.PeekCoin(outpoint).has_value()); +} + +BOOST_AUTO_TEST_CASE(fetch_inputs_from_cache) +{ + const auto block{CreateBlock()}; + CCoinsViewDB db{{.path = "", .cache_bytes = 1_MiB, .memory_only = true}, {}}; + CCoinsViewCache main_cache{&db}; + PopulateView(block, main_cache); + CoinsViewOverlay view{&main_cache}; + CheckCache(block, view); + + const auto& outpoint{block.vtx[1]->vin[0].prevout}; + view.SetBestBlock(uint256::ONE); + BOOST_CHECK(view.SpendCoin(outpoint)); + view.Flush(); + BOOST_CHECK(!main_cache.PeekCoin(outpoint).has_value()); +} + +// Test for the case where a block spends coins that are spent in the cache, but +// the spentness has not been flushed to the db. 
+BOOST_AUTO_TEST_CASE(fetch_no_double_spend) +{ + const auto block{CreateBlock()}; + CCoinsViewDB db{{.path = "", .cache_bytes = 1_MiB, .memory_only = true}, {}}; + PopulateView(block, db); + CCoinsViewCache main_cache{&db}; + // Add all inputs as spent already in cache + PopulateView(block, main_cache, /*spent=*/true); + CoinsViewOverlay view{&main_cache}; + for (const auto& tx : block.vtx) { + for (const auto& in : tx->vin) { + const auto& c{view.AccessCoin(in.prevout)}; + BOOST_CHECK(c.IsSpent()); + BOOST_CHECK(!view.HaveCoin(in.prevout)); + BOOST_CHECK(!view.GetCoin(in.prevout)); + } + } + // Coins are not added to the view, even though they exist unspent in the parent db + BOOST_CHECK_EQUAL(view.GetCacheSize(), 0); +} + +BOOST_AUTO_TEST_CASE(fetch_no_inputs) +{ + const auto block{CreateBlock()}; + CCoinsViewDB db{{.path = "", .cache_bytes = 1_MiB, .memory_only = true}, {}}; + CCoinsViewCache main_cache{&db}; + CoinsViewOverlay view{&main_cache}; + for (const auto& tx : block.vtx) { + for (const auto& in : tx->vin) { + const auto& c{view.AccessCoin(in.prevout)}; + BOOST_CHECK(c.IsSpent()); + BOOST_CHECK(!view.HaveCoin(in.prevout)); + BOOST_CHECK(!view.GetCoin(in.prevout)); + } + } + BOOST_CHECK_EQUAL(view.GetCacheSize(), 0); +} + +BOOST_AUTO_TEST_SUITE_END() + diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp index 5588d4cdbc66..b348793bfb63 100644 --- a/src/test/crypto_tests.cpp +++ b/src/test/crypto_tests.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp index d3a9e54348b2..3896ea64da54 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include +#include #include #include #include diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 8ff44d13dcc1..a646fc834760 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -283,6 +283,14 @@ void DoCheck(std::string prv, std::string pub, const std::string& norm_pub, int BOOST_CHECK_EQUAL(parse_pub->IsRange(), (flags & RANGE) != 0); BOOST_CHECK_EQUAL(parse_priv->IsRange(), (flags & RANGE) != 0); + // Check that the highest key expression index matches the number of keys in the descriptor + BOOST_TEST_INFO("Pub desc: " + pub); + uint32_t key_exprs = parse_pub->GetMaxKeyExpr(); + BOOST_CHECK_EQUAL(key_exprs + 1, parse_pub->GetKeyCount()); + BOOST_TEST_INFO("Priv desc: " + prv); + BOOST_CHECK_EQUAL(key_exprs, parse_priv->GetMaxKeyExpr()); + BOOST_CHECK_EQUAL(key_exprs + 1, parse_priv->GetKeyCount()); + // * For ranged descriptors, the `scripts` parameter is a list of expected result outputs, for subsequent // positions to evaluate the descriptors on (so the first element of `scripts` is for evaluating the // descriptor at 0; the second at 1; and so on). 
To verify this, we evaluate the descriptors once for @@ -1267,7 +1275,7 @@ BOOST_AUTO_TEST_CASE(descriptor_test) CheckUnparsable("tr(musig(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/*)/0)","tr(musig(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/*)/0)", "tr(): musig(): Cannot have ranged participant keys if musig() also has derivation"); // Fuzzer crash test cases - CheckUnparsable("pk(musig(dd}uue/00/)k(", "pk(musig(dd}uue/00/)k(", "Invalid musig() expression"); + CheckUnparsable("pk(musig(dd}uue/00/)k(", "pk(musig(dd}uue/00/)k(", "'pk(musig(dd}uue/00/)k(' is not a valid descriptor function"); CheckUnparsable("tr(musig(tuus(oldepk(gg)ggggfgg)<,z(((((((((((((((((((((st)", "tr(musig(tuus(oldepk(gg)ggggfgg)<,z(((((((((((((((((((((st)","tr(): Too many ')' in musig() expression"); } diff --git a/src/test/fuzz/block_index_tree.cpp b/src/test/fuzz/block_index_tree.cpp index a4bbe9584000..5112a7e0f2ea 100644 --- a/src/test/fuzz/block_index_tree.cpp +++ b/src/test/fuzz/block_index_tree.cpp @@ -61,7 +61,7 @@ FUZZ_TARGET(block_index_tree, .init = initialize_block_index_tree) // Receive a header building on an existing valid one. This assumes headers are valid, so PoW is not relevant here. 
LOCK(cs_main); CBlockIndex* prev_block = PickValue(fuzzed_data_provider, blocks); - if (!(prev_block->nStatus & BLOCK_FAILED_MASK)) { + if (!(prev_block->nStatus & BLOCK_FAILED_VALID)) { CBlockHeader header = ConsumeBlockHeader(fuzzed_data_provider, prev_block->GetBlockHash(), nonce_counter); CBlockIndex* index = blockman.AddToBlockIndex(header, chainman.m_best_header); assert(index->nStatus & BLOCK_VALID_TREE); @@ -74,7 +74,7 @@ FUZZ_TARGET(block_index_tree, .init = initialize_block_index_tree) LOCK(cs_main); CBlockIndex* index = PickValue(fuzzed_data_provider, blocks); // Must be new to us and not known to be invalid (e.g. because of an invalid ancestor). - if (index->nTx == 0 && !(index->nStatus & BLOCK_FAILED_MASK)) { + if (index->nTx == 0 && !(index->nStatus & BLOCK_FAILED_VALID)) { if (fuzzed_data_provider.ConsumeBool()) { // Invalid BlockValidationState state; state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "consensus-invalid"); @@ -124,7 +124,7 @@ FUZZ_TARGET(block_index_tree, .init = initialize_block_index_tree) } // Connect blocks, possibly fail for (CBlockIndex* block : to_connect | std::views::reverse) { - assert(!(block->nStatus & BLOCK_FAILED_MASK)); + assert(!(block->nStatus & BLOCK_FAILED_VALID)); assert(block->nStatus & BLOCK_HAVE_DATA); if (!block->IsValid(BLOCK_VALID_SCRIPTS)) { if (fuzzed_data_provider.ConsumeBool()) { // Invalid diff --git a/src/test/fuzz/chain.cpp b/src/test/fuzz/chain.cpp index 23499dd0797f..4d3a6f423559 100644 --- a/src/test/fuzz/chain.cpp +++ b/src/test/fuzz/chain.cpp @@ -50,8 +50,6 @@ FUZZ_TARGET(chain) BlockStatus::BLOCK_HAVE_UNDO, BlockStatus::BLOCK_HAVE_MASK, BlockStatus::BLOCK_FAILED_VALID, - BlockStatus::BLOCK_FAILED_CHILD, - BlockStatus::BLOCK_FAILED_MASK, BlockStatus::BLOCK_OPT_WITNESS, }); if (block_status & ~BLOCK_VALID_MASK) { diff --git a/src/test/fuzz/cluster_linearize.cpp b/src/test/fuzz/cluster_linearize.cpp index b4df22a53724..b735940100a9 100644 --- a/src/test/fuzz/cluster_linearize.cpp +++ 
b/src/test/fuzz/cluster_linearize.cpp @@ -919,7 +919,7 @@ FUZZ_TARGET(clusterlin_sfl) if (rng.randbits(4) == 0) { // Perform sanity checks from time to time (too computationally expensive to do after // every step). - sfl.SanityCheck(depgraph); + sfl.SanityCheck(); } auto diagram = sfl.GetDiagram(); if (rng.randbits(4) == 0) { @@ -984,7 +984,7 @@ FUZZ_TARGET(clusterlin_sfl) // Verify that optimality is reached within an expected amount of work. This protects against // hypothetical bugs that hugely increase the amount of work needed to reach optimality. - assert(sfl.GetCost() <= MaxOptimalLinearizationIters(depgraph.TxCount())); + assert(sfl.GetCost() <= MaxOptimalLinearizationCost(depgraph.TxCount())); // The result must be as good as SimpleLinearize. auto [simple_linearization, simple_optimal] = SimpleLinearize(depgraph, MAX_SIMPLE_ITERATIONS / 10); @@ -1011,16 +1011,17 @@ FUZZ_TARGET(clusterlin_linearize) { // Verify the behavior of Linearize(). - // Retrieve an RNG seed, an iteration count, a depgraph, and whether to make it connected from - // the fuzz input. + // Retrieve an RNG seed, a maximum amount of work, a depgraph, and whether to make it connected + // from the fuzz input. SpanReader reader(buffer); DepGraph depgraph; uint64_t rng_seed{0}; - uint64_t iter_count{0}; + uint64_t max_cost{0}; uint8_t flags{7}; try { - reader >> VARINT(iter_count) >> Using(depgraph) >> rng_seed >> flags; + reader >> VARINT(max_cost) >> Using(depgraph) >> rng_seed >> flags; } catch (const std::ios_base::failure&) {} + if (depgraph.TxCount() <= 1) return; bool make_connected = flags & 1; // The following 3 booleans have 4 combinations: // - (flags & 6) == 0: do not provide input linearization. @@ -1043,8 +1044,14 @@ FUZZ_TARGET(clusterlin_linearize) } // Invoke Linearize(). 
- iter_count &= 0x7ffff; - auto [linearization, optimal, cost] = Linearize(depgraph, iter_count, rng_seed, IndexTxOrder{}, old_linearization, /*is_topological=*/claim_topological_input); + max_cost &= 0x3fffff; + auto [linearization, optimal, cost] = Linearize( + /*depgraph=*/depgraph, + /*max_cost=*/max_cost, + /*rng_seed=*/rng_seed, + /*fallback_order=*/IndexTxOrder{}, + /*old_linearization=*/old_linearization, + /*is_topological=*/claim_topological_input); SanityCheck(depgraph, linearization); auto chunking = ChunkLinearization(depgraph, linearization); @@ -1056,8 +1063,8 @@ FUZZ_TARGET(clusterlin_linearize) assert(cmp >= 0); } - // If the iteration count is sufficiently high, an optimal linearization must be found. - if (iter_count > MaxOptimalLinearizationIters(depgraph.TxCount())) { + // If the maximum amount of work is sufficiently high, an optimal linearization must be found. + if (max_cost > MaxOptimalLinearizationCost(depgraph.TxCount())) { assert(optimal); } @@ -1145,7 +1152,7 @@ FUZZ_TARGET(clusterlin_linearize) // Redo from scratch with a different rng_seed. The resulting linearization should be // deterministic, if both are optimal. - auto [linearization2, optimal2, cost2] = Linearize(depgraph, MaxOptimalLinearizationIters(depgraph.TxCount()) + 1, rng_seed ^ 0x1337, IndexTxOrder{}); + auto [linearization2, optimal2, cost2] = Linearize(depgraph, MaxOptimalLinearizationCost(depgraph.TxCount()) + 1, rng_seed ^ 0x1337, IndexTxOrder{}); assert(optimal2); assert(linearization2 == linearization); } @@ -1236,7 +1243,7 @@ FUZZ_TARGET(clusterlin_postlinearize_tree) // Try to find an even better linearization directly. This must not change the diagram for the // same reason. 
- auto [opt_linearization, _optimal, _cost] = Linearize(depgraph_tree, 100000, rng_seed, IndexTxOrder{}, post_linearization); + auto [opt_linearization, _optimal, _cost] = Linearize(depgraph_tree, 1000000, rng_seed, IndexTxOrder{}, post_linearization); auto opt_chunking = ChunkLinearization(depgraph_tree, opt_linearization); auto cmp_opt = CompareChunks(opt_chunking, post_chunking); assert(cmp_opt == 0); diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp index 699a45e2c490..a47dc3706c87 100644 --- a/src/test/fuzz/coins_view.cpp +++ b/src/test/fuzz/coins_view.cpp @@ -18,10 +18,13 @@ #include #include +#include #include +#include #include #include #include +#include #include #include #include @@ -35,6 +38,51 @@ bool operator==(const Coin& a, const Coin& b) if (a.IsSpent() && b.IsSpent()) return true; return a.fCoinBase == b.fCoinBase && a.nHeight == b.nHeight && a.out == b.out; } + +/** + * MutationGuardCoinsViewCache asserts that nothing mutates cacheCoins until + * BatchWrite is called. It keeps a snapshot of the cacheCoins state, which it + * uses for the assertion in BatchWrite. After the call to the superclass + * CCoinsViewCache::BatchWrite returns, it recomputes the snapshot at that + * moment. 
+ */ +class MutationGuardCoinsViewCache final : public CCoinsViewCache +{ +private: + struct CacheCoinSnapshot { + COutPoint outpoint; + bool dirty{false}; + bool fresh{false}; + Coin coin; + bool operator==(const CacheCoinSnapshot&) const = default; + }; + + std::vector ComputeCacheCoinsSnapshot() const + { + std::vector snapshot; + snapshot.reserve(cacheCoins.size()); + + for (const auto& [outpoint, entry] : cacheCoins) { + snapshot.emplace_back(outpoint, entry.IsDirty(), entry.IsFresh(), entry.coin); + } + + std::ranges::sort(snapshot, std::less<>{}, &CacheCoinSnapshot::outpoint); + return snapshot; + } + + mutable std::vector m_expected_snapshot{ComputeCacheCoinsSnapshot()}; + +public: + void BatchWrite(CoinsViewCacheCursor& cursor, const uint256& block_hash) override + { + // Nothing must modify cacheCoins other than BatchWrite. + assert(ComputeCacheCoinsSnapshot() == m_expected_snapshot); + CCoinsViewCache::BatchWrite(cursor, block_hash); + m_expected_snapshot = ComputeCacheCoinsSnapshot(); + } + + using CCoinsViewCache::CCoinsViewCache; +}; } // namespace void initialize_coins_view() @@ -42,11 +90,10 @@ void initialize_coins_view() static const auto testing_setup = MakeNoLogFileContext<>(); } -void TestCoinsView(FuzzedDataProvider& fuzzed_data_provider, CCoinsView& backend_coins_view, bool is_db) +void TestCoinsView(FuzzedDataProvider& fuzzed_data_provider, CCoinsViewCache& coins_view_cache, CCoinsView& backend_coins_view, bool is_db) { bool good_data{true}; - CCoinsViewCache coins_view_cache{&backend_coins_view, /*deterministic=*/true}; if (is_db) coins_view_cache.SetBestBlock(uint256::ONE); COutPoint random_out_point; Coin random_coin; @@ -62,13 +109,9 @@ void TestCoinsView(FuzzedDataProvider& fuzzed_data_provider, CCoinsView& backend COutPoint outpoint{random_out_point}; Coin coin{random_coin}; if (fuzzed_data_provider.ConsumeBool()) { - const bool possible_overwrite{fuzzed_data_provider.ConsumeBool()}; - try { - coins_view_cache.AddCoin(outpoint, 
std::move(coin), possible_overwrite); - } catch (const std::logic_error& e) { - assert(e.what() == std::string{"Attempted to overwrite an unspent coin (when possible_overwrite is false)"}); - assert(!possible_overwrite); - } + // We can only skip the check if no unspent coin exists for this outpoint. + const bool possible_overwrite{coins_view_cache.PeekCoin(outpoint) || fuzzed_data_provider.ConsumeBool()}; + coins_view_cache.AddCoin(outpoint, std::move(coin), possible_overwrite); } else { coins_view_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin)); } @@ -139,13 +182,12 @@ void TestCoinsView(FuzzedDataProvider& fuzzed_data_provider, CCoinsView& backend [&] { CoinsCachePair sentinel{}; sentinel.second.SelfRef(sentinel); + size_t dirty_count{0}; CCoinsMapMemoryResource resource; CCoinsMap coins_map{0, SaltedOutpointHasher{/*deterministic=*/true}, CCoinsMap::key_equal{}, &resource}; LIMITED_WHILE(good_data && fuzzed_data_provider.ConsumeBool(), 10'000) { CCoinsCacheEntry coins_cache_entry; - const auto dirty{fuzzed_data_provider.ConsumeBool()}; - const auto fresh{fuzzed_data_provider.ConsumeBool()}; if (fuzzed_data_provider.ConsumeBool()) { coins_cache_entry.coin = random_coin; } else { @@ -156,53 +198,23 @@ void TestCoinsView(FuzzedDataProvider& fuzzed_data_provider, CCoinsView& backend } coins_cache_entry.coin = *opt_coin; } + // Avoid setting FRESH for an outpoint that already exists unspent in the parent view. 
+ bool fresh{!coins_view_cache.PeekCoin(random_out_point) && fuzzed_data_provider.ConsumeBool()}; + bool dirty{fresh || fuzzed_data_provider.ConsumeBool()}; auto it{coins_map.emplace(random_out_point, std::move(coins_cache_entry)).first}; if (dirty) CCoinsCacheEntry::SetDirty(*it, sentinel); if (fresh) CCoinsCacheEntry::SetFresh(*it, sentinel); + dirty_count += dirty; } - bool expected_code_path = false; - try { - auto cursor{CoinsViewCacheCursor(sentinel, coins_map, /*will_erase=*/true)}; - uint256 best_block{coins_view_cache.GetBestBlock()}; - if (fuzzed_data_provider.ConsumeBool()) best_block = ConsumeUInt256(fuzzed_data_provider); - // Set best block hash to non-null to satisfy the assertion in CCoinsViewDB::BatchWrite(). - if (is_db && best_block.IsNull()) best_block = uint256::ONE; - coins_view_cache.BatchWrite(cursor, best_block); - expected_code_path = true; - } catch (const std::logic_error& e) { - if (e.what() == std::string{"FRESH flag misapplied to coin that exists in parent cache"}) { - expected_code_path = true; - } - } - assert(expected_code_path); + auto cursor{CoinsViewCacheCursor(dirty_count, sentinel, coins_map, /*will_erase=*/true)}; + uint256 best_block{coins_view_cache.GetBestBlock()}; + if (fuzzed_data_provider.ConsumeBool()) best_block = ConsumeUInt256(fuzzed_data_provider); + // Set best block hash to non-null to satisfy the assertion in CCoinsViewDB::BatchWrite(). 
+ if (is_db && best_block.IsNull()) best_block = uint256::ONE; + coins_view_cache.BatchWrite(cursor, best_block); }); } - { - const Coin& coin_using_access_coin = coins_view_cache.AccessCoin(random_out_point); - const bool exists_using_access_coin = !(coin_using_access_coin == EMPTY_COIN); - const bool exists_using_have_coin = coins_view_cache.HaveCoin(random_out_point); - const bool exists_using_have_coin_in_cache = coins_view_cache.HaveCoinInCache(random_out_point); - if (auto coin{coins_view_cache.GetCoin(random_out_point)}) { - assert(*coin == coin_using_access_coin); - assert(exists_using_access_coin && exists_using_have_coin_in_cache && exists_using_have_coin); - } else { - assert(!exists_using_access_coin && !exists_using_have_coin_in_cache && !exists_using_have_coin); - } - // If HaveCoin on the backend is true, it must also be on the cache if the coin wasn't spent. - const bool exists_using_have_coin_in_backend = backend_coins_view.HaveCoin(random_out_point); - if (!coin_using_access_coin.IsSpent() && exists_using_have_coin_in_backend) { - assert(exists_using_have_coin); - } - if (auto coin{backend_coins_view.GetCoin(random_out_point)}) { - assert(exists_using_have_coin_in_backend); - // Note we can't assert that `coin_using_get_coin == *coin` because the coin in - // the cache may have been modified but not yet flushed. 
- } else { - assert(!exists_using_have_coin_in_backend); - } - } - { bool expected_code_path = false; try { @@ -220,8 +232,10 @@ void TestCoinsView(FuzzedDataProvider& fuzzed_data_provider, CCoinsView& backend } { - std::unique_ptr coins_view_cursor = backend_coins_view.Cursor(); - assert(is_db == !!coins_view_cursor); + if (is_db) { + std::unique_ptr coins_view_cursor = backend_coins_view.Cursor(); + assert(!!coins_view_cursor); + } (void)backend_coins_view.EstimateSize(); (void)backend_coins_view.GetBestBlock(); (void)backend_coins_view.GetHeadBlocks(); @@ -243,19 +257,14 @@ void TestCoinsView(FuzzedDataProvider& fuzzed_data_provider, CCoinsView& backend // coins.cpp:69: void CCoinsViewCache::AddCoin(const COutPoint &, Coin &&, bool): Assertion `!coin.IsSpent()' failed. return; } - bool expected_code_path = false; const int height{int(fuzzed_data_provider.ConsumeIntegral() >> 1)}; - const bool possible_overwrite = fuzzed_data_provider.ConsumeBool(); - try { - AddCoins(coins_view_cache, transaction, height, possible_overwrite); - expected_code_path = true; - } catch (const std::logic_error& e) { - if (e.what() == std::string{"Attempted to overwrite an unspent coin (when possible_overwrite is false)"}) { - assert(!possible_overwrite); - expected_code_path = true; + const bool check_for_overwrite{transaction.IsCoinBase() || [&] { + for (uint32_t i{0}; i < transaction.vout.size(); ++i) { + if (coins_view_cache.PeekCoin(COutPoint{transaction.GetHash(), i})) return true; } - } - assert(expected_code_path); + return fuzzed_data_provider.ConsumeBool(); + }()}; // We can only skip the check if the current txid has no unspent outputs + AddCoins(coins_view_cache, transaction, height, check_for_overwrite); }, [&] { (void)AreInputsStandard(CTransaction{random_mutable_transaction}, coins_view_cache); @@ -306,13 +315,39 @@ void TestCoinsView(FuzzedDataProvider& fuzzed_data_provider, CCoinsView& backend (void)IsWitnessStandard(CTransaction{random_mutable_transaction}, 
coins_view_cache); }); } + + { + const Coin& coin_using_access_coin = coins_view_cache.AccessCoin(random_out_point); + const bool exists_using_access_coin = !(coin_using_access_coin == EMPTY_COIN); + const bool exists_using_have_coin = coins_view_cache.HaveCoin(random_out_point); + const bool exists_using_have_coin_in_cache = coins_view_cache.HaveCoinInCache(random_out_point); + if (auto coin{coins_view_cache.GetCoin(random_out_point)}) { + assert(*coin == coin_using_access_coin); + assert(exists_using_access_coin && exists_using_have_coin_in_cache && exists_using_have_coin); + } else { + assert(!exists_using_access_coin && !exists_using_have_coin_in_cache && !exists_using_have_coin); + } + // If HaveCoin on the backend is true, it must also be on the cache if the coin wasn't spent. + const bool exists_using_have_coin_in_backend = backend_coins_view.HaveCoin(random_out_point); + if (!coin_using_access_coin.IsSpent() && exists_using_have_coin_in_backend) { + assert(exists_using_have_coin); + } + if (auto coin{backend_coins_view.GetCoin(random_out_point)}) { + assert(exists_using_have_coin_in_backend); + // Note we can't assert that `coin_using_get_coin == *coin` because the coin in + // the cache may have been modified but not yet flushed. 
+ } else { + assert(!exists_using_have_coin_in_backend); + } + } } FUZZ_TARGET(coins_view, .init = initialize_coins_view) { FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; CCoinsView backend_coins_view; - TestCoinsView(fuzzed_data_provider, backend_coins_view, /*is_db=*/false); + CCoinsViewCache coins_view_cache{&backend_coins_view, /*deterministic=*/true}; + TestCoinsView(fuzzed_data_provider, coins_view_cache, backend_coins_view, /*is_db=*/false); } FUZZ_TARGET(coins_view_db, .init = initialize_coins_view) @@ -323,6 +358,20 @@ FUZZ_TARGET(coins_view_db, .init = initialize_coins_view) .cache_bytes = 1_MiB, .memory_only = true, }; - CCoinsViewDB coins_db{std::move(db_params), CoinsViewOptions{}}; - TestCoinsView(fuzzed_data_provider, coins_db, /*is_db=*/true); + CCoinsViewDB backend_coins_view{std::move(db_params), CoinsViewOptions{}}; + CCoinsViewCache coins_view_cache{&backend_coins_view, /*deterministic=*/true}; + TestCoinsView(fuzzed_data_provider, coins_view_cache, backend_coins_view, /*is_db=*/true); +} + +// Creates a CoinsViewOverlay and a MutationGuardCoinsViewCache as the base. +// This allows us to exercise all methods on a CoinsViewOverlay, while also +// ensuring that nothing can mutate the underlying cache until Flush or Sync is +// called. 
+FUZZ_TARGET(coins_view_overlay, .init = initialize_coins_view) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + CCoinsView backend_base_coins_view; + MutationGuardCoinsViewCache backend_cache{&backend_base_coins_view, /*deterministic=*/true}; + CoinsViewOverlay coins_view_cache{&backend_cache, /*deterministic=*/true}; + TestCoinsView(fuzzed_data_provider, coins_view_cache, backend_cache, /*is_db=*/false); } diff --git a/src/test/fuzz/coinscache_sim.cpp b/src/test/fuzz/coinscache_sim.cpp index c46c91dc4c19..c8534e4f60c8 100644 --- a/src/test/fuzz/coinscache_sim.cpp +++ b/src/test/fuzz/coinscache_sim.cpp @@ -257,7 +257,9 @@ FUZZ_TARGET(coinscache_sim) // Look up in simulation data. auto sim = lookup(outpointidx); // Look up in real caches. - auto realcoin = caches.back()->GetCoin(data.outpoints[outpointidx]); + auto realcoin = provider.ConsumeBool() ? + caches.back()->PeekCoin(data.outpoints[outpointidx]) : + caches.back()->GetCoin(data.outpoints[outpointidx]); // Compare results. if (!sim.has_value()) { assert(!realcoin); @@ -372,7 +374,11 @@ FUZZ_TARGET(coinscache_sim) [&]() { // Add a cache level (if not already at the max). if (caches.size() != MAX_CACHES) { // Apply to real caches. - caches.emplace_back(new CCoinsViewCache(&*caches.back(), /*deterministic=*/true)); + if (provider.ConsumeBool()) { + caches.emplace_back(new CCoinsViewCache(&*caches.back(), /*deterministic=*/true)); + } else { + caches.emplace_back(new CoinsViewOverlay(&*caches.back(), /*deterministic=*/true)); + } // Apply to simulation data. 
sim_caches[caches.size()].Wipe(); } diff --git a/src/test/fuzz/descriptor_parse.cpp b/src/test/fuzz/descriptor_parse.cpp index eeba5f1369e6..6b3084e23d15 100644 --- a/src/test/fuzz/descriptor_parse.cpp +++ b/src/test/fuzz/descriptor_parse.cpp @@ -61,6 +61,10 @@ static void TestDescriptor(const Descriptor& desc, FlatSigningProvider& sig_prov const bool is_nontop_or_nonsolvable{!*is_solvable || !desc.GetOutputType()}; const bool is_input_size_info_set{max_sat_maxsig && max_sat_nonmaxsig && max_elems}; assert(is_input_size_info_set || is_nontop_or_nonsolvable); + + auto max_key_expr = desc.GetMaxKeyExpr(); + auto key_count = desc.GetKeyCount(); + assert((max_key_expr == 0 && key_count == 0) || max_key_expr + 1 == key_count); } void initialize_descriptor_parse() diff --git a/src/test/fuzz/fees.cpp b/src/test/fuzz/fees.cpp index f295dd12c8d9..1bd5c67a2046 100644 --- a/src/test/fuzz/fees.cpp +++ b/src/test/fuzz/fees.cpp @@ -26,6 +26,6 @@ FUZZ_TARGET(fees) const CAmount rounded_fee = fee_filter_rounder.round(current_minimum_fee); assert(MoneyRange(rounded_fee)); } - const FeeReason fee_reason = fuzzed_data_provider.PickValueInArray({FeeReason::NONE, FeeReason::HALF_ESTIMATE, FeeReason::FULL_ESTIMATE, FeeReason::DOUBLE_ESTIMATE, FeeReason::CONSERVATIVE, FeeReason::MEMPOOL_MIN, FeeReason::PAYTXFEE, FeeReason::FALLBACK, FeeReason::REQUIRED}); + const FeeReason fee_reason = fuzzed_data_provider.PickValueInArray({FeeReason::NONE, FeeReason::HALF_ESTIMATE, FeeReason::FULL_ESTIMATE, FeeReason::DOUBLE_ESTIMATE, FeeReason::CONSERVATIVE, FeeReason::MEMPOOL_MIN, FeeReason::FALLBACK, FeeReason::REQUIRED}); (void)StringForFeeReason(fee_reason); } diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp index d20bf9fbd844..a7934f7e50fd 100644 --- a/src/test/fuzz/miniscript.cpp +++ b/src/test/fuzz/miniscript.cpp @@ -154,10 +154,9 @@ struct ParserContext { return {h.begin(), h.end()}; } - template - std::optional FromString(I first, I last) const { - if (last - first != 
2) return {}; - auto idx = ParseHex(std::string(first, last)); + std::optional FromString(std::span& in) const { + if (in.size() != 2) return {}; + auto idx = ParseHex(std::string(in.begin(), in.end())); if (idx.size() != 1) return {}; return TEST_DATA.dummy_keys[idx[0]]; } diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp index ba052d3c738a..e8daf9d390ac 100644 --- a/src/test/fuzz/rpc.cpp +++ b/src/test/fuzz/rpc.cpp @@ -90,6 +90,7 @@ const std::vector RPC_COMMANDS_NOT_SAFE_FOR_FUZZING{ // RPC commands which are safe for fuzzing. const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{ + "abortprivatebroadcast", "analyzepsbt", "clearbanned", "combinepsbt", @@ -147,6 +148,7 @@ const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{ "getorphantxs", "getpeerinfo", "getprioritisedtransactions", + "getprivatebroadcastinfo", "getrawaddrman", "getrawmempool", "getrawtransaction", diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp index 5d59eb340849..d31563369738 100644 --- a/src/test/fuzz/string.cpp +++ b/src/test/fuzz/string.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -34,8 +35,6 @@ #include #include -enum class FeeEstimateMode; - using common::AmountErrMsg; using common::AmountHighWarn; using common::FeeModeFromString; diff --git a/src/test/fuzz/threadpool.cpp b/src/test/fuzz/threadpool.cpp index 293aa63e0feb..a5b01db13839 100644 --- a/src/test/fuzz/threadpool.cpp +++ b/src/test/fuzz/threadpool.cpp @@ -87,10 +87,10 @@ FUZZ_TARGET(threadpool, .init = setup_threadpool_test) EXCLUSIVE_LOCKS_REQUIRED( std::future fut; if (will_throw) { expected_fail_tasks++; - fut = g_pool.Submit(ThrowTask{}); + fut = *Assert(g_pool.Submit(ThrowTask{})); } else { expected_task_counter++; - fut = g_pool.Submit(CounterTask{task_counter}); + fut = *Assert(g_pool.Submit(CounterTask{task_counter})); } // If caller wants to wait immediately, consume the future here (safe). 
diff --git a/src/test/fuzz/txgraph.cpp b/src/test/fuzz/txgraph.cpp index 59c65bde8b7d..0fc4e047814e 100644 --- a/src/test/fuzz/txgraph.cpp +++ b/src/test/fuzz/txgraph.cpp @@ -325,8 +325,8 @@ FUZZ_TARGET(txgraph) auto max_cluster_count = provider.ConsumeIntegralInRange(1, MAX_CLUSTER_COUNT_LIMIT); /** The maximum total size of transactions in a (non-oversized) cluster. */ auto max_cluster_size = provider.ConsumeIntegralInRange(1, 0x3fffff * MAX_CLUSTER_COUNT_LIMIT); - /** The number of iterations to consider a cluster acceptably linearized. */ - auto acceptable_iters = provider.ConsumeIntegralInRange(0, 10000); + /** The amount of work to consider a cluster acceptably linearized. */ + auto acceptable_cost = provider.ConsumeIntegralInRange(0, 10000); /** The set of uint64_t "txid"s that have been assigned before. */ std::set assigned_txids; @@ -342,7 +342,7 @@ FUZZ_TARGET(txgraph) auto real = MakeTxGraph( /*max_cluster_count=*/max_cluster_count, /*max_cluster_size=*/max_cluster_size, - /*acceptable_iters=*/acceptable_iters, + /*acceptable_cost=*/acceptable_cost, /*fallback_order=*/fallback_order); std::vector sims; @@ -758,9 +758,9 @@ FUZZ_TARGET(txgraph) break; } else if (command-- == 0) { // DoWork. - uint64_t iters = provider.ConsumeIntegralInRange(0, alt ? 10000 : 255); - bool ret = real->DoWork(iters); - uint64_t iters_for_optimal{0}; + uint64_t max_cost = provider.ConsumeIntegralInRange(0, alt ? 10000 : 255); + bool ret = real->DoWork(max_cost); + uint64_t cost_for_optimal{0}; for (unsigned level = 0; level < sims.size(); ++level) { // DoWork() will not optimize oversized levels, or the main level if a builder // is present. Note that this impacts the DoWork() return value, as true means @@ -773,24 +773,24 @@ FUZZ_TARGET(txgraph) if (ret) { sims[level].real_is_optimal = true; } - // Compute how many iterations would be needed to make everything optimal. + // Compute how much work would be needed to make everything optimal. 
for (auto component : sims[level].GetComponents()) { - auto iters_opt_this_cluster = MaxOptimalLinearizationIters(component.Count()); - if (iters_opt_this_cluster > acceptable_iters) { - // If the number of iterations required to linearize this cluster - // optimally exceeds acceptable_iters, DoWork() may process it in two + auto cost_opt_this_cluster = MaxOptimalLinearizationCost(component.Count()); + if (cost_opt_this_cluster > acceptable_cost) { + // If the amount of work required to linearize this cluster + // optimally exceeds acceptable_cost, DoWork() may process it in two // stages: once to acceptable, and once to optimal. - iters_for_optimal += iters_opt_this_cluster + acceptable_iters; + cost_for_optimal += cost_opt_this_cluster + acceptable_cost; } else { - iters_for_optimal += iters_opt_this_cluster; + cost_for_optimal += cost_opt_this_cluster; } } } if (!ret) { - // DoWork can only have more work left if the requested number of iterations + // DoWork can only have more work left if the requested amount of work // was insufficient to linearize everything optimally within the levels it is // allowed to touch. - assert(iters <= iters_for_optimal); + assert(max_cost <= cost_for_optimal); } break; } else if (sims.size() == 2 && !sims[0].IsOversized() && !sims[1].IsOversized() && command-- == 0) { @@ -1165,7 +1165,7 @@ FUZZ_TARGET(txgraph) auto real_redo = MakeTxGraph( /*max_cluster_count=*/max_cluster_count, /*max_cluster_size=*/max_cluster_size, - /*acceptable_iters=*/acceptable_iters, + /*acceptable_cost=*/acceptable_cost, /*fallback_order=*/fallback_order); /** Vector (indexed by SimTxGraph::Pos) of TxObjects in real_redo). 
*/ std::vector> txobjects_redo; diff --git a/src/test/getarg_tests.cpp b/src/test/getarg_tests.cpp index ee369272370a..d349ceea44b6 100644 --- a/src/test/getarg_tests.cpp +++ b/src/test/getarg_tests.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -247,26 +248,41 @@ BOOST_AUTO_TEST_CASE(intarg) const auto foo = std::make_pair("-foo", ArgsManager::ALLOW_ANY); const auto bar = std::make_pair("-bar", ArgsManager::ALLOW_ANY); SetupArgs(local_args, {foo, bar}); + ResetArgs(local_args, ""); + BOOST_CHECK(!local_args.GetArg("-foo").has_value()); + BOOST_CHECK(!local_args.GetArg("-bar").has_value()); BOOST_CHECK_EQUAL(local_args.GetIntArg("-foo", 11), 11); BOOST_CHECK_EQUAL(local_args.GetIntArg("-foo", 0), 0); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar", uint8_t{222}), 222); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar", uint8_t{0}), 0); ResetArgs(local_args, "-foo -bar"); + BOOST_CHECK_EQUAL(local_args.GetArg("-foo"), 0); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar"), 0); BOOST_CHECK_EQUAL(local_args.GetIntArg("-foo", 11), 0); - BOOST_CHECK_EQUAL(local_args.GetIntArg("-bar", 11), 0); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar", uint8_t{222}), 0); // Check under-/overflow behavior. 
ResetArgs(local_args, "-foo=-9223372036854775809 -bar=9223372036854775808"); + BOOST_CHECK_EQUAL(local_args.GetArg("-foo"), std::numeric_limits::min()); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar"), std::numeric_limits::max()); BOOST_CHECK_EQUAL(local_args.GetIntArg("-foo", 0), std::numeric_limits::min()); BOOST_CHECK_EQUAL(local_args.GetIntArg("-bar", 0), std::numeric_limits::max()); + BOOST_CHECK_EQUAL(local_args.GetArg("-foo", uint8_t{0}), std::numeric_limits::min()); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar", uint8_t{0}), std::numeric_limits::max()); ResetArgs(local_args, "-foo=11 -bar=12"); + BOOST_CHECK_EQUAL(local_args.GetArg("-foo"), 11); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar"), 12); BOOST_CHECK_EQUAL(local_args.GetIntArg("-foo", 0), 11); - BOOST_CHECK_EQUAL(local_args.GetIntArg("-bar", 11), 12); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar", uint8_t{11}), 12); ResetArgs(local_args, "-foo=NaN -bar=NotANumber"); + BOOST_CHECK_EQUAL(local_args.GetArg("-foo"), 0); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar"), 0); BOOST_CHECK_EQUAL(local_args.GetIntArg("-foo", 1), 0); - BOOST_CHECK_EQUAL(local_args.GetIntArg("-bar", 11), 0); + BOOST_CHECK_EQUAL(local_args.GetArg("-bar", uint8_t{11}), 0); } BOOST_AUTO_TEST_CASE(patharg) diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index f9426fa31147..bba612f8b46b 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/src/test/httpserver_tests.cpp b/src/test/httpserver_tests.cpp index 2770c0c3729c..030d48db4ab8 100644 --- a/src/test/httpserver_tests.cpp +++ b/src/test/httpserver_tests.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include +#include #include #include diff --git a/src/test/interfaces_tests.cpp b/src/test/interfaces_tests.cpp index da0f5eeccef0..1a98256ce2e5 100644 --- a/src/test/interfaces_tests.cpp +++ b/src/test/interfaces_tests.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include