diff --git a/.github/ci-test-each-commit-exec.py b/.github/ci-test-each-commit-exec.py
index cdbf4ff438d1..aed1526b6436 100755
--- a/.github/ci-test-each-commit-exec.py
+++ b/.github/ci-test-each-commit-exec.py
@@ -62,6 +62,7 @@ def main():
f"./{build_dir}/test/functional/test_runner.py",
"-j",
str(num_procs * 2),
+ "--failfast",
"--combinedlogslen=99999999",
])
diff --git a/.github/ci-windows-cross.py b/.github/ci-windows-cross.py
index 13ca3b49456a..90cd59c7fe7d 100755
--- a/.github/ci-windows-cross.py
+++ b/.github/ci-windows-cross.py
@@ -134,13 +134,13 @@ def run_unit_tests():
def main():
parser = argparse.ArgumentParser(description="Utility to run Windows CI steps.")
- steps = [
- "print_version",
- "check_manifests",
- "prepare_tests",
- "run_unit_tests",
- "run_functional_tests",
- ]
+ steps = list(map(lambda f: f.__name__, [
+ print_version,
+ check_manifests,
+ prepare_tests,
+ run_unit_tests,
+ run_functional_tests,
+ ]))
parser.add_argument("step", choices=steps, help="CI step to perform.")
args = parser.parse_args()
@@ -149,16 +149,7 @@ def main():
str(Path.cwd() / "previous_releases"),
)
- if args.step == "print_version":
- print_version()
- elif args.step == "check_manifests":
- check_manifests()
- elif args.step == "prepare_tests":
- prepare_tests()
- elif args.step == "run_unit_tests":
- run_unit_tests()
- elif args.step == "run_functional_tests":
- run_functional_tests()
+ exec(f'{args.step}()')
if __name__ == "__main__":
diff --git a/.github/ci-windows.py b/.github/ci-windows.py
index caa2d52c7754..16d7db7a2ed2 100755
--- a/.github/ci-windows.py
+++ b/.github/ci-windows.py
@@ -38,6 +38,29 @@ def run(cmd, **kwargs):
}
+def github_import_vs_env(_ci_type):
+ vswhere_path = Path(os.environ["ProgramFiles(x86)"]) / "Microsoft Visual Studio" / "Installer" / "vswhere.exe"
+ installation_path = run(
+ [str(vswhere_path), "-latest", "-property", "installationPath"],
+ capture_output=True,
+ text=True,
+ ).stdout.strip()
+ vsdevcmd = Path(installation_path) / "Common7" / "Tools" / "vsdevcmd.bat"
+ comspec = os.environ["COMSPEC"]
+ output = run(
+ f'"{comspec}" /s /c ""{vsdevcmd}" -arch=x64 -no_logo && set"',
+ capture_output=True,
+ text=True,
+ ).stdout
+ github_env = os.environ["GITHUB_ENV"]
+ with open(github_env, "a") as env_file:
+ for line in output.splitlines():
+ if "=" not in line:
+ continue
+ name, value = line.split("=", 1)
+ env_file.write(f"{name}={value}\n")
+
+
def generate(ci_type):
command = [
"cmake",
@@ -50,7 +73,7 @@ def generate(ci_type):
run(command)
-def build():
+def build(_ci_type):
command = [
"cmake",
"--build",
@@ -180,26 +203,18 @@ def run_tests(ci_type):
def main():
parser = argparse.ArgumentParser(description="Utility to run Windows CI steps.")
parser.add_argument("ci_type", choices=GENERATE_OPTIONS, help="CI type to run.")
- steps = [
- "generate",
- "build",
- "check_manifests",
- "prepare_tests",
- "run_tests",
- ]
+ steps = list(map(lambda f: f.__name__, [
+ github_import_vs_env,
+ generate,
+ build,
+ check_manifests,
+ prepare_tests,
+ run_tests,
+ ]))
parser.add_argument("step", choices=steps, help="CI step to perform.")
args = parser.parse_args()
- if args.step == "generate":
- generate(args.ci_type)
- elif args.step == "build":
- build()
- elif args.step == "check_manifests":
- check_manifests(args.ci_type)
- elif args.step == "prepare_tests":
- prepare_tests(args.ci_type)
- elif args.step == "run_tests":
- run_tests(args.ci_type)
+ exec(f'{args.step}("{args.ci_type}")')
if __name__ == "__main__":
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9bc798dc976c..1fedde674e21 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -56,12 +56,13 @@ jobs:
fi
test-each-commit:
- name: 'test max 6 ancestor commits'
- runs-on: ubuntu-24.04
- if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1
- timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. Assuming a worst case time of 1 hour per commit, this leads to a --max-count=6 below.
+ name: 'test ancestor commits'
+ needs: runners
+ runs-on: ${{ needs.runners.outputs.provider == 'cirrus' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' || 'ubuntu-24.04' }}
env:
- MAX_COUNT: 6 # Keep in sync with name above
+ TEST_RUNNER_PORT_MIN: "14000" # Use a larger port, to avoid colliding with CIRRUS_CACHE_HOST port 12321.
+ if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1
+ timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes.
steps:
- name: Determine fetch depth
run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV"
@@ -72,25 +73,35 @@ jobs:
fetch-depth: ${{ env.FETCH_DEPTH }}
- name: Determine commit range
run: |
- # Checkout HEAD~ and find the test base commit
- # Checkout HEAD~ because it would be wasteful to rerun tests on the PR
- # head commit that are already run by other jobs.
+ # Checkout HEAD~ and find the test base commit.
+ # Checkout HEAD~ because it would be wasteful to rerun
+ # tests on the PR head commit that are already run
+ # by other jobs.
git checkout HEAD~
- # Figure out test base commit by listing ancestors of HEAD, excluding
- # ancestors of the most recent merge commit, limiting the list to the
- # newest MAX_COUNT ancestors, ordering it from oldest to newest, and
- # taking the first one.
+ # Moreover, pull requests that contain a merge commit
+ # are generally draft pull requests that merge in other
+ # pull requests, so only check the relevant commits
+ # after the last merge commit. A merge commit could
+ # also be a subtree merge commit, which may be
+ # worthwhile to check. However, it is rare that the
+ # subtree merge commit is not the top commit (which
+ # would be skipped anyway by this task, because it is
+ # run by all other tasks). Also, `git rebase --exec`
+ # does not work on merge commits, so if this was
+ # important to check, the logic would have to be
+ # rewritten.
#
- # If the branch contains up to MAX_COUNT ancestor commits after the
- # most recent merge commit, all of those commits will be tested. If it
- # contains more, only the most recent MAX_COUNT commits will be
- # tested.
+ # Figure out test base commit by listing ancestors of
+ # HEAD, excluding ancestors of the most recent merge
+ # commit, ordering them from oldest to newest, and
+ # taking the first one.
#
- # In the command below, the ^@ suffix is used to refer to all parents
- # of the merge commit as described in:
+ # In the command below, the ^@ suffix is used to refer
+ # to all parents of the merge commit as described in:
# https://git-scm.com/docs/git-rev-parse#_other_rev_parent_shorthand_notations
- # and the ^ prefix is used to exclude these parents and all their
- # ancestors from the rev-list output as described in:
+ # and the ^ prefix is used to exclude these parents
+ # and all their ancestors from the rev-list output
+ # as described in:
# https://git-scm.com/docs/git-rev-list
MERGE_BASE=$(git rev-list -n1 --merges HEAD)
EXCLUDE_MERGE_BASE_ANCESTORS=
@@ -98,7 +109,7 @@ jobs:
if test -n "$MERGE_BASE"; then
EXCLUDE_MERGE_BASE_ANCESTORS=^${MERGE_BASE}^@
fi
- echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD $EXCLUDE_MERGE_BASE_ANCESTORS | head -1)" >> "$GITHUB_ENV"
+ echo "TEST_BASE=$(git rev-list -n${{ github.event.pull_request.commits }} --reverse HEAD $EXCLUDE_MERGE_BASE_ANCESTORS | head -1)" >> "$GITHUB_ENV"
- run: |
git fetch origin "${GITHUB_BASE_REF}"
git config user.email "ci@example.com"
@@ -227,26 +238,19 @@ jobs:
- *CHECKOUT
- - &SET_UP_VS
- name: Set up VS Developer Prompt
- shell: pwsh -Command "$PSVersionTable; $PSNativeCommandUseErrorActionPreference = $true; $ErrorActionPreference = 'Stop'; & '{0}'"
- run: |
- $vswherePath = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe"
- $installationPath = & $vswherePath -latest -property installationPath
- & "${env:COMSPEC}" /s /c "`"$installationPath\Common7\Tools\vsdevcmd.bat`" -arch=x64 -no_logo && set" | foreach-object {
- $name, $value = $_ -split '=', 2
- echo "$name=$value" >> $env:GITHUB_ENV
- }
+ - &IMPORT_VS_ENV
+ name: Import Visual Studio env vars
+ run: py -3 .github/ci-windows.py "standard" github_import_vs_env
- name: Get tool information
- shell: pwsh
run: |
- cmake -version | Tee-Object -FilePath "cmake_version"
- Write-Output "---"
- msbuild -version | Tee-Object -FilePath "msbuild_version"
- $env:VCToolsVersion | Tee-Object -FilePath "toolset_version"
+ set -o errexit -o pipefail -o xtrace -o nounset
+
+ cmake -version | tee cmake_version
+ echo '---'
+ msbuild.exe -version | tee msbuild_version
+ echo "${VCToolsVersion-}" | tee toolset_version
py -3 --version
- Write-Host "PowerShell version $($PSVersionTable.PSVersion.ToString())"
bash --version
- name: Using vcpkg with MSBuild
@@ -287,7 +291,8 @@ jobs:
- name: Save vcpkg tools cache
uses: actions/cache/save@v5
- if: github.event_name != 'pull_request' && github.ref_name == github.event.repository.default_branch && steps.vcpkg-tools-cache.outputs.cache-hit != 'true'
+ # Only save cache from one job as they share tools. If the matrix is expanded to jobs with unique tools, this may need amending.
+ if: github.event_name != 'pull_request' && github.ref_name == github.event.repository.default_branch && steps.vcpkg-tools-cache.outputs.cache-hit != 'true' && matrix.job-type == 'standard'
with:
path: C:/vcpkg/downloads/tools
key: ${{ github.job }}-vcpkg-tools-${{ github.run_id }}
@@ -418,7 +423,7 @@ jobs:
- name: Run bitcoind.exe
run: py -3 .github/ci-windows-cross.py print_version
- - *SET_UP_VS
+ - *IMPORT_VS_ENV
- name: Check executable manifests
run: py -3 .github/ci-windows-cross.py check_manifests
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f992a8d6af4e..f0f101b2098b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -119,6 +119,7 @@ endif()
cmake_dependent_option(BUILD_WALLET_TOOL "Build bitcoin-wallet tool." ${BUILD_TESTS} "ENABLE_WALLET" OFF)
option(REDUCE_EXPORTS "Attempt to reduce exported symbols in the resulting executables." OFF)
+option(CMAKE_COMPILE_WARNING_AS_ERROR "Treat compiler warnings as errors." OFF)
option(WITH_CCACHE "Attempt to use ccache for compiling." ON)
option(WITH_ZMQ "Enable ZMQ notifications." OFF)
@@ -126,6 +127,8 @@ if(WITH_ZMQ)
find_package(ZeroMQ 4.0.0 MODULE REQUIRED)
endif()
+option(WITH_EMBEDDED_ASMAP "Embed default ASMap data." ON)
+
option(WITH_USDT "Enable tracepoints for Userspace, Statically Defined Tracing." OFF)
if(WITH_USDT)
find_package(USDT MODULE REQUIRED)
@@ -215,6 +218,7 @@ if(BUILD_FOR_FUZZING)
set(BUILD_GUI OFF)
set(ENABLE_EXTERNAL_SIGNER OFF)
set(WITH_ZMQ OFF)
+ set(WITH_EMBEDDED_ASMAP OFF)
set(BUILD_TESTS OFF)
set(BUILD_GUI_TESTS OFF)
set(BUILD_BENCH OFF)
@@ -653,6 +657,7 @@ else()
set(ipc_status OFF)
endif()
message(" IPC ................................. ${ipc_status}")
+message(" Embedded ASMap ...................... ${WITH_EMBEDDED_ASMAP}")
message(" USDT tracing ........................ ${WITH_USDT}")
message(" QR code (GUI) ....................... ${WITH_QRENCODE}")
message(" DBus (GUI) .......................... ${WITH_DBUS}")
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index 0e732240cce1..2465c70bb487 100755
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
@@ -19,7 +19,7 @@ else
fi
export CONTAINER_NAME=ci_native_asan
-export APT_LLVM_V="21"
+export APT_LLVM_V="22"
export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev mold python3-zmq qt6-base-dev qt6-tools-dev qt6-l10n-tools libevent-dev libboost-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE} libcapnp-dev capnproto python3-pip"
export PIP_PACKAGES="--break-system-packages pycapnp"
export NO_DEPENDS=1
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index b3ad36a8bab7..cdfe2a0d35b3 100755
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
@@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8
export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
export CONTAINER_NAME=ci_native_fuzz
-export APT_LLVM_V="21"
+export APT_LLVM_V="22"
export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libevent-dev libboost-dev libsqlite3-dev libcapnp-dev capnproto"
export NO_DEPENDS=1
export RUN_UNIT_TESTS=false
diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh
index 7529bbc69f78..c6923896c562 100755
--- a/ci/test/00_setup_env_native_fuzz_with_msan.sh
+++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh
@@ -7,7 +7,7 @@
export LC_ALL=C.UTF-8
export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
-export APT_LLVM_V="21"
+export APT_LLVM_V="22"
LIBCXX_DIR="/cxx_build/"
export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls"
# -lstdc++ to resolve link issues due to upstream packaging
diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh
index c0559fc69cc8..4a17fb496d60 100755
--- a/ci/test/00_setup_env_native_msan.sh
+++ b/ci/test/00_setup_env_native_msan.sh
@@ -7,7 +7,7 @@
export LC_ALL=C.UTF-8
export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
-export APT_LLVM_V="21"
+export APT_LLVM_V="22"
LIBCXX_DIR="/cxx_build/"
export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls"
LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument"
diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh
index 28446a705dc3..0b21ab226a91 100755
--- a/ci/test/00_setup_env_native_nowallet.sh
+++ b/ci/test/00_setup_env_native_nowallet.sh
@@ -17,4 +17,5 @@ export BITCOIN_CONFIG="\
--preset=dev-mode \
-DREDUCE_EXPORTS=ON \
-DENABLE_WALLET=OFF \
+ -DWITH_EMBEDDED_ASMAP=OFF \
"
diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh
index b0429ce8fe79..29dd06bf0d9b 100755
--- a/ci/test/00_setup_env_native_tsan.sh
+++ b/ci/test/00_setup_env_native_tsan.sh
@@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_tsan
export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
-export APT_LLVM_V="21"
+export APT_LLVM_V="22"
LIBCXX_DIR="/cxx_build/"
LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument"
export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq python3-pip"
diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh
index d62221a7b77f..f8d337f6c902 100755
--- a/ci/test/01_base_install.sh
+++ b/ci/test/01_base_install.sh
@@ -22,11 +22,6 @@ if [ -n "$DPKG_ADD_ARCH" ]; then
fi
if [ -n "${APT_LLVM_V}" ]; then
- # Temporarily work around Sequoia PGP policy deadline for legacy repositories.
- # See https://github.com/llvm/llvm-project/issues/153385.
- if [ -f /usr/share/apt/default-sequoia.config ]; then
- sed -i 's/\(sha1\.second_preimage_resistance =\).*/\1 9999-01-01/' /usr/share/apt/default-sequoia.config
- fi
${CI_RETRY_EXE} apt-get update
${CI_RETRY_EXE} apt-get install curl -y
curl "https://apt.llvm.org/llvm-snapshot.gpg.key" | tee "/etc/apt/trusted.gpg.d/apt.llvm.org.asc"
@@ -62,7 +57,7 @@ if [ -n "$PIP_PACKAGES" ]; then
fi
if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then
- ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.5" /llvm-project
+ ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-22.1.0" /llvm-project
cmake -G Ninja -B /cxx_build/ \
-DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \
diff --git a/ci/test/wrap-valgrind.sh b/ci/test/wrap-valgrind.sh
index e351cf24839b..4ed3f2d66c54 100755
--- a/ci/test/wrap-valgrind.sh
+++ b/ci/test/wrap-valgrind.sh
@@ -12,7 +12,7 @@ for b_name in "${BASE_OUTDIR}/bin"/*; do
echo "Wrap $b ..."
mv "$b" "${b}_orig"
echo '#!/usr/bin/env bash' > "$b"
- echo "exec valgrind --gen-suppressions=all --quiet --error-exitcode=1 --suppressions=${BASE_ROOT_DIR}/contrib/valgrind.supp \"${b}_orig\" \"\$@\"" >> "$b"
+ echo "exec valgrind --gen-suppressions=all --quiet --error-exitcode=1 --suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/valgrind.supp \"${b}_orig\" \"\$@\"" >> "$b"
chmod +x "$b"
done
done
diff --git a/cmake/module/AddBoostIfNeeded.cmake b/cmake/module/AddBoostIfNeeded.cmake
index b3f248009d8c..80a6d2e89117 100644
--- a/cmake/module/AddBoostIfNeeded.cmake
+++ b/cmake/module/AddBoostIfNeeded.cmake
@@ -32,12 +32,14 @@ function(add_boost_if_needed)
find_package(Boost 1.74.0 REQUIRED CONFIG)
mark_as_advanced(Boost_INCLUDE_DIR boost_headers_DIR)
# Workaround for a bug in NetBSD pkgsrc.
- # See: https://github.com/NetBSD/pkgsrc/issues/167.
+ # See https://gnats.netbsd.org/59856.
if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
get_filename_component(_boost_include_dir "${boost_headers_DIR}/../../../include/" ABSOLUTE)
- set_target_properties(Boost::headers PROPERTIES
- INTERFACE_INCLUDE_DIRECTORIES ${_boost_include_dir}
- )
+ if(_boost_include_dir MATCHES "^/usr/pkg/")
+ set_target_properties(Boost::headers PROPERTIES
+ INTERFACE_INCLUDE_DIRECTORIES ${_boost_include_dir}
+ )
+ endif()
unset(_boost_include_dir)
endif()
set_target_properties(Boost::headers PROPERTIES IMPORTED_GLOBAL TRUE)
diff --git a/cmake/script/GenerateHeaderFromRaw.cmake b/cmake/script/GenerateHeaderFromRaw.cmake
index d373d1c4f870..2c40e419f60b 100644
--- a/cmake/script/GenerateHeaderFromRaw.cmake
+++ b/cmake/script/GenerateHeaderFromRaw.cmake
@@ -18,6 +18,5 @@ ${formatted_bytes}
};
inline constexpr std::span ${raw_source_basename}{detail_${raw_source_basename}_raw};
-}
-")
+}")
file(WRITE ${HEADER_PATH} "${header_content}")
diff --git a/contrib/README.md b/contrib/README.md
index f23d7ac557bb..037ea2f0690d 100644
--- a/contrib/README.md
+++ b/contrib/README.md
@@ -18,6 +18,9 @@ A Linux bash script that will set up traffic control (tc) to limit the outgoing
### [Seeds](/contrib/seeds) ###
Utility to generate the pnSeed[] array that is compiled into the client.
+### [ASMap](/contrib/asmap) ###
+Utilities to analyze and process asmap files.
+
Build Tools and Keys
---------------------
diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md
index 833d54037f11..a157acb4b75c 100644
--- a/contrib/guix/INSTALL.md
+++ b/contrib/guix/INSTALL.md
@@ -69,13 +69,13 @@ distros, please see: https://repology.org/project/guix/versions
### Debian / Ubuntu
-Guix is available as a distribution package in various versions of [Debian
-](https://packages.debian.org/search?keywords=guix) and [Ubuntu
-](https://packages.ubuntu.com/search?keywords=guix).
+Currently, the `guix` package is no longer present in recent Debian or Ubuntu
+repositories. Any other installation option mentioned in this document may be
+used.
-To install:
+If you previously installed `guix` via `apt`, you can remove it with:
```sh
-sudo apt install guix
+sudo apt purge guix
```
### Arch Linux
diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign
index ec8fbc0cf96b..791b75c540bc 100755
--- a/contrib/guix/guix-codesign
+++ b/contrib/guix/guix-codesign
@@ -289,7 +289,7 @@ INFO: Codesigning ${VERSION:?not set} for platform triple ${HOST:?not set}:
EOF
- # Run the build script 'contrib/guix/libexec/build.sh' in the build
+ # Run the build script 'contrib/guix/libexec/codesign.sh' in the build
# container specified by 'contrib/guix/manifest.scm'.
#
# Explanation of `guix shell` flags:
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 1eaa86aeb3ee..072e5b91ba61 100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -4,6 +4,9 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
set -e -o pipefail
+
+# Environment variables for determinism
+export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name"
export TZ=UTC
# Although Guix _does_ set umask when building its own packages (in our case,
@@ -157,10 +160,6 @@ case "$HOST" in
;;
esac
-# Environment variables for determinism
-export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name"
-export TZ="UTC"
-
####################
# Depends Building #
####################
@@ -401,12 +400,14 @@ mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \
|| ( rm -rf "$ACTUAL_OUTDIR" && exit 1 )
(
+ tmp="$(mktemp)"
cd /outdir-base
{
echo "$GIT_ARCHIVE"
find "$ACTUAL_OUTDIR" -type f
} | xargs realpath --relative-base="$PWD" \
- | xargs sha256sum \
- | sort -k2 \
- | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part
+ | xargs sha256sum \
+ | sort -k2 \
+ > "$tmp";
+ mv "$tmp" "$ACTUAL_OUTDIR"/SHA256SUMS.part
)
diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh
index 9ea683b9fa9c..9b7f085d3ab9 100755
--- a/contrib/guix/libexec/codesign.sh
+++ b/contrib/guix/libexec/codesign.sh
@@ -141,6 +141,7 @@ mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \
|| ( rm -rf "$ACTUAL_OUTDIR" && exit 1 )
(
+ tmp="$(mktemp)"
cd /outdir-base
{
echo "$CODESIGNING_TARBALL"
@@ -149,5 +150,6 @@ mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \
} | xargs realpath --relative-base="$PWD" \
| xargs sha256sum \
| sort -k2 \
- | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part
+ > "$tmp";
+ mv "$tmp" "$ACTUAL_OUTDIR"/SHA256SUMS.part
)
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index ea1ffe5d983e..9cadd410537c 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -521,36 +521,6 @@ inspecting signatures in Mach-O binaries.")
(("^install-others =.*$")
(string-append "install-others = " out "/etc/rpc\n")))))))))))))
-;; The sponge tool from moreutils.
-(define-public sponge
- (package
- (name "sponge")
- (version "0.69")
- (source (origin
- (method url-fetch)
- (uri (string-append
- "https://git.joeyh.name/index.cgi/moreutils.git/snapshot/
- moreutils-" version ".tar.gz"))
- (file-name (string-append "moreutils-" version ".tar.gz"))
- (sha256
- (base32
- "1l859qnzccslvxlh5ghn863bkq2vgmqgnik6jr21b9kc6ljmsy8g"))))
- (build-system gnu-build-system)
- (arguments
- (list #:phases
- #~(modify-phases %standard-phases
- (delete 'configure)
- (replace 'install
- (lambda* (#:key outputs #:allow-other-keys)
- (let ((bin (string-append (assoc-ref outputs "out") "/bin")))
- (install-file "sponge" bin)))))
- #:make-flags
- #~(list "sponge" (string-append "CC=" #$(cc-for-target)))))
- (home-page "https://joeyh.name/code/moreutils/")
- (synopsis "Miscellaneous general-purpose command-line tools")
- (description "Just sponge")
- (license license:gpl2+)))
-
(packages->manifest
(append
(list ;; The Basics
@@ -565,7 +535,6 @@ inspecting signatures in Mach-O binaries.")
patch
gawk
sed
- sponge
;; Compression and archiving
tar
gzip
diff --git a/doc/asmap-data.md b/doc/asmap-data.md
new file mode 100644
index 000000000000..09e2f95c9749
--- /dev/null
+++ b/doc/asmap-data.md
@@ -0,0 +1,59 @@
+# Embedded ASMap data
+
+## Background
+
+The ASMap feature (available via `-asmap`) makes it possible to use a peer's AS Number (ASN), an ISP/hoster identifier,
+in netgroup bucketing in order to ensure a higher diversity in the peer
+set. When not using this, the default behavior is to have the buckets formed
+based on IP prefixes but this does not
+prevent having connections dominated by peers at the same large-scale hoster,
+for example, since such companies usually control many diverse IP ranges.
+In order to use ASMap, the mapping between IP prefixes and AS Numbers needs
+to be available. This mapping data can be provided through an external file
+but Bitcoin Core also embeds a default map in its builds to make the feature
+available to users when they are unable to provide a file.
+
+## Data sourcing and tools
+
+ASMap is a mapping of IP prefix to ASN, essentially a snapshot of the
+internet routing table at some point in time. Due to the high volatility
+of parts of this routing table and the known vulnerabilities in the BGP
+protocol it is challenging to collect this data and prove its consistency.
+Sourcing the data from a single trusted source is problematic as well.
+
+The [Kartograf](https://github.com/asmap/kartograf) tool was created to
+deal with these uncertainties as well as possible. The mapping data is sourced from RPKI, IRR and
+Routeviews. The former two are themselves used as security mechanisms to
+protect against BGP security issues, which is why they are considered more secure and
+their data takes precedence. The latter is a trusted collector of BGP traffic
+and only used for IP space that is not covered by RPKI and IRR.
+
+The process in which the Kartograf project parses, processes and merges these
+data sources is deterministic. Given the raw download files from these
+different sources, anyone can build their own map file and verify the content
+matches with other users' results. Before the map is usable by Bitcoin Core
+it needs to be encoded as well. This is done using `asmap-tool.py` in `contrib/asmap`
+and this step is deterministic as well.
+
+When it comes to obtaining the initial input data, the high volatility remains
+a challenge if users don't want to trust a single creator of the used ASMap file.
+To overcome this, multiple users can start the download process at the exact
+same time which leads to a high likelihood that their downloaded data will be
+similar enough that they receive the same output at the end of the process.
+This process is regularly coordinated at the [asmap-data](https://github.com/asmap/asmap-data)
+project. If enough participants have joined the effort (five or more are recommended) and a majority of the
+participants have received the same result, the resulting ASMap file is added
+to the repository for public use. Files will not be merged to the repository
+without at least two additional reviewers confirming that the process described
+above was followed as expected and that the encoding step yielded the same
+file hash. New files are created on an ongoing basis but without any central planning
+or an explicit schedule.
+
+## Release process
+
+As an upcoming release approaches the embedded ASMap data should be updated
+by replacing the `ip_asn.dat` with a newer ASMap file from the asmap-data
+repository so that its data is embedded in the release. Ideally, a recently created
+file already exists that can be selected for the upcoming release. Alternatively,
+a new creation process can be initiated with the goal of obtaining a fresh map
+for use in the upcoming release.
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 97ffc0cd1b69..31ffd370c2f9 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -384,13 +384,13 @@ other input.
Valgrind is a programming tool for memory debugging, memory leak detection, and
profiling. The repo contains a Valgrind suppressions file
-([`valgrind.supp`](https://github.com/bitcoin/bitcoin/blob/master/contrib/valgrind.supp))
+([`valgrind.supp`](/test/sanitizer_suppressions/valgrind.supp))
which includes known Valgrind warnings in our dependencies that cannot be fixed
in-tree. Example use:
```shell
-$ valgrind --suppressions=contrib/valgrind.supp build/bin/test_bitcoin
-$ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \
+$ valgrind --suppressions=test/sanitizer_suppressions/valgrind.supp build/bin/test_bitcoin
+$ valgrind --suppressions=test/sanitizer_suppressions/valgrind.supp --leak-check=full \
--show-leak-kinds=all build/bin/test_bitcoin --log_level=test_suite
$ valgrind -v --leak-check=full build/bin/bitcoind -printtoconsole
$ ./build/test/functional/test_runner.py --valgrind
diff --git a/doc/files.md b/doc/files.md
index 12c6cefbc6d6..27fe5615dddc 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -55,6 +55,7 @@ Subdirectory | File(s) | Description
`blocks/` | `xor.dat` | Rolling XOR pattern for block and undo data files
`chainstate/` | LevelDB database | Blockchain state (a compact representation of all currently unspent transaction outputs (UTXOs) and metadata about the transactions they are from)
`indexes/txindex/` | LevelDB database | Transaction index; *optional*, used if `-txindex=1`
+`indexes/txospenderindex/` | LevelDB database | Transaction spender index; *optional*, used if `-txospenderindex=1`
`indexes/blockfilter/basic/db/` | LevelDB database | Blockfilter index LevelDB database for the basic filtertype; *optional*, used if `-blockfilterindex=basic`
`indexes/blockfilter/basic/` | `fltrNNNNN.dat`[\[2\]](#note2) | Blockfilter index filters for the basic filtertype; *optional*, used if `-blockfilterindex=basic`
`indexes/coinstatsindex/db/` | LevelDB database | Coinstats index; *optional*, used if `-coinstatsindex=1`
diff --git a/doc/productivity.md b/doc/productivity.md
index 4509e23cde1f..c436811a0a86 100644
--- a/doc/productivity.md
+++ b/doc/productivity.md
@@ -191,7 +191,12 @@ Then a simple `git pr 12345` will fetch and check out that pr from upstream.
### Diff the diffs with `git range-diff`
-It is very common for contributors to rebase their pull requests, or make changes to commits (perhaps in response to review) that are not at the head of their branch. This poses a problem for reviewers as when the contributor force pushes, the reviewer is no longer sure that his previous reviews of commits are still valid (as the commit hashes can now be different even though the diff is semantically the same). [git range-diff](https://git-scm.com/docs/git-range-diff) (Git >= 2.19) can help solve this problem by diffing the diffs.
+It is very common for contributors to rebase their pull requests, or make changes to commits (perhaps in response to review) that are not at the head of their branch. This poses a problem for reviewers as when the contributor force pushes, the reviewer is no longer sure that their previous reviews of commits are still valid (as the commit hashes can now be different even though the diff is semantically the same). [git range-diff](https://git-scm.com/docs/git-range-diff) (Git >= 2.19) can help solve this problem by diffing the diffs.
+
+> [!NOTE]
+> If `git range-diff` cannot match a commit in the old range to a commit in the new range, it will show it as "removed" (`<`) and "added" (`>`), without showing the patch contents.
+> This does not mean there were no code changes.
+> It means the commit was considered unrelated, and should be reviewed in full like a new commit.
For example, to identify the differences between your previously reviewed diffs P1-5, and the new diffs P1-2,N3-4 as illustrated below:
```
@@ -207,6 +212,12 @@ You can do:
git range-diff master previously-reviewed-head new-head
```
+If you expected `git range-diff` to match a commit, but it shows it as a deletion and an addition, try re-running with a higher creation factor:
+
+```sh
+git range-diff --creation-factor=95 master previously-reviewed-head new-head
+```
+
Note that `git range-diff` also works for rebases:
```
diff --git a/doc/release-notes-24539.md b/doc/release-notes-24539.md
new file mode 100644
index 000000000000..63b4d70eafd7
--- /dev/null
+++ b/doc/release-notes-24539.md
@@ -0,0 +1,14 @@
+New settings
+------------
+- `-txospenderindex` enables the creation of a transaction output spender
+ index that, if present, will be scanned by `gettxspendingprevout` if a
+ spending transaction was not found in the mempool.
+ (#24539)
+
+Updated RPCs
+------------
+- `gettxspendingprevout` has 2 new optional arguments: `mempool_only` and `return_spending_tx`.
+ If `mempool_only` is true it will limit scans to the mempool even if `txospenderindex` is available.
+ If `return_spending_tx` is true, the full spending tx will be returned.
+ In addition if `txospenderindex` is available and a confirmed spending transaction is found,
+ its block hash will be returned. (#24539)
diff --git a/doc/release-notes-29415.md b/doc/release-notes-29415.md
index d5040a3193d8..c0e0f3dc8881 100644
--- a/doc/release-notes-29415.md
+++ b/doc/release-notes-29415.md
@@ -12,3 +12,8 @@ P2P and network changes
2. If the originator sends two otherwise unrelated transactions, they
will not be linkable. This is because a separate connection is used
for broadcasting each transaction. (#29415)
+
+- New RPCs have been added to introspect and control private broadcast:
+ `getprivatebroadcastinfo` reports transactions currently being privately
+ broadcast, and `abortprivatebroadcast` removes matching
+ transactions from the private broadcast queue.
diff --git a/doc/release-notes-32138.md b/doc/release-notes-32138.md
new file mode 100644
index 000000000000..566998508806
--- /dev/null
+++ b/doc/release-notes-32138.md
@@ -0,0 +1,3 @@
+RPC and Startup Option
+---
+The `-paytxfee` startup option and the `settxfee` RPC have now been removed, after being deprecated in Bitcoin Core 30.0. They used to allow the user to set a static fee rate for wallet transactions, which could potentially lead to overpaying or underpaying. Users should instead rely on fee estimation or specify a fee rate per transaction using the `fee_rate` argument in RPCs such as `fundrawtransaction`, `sendtoaddress`, `send`, `sendall`, and `sendmany`. (#32138)
diff --git a/doc/release-notes-33199.md b/doc/release-notes-33199.md
new file mode 100644
index 000000000000..90246d782730
--- /dev/null
+++ b/doc/release-notes-33199.md
@@ -0,0 +1,9 @@
+Fee Estimation
+========================
+
+- The Bitcoin Core fee estimator minimum fee rate bucket was updated from **1 sat/vB** to **0.1 sat/vB**,
+  which matches the node’s default `-minrelaytxfee`.
+ This means that for a given confirmation target, if a sub-1 sat/vB fee rate bucket is the minimum tracked
+ with sufficient data, its average value will be returned as the fee rate estimate.
+
+- Note: Restarting a node with this change invalidates previously saved estimates in `fee_estimates.dat`; the fee estimator will start tracking fresh stats.
diff --git a/doc/release-notes-33819.md b/doc/release-notes-33819.md
deleted file mode 100644
index 79ed1f707492..000000000000
--- a/doc/release-notes-33819.md
+++ /dev/null
@@ -1,8 +0,0 @@
-Mining IPC
-----------
-
-- The `getCoinbaseTx()` method is renamed to `getCoinbaseRawTx()` and deprecated.
- IPC clients do not use the function name, so they're not affected. (#33819)
-- Adds `getCoinbaseTx()` which clients should use instead of `getCoinbaseRawTx()`. It
- contains all fields required to construct a coinbase transaction, and omits the
- dummy output which Bitcoin Core uses internally. (#33819)
diff --git a/doc/release-notes-34184.md b/doc/release-notes-34184.md
new file mode 100644
index 000000000000..c582023eea2a
--- /dev/null
+++ b/doc/release-notes-34184.md
@@ -0,0 +1,8 @@
+Mining IPC
+----------
+
+- `Mining.createNewBlock` now has a `cooldown` behavior (enabled by default)
+ that waits for IBD to finish and for the tip to catch up. This usually
+ prevents a flood of templates during startup, but is not guaranteed. (#34184)
+- `Mining.interrupt()` can be used to interrupt `Mining.waitTipChanged` and
+ `Mining.createNewBlock`. (#34184)
diff --git a/doc/release-notes-34512.md b/doc/release-notes-34512.md
new file mode 100644
index 000000000000..b863448853f6
--- /dev/null
+++ b/doc/release-notes-34512.md
@@ -0,0 +1,8 @@
+Updated RPCs
+------------
+
+- The `getblock` RPC now returns a `coinbase_tx` object at verbosity levels 1, 2,
+ and 3. It contains `version`, `locktime`, `sequence`, `coinbase` and
+ `witness`. This allows for efficiently querying coinbase
+ transaction properties without fetching the full transaction data at
+ verbosity 2+. (#34512)
diff --git a/doc/release-notes-34568.md b/doc/release-notes-34568.md
new file mode 100644
index 000000000000..e48772330c1e
--- /dev/null
+++ b/doc/release-notes-34568.md
@@ -0,0 +1,11 @@
+Mining IPC
+----------
+
+The IPC mining interface now requires mining clients to use the latest `mining.capnp` schema. Clients built against older schemas will fail when calling `Init.makeMining` and receive an RPC error indicating the old mining interface is no longer supported. Mining clients must update to the latest schema and regenerate bindings to continue working. (#34568)
+
+Notable IPC mining interface changes since the last release:
+- `Mining.createNewBlock` and `Mining.checkBlock` now require a `context` parameter.
+- `Mining.waitTipChanged` now has a default `timeout` (effectively infinite / `maxDouble`) if the client omits it.
+- `BlockTemplate.getCoinbaseTx()` now returns a structured `CoinbaseTx` instead of raw bytes.
+- Removed `BlockTemplate.getCoinbaseCommitment()` and `BlockTemplate.getWitnessCommitmentIndex()`.
+- Cap’n Proto default values were updated to match the corresponding C++ defaults for mining-related option structs (e.g. `BlockCreateOptions`, `BlockWaitOptions`, `BlockCheckOptions`).
diff --git a/doc/release-process.md b/doc/release-process.md
index 272f36eadcfc..90ffd852e1a2 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -30,6 +30,7 @@ Release Process
* Update translations see [translation_process.md](/doc/translation_process.md#synchronising-translations).
* Update hardcoded [seeds](/contrib/seeds/README.md), see [this pull request](https://github.com/bitcoin/bitcoin/pull/27488) for an example.
+* Update embedded asmap data at `/src/node/data/ip_asn.dat`, see [asmap data documentation](./asmap-data.md).
* Update the following variables in [`src/kernel/chainparams.cpp`](/src/kernel/chainparams.cpp) for mainnet, testnet, and signet:
- `m_assumed_blockchain_size` and `m_assumed_chain_state_size` with the current size plus some overhead (see
[this](#how-to-calculate-assumed-blockchain-and-chain-state-size) for information on how to calculate them).
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index cf1f26c9f24c..ad18115bbc5f 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -193,6 +193,7 @@ add_library(bitcoin_node STATIC EXCLUDE_FROM_ALL
index/blockfilterindex.cpp
index/coinstatsindex.cpp
index/txindex.cpp
+ index/txospenderindex.cpp
init.cpp
kernel/chain.cpp
kernel/checks.cpp
@@ -286,6 +287,13 @@ target_link_libraries(bitcoin_node
$
$
)
+if(WITH_EMBEDDED_ASMAP)
+ target_compile_definitions(bitcoin_node PRIVATE ENABLE_EMBEDDED_ASMAP=1)
+ include(TargetDataSources)
+ target_raw_data_sources(bitcoin_node NAMESPACE node::data
+ node/data/ip_asn.dat
+ )
+endif()
# Bitcoin wrapper executable that can call other executables.
if(BUILD_BITCOIN_BIN)
diff --git a/src/bench/cluster_linearize.cpp b/src/bench/cluster_linearize.cpp
index d345a7cad1d9..0799cc2c2e9d 100644
--- a/src/bench/cluster_linearize.cpp
+++ b/src/bench/cluster_linearize.cpp
@@ -55,7 +55,7 @@ void BenchLinearizeOptimallyTotal(benchmark::Bench& bench, const std::string& na
// Benchmark the total time to optimal.
uint64_t rng_seed = 0;
bench.name(bench_name).run([&] {
- auto [_lin, optimal, _cost] = Linearize(depgraph, /*max_iterations=*/10000000, rng_seed++, IndexTxOrder{});
+ auto [_lin, optimal, _cost] = Linearize(depgraph, /*max_cost=*/10000000, rng_seed++, IndexTxOrder{});
assert(optimal);
});
}
@@ -72,7 +72,7 @@ void BenchLinearizeOptimallyPerCost(benchmark::Bench& bench, const std::string&
// Determine the cost of 100 rng_seeds.
uint64_t total_cost = 0;
for (uint64_t iter = 0; iter < 100; ++iter) {
- auto [_lin, optimal, cost] = Linearize(depgraph, /*max_iterations=*/10000000, /*rng_seed=*/iter, IndexTxOrder{});
+ auto [_lin, optimal, cost] = Linearize(depgraph, /*max_cost=*/10000000, /*rng_seed=*/iter, IndexTxOrder{});
total_cost += cost;
}
@@ -80,7 +80,7 @@ void BenchLinearizeOptimallyPerCost(benchmark::Bench& bench, const std::string&
bench.name(bench_name).unit("cost").batch(total_cost).run([&] {
uint64_t recompute_cost = 0;
for (uint64_t iter = 0; iter < 100; ++iter) {
- auto [_lin, optimal, cost] = Linearize(depgraph, /*max_iterations=*/10000000, /*rng_seed=*/iter, IndexTxOrder{});
+ auto [_lin, optimal, cost] = Linearize(depgraph, /*max_cost=*/10000000, /*rng_seed=*/iter, IndexTxOrder{});
assert(optimal);
recompute_cost += cost;
}
diff --git a/src/bench/txgraph.cpp b/src/bench/txgraph.cpp
index 00074db658fb..c56a284aa25d 100644
--- a/src/bench/txgraph.cpp
+++ b/src/bench/txgraph.cpp
@@ -51,9 +51,9 @@ void BenchTxGraphTrim(benchmark::Bench& bench)
static constexpr int NUM_DEPS_PER_BOTTOM_TX = 100;
/** Set a very large cluster size limit so that only the count limit is triggered. */
static constexpr int32_t MAX_CLUSTER_SIZE = 100'000 * 100;
- /** Set a very high number for acceptable iterations, so that we certainly benchmark optimal
+ /** Set a very high number for acceptable cost, so that we certainly benchmark optimal
* linearization. */
- static constexpr uint64_t NUM_ACCEPTABLE_ITERS = 100'000'000;
+ static constexpr uint64_t HIGH_ACCEPTABLE_COST = 100'000'000;
/** Refs to all top transactions. */
std::vector top_refs;
@@ -65,7 +65,7 @@ void BenchTxGraphTrim(benchmark::Bench& bench)
std::vector top_components;
InsecureRandomContext rng(11);
- auto graph = MakeTxGraph(MAX_CLUSTER_COUNT, MAX_CLUSTER_SIZE, NUM_ACCEPTABLE_ITERS, PointerComparator);
+ auto graph = MakeTxGraph(MAX_CLUSTER_COUNT, MAX_CLUSTER_SIZE, HIGH_ACCEPTABLE_COST, PointerComparator);
// Construct the top chains.
for (int chain = 0; chain < NUM_TOP_CHAINS; ++chain) {
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 724620aa7306..909ed09faaca 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -367,7 +367,6 @@ struct GetinfoRequestHandler : BaseRequestHandler {
if (!batch[ID_WALLETINFO]["result"]["unlocked_until"].isNull()) {
result.pushKV("unlocked_until", batch[ID_WALLETINFO]["result"]["unlocked_until"]);
}
- result.pushKV("paytxfee", batch[ID_WALLETINFO]["result"]["paytxfee"]);
}
if (!batch[ID_BALANCES]["result"].isNull()) {
result.pushKV("balance", batch[ID_BALANCES]["result"]["mine"]["trusted"]);
@@ -1152,7 +1151,6 @@ static void ParseGetInfoResult(UniValue& result)
if (!result["unlocked_until"].isNull()) {
result_string += strprintf("Unlocked until: %s\n", result["unlocked_until"].getValStr());
}
- result_string += strprintf("Transaction fee rate (-paytxfee) (%s/kvB): %s\n\n", CURRENCY_UNIT, result["paytxfee"].getValStr());
}
if (!result["balance"].isNull()) {
result_string += strprintf("%sBalance:%s %s\n\n", CYAN, RESET, result["balance"].getValStr());
diff --git a/src/chain.h b/src/chain.h
index 7b65c76d7b46..c27829208c4a 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -77,8 +77,7 @@ enum BlockStatus : uint32_t {
BLOCK_HAVE_MASK = BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO,
BLOCK_FAILED_VALID = 32, //!< stage after last reached validness failed
- BLOCK_FAILED_CHILD = 64, //!< descends from failed block
- BLOCK_FAILED_MASK = BLOCK_FAILED_VALID | BLOCK_FAILED_CHILD,
+ BLOCK_FAILED_CHILD = 64, //!< Unused flag that was previously set when descending from failed block
BLOCK_OPT_WITNESS = 128, //!< block data in blk*.dat was received with a witness-enforcing client
@@ -253,7 +252,7 @@ class CBlockIndex
{
AssertLockHeld(::cs_main);
assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed.
- if (nStatus & BLOCK_FAILED_MASK)
+ if (nStatus & BLOCK_FAILED_VALID)
return false;
return ((nStatus & BLOCK_VALID_MASK) >= nUpTo);
}
@@ -264,7 +263,7 @@ class CBlockIndex
{
AssertLockHeld(::cs_main);
assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed.
- if (nStatus & BLOCK_FAILED_MASK) return false;
+ if (nStatus & BLOCK_FAILED_VALID) return false;
if ((nStatus & BLOCK_VALID_MASK) < nUpTo) {
nStatus = (nStatus & ~BLOCK_VALID_MASK) | nUpTo;
diff --git a/src/cluster_linearize.h b/src/cluster_linearize.h
index 00627d6f1672..774bc61734f0 100644
--- a/src/cluster_linearize.h
+++ b/src/cluster_linearize.h
@@ -472,6 +472,77 @@ concept StrongComparator =
* Linearize(), which just sorts by DepGraphIndex. */
using IndexTxOrder = std::compare_three_way;
+/** A default cost model for SFL for SetType=BitSet<64>, based on benchmarks.
+ *
+ * The numbers here were obtained in February 2026 by:
+ * - For a variety of machines:
+ * - Running a fixed collection of ~385000 clusters found through random generation and fuzzing,
+ * optimizing for difficulty of linearization.
+ * - Linearize each ~3000 times, with different random seeds. Sometimes without input
+ * linearization, sometimes with a bad one.
+ * - Gather cycle counts for each of the operations included in this cost model,
+ * broken down by their parameters.
+ * - Correct the data by subtracting the runtime of obtaining the cycle count.
+ * - Drop the 5% top and bottom samples from each cycle count dataset, and compute the average
+ * of the remaining samples.
+ * - For each operation, fit a least-squares linear function approximation through the samples.
+ * - Rescale all machine expressions to make their total time match, as we only care about
+ * relative cost of each operation.
+ * - Take the per-operation average of operation expressions across all machines, to construct
+ * expressions for an average machine.
+ * - Approximate the result with integer coefficients. Each cost unit corresponds to somewhere
+ * between 0.5 ns and 2.5 ns, depending on the hardware.
+ */
+class SFLDefaultCostModel
+{
+ uint64_t m_cost{0};
+
+public:
+ inline void InitializeBegin() noexcept {}
+ inline void InitializeEnd(int num_txns, int num_deps) noexcept
+ {
+ // Cost of initialization.
+ m_cost += 39 * num_txns;
+ // Cost of producing linearization at the end.
+ m_cost += 48 * num_txns + 4 * num_deps;
+ }
+ inline void GetLinearizationBegin() noexcept {}
+ inline void GetLinearizationEnd(int num_txns, int num_deps) noexcept
+ {
+ // Note that we account for the cost of the final linearization at the beginning (see
+ // InitializeEnd), because the cost budget decision needs to be made before calling
+ // GetLinearization.
+ // This function exists here to allow overriding it easily for benchmark purposes.
+ }
+ inline void MakeTopologicalBegin() noexcept {}
+ inline void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept
+ {
+ m_cost += 20 * num_chunks + 28 * num_steps;
+ }
+ inline void StartOptimizingBegin() noexcept {}
+ inline void StartOptimizingEnd(int num_chunks) noexcept { m_cost += 13 * num_chunks; }
+ inline void ActivateBegin() noexcept {}
+ inline void ActivateEnd(int num_deps) noexcept { m_cost += 10 * num_deps + 1; }
+ inline void DeactivateBegin() noexcept {}
+ inline void DeactivateEnd(int num_deps) noexcept { m_cost += 11 * num_deps + 8; }
+ inline void MergeChunksBegin() noexcept {}
+ inline void MergeChunksMid(int num_txns) noexcept { m_cost += 2 * num_txns; }
+ inline void MergeChunksEnd(int num_steps) noexcept { m_cost += 3 * num_steps + 5; }
+ inline void PickMergeCandidateBegin() noexcept {}
+ inline void PickMergeCandidateEnd(int num_steps) noexcept { m_cost += 8 * num_steps; }
+ inline void PickChunkToOptimizeBegin() noexcept {}
+ inline void PickChunkToOptimizeEnd(int num_steps) noexcept { m_cost += num_steps + 4; }
+ inline void PickDependencyToSplitBegin() noexcept {}
+ inline void PickDependencyToSplitEnd(int num_txns) noexcept { m_cost += 8 * num_txns + 9; }
+ inline void StartMinimizingBegin() noexcept {}
+ inline void StartMinimizingEnd(int num_chunks) noexcept { m_cost += 18 * num_chunks; }
+ inline void MinimizeStepBegin() noexcept {}
+ inline void MinimizeStepMid(int num_txns) noexcept { m_cost += 11 * num_txns + 11; }
+ inline void MinimizeStepEnd(bool split) noexcept { m_cost += 17 * split + 7; }
+
+ inline uint64_t GetCost() const noexcept { return m_cost; }
+};
+
/** Class to represent the internal state of the spanning-forest linearization (SFL) algorithm.
*
* At all times, each dependency is marked as either "active" or "inactive". The subset of active
@@ -631,6 +702,11 @@ using IndexTxOrder = std::compare_three_way;
* - Inside the selected chunk (see above), among the dependencies whose top feerate is strictly
* higher than its bottom feerate in the selected chunk, if any, a uniformly random dependency
* is deactivated.
+ * - After every split, it is possible that the top and the bottom chunk merge with each other
+ * again in the merge sequence (through a top->bottom dependency, not through the deactivated
+ * one, which was bottom->top). Call this a self-merge. If a self-merge does not occur after
+ * a split, the resulting linearization is strictly improved (the area under the convexified
+ * feerate diagram increases by at least gain/2), while self-merges do not change it.
*
* - How to decide the exact output linearization:
* - When there are multiple equal-feerate chunks with no dependencies between them, output a
@@ -638,7 +714,7 @@ using IndexTxOrder = std::compare_three_way;
* - Within chunks, repeatedly pick a uniformly random transaction among those with no missing
* dependencies.
*/
-template
+template
class SpanningForestState
{
private:
@@ -647,60 +723,64 @@ class SpanningForestState
/** Data type to represent indexing into m_tx_data. */
using TxIdx = DepGraphIndex;
- /** Data type to represent indexing into m_dep_data. */
- using DepIdx = uint32_t;
-
- /** Structure with information about a single transaction. For transactions that are the
- * representative for the chunk they are in, this also stores chunk information. */
+ /** Data type to represent indexing into m_set_info. Use the smallest type possible to improve
+ * cache locality. */
+ using SetIdx = std::conditional_t<(SetType::Size() <= 0xff),
+ uint8_t,
+ std::conditional_t<(SetType::Size() <= 0xffff),
+ uint16_t,
+ uint32_t>>;
+ /** An invalid SetIdx. */
+ static constexpr SetIdx INVALID_SET_IDX = SetIdx(-1);
+
+ /** Structure with information about a single transaction. */
struct TxData {
- /** The dependencies to children of this transaction. Immutable after construction. */
- std::vector child_deps;
+ /** The top set for every active child dependency this transaction has, indexed by child
+ * TxIdx. Only defined for indexes in active_children. */
+ std::array dep_top_idx;
/** The set of parent transactions of this transaction. Immutable after construction. */
SetType parents;
/** The set of child transactions of this transaction. Immutable after construction. */
SetType children;
- /** Which transaction holds the chunk_setinfo for the chunk this transaction is in
- * (the representative for the chunk). */
- TxIdx chunk_rep;
- /** (Only if this transaction is the representative for the chunk it is in) the total
- * chunk set and feerate. */
- SetInfo chunk_setinfo;
- };
-
- /** Structure with information about a single dependency. */
- struct DepData {
- /** Whether this dependency is active. */
- bool active;
- /** What the parent and child transactions are. Immutable after construction. */
- TxIdx parent, child;
- /** (Only if this dependency is active) the would-be top chunk and its feerate that would
- * be formed if this dependency were to be deactivated. */
- SetInfo top_setinfo;
+ /** The set of child transactions reachable through an active dependency. */
+ SetType active_children;
+ /** Which chunk this transaction belongs to. */
+ SetIdx chunk_idx;
};
/** The set of all TxIdx's of transactions in the cluster indexing into m_tx_data. */
SetType m_transaction_idxs;
+ /** The set of all chunk SetIdx's. This excludes the SetIdxs that refer to active
+ * dependencies' tops. */
+ SetType m_chunk_idxs;
+ /** The set of all SetIdx's that appear in m_suboptimal_chunks. Note that they do not need to
+ * be chunks: some of these sets may have been converted to a dependency's top set since being
+ * added to m_suboptimal_chunks. */
+ SetType m_suboptimal_idxs;
/** Information about each transaction (and chunks). Keeps the "holes" from DepGraph during
* construction. Indexed by TxIdx. */
std::vector m_tx_data;
- /** Information about each dependency. Indexed by DepIdx. */
- std::vector m_dep_data;
- /** A FIFO of chunk representatives of chunks that may be improved still. */
- VecDeque m_suboptimal_chunks;
- /** A FIFO of chunk representatives with a pivot transaction in them, and a flag to indicate
- * their status:
+ /** Information about each set (chunk, or active dependency top set). Indexed by SetIdx. */
+ std::vector> m_set_info;
+ /** For each chunk, indexed by SetIdx, the set of out-of-chunk reachable transactions, in the
+ * upwards (.first) and downwards (.second) direction. */
+ std::vector> m_reachable;
+ /** A FIFO of chunk SetIdxs for chunks that may be improved still. */
+ VecDeque m_suboptimal_chunks;
+ /** A FIFO of chunk indexes with a pivot transaction in them, and a flag to indicate their
+ * status:
* - bit 1: currently attempting to move the pivot down, rather than up.
* - bit 2: this is the second stage, so we have already tried moving the pivot in the other
* direction.
*/
- VecDeque> m_nonminimal_chunks;
-
- /** The number of updated transactions in activations/deactivations. */
- uint64_t m_cost{0};
+ VecDeque> m_nonminimal_chunks;
/** The DepGraph we are trying to linearize. */
const DepGraph& m_depgraph;
+ /** Accounting for the cost of this computation. */
+ CostModel m_cost;
+
/** Pick a random transaction within a set (which must be non-empty). */
TxIdx PickRandomTx(const SetType& tx_idxs) noexcept
{
@@ -714,58 +794,40 @@ class SpanningForestState
return TxIdx(-1);
}
- /** Update a chunk:
- * - All transactions have their chunk representative set to `chunk_rep`.
- * - All dependencies which have `query` in their top_setinfo get `dep_change` added to it
- * (if `!Subtract`) or removed from it (if `Subtract`).
- */
- template
- void UpdateChunk(const SetType& chunk, TxIdx query, TxIdx chunk_rep, const SetInfo& dep_change) noexcept
+ /** Find the set of out-of-chunk transactions reachable from tx_idxs, both in upwards and
+ * downwards direction. Only used by SanityCheck to verify the precomputed reachable sets in
+ * m_reachable that are maintained by Activate/Deactivate. */
+ std::pair GetReachable(const SetType& tx_idxs) const noexcept
{
- // Iterate over all the chunk's transactions.
- for (auto tx_idx : chunk) {
- auto& tx_data = m_tx_data[tx_idx];
- // Update the chunk representative.
- tx_data.chunk_rep = chunk_rep;
- // Iterate over all active dependencies with tx_idx as parent. Combined with the outer
- // loop this iterates over all internal active dependencies of the chunk.
- auto child_deps = std::span{tx_data.child_deps};
- for (auto dep_idx : child_deps) {
- auto& dep_entry = m_dep_data[dep_idx];
- Assume(dep_entry.parent == tx_idx);
- // Skip inactive dependencies.
- if (!dep_entry.active) continue;
- // If this dependency's top_setinfo contains query, update it to add/remove
- // dep_change.
- if (dep_entry.top_setinfo.transactions[query]) {
- if constexpr (Subtract) {
- dep_entry.top_setinfo -= dep_change;
- } else {
- dep_entry.top_setinfo |= dep_change;
- }
- }
- }
+ SetType parents, children;
+ for (auto tx_idx : tx_idxs) {
+ const auto& tx_data = m_tx_data[tx_idx];
+ parents |= tx_data.parents;
+ children |= tx_data.children;
}
+ return {parents - tx_idxs, children - tx_idxs};
}
- /** Make a specified inactive dependency active. Returns the merged chunk representative. */
- TxIdx Activate(DepIdx dep_idx) noexcept
+ /** Make the inactive dependency from child to parent, which must not be in the same chunk
+ * already, active. Returns the merged chunk idx. */
+ SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept
{
- auto& dep_data = m_dep_data[dep_idx];
- Assume(!dep_data.active);
- auto& child_tx_data = m_tx_data[dep_data.child];
- auto& parent_tx_data = m_tx_data[dep_data.parent];
-
- // Gather information about the parent and child chunks.
- Assume(parent_tx_data.chunk_rep != child_tx_data.chunk_rep);
- auto& par_chunk_data = m_tx_data[parent_tx_data.chunk_rep];
- auto& chl_chunk_data = m_tx_data[child_tx_data.chunk_rep];
- TxIdx top_rep = parent_tx_data.chunk_rep;
- auto top_part = par_chunk_data.chunk_setinfo;
- auto bottom_part = chl_chunk_data.chunk_setinfo;
- // Update the parent chunk to also contain the child.
- par_chunk_data.chunk_setinfo |= bottom_part;
- m_cost += par_chunk_data.chunk_setinfo.transactions.Count();
+ m_cost.ActivateBegin();
+ // Gather and check information about the parent and child transactions.
+ auto& parent_data = m_tx_data[parent_idx];
+ auto& child_data = m_tx_data[child_idx];
+ Assume(parent_data.children[child_idx]);
+ Assume(!parent_data.active_children[child_idx]);
+ // Get the set index of the chunks the parent and child are currently in. The parent chunk
+ // will become the top set of the newly activated dependency, while the child chunk will be
+ // grown to become the merged chunk.
+ auto parent_chunk_idx = parent_data.chunk_idx;
+ auto child_chunk_idx = child_data.chunk_idx;
+ Assume(parent_chunk_idx != child_chunk_idx);
+ Assume(m_chunk_idxs[parent_chunk_idx]);
+ Assume(m_chunk_idxs[child_chunk_idx]);
+ auto& top_info = m_set_info[parent_chunk_idx];
+ auto& bottom_info = m_set_info[child_chunk_idx];
// Consider the following example:
//
@@ -782,234 +844,364 @@ class SpanningForestState
// dependency being activated (E->C here) in its top set, will have the opposite part added
// to it. This is true for B->A and F->E, but not for C->A and F->D.
//
- // Let UpdateChunk traverse the old parent chunk top_part (ABC in example), and add
- // bottom_part (DEF) to every dependency's top_set which has the parent (C) in it. The
- // representative of each of these transactions was already top_rep, so that is not being
- // changed here.
- UpdateChunk(/*chunk=*/top_part.transactions, /*query=*/dep_data.parent,
- /*chunk_rep=*/top_rep, /*dep_change=*/bottom_part);
- // Let UpdateChunk traverse the old child chunk bottom_part (DEF in example), and add
- // top_part (ABC) to every dependency's top_set which has the child (E) in it. At the same
- // time, change the representative of each of these transactions to be top_rep, which
- // becomes the representative for the merged chunk.
- UpdateChunk(/*chunk=*/bottom_part.transactions, /*query=*/dep_data.child,
- /*chunk_rep=*/top_rep, /*dep_change=*/top_part);
- // Make active.
- dep_data.active = true;
- dep_data.top_setinfo = top_part;
- return top_rep;
+ // Traverse the old parent chunk top_info (ABC in example), and add bottom_info (DEF) to
+ // every dependency's top set which has the parent (C) in it. At the same time, change the
+ // chunk_idx for each to be child_chunk_idx, which becomes the set for the merged chunk.
+ for (auto tx_idx : top_info.transactions) {
+ auto& tx_data = m_tx_data[tx_idx];
+ tx_data.chunk_idx = child_chunk_idx;
+ for (auto dep_child_idx : tx_data.active_children) {
+ auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
+ if (dep_top_info.transactions[parent_idx]) dep_top_info |= bottom_info;
+ }
+ }
+ // Traverse the old child chunk bottom_info (DEF in example), and add top_info (ABC) to
+ // every dependency's top set which has the child (E) in it.
+ for (auto tx_idx : bottom_info.transactions) {
+ auto& tx_data = m_tx_data[tx_idx];
+ for (auto dep_child_idx : tx_data.active_children) {
+ auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
+ if (dep_top_info.transactions[child_idx]) dep_top_info |= top_info;
+ }
+ }
+ // Merge top_info into bottom_info, which becomes the merged chunk.
+ bottom_info |= top_info;
+ // Compute merged sets of reachable transactions from the new chunk, based on the input
+ // chunks' reachable sets.
+ m_reachable[child_chunk_idx].first |= m_reachable[parent_chunk_idx].first;
+ m_reachable[child_chunk_idx].second |= m_reachable[parent_chunk_idx].second;
+ m_reachable[child_chunk_idx].first -= bottom_info.transactions;
+ m_reachable[child_chunk_idx].second -= bottom_info.transactions;
+ // Make parent chunk the set for the new active dependency.
+ parent_data.dep_top_idx[child_idx] = parent_chunk_idx;
+ parent_data.active_children.Set(child_idx);
+ m_chunk_idxs.Reset(parent_chunk_idx);
+ // Return the newly merged chunk.
+ m_cost.ActivateEnd(/*num_deps=*/bottom_info.transactions.Count() - 1);
+ return child_chunk_idx;
}
- /** Make a specified active dependency inactive. */
- void Deactivate(DepIdx dep_idx) noexcept
+ /** Make a specified active dependency inactive. Returns the created parent and child chunk
+ * indexes. */
+ std::pair Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept
{
- auto& dep_data = m_dep_data[dep_idx];
- Assume(dep_data.active);
- auto& parent_tx_data = m_tx_data[dep_data.parent];
- // Make inactive.
- dep_data.active = false;
- // Update representatives.
- auto& chunk_data = m_tx_data[parent_tx_data.chunk_rep];
- m_cost += chunk_data.chunk_setinfo.transactions.Count();
- auto top_part = dep_data.top_setinfo;
- auto bottom_part = chunk_data.chunk_setinfo - top_part;
- TxIdx bottom_rep = dep_data.child;
- auto& bottom_chunk_data = m_tx_data[bottom_rep];
- bottom_chunk_data.chunk_setinfo = bottom_part;
- TxIdx top_rep = dep_data.parent;
- auto& top_chunk_data = m_tx_data[top_rep];
- top_chunk_data.chunk_setinfo = top_part;
-
- // See the comment above in Activate(). We perform the opposite operations here,
- // removing instead of adding.
- //
- // Let UpdateChunk traverse the old parent chunk top_part, and remove bottom_part from
- // every dependency's top_set which has the parent in it. At the same time, change the
- // representative of each of these transactions to be top_rep.
- UpdateChunk(/*chunk=*/top_part.transactions, /*query=*/dep_data.parent,
- /*chunk_rep=*/top_rep, /*dep_change=*/bottom_part);
- // Let UpdateChunk traverse the old child chunk bottom_part, and remove top_part from every
- // dependency's top_set which has the child in it. At the same time, change the
- // representative of each of these transactions to be bottom_rep.
- UpdateChunk(/*chunk=*/bottom_part.transactions, /*query=*/dep_data.child,
- /*chunk_rep=*/bottom_rep, /*dep_change=*/top_part);
+ m_cost.DeactivateBegin();
+ // Gather and check information about the parent transactions.
+ auto& parent_data = m_tx_data[parent_idx];
+ Assume(parent_data.children[child_idx]);
+ Assume(parent_data.active_children[child_idx]);
+ // Get the top set of the active dependency (which will become the parent chunk) and the
+ // chunk set the transactions are currently in (which will become the bottom chunk).
+ auto parent_chunk_idx = parent_data.dep_top_idx[child_idx];
+ auto child_chunk_idx = parent_data.chunk_idx;
+ Assume(parent_chunk_idx != child_chunk_idx);
+ Assume(m_chunk_idxs[child_chunk_idx]);
+ Assume(!m_chunk_idxs[parent_chunk_idx]); // top set, not a chunk
+ auto& top_info = m_set_info[parent_chunk_idx];
+ auto& bottom_info = m_set_info[child_chunk_idx];
+
+ // Remove the active dependency.
+ parent_data.active_children.Reset(child_idx);
+ m_chunk_idxs.Set(parent_chunk_idx);
+ auto ntx = bottom_info.transactions.Count();
+ // Subtract the top_info from the bottom_info, as it will become the child chunk.
+ bottom_info -= top_info;
+ // See the comment above in Activate(). We perform the opposite operations here, removing
+ // instead of adding. Simultaneously, aggregate the top/bottom's union of parents/children.
+ SetType top_parents, top_children;
+ for (auto tx_idx : top_info.transactions) {
+ auto& tx_data = m_tx_data[tx_idx];
+ tx_data.chunk_idx = parent_chunk_idx;
+ top_parents |= tx_data.parents;
+ top_children |= tx_data.children;
+ for (auto dep_child_idx : tx_data.active_children) {
+ auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
+ if (dep_top_info.transactions[parent_idx]) dep_top_info -= bottom_info;
+ }
+ }
+ SetType bottom_parents, bottom_children;
+ for (auto tx_idx : bottom_info.transactions) {
+ auto& tx_data = m_tx_data[tx_idx];
+ bottom_parents |= tx_data.parents;
+ bottom_children |= tx_data.children;
+ for (auto dep_child_idx : tx_data.active_children) {
+ auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
+ if (dep_top_info.transactions[child_idx]) dep_top_info -= top_info;
+ }
+ }
+ // Compute the new sets of reachable transactions for each new chunk, based on the
+ // top/bottom parents and children computed above.
+ m_reachable[parent_chunk_idx].first = top_parents - top_info.transactions;
+ m_reachable[parent_chunk_idx].second = top_children - top_info.transactions;
+ m_reachable[child_chunk_idx].first = bottom_parents - bottom_info.transactions;
+ m_reachable[child_chunk_idx].second = bottom_children - bottom_info.transactions;
+ // Return the two new set idxs.
+ m_cost.DeactivateEnd(/*num_deps=*/ntx - 1);
+ return {parent_chunk_idx, child_chunk_idx};
}
- /** Activate a dependency from the chunk represented by bottom_idx to the chunk represented by
- * top_idx. Return the representative of the merged chunk, or TxIdx(-1) if no merge is
- * possible. */
- TxIdx MergeChunks(TxIdx top_rep, TxIdx bottom_rep) noexcept
+ /** Activate a dependency from the bottom set to the top set, which must exist. Return the
+ * index of the merged chunk. */
+ SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept
{
- auto& top_chunk = m_tx_data[top_rep];
- Assume(top_chunk.chunk_rep == top_rep);
- auto& bottom_chunk = m_tx_data[bottom_rep];
- Assume(bottom_chunk.chunk_rep == bottom_rep);
+ m_cost.MergeChunksBegin();
+ Assume(m_chunk_idxs[top_idx]);
+ Assume(m_chunk_idxs[bottom_idx]);
+ auto& top_chunk_info = m_set_info[top_idx];
+ auto& bottom_chunk_info = m_set_info[bottom_idx];
// Count the number of dependencies between bottom_chunk and top_chunk.
- TxIdx num_deps{0};
- for (auto tx : top_chunk.chunk_setinfo.transactions) {
- auto& tx_data = m_tx_data[tx];
- num_deps += (tx_data.children & bottom_chunk.chunk_setinfo.transactions).Count();
+ unsigned num_deps{0};
+ for (auto tx_idx : top_chunk_info.transactions) {
+ auto& tx_data = m_tx_data[tx_idx];
+ num_deps += (tx_data.children & bottom_chunk_info.transactions).Count();
}
- if (num_deps == 0) return TxIdx(-1);
+ m_cost.MergeChunksMid(/*num_txns=*/top_chunk_info.transactions.Count());
+ Assume(num_deps > 0);
// Uniformly randomly pick one of them and activate it.
- TxIdx pick = m_rng.randrange(num_deps);
- for (auto tx : top_chunk.chunk_setinfo.transactions) {
- auto& tx_data = m_tx_data[tx];
- auto intersect = tx_data.children & bottom_chunk.chunk_setinfo.transactions;
+ unsigned pick = m_rng.randrange(num_deps);
+ unsigned num_steps = 0;
+ for (auto tx_idx : top_chunk_info.transactions) {
+ ++num_steps;
+ auto& tx_data = m_tx_data[tx_idx];
+ auto intersect = tx_data.children & bottom_chunk_info.transactions;
auto count = intersect.Count();
if (pick < count) {
- for (auto dep : tx_data.child_deps) {
- auto& dep_data = m_dep_data[dep];
- if (bottom_chunk.chunk_setinfo.transactions[dep_data.child]) {
- if (pick == 0) return Activate(dep);
- --pick;
+ for (auto child_idx : intersect) {
+ if (pick == 0) {
+ m_cost.MergeChunksEnd(/*num_steps=*/num_steps);
+ return Activate(tx_idx, child_idx);
}
+ --pick;
}
+ Assume(false);
break;
}
pick -= count;
}
Assume(false);
- return TxIdx(-1);
+ return INVALID_SET_IDX;
}
- /** Perform an upward or downward merge step, on the specified chunk representative. Returns
- * the representative of the merged chunk, or TxIdx(-1) if no merge took place. */
+ /** Activate a dependency from chunk_idx to merge_chunk_idx (if !DownWard), or a dependency
+ * from merge_chunk_idx to chunk_idx (if DownWard). Return the index of the merged chunk. */
template<bool DownWard>
- TxIdx MergeStep(TxIdx chunk_rep) noexcept
+ SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept
{
- /** Information about the chunk that tx_idx is currently in. */
- auto& chunk_data = m_tx_data[chunk_rep];
- SetType chunk_txn = chunk_data.chunk_setinfo.transactions;
- // Iterate over all transactions in the chunk, figuring out which other chunk each
- // depends on, but only testing each other chunk once. For those depended-on chunks,
+ if constexpr (DownWard) {
+ return MergeChunks(chunk_idx, merge_chunk_idx);
+ } else {
+ return MergeChunks(merge_chunk_idx, chunk_idx);
+ }
+ }
+
+ /** Determine which chunk to merge chunk_idx with, or INVALID_SET_IDX if none. */
+ template<bool DownWard>
+ SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept
+ {
+ m_cost.PickMergeCandidateBegin();
+ /** Information about the chunk. */
+ Assume(m_chunk_idxs[chunk_idx]);
+ auto& chunk_info = m_set_info[chunk_idx];
+ // Iterate over all chunks reachable from this one. For those depended-on chunks,
// remember the highest-feerate (if DownWard) or lowest-feerate (if !DownWard) one.
// If multiple equal-feerate candidate chunks to merge with exist, pick a random one
// among them.
- /** Which transactions have been reached from this chunk already. Initialize with the
- * chunk itself, so internal dependencies within the chunk are ignored. */
- SetType explored = chunk_txn;
/** The minimum feerate (if downward) or maximum feerate (if upward) to consider when
* looking for candidate chunks to merge with. Initially, this is the original chunk's
* feerate, but is updated to be the current best candidate whenever one is found. */
- FeeFrac best_other_chunk_feerate = chunk_data.chunk_setinfo.feerate;
- /** The representative for the best candidate chunk to merge with. -1 if none. */
- TxIdx best_other_chunk_rep = TxIdx(-1);
+ FeeFrac best_other_chunk_feerate = chunk_info.feerate;
+ /** The chunk index for the best candidate chunk to merge with. INVALID_SET_IDX if none. */
+ SetIdx best_other_chunk_idx = INVALID_SET_IDX;
/** We generate random tiebreak values to pick between equal-feerate candidate chunks.
* This variable stores the tiebreak of the current best candidate. */
uint64_t best_other_chunk_tiebreak{0};
- for (auto tx : chunk_txn) {
- auto& tx_data = m_tx_data[tx];
- /** The transactions reached by following dependencies from tx that have not been
- * explored before. */
- auto newly_reached = (DownWard ? tx_data.children : tx_data.parents) - explored;
- explored |= newly_reached;
- while (newly_reached.Any()) {
- // Find a chunk inside newly_reached, and remove it from newly_reached.
- auto reached_chunk_rep = m_tx_data[newly_reached.First()].chunk_rep;
- auto& reached_chunk = m_tx_data[reached_chunk_rep].chunk_setinfo;
- newly_reached -= reached_chunk.transactions;
- // See if it has an acceptable feerate.
- auto cmp = DownWard ? FeeRateCompare(best_other_chunk_feerate, reached_chunk.feerate)
- : FeeRateCompare(reached_chunk.feerate, best_other_chunk_feerate);
- if (cmp > 0) continue;
- uint64_t tiebreak = m_rng.rand64();
- if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) {
- best_other_chunk_feerate = reached_chunk.feerate;
- best_other_chunk_rep = reached_chunk_rep;
- best_other_chunk_tiebreak = tiebreak;
- }
+
+ /** Which parent/child transactions we still need to process the chunks for. */
+ auto todo = DownWard ? m_reachable[chunk_idx].second : m_reachable[chunk_idx].first;
+ unsigned steps = 0;
+ while (todo.Any()) {
+ ++steps;
+ // Find a chunk for a transaction in todo, and remove all its transactions from todo.
+ auto reached_chunk_idx = m_tx_data[todo.First()].chunk_idx;
+ auto& reached_chunk_info = m_set_info[reached_chunk_idx];
+ todo -= reached_chunk_info.transactions;
+ // See if it has an acceptable feerate.
+ auto cmp = DownWard ? FeeRateCompare(best_other_chunk_feerate, reached_chunk_info.feerate)
+ : FeeRateCompare(reached_chunk_info.feerate, best_other_chunk_feerate);
+ if (cmp > 0) continue;
+ uint64_t tiebreak = m_rng.rand64();
+ if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) {
+ best_other_chunk_feerate = reached_chunk_info.feerate;
+ best_other_chunk_idx = reached_chunk_idx;
+ best_other_chunk_tiebreak = tiebreak;
}
}
- // Stop if there are no candidate chunks to merge with.
- if (best_other_chunk_rep == TxIdx(-1)) return TxIdx(-1);
- if constexpr (DownWard) {
- chunk_rep = MergeChunks(chunk_rep, best_other_chunk_rep);
- } else {
- chunk_rep = MergeChunks(best_other_chunk_rep, chunk_rep);
- }
- Assume(chunk_rep != TxIdx(-1));
- return chunk_rep;
+ Assume(steps <= m_set_info.size());
+
+ m_cost.PickMergeCandidateEnd(/*num_steps=*/steps);
+ return best_other_chunk_idx;
}
+ /** Perform an upward or downward merge step, on the specified chunk. Returns the merged chunk,
+ * or INVALID_SET_IDX if no merge took place. */
+ template<bool DownWard>
+ SetIdx MergeStep(SetIdx chunk_idx) noexcept
+ {
+ auto merge_chunk_idx = PickMergeCandidate<DownWard>(chunk_idx);
+ if (merge_chunk_idx == INVALID_SET_IDX) return INVALID_SET_IDX;
+ chunk_idx = MergeChunksDirected<DownWard>(chunk_idx, merge_chunk_idx);
+ Assume(chunk_idx != INVALID_SET_IDX);
+ return chunk_idx;
+ }
- /** Perform an upward or downward merge sequence on the specified transaction. */
+ /** Perform an upward or downward merge sequence on the specified chunk. */
template<bool DownWard>
- void MergeSequence(TxIdx tx_idx) noexcept
+ void MergeSequence(SetIdx chunk_idx) noexcept
{
- auto chunk_rep = m_tx_data[tx_idx].chunk_rep;
+ Assume(m_chunk_idxs[chunk_idx]);
while (true) {
- auto merged_rep = MergeStep(chunk_rep);
- if (merged_rep == TxIdx(-1)) break;
- chunk_rep = merged_rep;
+ auto merged_chunk_idx = MergeStep<DownWard>(chunk_idx);
+ if (merged_chunk_idx == INVALID_SET_IDX) break;
+ chunk_idx = merged_chunk_idx;
+ }
+ // Add the chunk to the queue of improvable chunks, if it wasn't already there.
+ if (!m_suboptimal_idxs[chunk_idx]) {
+ m_suboptimal_idxs.Set(chunk_idx);
+ m_suboptimal_chunks.push_back(chunk_idx);
}
- // Add the chunk to the queue of improvable chunks.
- m_suboptimal_chunks.push_back(chunk_rep);
}
/** Split a chunk, and then merge the resulting two chunks to make the graph topological
* again. */
- void Improve(DepIdx dep_idx) noexcept
+ void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept
{
- auto& dep_data = m_dep_data[dep_idx];
- Assume(dep_data.active);
// Deactivate the specified dependency, splitting it into two new chunks: a top containing
// the parent, and a bottom containing the child. The top should have a higher feerate.
- Deactivate(dep_idx);
+ auto [parent_chunk_idx, child_chunk_idx] = Deactivate(parent_idx, child_idx);
// At this point we have exactly two chunks which may violate topology constraints (the
- // parent chunk and child chunk that were produced by deactivating dep_idx). We can fix
+ // parent chunk and child chunk that were produced by deactivation). We can fix
// these using just merge sequences, one upwards and one downwards, avoiding the need for a
// full MakeTopological.
+ const auto& parent_reachable = m_reachable[parent_chunk_idx].first;
+ const auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
+ if (parent_reachable.Overlaps(child_chunk_txn)) {
+ // The parent chunk has a dependency on a transaction in the child chunk. In this case,
+ // the parent needs to merge back with the child chunk (a self-merge), and no other
+ // merges are needed. Special-case this, so the overhead of PickMergeCandidate and
+ // MergeSequence can be avoided.
+
+ // In the self-merge, the roles reverse: the parent chunk (from the split) depends
+ // on the child chunk, so child_chunk_idx is the "top" and parent_chunk_idx is the
+ // "bottom" for MergeChunks.
+ auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
+ if (!m_suboptimal_idxs[merged_chunk_idx]) {
+ m_suboptimal_idxs.Set(merged_chunk_idx);
+ m_suboptimal_chunks.push_back(merged_chunk_idx);
+ }
+ } else {
+ // Merge the top chunk with lower-feerate chunks it depends on.
+ MergeSequence<false>(parent_chunk_idx);
+ // Merge the bottom chunk with higher-feerate chunks that depend on it.
+ MergeSequence<true>(child_chunk_idx);
+ }
+ }
- // Merge the top chunk with lower-feerate chunks it depends on (which may be the bottom it
- // was just split from, or other pre-existing chunks).
- MergeSequence(dep_data.parent);
- // Merge the bottom chunk with higher-feerate chunks that depend on it.
- MergeSequence(dep_data.child);
+ /** Determine the next chunk to optimize, or INVALID_SET_IDX if none. */
+ SetIdx PickChunkToOptimize() noexcept
+ {
+ m_cost.PickChunkToOptimizeBegin();
+ unsigned steps{0};
+ while (!m_suboptimal_chunks.empty()) {
+ ++steps;
+ // Pop an entry from the potentially-suboptimal chunk queue.
+ SetIdx chunk_idx = m_suboptimal_chunks.front();
+ Assume(m_suboptimal_idxs[chunk_idx]);
+ m_suboptimal_idxs.Reset(chunk_idx);
+ m_suboptimal_chunks.pop_front();
+ if (m_chunk_idxs[chunk_idx]) {
+ m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
+ return chunk_idx;
+ }
+ // If what was popped is not currently a chunk, continue. This may
+ // happen when a split chunk merges in Improve() with one or more existing chunks that
+ // are themselves on the suboptimal queue already.
+ }
+ m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
+ return INVALID_SET_IDX;
+ }
+
+ /** Find a (parent, child) dependency to deactivate in chunk_idx, or (-1, -1) if none. */
+ std::pair<TxIdx, TxIdx> PickDependencyToSplit(SetIdx chunk_idx) noexcept
+ {
+ m_cost.PickDependencyToSplitBegin();
+ Assume(m_chunk_idxs[chunk_idx]);
+ auto& chunk_info = m_set_info[chunk_idx];
+
+ // Remember the best dependency {par, chl} seen so far.
+ std::pair<TxIdx, TxIdx> candidate_dep = {TxIdx(-1), TxIdx(-1)};
+ uint64_t candidate_tiebreak = 0;
+ // Iterate over all transactions.
+ for (auto tx_idx : chunk_info.transactions) {
+ const auto& tx_data = m_tx_data[tx_idx];
+ // Iterate over all active child dependencies of the transaction.
+ for (auto child_idx : tx_data.active_children) {
+ auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
+ // Skip if this dependency is ineligible (the top chunk that would be created
+ // does not have higher feerate than the chunk it is currently part of).
+ auto cmp = FeeRateCompare(dep_top_info.feerate, chunk_info.feerate);
+ if (cmp <= 0) continue;
+ // Generate a random tiebreak for this dependency, and reject it if its tiebreak
+ // is worse than the best so far. This means that among all eligible
+ // dependencies, a uniformly random one will be chosen.
+ uint64_t tiebreak = m_rng.rand64();
+ if (tiebreak < candidate_tiebreak) continue;
+ // Remember this as our (new) candidate dependency.
+ candidate_dep = {tx_idx, child_idx};
+ candidate_tiebreak = tiebreak;
+ }
+ }
+ m_cost.PickDependencyToSplitEnd(/*num_txns=*/chunk_info.transactions.Count());
+ return candidate_dep;
}
public:
/** Construct a spanning forest for the given DepGraph, with every transaction in its own chunk
* (not topological). */
- explicit SpanningForestState(const DepGraph<SetType>& depgraph LIFETIMEBOUND, uint64_t rng_seed) noexcept :
- m_rng(rng_seed), m_depgraph(depgraph)
+ explicit SpanningForestState(const DepGraph<SetType>& depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel& cost = CostModel{}) noexcept :
+ m_rng(rng_seed), m_depgraph(depgraph), m_cost(cost)
{
+ m_cost.InitializeBegin();
m_transaction_idxs = depgraph.Positions();
auto num_transactions = m_transaction_idxs.Count();
m_tx_data.resize(depgraph.PositionRange());
- // Reserve the maximum number of (reserved) dependencies the cluster can have, so
- // m_dep_data won't need any reallocations during construction. For a cluster with N
- // transactions, the worst case consists of two sets of transactions, the parents and the
- // children, where each child depends on each parent and nothing else. For even N, both
- // sets can be sized N/2, which means N^2/4 dependencies. For odd N, one can be (N + 1)/2
- // and the other can be (N - 1)/2, meaning (N^2 - 1)/4 dependencies. Because N^2 is odd in
- // this case, N^2/4 (with rounding-down division) is the correct value in both cases.
- m_dep_data.reserve((num_transactions * num_transactions) / 4);
- for (auto tx : m_transaction_idxs) {
+ m_set_info.resize(num_transactions);
+ m_reachable.resize(num_transactions);
+ size_t num_chunks = 0;
+ size_t num_deps = 0;
+ for (auto tx_idx : m_transaction_idxs) {
// Fill in transaction data.
- auto& tx_data = m_tx_data[tx];
- tx_data.chunk_rep = tx;
- tx_data.chunk_setinfo.transactions = SetType::Singleton(tx);
- tx_data.chunk_setinfo.feerate = depgraph.FeeRate(tx);
- // Add its dependencies.
- SetType parents = depgraph.GetReducedParents(tx);
- for (auto par : parents) {
- auto& par_tx_data = m_tx_data[par];
- auto dep_idx = m_dep_data.size();
- // Construct new dependency.
- auto& dep = m_dep_data.emplace_back();
- dep.active = false;
- dep.parent = par;
- dep.child = tx;
- // Add it as parent of the child.
- tx_data.parents.Set(par);
- // Add it as child of the parent.
- par_tx_data.child_deps.push_back(dep_idx);
- par_tx_data.children.Set(tx);
+ auto& tx_data = m_tx_data[tx_idx];
+ tx_data.parents = depgraph.GetReducedParents(tx_idx);
+ for (auto parent_idx : tx_data.parents) {
+ m_tx_data[parent_idx].children.Set(tx_idx);
}
+ num_deps += tx_data.parents.Count();
+ // Create a singleton chunk for it.
+ tx_data.chunk_idx = num_chunks;
+ m_set_info[num_chunks++] = SetInfo(depgraph, tx_idx);
+ }
+ // Set the reachable transactions for each chunk to the transactions' parents and children.
+ for (SetIdx chunk_idx = 0; chunk_idx < num_transactions; ++chunk_idx) {
+ auto& tx_data = m_tx_data[m_set_info[chunk_idx].transactions.First()];
+ m_reachable[chunk_idx].first = tx_data.parents;
+ m_reachable[chunk_idx].second = tx_data.children;
}
+ Assume(num_chunks == num_transactions);
+ // Mark all chunk sets as chunks.
+ m_chunk_idxs = SetType::Fill(num_chunks);
+ m_cost.InitializeEnd(/*num_txns=*/num_chunks, /*num_deps=*/num_deps);
}
/** Load an existing linearization. Must be called immediately after constructor. The result is
@@ -1018,12 +1210,12 @@ class SpanningForestState
void LoadLinearization(std::span<const DepGraphIndex> old_linearization) noexcept
{
// Add transactions one by one, in order of existing linearization.
- for (DepGraphIndex tx : old_linearization) {
- auto chunk_rep = m_tx_data[tx].chunk_rep;
+ for (DepGraphIndex tx_idx : old_linearization) {
+ auto chunk_idx = m_tx_data[tx_idx].chunk_idx;
// Merge the chunk upwards, as long as merging succeeds.
while (true) {
- chunk_rep = MergeStep<false>(chunk_rep);
- if (chunk_rep == TxIdx(-1)) break;
+ chunk_idx = MergeStep<false>(chunk_idx);
+ if (chunk_idx == INVALID_SET_IDX) break;
}
}
}
@@ -1031,131 +1223,129 @@ class SpanningForestState
/** Make state topological. Can be called after constructing, or after LoadLinearization. */
void MakeTopological() noexcept
{
- for (auto tx : m_transaction_idxs) {
- auto& tx_data = m_tx_data[tx];
- if (tx_data.chunk_rep == tx) {
- m_suboptimal_chunks.emplace_back(tx);
- // Randomize the initial order of suboptimal chunks in the queue.
- TxIdx j = m_rng.randrange(m_suboptimal_chunks.size());
- if (j != m_suboptimal_chunks.size() - 1) {
- std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]);
- }
+ m_cost.MakeTopologicalBegin();
+ Assume(m_suboptimal_chunks.empty());
+ /** What direction to initially merge chunks in; one of the two directions is enough. This
+ * is sufficient because if a non-topological inactive dependency exists between two
+ * chunks, at least one of the two chunks will eventually be processed in a direction that
+ * discovers it - either the lower chunk tries upward, or the upper chunk tries downward.
+ * Chunks that are the result of the merging are always tried in both directions. */
+ unsigned init_dir = m_rng.randbool();
+ /** Which chunks are the result of merging, and thus need merge attempts in both
+ * directions. */
+ SetType merged_chunks;
+ // Mark chunks as suboptimal.
+ m_suboptimal_idxs = m_chunk_idxs;
+ for (auto chunk_idx : m_chunk_idxs) {
+ m_suboptimal_chunks.emplace_back(chunk_idx);
+ // Randomize the initial order of suboptimal chunks in the queue.
+ SetIdx j = m_rng.randrange(m_suboptimal_chunks.size());
+ if (j != m_suboptimal_chunks.size() - 1) {
+ std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]);
}
}
+ unsigned chunks = m_chunk_idxs.Count();
+ unsigned steps = 0;
while (!m_suboptimal_chunks.empty()) {
+ ++steps;
// Pop an entry from the potentially-suboptimal chunk queue.
- TxIdx chunk = m_suboptimal_chunks.front();
+ SetIdx chunk_idx = m_suboptimal_chunks.front();
m_suboptimal_chunks.pop_front();
- auto& chunk_data = m_tx_data[chunk];
- // If what was popped is not currently a chunk representative, continue. This may
+ Assume(m_suboptimal_idxs[chunk_idx]);
+ m_suboptimal_idxs.Reset(chunk_idx);
+ // If what was popped is not currently a chunk, continue. This may
// happen when it was merged with something else since being added.
- if (chunk_data.chunk_rep != chunk) continue;
+ if (!m_chunk_idxs[chunk_idx]) continue;
+ /** What direction(s) to attempt merging in. 1=up, 2=down, 3=both. */
+ unsigned direction = merged_chunks[chunk_idx] ? 3 : init_dir + 1;
int flip = m_rng.randbool();
for (int i = 0; i < 2; ++i) {
if (i ^ flip) {
+ if (!(direction & 1)) continue;
// Attempt to merge the chunk upwards.
- auto result_up = MergeStep<false>(chunk);
- if (result_up != TxIdx(-1)) {
- m_suboptimal_chunks.push_back(result_up);
+ auto result_up = MergeStep<false>(chunk_idx);
+ if (result_up != INVALID_SET_IDX) {
+ if (!m_suboptimal_idxs[result_up]) {
+ m_suboptimal_idxs.Set(result_up);
+ m_suboptimal_chunks.push_back(result_up);
+ }
+ merged_chunks.Set(result_up);
break;
}
} else {
+ if (!(direction & 2)) continue;
// Attempt to merge the chunk downwards.
- auto result_down = MergeStep<true>(chunk);
- if (result_down != TxIdx(-1)) {
- m_suboptimal_chunks.push_back(result_down);
+ auto result_down = MergeStep<true>(chunk_idx);
+ if (result_down != INVALID_SET_IDX) {
+ if (!m_suboptimal_idxs[result_down]) {
+ m_suboptimal_idxs.Set(result_down);
+ m_suboptimal_chunks.push_back(result_down);
+ }
+ merged_chunks.Set(result_down);
break;
}
}
}
}
+ m_cost.MakeTopologicalEnd(/*num_chunks=*/chunks, /*num_steps=*/steps);
}
/** Initialize the data structure for optimization. It must be topological already. */
void StartOptimizing() noexcept
{
+ m_cost.StartOptimizingBegin();
+ Assume(m_suboptimal_chunks.empty());
// Mark chunks suboptimal.
- for (auto tx : m_transaction_idxs) {
- auto& tx_data = m_tx_data[tx];
- if (tx_data.chunk_rep == tx) {
- m_suboptimal_chunks.push_back(tx);
- // Randomize the initial order of suboptimal chunks in the queue.
- TxIdx j = m_rng.randrange(m_suboptimal_chunks.size());
- if (j != m_suboptimal_chunks.size() - 1) {
- std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]);
- }
+ m_suboptimal_idxs = m_chunk_idxs;
+ for (auto chunk_idx : m_chunk_idxs) {
+ m_suboptimal_chunks.push_back(chunk_idx);
+ // Randomize the initial order of suboptimal chunks in the queue.
+ SetIdx j = m_rng.randrange(m_suboptimal_chunks.size());
+ if (j != m_suboptimal_chunks.size() - 1) {
+ std::swap(m_suboptimal_chunks.back(), m_suboptimal_chunks[j]);
}
}
+ m_cost.StartOptimizingEnd(/*num_chunks=*/m_suboptimal_chunks.size());
}
/** Try to improve the forest. Returns false if it is optimal, true otherwise. */
bool OptimizeStep() noexcept
{
- while (!m_suboptimal_chunks.empty()) {
- // Pop an entry from the potentially-suboptimal chunk queue.
- TxIdx chunk = m_suboptimal_chunks.front();
- m_suboptimal_chunks.pop_front();
- auto& chunk_data = m_tx_data[chunk];
- // If what was popped is not currently a chunk representative, continue. This may
- // happen when a split chunk merges in Improve() with one or more existing chunks that
- // are themselves on the suboptimal queue already.
- if (chunk_data.chunk_rep != chunk) continue;
- // Remember the best dependency seen so far.
- DepIdx candidate_dep = DepIdx(-1);
- uint64_t candidate_tiebreak = 0;
- // Iterate over all transactions.
- for (auto tx : chunk_data.chunk_setinfo.transactions) {
- const auto& tx_data = m_tx_data[tx];
- // Iterate over all active child dependencies of the transaction.
- const auto children = std::span{tx_data.child_deps};
- for (DepIdx dep_idx : children) {
- const auto& dep_data = m_dep_data[dep_idx];
- if (!dep_data.active) continue;
- // Skip if this dependency is ineligible (the top chunk that would be created
- // does not have higher feerate than the chunk it is currently part of).
- auto cmp = FeeRateCompare(dep_data.top_setinfo.feerate, chunk_data.chunk_setinfo.feerate);
- if (cmp <= 0) continue;
- // Generate a random tiebreak for this dependency, and reject it if its tiebreak
- // is worse than the best so far. This means that among all eligible
- // dependencies, a uniformly random one will be chosen.
- uint64_t tiebreak = m_rng.rand64();
- if (tiebreak < candidate_tiebreak) continue;
- // Remember this as our (new) candidate dependency.
- candidate_dep = dep_idx;
- candidate_tiebreak = tiebreak;
- }
- }
- // If a candidate with positive gain was found, deactivate it and then make the state
- // topological again with a sequence of merges.
- if (candidate_dep != DepIdx(-1)) Improve(candidate_dep);
- // Stop processing for now, even if nothing was activated, as the loop above may have
- // had a nontrivial cost.
+ auto chunk_idx = PickChunkToOptimize();
+ if (chunk_idx == INVALID_SET_IDX) {
+ // No improvable chunk was found, we are done.
+ return false;
+ }
+ auto [parent_idx, child_idx] = PickDependencyToSplit(chunk_idx);
+ if (parent_idx == TxIdx(-1)) {
+ // Nothing to improve in chunk_idx. Need to continue with other chunks, if any.
return !m_suboptimal_chunks.empty();
}
- // No improvable chunk was found, we are done.
- return false;
+ // Deactivate the found dependency and then make the state topological again with a
+ // sequence of merges.
+ Improve(parent_idx, child_idx);
+ return true;
}
/** Initialize data structure for minimizing the chunks. Can only be called if state is known
* to be optimal. OptimizeStep() cannot be called anymore afterwards. */
void StartMinimizing() noexcept
{
+ m_cost.StartMinimizingBegin();
m_nonminimal_chunks.clear();
m_nonminimal_chunks.reserve(m_transaction_idxs.Count());
// Gather all chunks, and for each, add it with a random pivot in it, and a random initial
// direction, to m_nonminimal_chunks.
- for (auto tx : m_transaction_idxs) {
- auto& tx_data = m_tx_data[tx];
- if (tx_data.chunk_rep == tx) {
- TxIdx pivot_idx = PickRandomTx(tx_data.chunk_setinfo.transactions);
- m_nonminimal_chunks.emplace_back(tx, pivot_idx, m_rng.randbits<1>());
- // Randomize the initial order of nonminimal chunks in the queue.
- TxIdx j = m_rng.randrange(m_nonminimal_chunks.size());
- if (j != m_nonminimal_chunks.size() - 1) {
- std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[j]);
- }
+ for (auto chunk_idx : m_chunk_idxs) {
+ TxIdx pivot_idx = PickRandomTx(m_set_info[chunk_idx].transactions);
+ m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, m_rng.randbits<1>());
+ // Randomize the initial order of nonminimal chunks in the queue.
+ SetIdx j = m_rng.randrange(m_nonminimal_chunks.size());
+ if (j != m_nonminimal_chunks.size() - 1) {
+ std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[j]);
}
}
+ m_cost.StartMinimizingEnd(/*num_chunks=*/m_nonminimal_chunks.size());
}
/** Try to reduce a chunk's size. Returns false if all chunks are minimal, true otherwise. */
@@ -1163,11 +1353,11 @@ class SpanningForestState
{
// If the queue of potentially-non-minimal chunks is empty, we are done.
if (m_nonminimal_chunks.empty()) return false;
+ m_cost.MinimizeStepBegin();
// Pop an entry from the potentially-non-minimal chunk queue.
- auto [chunk_rep, pivot_idx, flags] = m_nonminimal_chunks.front();
+ auto [chunk_idx, pivot_idx, flags] = m_nonminimal_chunks.front();
m_nonminimal_chunks.pop_front();
- auto& chunk_data = m_tx_data[chunk_rep];
- Assume(chunk_data.chunk_rep == chunk_rep);
+ auto& chunk_info = m_set_info[chunk_idx];
/** Whether to move the pivot down rather than up. */
bool move_pivot_down = flags & 1;
/** Whether this is already the second stage. */
@@ -1175,32 +1365,31 @@ class SpanningForestState
// Find a random dependency whose top and bottom set feerates are equal, and which has
// pivot in bottom set (if move_pivot_down) or in top set (if !move_pivot_down).
- DepIdx candidate_dep = DepIdx(-1);
+ std::pair<TxIdx, TxIdx> candidate_dep;
uint64_t candidate_tiebreak{0};
bool have_any = false;
// Iterate over all transactions.
- for (auto tx_idx : chunk_data.chunk_setinfo.transactions) {
+ for (auto tx_idx : chunk_info.transactions) {
const auto& tx_data = m_tx_data[tx_idx];
// Iterate over all active child dependencies of the transaction.
- for (auto dep_idx : tx_data.child_deps) {
- auto& dep_data = m_dep_data[dep_idx];
- // Skip inactive child dependencies.
- if (!dep_data.active) continue;
+ for (auto child_idx : tx_data.active_children) {
+ const auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
// Skip if this dependency does not have equal top and bottom set feerates. Note
// that the top cannot have higher feerate than the bottom, or OptimizeSteps would
// have dealt with it.
- if (dep_data.top_setinfo.feerate << chunk_data.chunk_setinfo.feerate) continue;
+ if (dep_top_info.feerate << chunk_info.feerate) continue;
have_any = true;
// Skip if this dependency does not have pivot in the right place.
- if (move_pivot_down == dep_data.top_setinfo.transactions[pivot_idx]) continue;
+ if (move_pivot_down == dep_top_info.transactions[pivot_idx]) continue;
// Remember this as our chosen dependency if it has a better tiebreak.
uint64_t tiebreak = m_rng.rand64() | 1;
if (tiebreak > candidate_tiebreak) {
candidate_tiebreak = tiebreak;
- candidate_dep = dep_idx;
+ candidate_dep = {tx_idx, child_idx};
}
}
}
+ m_cost.MinimizeStepMid(/*num_txns=*/chunk_info.transactions.Count());
// If no dependencies have equal top and bottom set feerate, this chunk is minimal.
if (!have_any) return true;
// If all found dependencies have the pivot in the wrong place, try moving it in the other
@@ -1208,23 +1397,25 @@ class SpanningForestState
if (candidate_tiebreak == 0) {
// Switch to other direction, and to second phase.
flags ^= 3;
- if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_rep, pivot_idx, flags);
+ if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, flags);
return true;
}
// Otherwise, deactivate the dependency that was found.
- Deactivate(candidate_dep);
- auto& dep_data = m_dep_data[candidate_dep];
- auto parent_chunk_rep = m_tx_data[dep_data.parent].chunk_rep;
- auto child_chunk_rep = m_tx_data[dep_data.child].chunk_rep;
- // Try to activate a dependency between the new bottom and the new top (opposite from the
+ auto [parent_chunk_idx, child_chunk_idx] = Deactivate(candidate_dep.first, candidate_dep.second);
+ // Determine if there is a dependency from the new bottom to the new top (opposite from the
// dependency that was just deactivated).
- auto merged_chunk_rep = MergeChunks(child_chunk_rep, parent_chunk_rep);
- if (merged_chunk_rep != TxIdx(-1)) {
- // A self-merge happened.
- // Re-insert the chunk into the queue, in the same direction. Note that the chunk_rep
+ auto& parent_reachable = m_reachable[parent_chunk_idx].first;
+ auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
+ if (parent_reachable.Overlaps(child_chunk_txn)) {
+ // A self-merge is needed. Note that the child_chunk_idx is the top, and
+ // parent_chunk_idx is the bottom, because we activate a dependency in the reverse
+ // direction compared to the deactivation above.
+ auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
+ // Re-insert the chunk into the queue, in the same direction. Note that the chunk_idx
// will have changed.
- m_nonminimal_chunks.emplace_back(merged_chunk_rep, pivot_idx, flags);
+ m_nonminimal_chunks.emplace_back(merged_chunk_idx, pivot_idx, flags);
+ m_cost.MinimizeStepEnd(/*split=*/false);
} else {
// No self-merge happens, and thus we have found a way to split the chunk. Create two
// smaller chunks, and add them to the queue. The one that contains the current pivot
@@ -1234,17 +1425,18 @@ class SpanningForestState
// possible already. The new chunk without the current pivot gets a new randomly-chosen
// one.
if (move_pivot_down) {
- auto parent_pivot_idx = PickRandomTx(m_tx_data[parent_chunk_rep].chunk_setinfo.transactions);
- m_nonminimal_chunks.emplace_back(parent_chunk_rep, parent_pivot_idx, m_rng.randbits<1>());
- m_nonminimal_chunks.emplace_back(child_chunk_rep, pivot_idx, flags);
+ auto parent_pivot_idx = PickRandomTx(m_set_info[parent_chunk_idx].transactions);
+ m_nonminimal_chunks.emplace_back(parent_chunk_idx, parent_pivot_idx, m_rng.randbits<1>());
+ m_nonminimal_chunks.emplace_back(child_chunk_idx, pivot_idx, flags);
} else {
- auto child_pivot_idx = PickRandomTx(m_tx_data[child_chunk_rep].chunk_setinfo.transactions);
- m_nonminimal_chunks.emplace_back(parent_chunk_rep, pivot_idx, flags);
- m_nonminimal_chunks.emplace_back(child_chunk_rep, child_pivot_idx, m_rng.randbits<1>());
+ auto child_pivot_idx = PickRandomTx(m_set_info[child_chunk_idx].transactions);
+ m_nonminimal_chunks.emplace_back(parent_chunk_idx, pivot_idx, flags);
+ m_nonminimal_chunks.emplace_back(child_chunk_idx, child_pivot_idx, m_rng.randbits<1>());
}
if (m_rng.randbool()) {
std::swap(m_nonminimal_chunks.back(), m_nonminimal_chunks[m_nonminimal_chunks.size() - 2]);
}
+ m_cost.MinimizeStepEnd(/*split=*/true);
}
return true;
}
@@ -1265,42 +1457,38 @@ class SpanningForestState
* - smallest tx size first
* - the lowest transaction, by fallback_order, first
*/
- std::vector GetLinearization(const StrongComparator auto& fallback_order) const noexcept
+ std::vector GetLinearization(const StrongComparator auto& fallback_order) noexcept
{
+ m_cost.GetLinearizationBegin();
/** The output linearization. */
std::vector ret;
- ret.reserve(m_transaction_idxs.Count());
- /** A heap with all chunks (by representative) that can currently be included, sorted by
+ ret.reserve(m_set_info.size());
+ /** A heap with all chunks (by set index) that can currently be included, sorted by
* chunk feerate (high to low), chunk size (small to large), and by least maximum element
* according to the fallback order (which is the second pair element). */
- std::vector> ready_chunks;
- /** Information about chunks:
- * - The first value is only used for chunk representatives, and counts the number of
- * unmet dependencies this chunk has on other chunks (not including dependencies within
- * the chunk itself).
- * - The second value is the number of unmet dependencies overall.
- */
- std::vector> chunk_deps(m_tx_data.size(), {0, 0});
- /** The set of all chunk representatives. */
- SetType chunk_reps;
+ std::vector> ready_chunks;
+ /** For every chunk, indexed by SetIdx, the number of unmet dependencies the chunk has on
+ * other chunks (not including dependencies within the chunk itself). */
+ std::vector chunk_deps(m_set_info.size(), 0);
+ /** For every transaction, indexed by TxIdx, the number of unmet dependencies the
+ * transaction has. */
+ std::vector tx_deps(m_tx_data.size(), 0);
/** A heap with all transactions within the current chunk that can be included, sorted by
* tx feerate (high to low), tx size (small to large), and fallback order. */
std::vector ready_tx;
- // Populate chunk_deps[c] with the number of {out-of-chunk dependencies, dependencies} the
- // child has.
+ // Populate chunk_deps and tx_deps.
+ unsigned num_deps{0};
for (TxIdx chl_idx : m_transaction_idxs) {
const auto& chl_data = m_tx_data[chl_idx];
- chunk_deps[chl_idx].second = chl_data.parents.Count();
- auto chl_chunk_rep = chl_data.chunk_rep;
- chunk_reps.Set(chl_chunk_rep);
- for (auto par_idx : chl_data.parents) {
- auto par_chunk_rep = m_tx_data[par_idx].chunk_rep;
- chunk_deps[chl_chunk_rep].first += (par_chunk_rep != chl_chunk_rep);
- }
+ tx_deps[chl_idx] = chl_data.parents.Count();
+ num_deps += tx_deps[chl_idx];
+ auto chl_chunk_idx = chl_data.chunk_idx;
+ auto& chl_chunk_info = m_set_info[chl_chunk_idx];
+ chunk_deps[chl_chunk_idx] += (chl_data.parents - chl_chunk_info.transactions).Count();
}
/** Function to compute the highest element of a chunk, by fallback_order. */
- auto max_fallback_fn = [&](TxIdx chunk_rep) noexcept {
- auto& chunk = m_tx_data[chunk_rep].chunk_setinfo.transactions;
+ auto max_fallback_fn = [&](SetIdx chunk_idx) noexcept {
+ auto& chunk = m_set_info[chunk_idx].transactions;
auto it = chunk.begin();
DepGraphIndex ret = *it;
++it;
@@ -1338,8 +1526,8 @@ class SpanningForestState
// Bail out for identical chunks.
if (a.first == b.first) return false;
// First sort by increasing chunk feerate.
- auto& chunk_feerate_a = m_tx_data[a.first].chunk_setinfo.feerate;
- auto& chunk_feerate_b = m_tx_data[b.first].chunk_setinfo.feerate;
+ auto& chunk_feerate_a = m_set_info[a.first].feerate;
+ auto& chunk_feerate_b = m_set_info[b.first].feerate;
auto feerate_cmp = FeeRateCompare(chunk_feerate_a, chunk_feerate_b);
if (feerate_cmp != 0) return feerate_cmp < 0;
// Then by decreasing chunk size.
@@ -1354,24 +1542,23 @@ class SpanningForestState
return a.second < b.second;
};
// Construct a heap with all chunks that have no out-of-chunk dependencies.
- for (TxIdx chunk_rep : chunk_reps) {
- if (chunk_deps[chunk_rep].first == 0) {
- ready_chunks.emplace_back(chunk_rep, max_fallback_fn(chunk_rep));
+ for (SetIdx chunk_idx : m_chunk_idxs) {
+ if (chunk_deps[chunk_idx] == 0) {
+ ready_chunks.emplace_back(chunk_idx, max_fallback_fn(chunk_idx));
}
}
std::make_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
// Pop chunks off the heap.
while (!ready_chunks.empty()) {
- auto [chunk_rep, _rnd] = ready_chunks.front();
+ auto [chunk_idx, _rnd] = ready_chunks.front();
std::pop_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
ready_chunks.pop_back();
- Assume(m_tx_data[chunk_rep].chunk_rep == chunk_rep);
- Assume(chunk_deps[chunk_rep].first == 0);
- const auto& chunk_txn = m_tx_data[chunk_rep].chunk_setinfo.transactions;
+ Assume(chunk_deps[chunk_idx] == 0);
+ const auto& chunk_txn = m_set_info[chunk_idx].transactions;
// Build heap of all includable transactions in chunk.
Assume(ready_tx.empty());
for (TxIdx tx_idx : chunk_txn) {
- if (chunk_deps[tx_idx].second == 0) ready_tx.push_back(tx_idx);
+ if (tx_deps[tx_idx] == 0) ready_tx.push_back(tx_idx);
}
Assume(!ready_tx.empty());
std::make_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
@@ -1389,25 +1576,26 @@ class SpanningForestState
for (TxIdx chl_idx : tx_data.children) {
auto& chl_data = m_tx_data[chl_idx];
// Decrement tx dependency count.
- Assume(chunk_deps[chl_idx].second > 0);
- if (--chunk_deps[chl_idx].second == 0 && chunk_txn[chl_idx]) {
+ Assume(tx_deps[chl_idx] > 0);
+ if (--tx_deps[chl_idx] == 0 && chunk_txn[chl_idx]) {
// Child tx has no dependencies left, and is in this chunk. Add it to the tx heap.
ready_tx.push_back(chl_idx);
std::push_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
}
// Decrement chunk dependency count if this is out-of-chunk dependency.
- if (chl_data.chunk_rep != chunk_rep) {
- Assume(chunk_deps[chl_data.chunk_rep].first > 0);
- if (--chunk_deps[chl_data.chunk_rep].first == 0) {
+ if (chl_data.chunk_idx != chunk_idx) {
+ Assume(chunk_deps[chl_data.chunk_idx] > 0);
+ if (--chunk_deps[chl_data.chunk_idx] == 0) {
// Child chunk has no dependencies left. Add it to the chunk heap.
- ready_chunks.emplace_back(chl_data.chunk_rep, max_fallback_fn(chl_data.chunk_rep));
+ ready_chunks.emplace_back(chl_data.chunk_idx, max_fallback_fn(chl_data.chunk_idx));
std::push_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
}
}
}
}
}
- Assume(ret.size() == m_transaction_idxs.Count());
+ Assume(ret.size() == m_set_info.size());
+ m_cost.GetLinearizationEnd(/*num_txns=*/m_set_info.size(), /*num_deps=*/num_deps);
return ret;
}
@@ -1427,168 +1615,170 @@ class SpanningForestState
std::vector GetDiagram() const noexcept
{
std::vector ret;
- for (auto tx : m_transaction_idxs) {
- if (m_tx_data[tx].chunk_rep == tx) {
- ret.push_back(m_tx_data[tx].chunk_setinfo.feerate);
- }
+ for (auto chunk_idx : m_chunk_idxs) {
+ ret.push_back(m_set_info[chunk_idx].feerate);
}
std::sort(ret.begin(), ret.end(), std::greater{});
return ret;
}
/** Determine how much work was performed so far. */
- uint64_t GetCost() const noexcept { return m_cost; }
+ uint64_t GetCost() const noexcept { return m_cost.GetCost(); }
/** Verify internal consistency of the data structure. */
- void SanityCheck(const DepGraph& depgraph) const
+ void SanityCheck() const
{
//
// Verify dependency parent/child information, and build list of (active) dependencies.
//
std::vector> expected_dependencies;
- std::vector> all_dependencies;
- std::vector> active_dependencies;
- for (auto parent_idx : depgraph.Positions()) {
- for (auto child_idx : depgraph.GetReducedChildren(parent_idx)) {
+ std::vector> all_dependencies;
+ std::vector> active_dependencies;
+ for (auto parent_idx : m_depgraph.Positions()) {
+ for (auto child_idx : m_depgraph.GetReducedChildren(parent_idx)) {
expected_dependencies.emplace_back(parent_idx, child_idx);
}
}
- for (DepIdx dep_idx = 0; dep_idx < m_dep_data.size(); ++dep_idx) {
- const auto& dep_data = m_dep_data[dep_idx];
- all_dependencies.emplace_back(dep_data.parent, dep_data.child, dep_idx);
- // Also add to active_dependencies if it is active.
- if (m_dep_data[dep_idx].active) {
- active_dependencies.emplace_back(dep_data.parent, dep_data.child, dep_idx);
+ for (auto tx_idx : m_transaction_idxs) {
+ for (auto child_idx : m_tx_data[tx_idx].children) {
+ all_dependencies.emplace_back(tx_idx, child_idx);
+ if (m_tx_data[tx_idx].active_children[child_idx]) {
+ active_dependencies.emplace_back(tx_idx, child_idx);
+ }
}
}
std::sort(expected_dependencies.begin(), expected_dependencies.end());
std::sort(all_dependencies.begin(), all_dependencies.end());
- assert(expected_dependencies.size() == all_dependencies.size());
- for (size_t i = 0; i < expected_dependencies.size(); ++i) {
- assert(expected_dependencies[i] ==
- std::make_pair(std::get<0>(all_dependencies[i]),
- std::get<1>(all_dependencies[i])));
- }
+ assert(expected_dependencies == all_dependencies);
//
// Verify the chunks against the list of active dependencies
//
- for (auto tx_idx: depgraph.Positions()) {
- // Only process chunks for now.
- if (m_tx_data[tx_idx].chunk_rep == tx_idx) {
- const auto& chunk_data = m_tx_data[tx_idx];
- // Verify that transactions in the chunk point back to it. This guarantees
- // that chunks are non-overlapping.
- for (auto chunk_tx : chunk_data.chunk_setinfo.transactions) {
- assert(m_tx_data[chunk_tx].chunk_rep == tx_idx);
- }
- // Verify the chunk's transaction set: it must contain the representative, and for
- // every active dependency, if it contains the parent or child, it must contain
- // both. It must have exactly N-1 active dependencies in it, guaranteeing it is
- // acyclic.
- SetType expected_chunk = SetType::Singleton(tx_idx);
- while (true) {
- auto old = expected_chunk;
- size_t active_dep_count{0};
- for (const auto& [par, chl, _dep] : active_dependencies) {
- if (expected_chunk[par] || expected_chunk[chl]) {
- expected_chunk.Set(par);
- expected_chunk.Set(chl);
- ++active_dep_count;
- }
- }
- if (old == expected_chunk) {
- assert(expected_chunk.Count() == active_dep_count + 1);
- break;
+ SetType chunk_cover;
+ for (auto chunk_idx : m_chunk_idxs) {
+ const auto& chunk_info = m_set_info[chunk_idx];
+ // Verify that transactions in the chunk point back to it. This guarantees
+ // that chunks are non-overlapping.
+ for (auto tx_idx : chunk_info.transactions) {
+ assert(m_tx_data[tx_idx].chunk_idx == chunk_idx);
+ }
+ assert(!chunk_cover.Overlaps(chunk_info.transactions));
+ chunk_cover |= chunk_info.transactions;
+ // Verify the chunk's transaction set: start from an arbitrary chunk transaction,
+ // and for every active dependency, if it contains the parent or child, add the
+ // other. It must have exactly N-1 active dependencies in it, guaranteeing it is
+ // acyclic.
+ assert(chunk_info.transactions.Any());
+ SetType expected_chunk = SetType::Singleton(chunk_info.transactions.First());
+ while (true) {
+ auto old = expected_chunk;
+ size_t active_dep_count{0};
+ for (const auto& [par, chl] : active_dependencies) {
+ if (expected_chunk[par] || expected_chunk[chl]) {
+ expected_chunk.Set(par);
+ expected_chunk.Set(chl);
+ ++active_dep_count;
}
}
- assert(chunk_data.chunk_setinfo.transactions == expected_chunk);
- // Verify the chunk's feerate.
- assert(chunk_data.chunk_setinfo.feerate ==
- depgraph.FeeRate(chunk_data.chunk_setinfo.transactions));
+ if (old == expected_chunk) {
+ assert(expected_chunk.Count() == active_dep_count + 1);
+ break;
+ }
}
+ assert(chunk_info.transactions == expected_chunk);
+ // Verify the chunk's feerate.
+ assert(chunk_info.feerate == m_depgraph.FeeRate(chunk_info.transactions));
+ // Verify the chunk's reachable transactions.
+ assert(m_reachable[chunk_idx] == GetReachable(expected_chunk));
+ // Verify that the chunk's reachable transactions don't include its own transactions.
+ assert(!m_reachable[chunk_idx].first.Overlaps(chunk_info.transactions));
+ assert(!m_reachable[chunk_idx].second.Overlaps(chunk_info.transactions));
}
+ // Verify that together, the chunks cover all transactions.
+ assert(chunk_cover == m_depgraph.Positions());
//
- // Verify other transaction data.
+ // Verify transaction data.
//
- assert(m_transaction_idxs == depgraph.Positions());
+ assert(m_transaction_idxs == m_depgraph.Positions());
for (auto tx_idx : m_transaction_idxs) {
const auto& tx_data = m_tx_data[tx_idx];
- // Verify it has a valid chunk representative, and that chunk includes this
- // transaction.
- assert(m_tx_data[tx_data.chunk_rep].chunk_rep == tx_data.chunk_rep);
- assert(m_tx_data[tx_data.chunk_rep].chunk_setinfo.transactions[tx_idx]);
+ // Verify it has a valid chunk index, and that chunk includes this transaction.
+ assert(m_chunk_idxs[tx_data.chunk_idx]);
+ assert(m_set_info[tx_data.chunk_idx].transactions[tx_idx]);
// Verify parents/children.
- assert(tx_data.parents == depgraph.GetReducedParents(tx_idx));
- assert(tx_data.children == depgraph.GetReducedChildren(tx_idx));
- // Verify list of child dependencies.
- std::vector expected_child_deps;
- for (const auto& [par_idx, chl_idx, dep_idx] : all_dependencies) {
- if (tx_idx == par_idx) {
- assert(tx_data.children[chl_idx]);
- expected_child_deps.push_back(dep_idx);
- }
+ assert(tx_data.parents == m_depgraph.GetReducedParents(tx_idx));
+ assert(tx_data.children == m_depgraph.GetReducedChildren(tx_idx));
+ // Verify active_children is a subset of children.
+ assert(tx_data.active_children.IsSubsetOf(tx_data.children));
+ // Verify each active child's dep_top_idx points to a valid non-chunk set.
+ for (auto child_idx : tx_data.active_children) {
+ assert(tx_data.dep_top_idx[child_idx] < m_set_info.size());
+ assert(!m_chunk_idxs[tx_data.dep_top_idx[child_idx]]);
}
- std::sort(expected_child_deps.begin(), expected_child_deps.end());
- auto child_deps_copy = tx_data.child_deps;
- std::sort(child_deps_copy.begin(), child_deps_copy.end());
- assert(expected_child_deps == child_deps_copy);
}
//
- // Verify active dependencies' top_setinfo.
+ // Verify active dependencies' top sets.
//
- for (const auto& [par_idx, chl_idx, dep_idx] : active_dependencies) {
- const auto& dep_data = m_dep_data[dep_idx];
- // Verify the top_info's transactions: it must contain the parent, and for every
- // active dependency, except dep_idx itself, if it contains the parent or child, it
- // must contain both.
+ for (const auto& [par_idx, chl_idx] : active_dependencies) {
+ // Verify the top set's transactions: it must contain the parent, and for every
+ // active dependency, except the chl_idx->par_idx dependency itself, if it contains the
+ // parent or child, it must contain both. It must have exactly N-1 active dependencies
+ // in it, guaranteeing it is acyclic.
SetType expected_top = SetType::Singleton(par_idx);
while (true) {
auto old = expected_top;
- for (const auto& [par2_idx, chl2_idx, dep2_idx] : active_dependencies) {
- if (dep2_idx != dep_idx && (expected_top[par2_idx] || expected_top[chl2_idx])) {
+ size_t active_dep_count{0};
+ for (const auto& [par2_idx, chl2_idx] : active_dependencies) {
+ if (par_idx == par2_idx && chl_idx == chl2_idx) continue;
+ if (expected_top[par2_idx] || expected_top[chl2_idx]) {
expected_top.Set(par2_idx);
expected_top.Set(chl2_idx);
+ ++active_dep_count;
}
}
- if (old == expected_top) break;
+ if (old == expected_top) {
+ assert(expected_top.Count() == active_dep_count + 1);
+ break;
+ }
}
assert(!expected_top[chl_idx]);
- assert(dep_data.top_setinfo.transactions == expected_top);
- // Verify the top_info's feerate.
- assert(dep_data.top_setinfo.feerate ==
- depgraph.FeeRate(dep_data.top_setinfo.transactions));
+ auto& dep_top_info = m_set_info[m_tx_data[par_idx].dep_top_idx[chl_idx]];
+ assert(dep_top_info.transactions == expected_top);
+ // Verify the top set's feerate.
+ assert(dep_top_info.feerate == m_depgraph.FeeRate(dep_top_info.transactions));
}
//
// Verify m_suboptimal_chunks.
//
+ SetType suboptimal_idxs;
for (size_t i = 0; i < m_suboptimal_chunks.size(); ++i) {
- auto tx_idx = m_suboptimal_chunks[i];
- assert(m_transaction_idxs[tx_idx]);
+ auto chunk_idx = m_suboptimal_chunks[i];
+ assert(!suboptimal_idxs[chunk_idx]);
+ suboptimal_idxs.Set(chunk_idx);
}
+ assert(m_suboptimal_idxs == suboptimal_idxs);
//
// Verify m_nonminimal_chunks.
//
- SetType nonminimal_reps;
+ SetType nonminimal_idxs;
for (size_t i = 0; i < m_nonminimal_chunks.size(); ++i) {
- auto [chunk_rep, pivot, flags] = m_nonminimal_chunks[i];
- assert(m_tx_data[chunk_rep].chunk_rep == chunk_rep);
- assert(m_tx_data[pivot].chunk_rep == chunk_rep);
- assert(!nonminimal_reps[chunk_rep]);
- nonminimal_reps.Set(chunk_rep);
+ auto [chunk_idx, pivot, flags] = m_nonminimal_chunks[i];
+ assert(m_tx_data[pivot].chunk_idx == chunk_idx);
+ assert(!nonminimal_idxs[chunk_idx]);
+ nonminimal_idxs.Set(chunk_idx);
}
- assert(nonminimal_reps.IsSubsetOf(m_transaction_idxs));
+ assert(nonminimal_idxs.IsSubsetOf(m_chunk_idxs));
}
};
/** Find or improve a linearization for a cluster.
*
* @param[in] depgraph Dependency graph of the cluster to be linearized.
- * @param[in] max_iterations Upper bound on the amount of work that will be done.
+ * @param[in] max_cost Upper bound on the amount of work that will be done.
* @param[in] rng_seed A random number seed to control search order. This prevents peers
* from predicting exactly which clusters would be hard for us to
* linearize.
@@ -1608,7 +1798,7 @@ class SpanningForestState
template
std::tuple, bool, uint64_t> Linearize(
const DepGraph& depgraph,
- uint64_t max_iterations,
+ uint64_t max_cost,
uint64_t rng_seed,
const StrongComparator auto& fallback_order,
std::span old_linearization = {},
@@ -1624,23 +1814,23 @@ std::tuple, bool, uint64_t> Linearize(
}
- // Make improvement steps to it until we hit the max_iterations limit, or an optimal result
- // is found.
+ // Make improvement steps to it until we hit the max_cost limit, or an optimal result
+ // is found.
- if (forest.GetCost() < max_iterations) {
+ if (forest.GetCost() < max_cost) {
forest.StartOptimizing();
do {
if (!forest.OptimizeStep()) break;
- } while (forest.GetCost() < max_iterations);
+ } while (forest.GetCost() < max_cost);
}
- // Make chunk minimization steps until we hit the max_cost limit, or all chunks are
- // minimal.
bool optimal = false;
- if (forest.GetCost() < max_iterations) {
+ if (forest.GetCost() < max_cost) {
forest.StartMinimizing();
do {
if (!forest.MinimizeStep()) {
optimal = true;
break;
}
- } while (forest.GetCost() < max_iterations);
+ } while (forest.GetCost() < max_cost);
}
return {forest.GetLinearization(fallback_order), optimal, forest.GetCost()};
}
diff --git a/src/coins.cpp b/src/coins.cpp
index fc33c521617c..25b1ead0c1dd 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -15,6 +15,7 @@ TRACEPOINT_SEMAPHORE(utxocache, spent);
TRACEPOINT_SEMAPHORE(utxocache, uncache);
std::optional CCoinsView::GetCoin(const COutPoint& outpoint) const { return std::nullopt; }
+std::optional CCoinsView::PeekCoin(const COutPoint& outpoint) const { return GetCoin(outpoint); }
uint256 CCoinsView::GetBestBlock() const { return uint256(); }
std::vector CCoinsView::GetHeadBlocks() const { return std::vector(); }
void CCoinsView::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& hashBlock)
@@ -31,6 +32,7 @@ bool CCoinsView::HaveCoin(const COutPoint &outpoint) const
CCoinsViewBacked::CCoinsViewBacked(CCoinsView *viewIn) : base(viewIn) { }
std::optional CCoinsViewBacked::GetCoin(const COutPoint& outpoint) const { return base->GetCoin(outpoint); }
+std::optional CCoinsViewBacked::PeekCoin(const COutPoint& outpoint) const { return base->PeekCoin(outpoint); }
bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base->HaveCoin(outpoint); }
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
std::vector CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); }
@@ -39,6 +41,14 @@ void CCoinsViewBacked::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& h
std::unique_ptr CCoinsViewBacked::Cursor() const { return base->Cursor(); }
size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); }
+std::optional CCoinsViewCache::PeekCoin(const COutPoint& outpoint) const
+{
+ if (auto it{cacheCoins.find(outpoint)}; it != cacheCoins.end()) {
+ return it->second.coin.IsSpent() ? std::nullopt : std::optional{it->second.coin};
+ }
+ return base->PeekCoin(outpoint);
+}
+
CCoinsViewCache::CCoinsViewCache(CCoinsView* baseIn, bool deterministic) :
CCoinsViewBacked(baseIn), m_deterministic(deterministic),
cacheCoins(0, SaltedOutpointHasher(/*deterministic=*/deterministic), CCoinsMap::key_equal{}, &m_cache_coins_memory_resource)
@@ -50,10 +60,15 @@ size_t CCoinsViewCache::DynamicMemoryUsage() const {
return memusage::DynamicUsage(cacheCoins) + cachedCoinsUsage;
}
+std::optional CCoinsViewCache::FetchCoinFromBase(const COutPoint& outpoint) const
+{
+ return base->GetCoin(outpoint);
+}
+
CCoinsMap::iterator CCoinsViewCache::FetchCoin(const COutPoint &outpoint) const {
const auto [ret, inserted] = cacheCoins.try_emplace(outpoint);
if (inserted) {
- if (auto coin{base->GetCoin(outpoint)}) {
+ if (auto coin{FetchCoinFromBase(outpoint)}) {
ret->second.coin = std::move(*coin);
cachedCoinsUsage += ret->second.coin.DynamicMemoryUsage();
Assert(!ret->second.coin.IsSpent());
@@ -98,10 +113,12 @@ void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possi
fresh = !it->second.IsDirty();
}
if (!inserted) {
- cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();
+ Assume(TrySub(m_dirty_count, it->second.IsDirty()));
+ Assume(TrySub(cachedCoinsUsage, it->second.coin.DynamicMemoryUsage()));
}
it->second.coin = std::move(coin);
CCoinsCacheEntry::SetDirty(*it, m_sentinel);
+ ++m_dirty_count;
if (fresh) CCoinsCacheEntry::SetFresh(*it, m_sentinel);
cachedCoinsUsage += it->second.coin.DynamicMemoryUsage();
TRACEPOINT(utxocache, add,
@@ -117,6 +134,7 @@ void CCoinsViewCache::EmplaceCoinInternalDANGER(COutPoint&& outpoint, Coin&& coi
auto [it, inserted] = cacheCoins.try_emplace(std::move(outpoint), std::move(coin));
if (inserted) {
CCoinsCacheEntry::SetDirty(*it, m_sentinel);
+ ++m_dirty_count;
cachedCoinsUsage += mem_usage;
}
}
@@ -135,7 +153,8 @@ void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, bool
bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) {
CCoinsMap::iterator it = FetchCoin(outpoint);
if (it == cacheCoins.end()) return false;
- cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();
+ Assume(TrySub(m_dirty_count, it->second.IsDirty()));
+ Assume(TrySub(cachedCoinsUsage, it->second.coin.DynamicMemoryUsage()));
TRACEPOINT(utxocache, spent,
outpoint.hash.data(),
(uint32_t)outpoint.n,
@@ -149,6 +168,7 @@ bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) {
cacheCoins.erase(it);
} else {
CCoinsCacheEntry::SetDirty(*it, m_sentinel);
+ ++m_dirty_count;
it->second.coin.Clear();
}
return true;
@@ -207,8 +227,9 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha
} else {
entry.coin = it->second.coin;
}
- cachedCoinsUsage += entry.coin.DynamicMemoryUsage();
CCoinsCacheEntry::SetDirty(*itUs, m_sentinel);
+ ++m_dirty_count;
+ cachedCoinsUsage += entry.coin.DynamicMemoryUsage();
// We can mark it FRESH in the parent if it was FRESH in the child
// Otherwise it might have just been flushed from the parent's cache
// and already exist in the grandparent
@@ -227,11 +248,12 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha
if (itUs->second.IsFresh() && it->second.coin.IsSpent()) {
// The grandparent cache does not have an entry, and the coin
// has been spent. We can just delete it from the parent cache.
- cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage();
+ Assume(TrySub(m_dirty_count, itUs->second.IsDirty()));
+ Assume(TrySub(cachedCoinsUsage, itUs->second.coin.DynamicMemoryUsage()));
cacheCoins.erase(itUs);
} else {
// A normal modification.
- cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage();
+ Assume(TrySub(cachedCoinsUsage, itUs->second.coin.DynamicMemoryUsage()));
if (cursor.WillErase(*it)) {
// Since this entry will be erased,
// we can move the coin into us instead of copying it
@@ -240,7 +262,10 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha
itUs->second.coin = it->second.coin;
}
cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage();
- CCoinsCacheEntry::SetDirty(*itUs, m_sentinel);
+ if (!itUs->second.IsDirty()) {
+ CCoinsCacheEntry::SetDirty(*itUs, m_sentinel);
+ ++m_dirty_count;
+ }
// NOTE: It isn't safe to mark the coin as FRESH in the parent
// cache. If it already existed and was spent in the parent
// cache then marking it FRESH would prevent that spentness
@@ -253,8 +278,9 @@ void CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& ha
void CCoinsViewCache::Flush(bool reallocate_cache)
{
- auto cursor{CoinsViewCacheCursor(m_sentinel, cacheCoins, /*will_erase=*/true)};
+ auto cursor{CoinsViewCacheCursor(m_dirty_count, m_sentinel, cacheCoins, /*will_erase=*/true)};
base->BatchWrite(cursor, hashBlock);
+ Assume(m_dirty_count == 0);
cacheCoins.clear();
if (reallocate_cache) {
ReallocateCache();
@@ -264,8 +290,9 @@ void CCoinsViewCache::Flush(bool reallocate_cache)
void CCoinsViewCache::Sync()
{
- auto cursor{CoinsViewCacheCursor(m_sentinel, cacheCoins, /*will_erase=*/false)};
+ auto cursor{CoinsViewCacheCursor(m_dirty_count, m_sentinel, cacheCoins, /*will_erase=*/false)};
base->BatchWrite(cursor, hashBlock);
+ Assume(m_dirty_count == 0);
if (m_sentinel.second.Next() != &m_sentinel) {
/* BatchWrite must clear flags of all entries */
throw std::logic_error("Not all unspent flagged entries were cleared");
@@ -276,6 +303,7 @@ void CCoinsViewCache::Reset() noexcept
{
cacheCoins.clear();
cachedCoinsUsage = 0;
+ m_dirty_count = 0;
SetBestBlock(uint256::ZERO);
}
@@ -283,7 +311,7 @@ void CCoinsViewCache::Uncache(const COutPoint& hash)
{
CCoinsMap::iterator it = cacheCoins.find(hash);
if (it != cacheCoins.end() && !it->second.IsDirty()) {
- cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();
+ Assume(TrySub(cachedCoinsUsage, it->second.coin.DynamicMemoryUsage()));
TRACEPOINT(utxocache, uncache,
hash.hash.data(),
(uint32_t)hash.n,
@@ -348,7 +376,7 @@ void CCoinsViewCache::SanityCheck() const
// Count the number of entries actually in the list.
++count_linked;
}
- assert(count_linked == count_dirty);
+ assert(count_dirty == count_linked && count_dirty == m_dirty_count);
assert(recomputed_usage == cachedCoinsUsage);
}
@@ -393,3 +421,8 @@ bool CCoinsViewErrorCatcher::HaveCoin(const COutPoint& outpoint) const
{
return ExecuteBackedWrapper([&]() { return CCoinsViewBacked::HaveCoin(outpoint); }, m_err_callbacks);
}
+
+std::optional CCoinsViewErrorCatcher::PeekCoin(const COutPoint& outpoint) const
+{
+ return ExecuteBackedWrapper>([&]() { return CCoinsViewBacked::PeekCoin(outpoint); }, m_err_callbacks);
+}
diff --git a/src/coins.h b/src/coins.h
index 4b39c0bacd28..08c1886f9307 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -15,6 +15,7 @@
#include
#include
#include
+#include
#include
#include
@@ -265,10 +266,11 @@ struct CoinsViewCacheCursor
//! This is an optimization compared to erasing all entries as the cursor iterates them when will_erase is set.
//! Calling CCoinsMap::clear() afterwards is faster because a CoinsCachePair cannot be coerced back into a
//! CCoinsMap::iterator to be erased, and must therefore be looked up again by key in the CCoinsMap before being erased.
- CoinsViewCacheCursor(CoinsCachePair& sentinel LIFETIMEBOUND,
+ CoinsViewCacheCursor(size_t& dirty_count LIFETIMEBOUND,
+ CoinsCachePair& sentinel LIFETIMEBOUND,
CCoinsMap& map LIFETIMEBOUND,
bool will_erase) noexcept
- : m_sentinel(sentinel), m_map(map), m_will_erase(will_erase) {}
+ : m_dirty_count(dirty_count), m_sentinel(sentinel), m_map(map), m_will_erase(will_erase) {}
inline CoinsCachePair* Begin() const noexcept { return m_sentinel.second.Next(); }
inline CoinsCachePair* End() const noexcept { return &m_sentinel; }
@@ -277,6 +279,7 @@ struct CoinsViewCacheCursor
inline CoinsCachePair* NextAndMaybeErase(CoinsCachePair& current) noexcept
{
const auto next_entry{current.second.Next()};
+ Assume(TrySub(m_dirty_count, current.second.IsDirty()));
// If we are not going to erase the cache, we must still erase spent entries.
// Otherwise, clear the state of the entry.
if (!m_will_erase) {
@@ -291,7 +294,10 @@ struct CoinsViewCacheCursor
}
inline bool WillErase(CoinsCachePair& current) const noexcept { return m_will_erase || current.second.coin.IsSpent(); }
+ size_t GetDirtyCount() const noexcept { return m_dirty_count; }
+ size_t GetTotalCount() const noexcept { return m_map.size(); }
private:
+ size_t& m_dirty_count;
CoinsCachePair& m_sentinel;
CCoinsMap& m_map;
bool m_will_erase;
@@ -302,9 +308,15 @@ class CCoinsView
{
public:
//! Retrieve the Coin (unspent transaction output) for a given outpoint.
+ //! May populate the cache. Use PeekCoin() to perform a non-caching lookup.
virtual std::optional GetCoin(const COutPoint& outpoint) const;
+ //! Retrieve the Coin (unspent transaction output) for a given outpoint, without caching results.
+ //! Does not populate the cache. Use GetCoin() to cache the result.
+ virtual std::optional PeekCoin(const COutPoint& outpoint) const;
+
//! Just check whether a given outpoint is unspent.
+ //! May populate the cache. Use PeekCoin() to perform a non-caching lookup.
virtual bool HaveCoin(const COutPoint &outpoint) const;
//! Retrieve the block hash whose state this CCoinsView currently represents
@@ -340,6 +352,7 @@ class CCoinsViewBacked : public CCoinsView
public:
CCoinsViewBacked(CCoinsView *viewIn);
std::optional GetCoin(const COutPoint& outpoint) const override;
+ std::optional PeekCoin(const COutPoint& outpoint) const override;
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
std::vector GetHeadBlocks() const override;
@@ -369,6 +382,8 @@ class CCoinsViewCache : public CCoinsViewBacked
/* Cached dynamic memory usage for the inner Coin objects. */
mutable size_t cachedCoinsUsage{0};
+ /* Running count of dirty Coin cache entries. */
+ mutable size_t m_dirty_count{0};
/**
* Discard all modifications made to this cache without flushing to the base view.
@@ -376,6 +391,9 @@ class CCoinsViewCache : public CCoinsViewBacked
*/
void Reset() noexcept;
+ /* Fetch the coin from base. Used for cache misses in FetchCoin. */
+ virtual std::optional FetchCoinFromBase(const COutPoint& outpoint) const;
+
public:
CCoinsViewCache(CCoinsView *baseIn, bool deterministic = false);
@@ -386,6 +404,7 @@ class CCoinsViewCache : public CCoinsViewBacked
// Standard CCoinsView methods
std::optional GetCoin(const COutPoint& outpoint) const override;
+ std::optional PeekCoin(const COutPoint& outpoint) const override;
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
void SetBestBlock(const uint256 &hashBlock);
@@ -458,9 +477,12 @@ class CCoinsViewCache : public CCoinsViewBacked
*/
void Uncache(const COutPoint &outpoint);
- //! Calculate the size of the cache (in number of transaction outputs)
+ //! Size of the cache (in number of transaction outputs)
unsigned int GetCacheSize() const;
+ //! Number of dirty cache entries (transaction outputs)
+ size_t GetDirtyCount() const noexcept { return m_dirty_count; }
+
//! Calculate the size of the cache (in bytes)
size_t DynamicMemoryUsage() const;
@@ -504,6 +526,27 @@ class CCoinsViewCache : public CCoinsViewBacked
CCoinsMap::iterator FetchCoin(const COutPoint &outpoint) const;
};
+/**
+ * CCoinsViewCache overlay that avoids populating/mutating parent cache layers on cache misses.
+ *
+ * This is achieved by fetching coins from the base view using PeekCoin() instead of GetCoin(),
+ * so intermediate CCoinsViewCache layers are not filled.
+ *
+ * Used during ConnectBlock() as an ephemeral, resettable top-level view that is flushed only
+ * on success, so invalid blocks don't pollute the underlying cache.
+ */
+class CoinsViewOverlay : public CCoinsViewCache
+{
+private:
+ std::optional FetchCoinFromBase(const COutPoint& outpoint) const override
+ {
+ return base->PeekCoin(outpoint);
+ }
+
+public:
+ using CCoinsViewCache::CCoinsViewCache;
+};
+
//! Utility function to add all of a transaction's outputs to a cache.
//! When check is false, this assumes that overwrites are only possible for coinbase transactions.
//! When check is true, the underlying view may be queried to determine whether an addition is
@@ -536,6 +579,7 @@ class CCoinsViewErrorCatcher final : public CCoinsViewBacked
std::optional GetCoin(const COutPoint& outpoint) const override;
bool HaveCoin(const COutPoint &outpoint) const override;
+ std::optional PeekCoin(const COutPoint& outpoint) const override;
private:
/** A list of callbacks to execute upon leveldb read error. */
diff --git a/src/common/args.cpp b/src/common/args.cpp
index 3ffa4d3f105e..5c8589cf4402 100644
--- a/src/common/args.cpp
+++ b/src/common/args.cpp
@@ -483,29 +483,33 @@ std::string SettingToString(const common::SettingsValue& value, const std::strin
return SettingToString(value).value_or(strDefault);
}
-int64_t ArgsManager::GetIntArg(const std::string& strArg, int64_t nDefault) const
+template
+Int ArgsManager::GetArg(const std::string& strArg, Int nDefault) const
{
- return GetIntArg(strArg).value_or(nDefault);
+ return GetArg(strArg).value_or(nDefault);
}
-std::optional ArgsManager::GetIntArg(const std::string& strArg) const
+template
+std::optional ArgsManager::GetArg(const std::string& strArg) const
{
const common::SettingsValue value = GetSetting(strArg);
- return SettingToInt(value);
+ return SettingTo(value);
}
-std::optional SettingToInt(const common::SettingsValue& value)
+template
+std::optional SettingTo(const common::SettingsValue& value)
{
if (value.isNull()) return std::nullopt;
if (value.isFalse()) return 0;
if (value.isTrue()) return 1;
- if (value.isNum()) return value.getInt();
- return LocaleIndependentAtoi(value.get_str());
+ if (value.isNum()) return value.getInt();
+ return LocaleIndependentAtoi(value.get_str());
}
-int64_t SettingToInt(const common::SettingsValue& value, int64_t nDefault)
+template
+Int SettingTo(const common::SettingsValue& value, Int nDefault)
{
- return SettingToInt(value).value_or(nDefault);
+ return SettingTo(value).value_or(nDefault);
}
bool ArgsManager::GetBoolArg(const std::string& strArg, bool fDefault) const
@@ -531,6 +535,23 @@ bool SettingToBool(const common::SettingsValue& value, bool fDefault)
return SettingToBool(value).value_or(fDefault);
}
+#define INSTANTIATE_INT_TYPE(Type) \
+ template Type ArgsManager::GetArg(const std::string&, Type) const; \
+ template std::optional ArgsManager::GetArg(const std::string&) const; \
+ template Type SettingTo(const common::SettingsValue&, Type); \
+ template std::optional SettingTo(const common::SettingsValue&)
+
+INSTANTIATE_INT_TYPE(int8_t);
+INSTANTIATE_INT_TYPE(uint8_t);
+INSTANTIATE_INT_TYPE(int16_t);
+INSTANTIATE_INT_TYPE(uint16_t);
+INSTANTIATE_INT_TYPE(int32_t);
+INSTANTIATE_INT_TYPE(uint32_t);
+INSTANTIATE_INT_TYPE(int64_t);
+INSTANTIATE_INT_TYPE(uint64_t);
+
+#undef INSTANTIATE_INT_TYPE
+
bool ArgsManager::SoftSetArg(const std::string& strArg, const std::string& strValue)
{
LOCK(cs_args);
diff --git a/src/common/args.h b/src/common/args.h
index 1b9233ec75c1..ea4e173bf72a 100644
--- a/src/common/args.h
+++ b/src/common/args.h
@@ -11,6 +11,7 @@
#include
#include
+#include
#include
#include
#include
@@ -89,8 +90,11 @@ struct SectionInfo {
std::string SettingToString(const common::SettingsValue&, const std::string&);
std::optional SettingToString(const common::SettingsValue&);
-int64_t SettingToInt(const common::SettingsValue&, int64_t);
-std::optional SettingToInt(const common::SettingsValue&);
+template
+Int SettingTo(const common::SettingsValue&, Int);
+
+template
+std::optional SettingTo(const common::SettingsValue&);
bool SettingToBool(const common::SettingsValue&, bool);
std::optional SettingToBool(const common::SettingsValue&);
@@ -293,8 +297,14 @@ class ArgsManager
* @param nDefault (e.g. 1)
* @return command-line argument (0 if invalid number) or default value
*/
- int64_t GetIntArg(const std::string& strArg, int64_t nDefault) const;
- std::optional GetIntArg(const std::string& strArg) const;
+ template
+ Int GetArg(const std::string& strArg, Int nDefault) const;
+
+ template
+ std::optional GetArg(const std::string& strArg) const;
+
+ int64_t GetIntArg(const std::string& strArg, int64_t nDefault) const { return GetArg(strArg, nDefault); }
+ std::optional GetIntArg(const std::string& strArg) const { return GetArg(strArg); }
/**
* Return boolean argument or default value
diff --git a/src/common/messages.cpp b/src/common/messages.cpp
index 123db93cf61c..637ec62af895 100644
--- a/src/common/messages.cpp
+++ b/src/common/messages.cpp
@@ -4,11 +4,11 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include
-
#include
-#include
#include
+#include
#include
+#include
#include
#include
#include
@@ -33,7 +33,6 @@ std::string StringForFeeReason(FeeReason reason)
{FeeReason::DOUBLE_ESTIMATE, "Double Target 95% Threshold"},
{FeeReason::CONSERVATIVE, "Conservative Double Target longer horizon"},
{FeeReason::MEMPOOL_MIN, "Mempool Min Fee"},
- {FeeReason::PAYTXFEE, "PayTxFee set"},
{FeeReason::FALLBACK, "Fallback fee"},
{FeeReason::REQUIRED, "Minimum Required Fee"},
};
@@ -68,7 +67,6 @@ std::string FeeModeInfo(const std::pair& mode, std
"less responsive to short-term drops in the prevailing fee market. This mode\n"
"potentially returns a higher fee rate estimate.\n", mode.first);
default:
- // Other modes apart from the ones handled are fee rate units; they should not be clarified.
assert(false);
}
}
diff --git a/src/common/netif.cpp b/src/common/netif.cpp
index cba4d4e74736..8c0c4fa0832c 100644
--- a/src/common/netif.cpp
+++ b/src/common/netif.cpp
@@ -34,6 +34,8 @@
#include
#endif
+#include
+
namespace {
//! Return CNetAddr for the specified OS-level network address.
@@ -134,7 +136,9 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family)
return std::nullopt;
}
- for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, recv_result); hdr = NLMSG_NEXT(hdr, recv_result)) {
+ using recv_result_t = std::conditional_t, int64_t, decltype(NLMSG_HDRLEN)>;
+
+ for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, static_cast(recv_result)); hdr = NLMSG_NEXT(hdr, recv_result)) {
if (!(hdr->nlmsg_flags & NLM_F_MULTI)) {
done = true;
}
diff --git a/src/common/pcp.cpp b/src/common/pcp.cpp
index 4864136b8405..a640b823f9c7 100644
--- a/src/common/pcp.cpp
+++ b/src/common/pcp.cpp
@@ -4,6 +4,7 @@
#include
+#include
#include
#include
#include
@@ -81,6 +82,8 @@ constexpr size_t NATPMP_MAP_RESPONSE_LIFETIME_OFS = 12;
constexpr uint8_t NATPMP_RESULT_SUCCESS = 0;
//! Result code representing unsupported version.
constexpr uint8_t NATPMP_RESULT_UNSUPP_VERSION = 1;
+//! Result code representing not authorized (router doesn't support port mapping).
+constexpr uint8_t NATPMP_RESULT_NOT_AUTHORIZED = 2;
//! Result code representing lack of resources.
constexpr uint8_t NATPMP_RESULT_NO_RESOURCES = 4;
@@ -144,6 +147,8 @@ constexpr size_t PCP_MAP_EXTERNAL_IP_OFS = 20;
//! Result code representing success (RFC6887 7.4), shared with NAT-PMP.
constexpr uint8_t PCP_RESULT_SUCCESS = NATPMP_RESULT_SUCCESS;
+//! Result code representing not authorized (RFC6887 7.4), shared with NAT-PMP.
+constexpr uint8_t PCP_RESULT_NOT_AUTHORIZED = NATPMP_RESULT_NOT_AUTHORIZED;
//! Result code representing lack of resources (RFC6887 7.4).
constexpr uint8_t PCP_RESULT_NO_RESOURCES = 8;
@@ -374,7 +379,16 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g
Assume(response.size() >= NATPMP_MAP_RESPONSE_SIZE);
uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS);
if (result_code != NATPMP_RESULT_SUCCESS) {
- LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code));
+ if (result_code == NATPMP_RESULT_NOT_AUTHORIZED) {
+ static std::atomic warned{false};
+ if (!warned.exchange(true)) {
+ LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code));
+ } else {
+ LogDebug(BCLog::NET, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code));
+ }
+ } else {
+ LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code));
+ }
if (result_code == NATPMP_RESULT_NO_RESOURCES) {
return MappingError::NO_RESOURCES;
}
@@ -508,7 +522,16 @@ std::variant PCPRequestPortMap(const PCPMappingNonc
uint16_t external_port = ReadBE16(response.data() + PCP_HDR_SIZE + PCP_MAP_EXTERNAL_PORT_OFS);
CNetAddr external_addr{PCPUnwrapAddress(response.subspan(PCP_HDR_SIZE + PCP_MAP_EXTERNAL_IP_OFS, ADDR_IPV6_SIZE))};
if (result_code != PCP_RESULT_SUCCESS) {
- LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code));
+ if (result_code == PCP_RESULT_NOT_AUTHORIZED) {
+ static std::atomic warned{false};
+ if (!warned.exchange(true)) {
+ LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code));
+ } else {
+ LogDebug(BCLog::NET, "pcp: Mapping failed with result %s\n", PCPResultString(result_code));
+ }
+ } else {
+ LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code));
+ }
if (result_code == PCP_RESULT_NO_RESOURCES) {
return MappingError::NO_RESOURCES;
}
diff --git a/src/common/run_command.cpp b/src/common/run_command.cpp
index 57683e03467e..86f89e17f23e 100644
--- a/src/common/run_command.cpp
+++ b/src/common/run_command.cpp
@@ -8,12 +8,13 @@
#include
#include
+#include
#ifdef ENABLE_EXTERNAL_SIGNER
#include
#endif // ENABLE_EXTERNAL_SIGNER
-UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in)
+UniValue RunCommandParseJSON(const std::vector& cmd_args, const std::string& str_std_in)
{
#ifdef ENABLE_EXTERNAL_SIGNER
namespace sp = subprocess;
@@ -22,9 +23,9 @@ UniValue RunCommandParseJSON(const std::string& str_command, const std::string&
std::istringstream stdout_stream;
std::istringstream stderr_stream;
- if (str_command.empty()) return UniValue::VNULL;
+ if (cmd_args.empty()) return UniValue::VNULL;
- auto c = sp::Popen(str_command, sp::input{sp::PIPE}, sp::output{sp::PIPE}, sp::error{sp::PIPE});
+ auto c = sp::Popen(cmd_args, sp::input{sp::PIPE}, sp::output{sp::PIPE}, sp::error{sp::PIPE});
if (!str_std_in.empty()) {
c.send(str_std_in);
}
@@ -38,7 +39,7 @@ UniValue RunCommandParseJSON(const std::string& str_command, const std::string&
std::getline(stderr_stream, error);
const int n_error = c.retcode();
- if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error));
+ if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", util::Join(cmd_args, " "), n_error, error));
if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result);
return result_json;
diff --git a/src/common/run_command.h b/src/common/run_command.h
index 56c94f83bd9b..9162c704561b 100644
--- a/src/common/run_command.h
+++ b/src/common/run_command.h
@@ -6,16 +6,17 @@
#define BITCOIN_COMMON_RUN_COMMAND_H
#include
+#include
class UniValue;
/**
* Execute a command which returns JSON, and parse the result.
*
- * @param str_command The command to execute, including any arguments
+ * @param cmd_args The command and arguments
* @param str_std_in string to pass to stdin
* @return parsed JSON
*/
-UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in="");
+UniValue RunCommandParseJSON(const std::vector& cmd_args, const std::string& str_std_in = "");
#endif // BITCOIN_COMMON_RUN_COMMAND_H
diff --git a/src/compat/stdin.cpp b/src/compat/stdin.cpp
index 20540f2ad61e..10c811ad3807 100644
--- a/src/compat/stdin.cpp
+++ b/src/compat/stdin.cpp
@@ -18,25 +18,38 @@
// https://stackoverflow.com/questions/1413445/reading-a-password-from-stdcin
void SetStdinEcho(bool enable)
{
+ if (!StdinTerminal()) {
+ return;
+ }
#ifdef WIN32
HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
DWORD mode;
- GetConsoleMode(hStdin, &mode);
+ if (!GetConsoleMode(hStdin, &mode)) {
+ fputs("GetConsoleMode failed\n", stderr);
+ return;
+ }
if (!enable) {
mode &= ~ENABLE_ECHO_INPUT;
} else {
mode |= ENABLE_ECHO_INPUT;
}
- SetConsoleMode(hStdin, mode);
+ if (!SetConsoleMode(hStdin, mode)) {
+ fputs("SetConsoleMode failed\n", stderr);
+ }
#else
struct termios tty;
- tcgetattr(STDIN_FILENO, &tty);
+ if (tcgetattr(STDIN_FILENO, &tty) != 0) {
+ fputs("tcgetattr failed\n", stderr);
+ return;
+ }
if (!enable) {
- tty.c_lflag &= ~ECHO;
+ tty.c_lflag &= static_cast(~ECHO);
} else {
tty.c_lflag |= ECHO;
}
- (void)tcsetattr(STDIN_FILENO, TCSANOW, &tty);
+ if (tcsetattr(STDIN_FILENO, TCSANOW, &tty) != 0) {
+ fputs("tcsetattr failed\n", stderr);
+ }
#endif
}
diff --git a/src/dummywallet.cpp b/src/dummywallet.cpp
index 85fb1ed1e52f..24952fea197c 100644
--- a/src/dummywallet.cpp
+++ b/src/dummywallet.cpp
@@ -38,7 +38,6 @@ void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const
"-maxapsfee=",
"-maxtxfee=",
"-mintxfee=",
- "-paytxfee=",
"-signer=",
"-spendzeroconfchange",
"-txconfirmtarget=",
diff --git a/src/external_signer.cpp b/src/external_signer.cpp
index 84d98a199062..3790f4d36f98 100644
--- a/src/external_signer.cpp
+++ b/src/external_signer.cpp
@@ -9,24 +9,27 @@
#include
#include
#include
+#include
#include
#include
#include
#include
-ExternalSigner::ExternalSigner(std::string command, std::string chain, std::string fingerprint, std::string name)
+ExternalSigner::ExternalSigner(std::vector command, std::string chain, std::string fingerprint, std::string name)
: m_command{std::move(command)}, m_chain{std::move(chain)}, m_fingerprint{std::move(fingerprint)}, m_name{std::move(name)} {}
-std::string ExternalSigner::NetworkArg() const
+std::vector ExternalSigner::NetworkArg() const
{
- return " --chain " + m_chain;
+ return {"--chain", m_chain};
}
bool ExternalSigner::Enumerate(const std::string& command, std::vector& signers, const std::string& chain)
{
// Call enumerate
- const UniValue result = RunCommandParseJSON(command + " enumerate");
+ std::vector cmd_args = Cat(subprocess::util::split(command), {"enumerate"});
+
+ const UniValue result = RunCommandParseJSON(cmd_args, "");
if (!result.isArray()) {
throw std::runtime_error(strprintf("'%s' received invalid response, expected array of signers", command));
}
@@ -56,19 +59,19 @@ bool ExternalSigner::Enumerate(const std::string& command, std::vector command = Cat(m_command, Cat({"--stdin", "--fingerprint", m_fingerprint}, NetworkArg()));
const std::string stdinStr = "signtx " + EncodeBase64(ssTx.str());
const UniValue signer_result = RunCommandParseJSON(command, stdinStr);
diff --git a/src/external_signer.h b/src/external_signer.h
index 1b36d49622e1..5ba37c0626b9 100644
--- a/src/external_signer.h
+++ b/src/external_signer.h
@@ -19,19 +19,19 @@ class ExternalSigner
{
private:
//! The command which handles interaction with the external signer.
- std::string m_command;
+ std::vector m_command;
//! Bitcoin mainnet, testnet, etc
std::string m_chain;
- std::string NetworkArg() const;
+ std::vector NetworkArg() const;
public:
//! @param[in] command the command which handles interaction with the external signer
//! @param[in] fingerprint master key fingerprint of the signer
//! @param[in] chain "main", "test", "regtest" or "signet"
//! @param[in] name device name
- ExternalSigner(std::string command, std::string chain, std::string fingerprint, std::string name);
+ ExternalSigner(std::vector command, std::string chain, std::string fingerprint, std::string name);
//! Master key fingerprint of the signer
std::string m_fingerprint;
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 671e119642f2..b84f0da08fda 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -211,7 +211,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
}
}
}
- auto hreq{std::make_unique(req, *static_cast(arg))};
+ auto hreq{std::make_shared(req, *static_cast(arg))};
// Early address-based allow check
if (!ClientAllowed(hreq->GetPeer())) {
@@ -258,7 +258,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
return;
}
- auto item = [req = std::move(hreq), in_path = std::move(path), fn = i->handler]() {
+ auto item = [req = hreq, in_path = std::move(path), fn = i->handler]() {
std::string err_msg;
try {
fn(req.get(), in_path);
@@ -276,7 +276,13 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
req->WriteReply(HTTP_INTERNAL_SERVER_ERROR, err_msg);
};
- [[maybe_unused]] auto _{g_threadpool_http.Submit(std::move(item))};
+ if (auto res = g_threadpool_http.Submit(std::move(item)); !res.has_value()) {
+ Assume(hreq.use_count() == 1); // ensure request will be deleted
+ // Both SubmitError::Inactive and SubmitError::Interrupted mean shutdown
+ LogWarning("HTTP request rejected during server shutdown: '%s'", SubmitErrorString(res.error()));
+ hreq->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Request rejected during server shutdown");
+ return;
+ }
} else {
hreq->WriteReply(HTTP_NOT_FOUND);
}
@@ -410,8 +416,8 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt)
}
LogDebug(BCLog::HTTP, "Initialized HTTP server\n");
- g_max_queue_depth = std::max((long)gArgs.GetIntArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L);
- LogDebug(BCLog::HTTP, "set work queue of depth %d\n", g_max_queue_depth);
+ g_max_queue_depth = std::max(gArgs.GetArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1);
+ LogDebug(BCLog::HTTP, "set work queue of depth %d", g_max_queue_depth);
// transfer ownership to eventBase/HTTP via .release()
eventBase = base_ctr.release();
@@ -431,8 +437,8 @@ static std::thread g_thread_http;
void StartHTTPServer()
{
- int rpcThreads = std::max((long)gArgs.GetIntArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L);
- LogInfo("Starting HTTP server with %d worker threads\n", rpcThreads);
+ int rpcThreads = std::max(gArgs.GetArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1);
+ LogInfo("Starting HTTP server with %d worker threads", rpcThreads);
g_threadpool_http.Start(rpcThreads);
g_thread_http = std::thread(ThreadHTTP, eventBase);
}
diff --git a/src/index/base.h b/src/index/base.h
index 8cb8ad8effea..d8fd85669335 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -122,9 +122,6 @@ class BaseIndex : public CValidationInterface
void ChainStateFlushed(const kernel::ChainstateRole& role, const CBlockLocator& locator) override;
- /// Return custom notification options for index.
- [[nodiscard]] virtual interfaces::Chain::NotifyOptions CustomOptions() { return {}; }
-
/// Initialize internal state from the database and block index.
[[nodiscard]] virtual bool CustomInit(const std::optional& block) { return true; }
@@ -151,6 +148,9 @@ class BaseIndex : public CValidationInterface
/// Get the name of the index for display in logs.
const std::string& GetName() const LIFETIMEBOUND { return m_name; }
+ /// Return custom notification options for index.
+ [[nodiscard]] virtual interfaces::Chain::NotifyOptions CustomOptions() { return {}; }
+
/// Blocks the current thread until the index is caught up to the current
/// state of the block chain. This only blocks if the index has gotten in
/// sync once and only needs to process blocks in the ValidationInterface
diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h
index 96d393a3a975..0bb4a74e1256 100644
--- a/src/index/blockfilterindex.h
+++ b/src/index/blockfilterindex.h
@@ -63,8 +63,6 @@ class BlockFilterIndex final : public BaseIndex
std::optional ReadFilterHeader(int height, const uint256& expected_block_hash);
protected:
- interfaces::Chain::NotifyOptions CustomOptions() override;
-
bool CustomInit(const std::optional& block) override;
bool CustomCommit(CDBBatch& batch) override;
@@ -80,6 +78,8 @@ class BlockFilterIndex final : public BaseIndex
explicit BlockFilterIndex(std::unique_ptr chain, BlockFilterType filter_type,
size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
+ interfaces::Chain::NotifyOptions CustomOptions() override;
+
BlockFilterType GetFilterType() const { return m_filter_type; }
/** Get a single filter by block. */
diff --git a/src/index/coinstatsindex.h b/src/index/coinstatsindex.h
index 041c0b896a62..0e26fba56d95 100644
--- a/src/index/coinstatsindex.h
+++ b/src/index/coinstatsindex.h
@@ -52,8 +52,6 @@ class CoinStatsIndex final : public BaseIndex
bool AllowPrune() const override { return true; }
protected:
- interfaces::Chain::NotifyOptions CustomOptions() override;
-
bool CustomInit(const std::optional& block) override;
bool CustomCommit(CDBBatch& batch) override;
@@ -68,6 +66,8 @@ class CoinStatsIndex final : public BaseIndex
// Constructs the index, which becomes available to be queried.
explicit CoinStatsIndex(std::unique_ptr chain, size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
+ interfaces::Chain::NotifyOptions CustomOptions() override;
+
// Look up stats for a specific block using CBlockIndex
std::optional LookUpStats(const CBlockIndex& block_index) const;
};
diff --git a/src/index/txospenderindex.cpp b/src/index/txospenderindex.cpp
new file mode 100644
index 000000000000..d451bb1e0a49
--- /dev/null
+++ b/src/index/txospenderindex.cpp
@@ -0,0 +1,184 @@
+// Copyright (c) The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* The database is used to find the spending transaction of a given utxo.
+ * For every input of every transaction it stores a key that is a pair(siphash(input outpoint), transaction location on disk) and an empty value.
+ * To find the spending transaction of an outpoint, we perform a range query on siphash(outpoint), and for each returned key load the transaction
+ * and return it if it does spend the provided outpoint.
+ */
+
+// LevelDB key prefix. We only have one key for now but it will make it easier to add others if needed.
+constexpr uint8_t DB_TXOSPENDERINDEX{'s'};
+
+std::unique_ptr g_txospenderindex;
+
+struct DBKey {
+ uint64_t hash;
+ CDiskTxPos pos;
+
+ explicit DBKey(const uint64_t& hash_in, const CDiskTxPos& pos_in) : hash(hash_in), pos(pos_in) {}
+
+ SERIALIZE_METHODS(DBKey, obj)
+ {
+ uint8_t prefix{DB_TXOSPENDERINDEX};
+ READWRITE(prefix);
+ if (prefix != DB_TXOSPENDERINDEX) {
+ throw std::ios_base::failure("Invalid format for spender index DB key");
+ }
+ READWRITE(obj.hash);
+ READWRITE(obj.pos);
+ }
+};
+
+TxoSpenderIndex::TxoSpenderIndex(std::unique_ptr chain, size_t n_cache_size, bool f_memory, bool f_wipe)
+ : BaseIndex(std::move(chain), "txospenderindex"), m_db{std::make_unique(gArgs.GetDataDirNet() / "indexes" / "txospenderindex" / "db", n_cache_size, f_memory, f_wipe)}
+{
+ if (!m_db->Read("siphash_key", m_siphash_key)) {
+ FastRandomContext rng(false);
+ m_siphash_key = {rng.rand64(), rng.rand64()};
+ m_db->Write("siphash_key", m_siphash_key, /*fSync=*/ true);
+ }
+}
+
+interfaces::Chain::NotifyOptions TxoSpenderIndex::CustomOptions()
+{
+ interfaces::Chain::NotifyOptions options;
+ options.disconnect_data = true;
+ return options;
+}
+
+static uint64_t CreateKeyPrefix(std::pair siphash_key, const COutPoint& vout)
+{
+ return PresaltedSipHasher(siphash_key.first, siphash_key.second)(vout.hash.ToUint256(), vout.n);
+}
+
+static DBKey CreateKey(std::pair siphash_key, const COutPoint& vout, const CDiskTxPos& pos)
+{
+ return DBKey(CreateKeyPrefix(siphash_key, vout), pos);
+}
+
+void TxoSpenderIndex::WriteSpenderInfos(const std::vector