build, wallet, doc: Remove BDB

Ava Chow 2024-01-05 18:42:20 -05:00
parent 44057fe38c
commit 04a7a7a28c
51 changed files with 34 additions and 2108 deletions

View File

@ -12,7 +12,7 @@ set -o errexit -o pipefail -o xtrace
echo "Running test-one-commit on $( git log -1 )"
# Use clang++, because it is a bit faster and uses less memory than g++
CC=clang CXX=clang++ cmake -B build -DWERROR=ON -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWITH_BDB=ON -DWITH_USDT=ON -DCMAKE_CXX_FLAGS='-Wno-error=unused-member-function'
CC=clang CXX=clang++ cmake -B build -DWERROR=ON -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWITH_USDT=ON -DCMAKE_CXX_FLAGS='-Wno-error=unused-member-function'
cmake --build build -j "$( nproc )" && ctest --output-on-failure --stop-on-failure --test-dir build -j "$( nproc )"

View File

@ -77,7 +77,7 @@ jobs:
git config user.name "CI"
- run: |
sudo apt-get update
sudo apt-get install clang ccache build-essential cmake ninja-build pkgconf python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libzmq3-dev qt6-base-dev qt6-tools-dev qt6-l10n-tools libqrencode-dev -y
sudo apt-get install clang ccache build-essential cmake ninja-build pkgconf python3-zmq libevent-dev libboost-dev libsqlite3-dev systemtap-sdt-dev libzmq3-dev qt6-base-dev qt6-tools-dev qt6-l10n-tools libqrencode-dev -y
- name: Compile and run tests
run: |
# Run tests on commits after the last merge commit and before the PR head commit
@ -175,7 +175,7 @@ jobs:
job-type: [standard, fuzz]
include:
- job-type: standard
generate-options: '-DBUILD_GUI=ON -DWITH_BDB=ON -DWITH_ZMQ=ON -DBUILD_BENCH=ON -DWERROR=ON'
generate-options: '-DBUILD_GUI=ON -DWITH_ZMQ=ON -DBUILD_BENCH=ON -DWERROR=ON'
job-name: 'Windows native, VS 2022'
- job-type: fuzz
generate-options: '-DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON -DVCPKG_MANIFEST_FEATURES="wallet" -DBUILD_GUI=OFF -DBUILD_FOR_FUZZING=ON -DWERROR=ON'

View File

@ -110,22 +110,6 @@ if(ENABLE_WALLET)
find_package(SQLite3 3.7.17 REQUIRED)
endif()
endif()
option(WITH_BDB "Enable Berkeley DB (BDB) wallet support." OFF)
cmake_dependent_option(WARN_INCOMPATIBLE_BDB "Warn when using a Berkeley DB (BDB) version other than 4.8." ON "WITH_BDB" OFF)
if(WITH_BDB)
find_package(BerkeleyDB 4.8 MODULE REQUIRED)
set(USE_BDB ON)
if(NOT BerkeleyDB_VERSION VERSION_EQUAL 4.8)
message(WARNING "Found Berkeley DB (BDB) other than 4.8.\n"
"BDB (legacy) wallets opened by this build will not be portable!"
)
if(WARN_INCOMPATIBLE_BDB)
message(WARNING "If this is intended, pass \"-DWARN_INCOMPATIBLE_BDB=OFF\".\n"
"Passing \"-DWITH_BDB=OFF\" will suppress this warning."
)
endif()
endif()
endif()
cmake_dependent_option(BUILD_WALLET_TOOL "Build bitcoin-wallet tool." ${BUILD_TESTS} "ENABLE_WALLET" OFF)
option(REDUCE_EXPORTS "Attempt to reduce exported symbols in the resulting executables." OFF)
@ -677,9 +661,6 @@ message(" bitcoin-chainstate (experimental) ... ${BUILD_UTIL_CHAINSTATE}")
message(" libbitcoinkernel (experimental) ..... ${BUILD_KERNEL_LIB}")
message("Optional features:")
message(" wallet support ...................... ${ENABLE_WALLET}")
if(ENABLE_WALLET)
message(" - legacy wallets (Berkeley DB) ..... ${WITH_BDB}")
endif()
message(" external signer ..................... ${ENABLE_EXTERNAL_SIGNER}")
message(" ZeroMQ .............................. ${WITH_ZMQ}")
if(ENABLE_IPC)
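With the `WITH_BDB` and `WARN_INCOMPATIBLE_BDB` options gone, wallet support is driven by `ENABLE_WALLET` alone and always uses SQLite. A minimal sketch of the remaining wallet-related configure lines (flags taken from this diff; summary output abbreviated):

```bash
# There is no BDB switch left to pass; wallet builds depend only on SQLite.
cmake -B build -DENABLE_WALLET=ON    # descriptor (SQLite) wallet support
cmake -B build -DENABLE_WALLET=OFF   # no wallet, drops the SQLite dependency

# The configure summary no longer prints a "legacy wallets (Berkeley DB)" line:
#   Optional features:
#     wallet support ...................... ON
```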

View File

@ -78,8 +78,6 @@
"BUILD_WALLET_TOOL": "ON",
"ENABLE_EXTERNAL_SIGNER": "ON",
"ENABLE_WALLET": "ON",
"WARN_INCOMPATIBLE_BDB": "OFF",
"WITH_BDB": "ON",
"ENABLE_IPC": "ON",
"WITH_QRENCODE": "ON",
"WITH_USDT": "ON",

View File

@ -20,11 +20,11 @@ fi
export CONTAINER_NAME=ci_native_asan
export APT_LLVM_V="20"
export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev python3-zmq qt6-base-dev qt6-tools-dev qt6-l10n-tools libevent-dev libboost-dev libdb5.3++-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}"
export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev python3-zmq qt6-base-dev qt6-tools-dev qt6-l10n-tools libevent-dev libboost-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}"
export NO_DEPENDS=1
export GOAL="install"
export BITCOIN_CONFIG="\
-DWITH_USDT=ON -DWITH_ZMQ=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=ON \
-DWITH_USDT=ON -DWITH_ZMQ=ON -DBUILD_GUI=ON \
-DSANITIZERS=address,float-divide-by-zero,integer,undefined \
-DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \
-DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \

View File

@ -14,8 +14,7 @@ export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"
export CONTAINER_NAME="ci_native_fuzz_msan"
export PACKAGES="ninja-build"
# BDB generates false-positives and will be removed in future
export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export DEP_OPTS="DEBUG=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export GOAL="all"
# Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered.
# _FORTIFY_SOURCE is not compatible with MSAN.

View File

@ -14,8 +14,7 @@ export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"
export CONTAINER_NAME="ci_native_msan"
export PACKAGES="ninja-build"
# BDB generates false-positives and will be removed in future
export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export DEP_OPTS="DEBUG=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export GOAL="install"
# Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered.
# _FORTIFY_SOURCE is not compatible with MSAN.

View File

@ -10,7 +10,7 @@ export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
export CONTAINER_NAME=ci_native_tidy
export TIDY_LLVM_V="20"
export APT_LLVM_V="${TIDY_LLVM_V}"
export PACKAGES="clang-${TIDY_LLVM_V} libclang-${TIDY_LLVM_V}-dev llvm-${TIDY_LLVM_V}-dev libomp-${TIDY_LLVM_V}-dev clang-tidy-${TIDY_LLVM_V} jq libevent-dev libboost-dev libzmq3-dev systemtap-sdt-dev qt6-base-dev qt6-tools-dev qt6-l10n-tools libqrencode-dev libsqlite3-dev libdb++-dev"
export PACKAGES="clang-${TIDY_LLVM_V} libclang-${TIDY_LLVM_V}-dev llvm-${TIDY_LLVM_V}-dev libomp-${TIDY_LLVM_V}-dev clang-tidy-${TIDY_LLVM_V} jq libevent-dev libboost-dev libzmq3-dev systemtap-sdt-dev qt6-base-dev qt6-tools-dev qt6-l10n-tools libqrencode-dev libsqlite3-dev"
export NO_DEPENDS=1
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
@ -19,7 +19,7 @@ export RUN_CHECK_DEPS=true
export RUN_TIDY=true
export GOAL="install"
export BITCOIN_CONFIG="\
-DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DWITH_USDT=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF \
-DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DWITH_USDT=ON \
-DCMAKE_C_COMPILER=clang-${TIDY_LLVM_V} \
-DCMAKE_CXX_COMPILER=clang++-${TIDY_LLVM_V} \
-DCMAKE_C_FLAGS_RELWITHDEBINFO='-O0 -g0' \

View File

@ -8,12 +8,12 @@ export LC_ALL=C.UTF-8
export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
export CONTAINER_NAME=ci_native_valgrind
export PACKAGES="valgrind python3-zmq libevent-dev libboost-dev libdb5.3++-dev libzmq3-dev libsqlite3-dev"
export PACKAGES="valgrind python3-zmq libevent-dev libboost-dev libzmq3-dev libsqlite3-dev"
export USE_VALGRIND=1
export NO_DEPENDS=1
export TEST_RUNNER_EXTRA="--exclude feature_init,rpc_bind,feature_bind_extra" # feature_init excluded for now, see https://github.com/bitcoin/bitcoin/issues/30011 ; bind tests excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547
export GOAL="install"
# TODO enable GUI
export BITCOIN_CONFIG="\
-DWITH_ZMQ=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=OFF \
-DWITH_ZMQ=ON -DBUILD_GUI=OFF \
"

View File

@ -123,9 +123,6 @@
/* Define to 1 if strerror_r returns char *. */
#cmakedefine STRERROR_R_CHAR_P 1
/* Define if BDB support should be compiled in */
#cmakedefine USE_BDB 1
/* Define if dbus support should be compiled in */
#cmakedefine USE_DBUS 1

View File

@ -1,133 +0,0 @@
# Copyright (c) 2023-present The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://opensource.org/license/mit/.
#[=======================================================================[
FindBerkeleyDB
--------------
Finds the Berkeley DB headers and library.
Imported Targets
^^^^^^^^^^^^^^^^
This module provides imported target ``BerkeleyDB::BerkeleyDB``, if
Berkeley DB has been found.
Result Variables
^^^^^^^^^^^^^^^^
This module defines the following variables:
``BerkeleyDB_FOUND``
"True" if Berkeley DB found.
``BerkeleyDB_VERSION``
The MAJOR.MINOR version of Berkeley DB found.
#]=======================================================================]
set(_BerkeleyDB_homebrew_prefix)
if(CMAKE_HOST_APPLE)
find_program(HOMEBREW_EXECUTABLE brew)
if(HOMEBREW_EXECUTABLE)
# The Homebrew package manager installs the berkeley-db* packages as
# "keg-only", which means they are not symlinked into the default prefix.
# To find such a package, the find_path() and find_library() commands
# need additional path hints that are computed by Homebrew itself.
execute_process(
COMMAND ${HOMEBREW_EXECUTABLE} --prefix berkeley-db@4
OUTPUT_VARIABLE _BerkeleyDB_homebrew_prefix
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE
)
endif()
endif()
find_path(BerkeleyDB_INCLUDE_DIR
NAMES db_cxx.h
HINTS ${_BerkeleyDB_homebrew_prefix}/include
PATH_SUFFIXES 4.8 48 db4.8 4 db4 5.3 db5.3 5 db5
)
mark_as_advanced(BerkeleyDB_INCLUDE_DIR)
unset(_BerkeleyDB_homebrew_prefix)
if(NOT BerkeleyDB_LIBRARY)
if(VCPKG_TARGET_TRIPLET)
# The vcpkg package manager installs the berkeleydb package with the same name
# of release and debug libraries. Therefore, the default search paths set by
# vcpkg's toolchain file cannot be used to search libraries as the debug one
# will always be found.
set(CMAKE_FIND_USE_CMAKE_PATH FALSE)
endif()
get_filename_component(_BerkeleyDB_lib_hint "${BerkeleyDB_INCLUDE_DIR}" DIRECTORY)
find_library(BerkeleyDB_LIBRARY_RELEASE
NAMES db_cxx-4.8 db4_cxx db48 db_cxx-5.3 db_cxx-5 db_cxx libdb48
NAMES_PER_DIR
HINTS ${_BerkeleyDB_lib_hint}
PATH_SUFFIXES lib
)
mark_as_advanced(BerkeleyDB_LIBRARY_RELEASE)
find_library(BerkeleyDB_LIBRARY_DEBUG
NAMES db_cxx-4.8 db4_cxx db48 db_cxx-5.3 db_cxx-5 db_cxx libdb48
NAMES_PER_DIR
HINTS ${_BerkeleyDB_lib_hint}
PATH_SUFFIXES debug/lib
)
mark_as_advanced(BerkeleyDB_LIBRARY_DEBUG)
unset(_BerkeleyDB_lib_hint)
unset(CMAKE_FIND_USE_CMAKE_PATH)
include(SelectLibraryConfigurations)
select_library_configurations(BerkeleyDB)
# The select_library_configurations() command sets BerkeleyDB_FOUND, but we
# want the one from the find_package_handle_standard_args() command below.
unset(BerkeleyDB_FOUND)
endif()
if(BerkeleyDB_INCLUDE_DIR)
file(STRINGS "${BerkeleyDB_INCLUDE_DIR}/db.h" _BerkeleyDB_version_strings REGEX "^#define[\t ]+DB_VERSION_(MAJOR|MINOR|PATCH)[ \t]+[0-9]+.*")
string(REGEX REPLACE ".*#define[\t ]+DB_VERSION_MAJOR[ \t]+([0-9]+).*" "\\1" _BerkeleyDB_version_major "${_BerkeleyDB_version_strings}")
string(REGEX REPLACE ".*#define[\t ]+DB_VERSION_MINOR[ \t]+([0-9]+).*" "\\1" _BerkeleyDB_version_minor "${_BerkeleyDB_version_strings}")
string(REGEX REPLACE ".*#define[\t ]+DB_VERSION_PATCH[ \t]+([0-9]+).*" "\\1" _BerkeleyDB_version_patch "${_BerkeleyDB_version_strings}")
unset(_BerkeleyDB_version_strings)
# The MAJOR.MINOR.PATCH version will be logged in the following find_package_handle_standard_args() command.
set(_BerkeleyDB_full_version ${_BerkeleyDB_version_major}.${_BerkeleyDB_version_minor}.${_BerkeleyDB_version_patch})
set(BerkeleyDB_VERSION ${_BerkeleyDB_version_major}.${_BerkeleyDB_version_minor})
unset(_BerkeleyDB_version_major)
unset(_BerkeleyDB_version_minor)
unset(_BerkeleyDB_version_patch)
endif()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(BerkeleyDB
REQUIRED_VARS BerkeleyDB_LIBRARY BerkeleyDB_INCLUDE_DIR
VERSION_VAR _BerkeleyDB_full_version
)
unset(_BerkeleyDB_full_version)
if(BerkeleyDB_FOUND AND NOT TARGET BerkeleyDB::BerkeleyDB)
add_library(BerkeleyDB::BerkeleyDB UNKNOWN IMPORTED)
set_target_properties(BerkeleyDB::BerkeleyDB PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${BerkeleyDB_INCLUDE_DIR}"
)
if(BerkeleyDB_LIBRARY_RELEASE)
set_property(TARGET BerkeleyDB::BerkeleyDB APPEND PROPERTY
IMPORTED_CONFIGURATIONS RELEASE
)
set_target_properties(BerkeleyDB::BerkeleyDB PROPERTIES
IMPORTED_LOCATION_RELEASE "${BerkeleyDB_LIBRARY_RELEASE}"
)
endif()
if(BerkeleyDB_LIBRARY_DEBUG)
set_property(TARGET BerkeleyDB::BerkeleyDB APPEND PROPERTY
IMPORTED_CONFIGURATIONS DEBUG)
set_target_properties(BerkeleyDB::BerkeleyDB PROPERTIES
IMPORTED_LOCATION_DEBUG "${BerkeleyDB_LIBRARY_DEBUG}"
)
endif()
endif()

View File

@ -41,9 +41,6 @@ ALLOWED_DEPENDENCIES+=(
# Declare list of known errors that should be suppressed.
declare -A SUPPRESS
# init.cpp file currently calls Berkeley DB sanity check function on startup, so
# there is an undocumented dependency of the node library on the wallet library.
SUPPRESS["init.cpp.o bdb.cpp.o _ZN6wallet27BerkeleyDatabaseSanityCheckEv"]=1
# init/common.cpp file calls InitError and InitWarning from interface_ui which
# is currently part of the node library. interface_ui should just be part of the
# common library instead, and is moved in

View File

@ -14,44 +14,12 @@
# Note that suppressions may depend on OS and/or library versions.
# Tested on aarch64 and x86_64 with Ubuntu Noble system libs, using clang-16
# and GCC, without gui.
{
Suppress libdb warning - https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=662917
Memcheck:Cond
obj:*/libdb_cxx-*.so
fun:__log_put
}
{
Suppress libdb warning
Memcheck:Param
pwrite64(buf)
fun:pwrite
fun:__os_io
}
{
Suppress libdb warning
Memcheck:Cond
fun:__log_putr.isra.1
}
{
Suppress libdb warning
Memcheck:Param
pwrite64(buf)
...
obj:*/libdb_cxx-*.so
}
{
Suppress uninitialized bytes warning in compat code
Memcheck:Param
ioctl(TCSET{S,SW,SF})
fun:tcsetattr
}
{
Suppress libdb warning
Memcheck:Leak
fun:malloc
...
obj:*/libdb_cxx-*.so
}
{
Suppress leaks on shutdown
Memcheck:Leak

View File

@ -36,7 +36,6 @@ NO_BOOST ?=
NO_LIBEVENT ?=
NO_QT ?=
NO_QR ?=
NO_BDB ?=
NO_WALLET ?=
NO_ZMQ ?=
NO_USDT ?=
@ -159,8 +158,7 @@ qrencode_packages_$(NO_QR) = $(qrencode_$(host_os)_packages)
qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages) $(qrencode_packages_)
qt_native_packages_$(NO_QT) = $(qt_native_packages)
bdb_packages_$(NO_BDB) = $(bdb_packages)
wallet_packages_$(NO_WALLET) = $(bdb_packages_) $(sqlite_packages)
wallet_packages_$(NO_WALLET) = $(sqlite_packages)
zmq_packages_$(NO_ZMQ) = $(zmq_packages)
multiprocess_packages_$(MULTIPROCESS) = $(multiprocess_packages)
@ -232,7 +230,6 @@ $(host_prefix)/toolchain.cmake : toolchain.cmake.in $(host_prefix)/.stamp_$(fina
-e 's|@qrencode_packages@|$(qrencode_packages_)|' \
-e 's|@zmq_packages@|$(zmq_packages_)|' \
-e 's|@wallet_packages@|$(wallet_packages_)|' \
-e 's|@bdb_packages@|$(bdb_packages_)|' \
-e 's|@usdt_packages@|$(usdt_packages_)|' \
-e 's|@multiprocess@|$(MULTIPROCESS)|' \
$< > $@
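With `bdb_packages` and the `NO_BDB` knob removed, `NO_WALLET` now only controls whether SQLite is built in depends. A hedged sketch of the remaining invocations (behaviour as documented in depends/README.md below):

```bash
# NO_BDB=1 is no longer a recognised option; SQLite is the only wallet package left.
make -C depends NO_WALLET=1        # skip SQLite entirely (wallet support disabled)
make -C depends NO_QT=1 NO_ZMQ=1   # headless build; SQLite is still fetched for the wallet
```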

View File

@ -79,7 +79,6 @@ The following can be set when running make: `make FOO=bar`
- `NO_QR`: Don't download/build/cache packages needed for enabling qrencode
- `NO_ZMQ`: Don't download/build/cache packages needed for enabling ZeroMQ
- `NO_WALLET`: Don't download/build/cache libs needed to enable the wallet (SQLite)
- `NO_BDB`: Don't download/build/cache BerkeleyDB
- `NO_USDT`: Don't download/build/cache packages needed for enabling USDT tracepoints
- `MULTIPROCESS`: Build libmultiprocess (experimental)
- `DEBUG`: Disable some optimizations and enable more runtime checking

View File

@ -1,33 +0,0 @@
package=bdb
$(package)_version=4.8.30
$(package)_download_path=https://download.oracle.com/berkeley-db
$(package)_file_name=db-$($(package)_version).NC.tar.gz
$(package)_sha256_hash=12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef
$(package)_build_subdir=build_unix
$(package)_patches=clang_cxx_11.patch
define $(package)_set_vars
$(package)_config_opts=--disable-shared --enable-cxx --disable-replication --enable-option-checking
$(package)_config_opts_mingw32=--enable-mingw
$(package)_cflags+=-Wno-error=implicit-function-declaration -Wno-error=format-security -Wno-error=implicit-int
$(package)_cppflags_freebsd=-D_XOPEN_SOURCE=600 -D__BSD_VISIBLE=1
$(package)_cppflags_netbsd=-D_XOPEN_SOURCE=600
$(package)_cppflags_mingw32=-DUNICODE -D_UNICODE
endef
define $(package)_preprocess_cmds
patch -p1 < $($(package)_patch_dir)/clang_cxx_11.patch && \
cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub dist
endef
define $(package)_config_cmds
../dist/$($(package)_autoconf)
endef
define $(package)_build_cmds
$(MAKE) libdb_cxx-4.8.a libdb-4.8.a
endef
define $(package)_stage_cmds
$(MAKE) DESTDIR=$($(package)_staging_dir) install_lib install_include
endef

View File

@ -15,7 +15,6 @@ ifneq ($(host),$(build))
qt_native_packages := native_qt
endif
bdb_packages=bdb
sqlite_packages=sqlite
zmq_packages=zeromq

View File

@ -1,147 +0,0 @@
commit 3311d68f11d1697565401eee6efc85c34f022ea7
Author: fanquake <fanquake@gmail.com>
Date: Mon Aug 17 20:03:56 2020 +0800
Fix C++11 compatibility
diff --git a/dbinc/atomic.h b/dbinc/atomic.h
index 0034dcc..7c11d4a 100644
--- a/dbinc/atomic.h
+++ b/dbinc/atomic.h
@@ -70,7 +70,7 @@ typedef struct {
* These have no memory barriers; the caller must include them when necessary.
*/
#define atomic_read(p) ((p)->value)
-#define atomic_init(p, val) ((p)->value = (val))
+#define atomic_init_db(p, val) ((p)->value = (val))
#ifdef HAVE_ATOMIC_SUPPORT
@@ -144,7 +144,7 @@ typedef LONG volatile *interlocked_val;
#define atomic_inc(env, p) __atomic_inc(p)
#define atomic_dec(env, p) __atomic_dec(p)
#define atomic_compare_exchange(env, p, o, n) \
- __atomic_compare_exchange((p), (o), (n))
+ __atomic_compare_exchange_db((p), (o), (n))
static inline int __atomic_inc(db_atomic_t *p)
{
int temp;
@@ -176,7 +176,7 @@ static inline int __atomic_dec(db_atomic_t *p)
* http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
* which configure could be changed to use.
*/
-static inline int __atomic_compare_exchange(
+static inline int __atomic_compare_exchange_db(
db_atomic_t *p, atomic_value_t oldval, atomic_value_t newval)
{
atomic_value_t was;
@@ -206,7 +206,7 @@ static inline int __atomic_compare_exchange(
#define atomic_dec(env, p) (--(p)->value)
#define atomic_compare_exchange(env, p, oldval, newval) \
(DB_ASSERT(env, atomic_read(p) == (oldval)), \
- atomic_init(p, (newval)), 1)
+ atomic_init_db(p, (newval)), 1)
#else
#define atomic_inc(env, p) __atomic_inc(env, p)
#define atomic_dec(env, p) __atomic_dec(env, p)
diff --git a/mp/mp_fget.c b/mp/mp_fget.c
index 5fdee5a..0b75f57 100644
--- a/mp/mp_fget.c
+++ b/mp/mp_fget.c
@@ -617,7 +617,7 @@ alloc: /* Allocate a new buffer header and data space. */
/* Initialize enough so we can call __memp_bhfree. */
alloc_bhp->flags = 0;
- atomic_init(&alloc_bhp->ref, 1);
+ atomic_init_db(&alloc_bhp->ref, 1);
#ifdef DIAGNOSTIC
if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
__db_errx(env,
@@ -911,7 +911,7 @@ alloc: /* Allocate a new buffer header and data space. */
MVCC_MPROTECT(bhp->buf, mfp->stat.st_pagesize,
PROT_READ);
- atomic_init(&alloc_bhp->ref, 1);
+ atomic_init_db(&alloc_bhp->ref, 1);
MUTEX_LOCK(env, alloc_bhp->mtx_buf);
alloc_bhp->priority = bhp->priority;
alloc_bhp->pgno = bhp->pgno;
diff --git a/mp/mp_mvcc.c b/mp/mp_mvcc.c
index 34467d2..f05aa0c 100644
--- a/mp/mp_mvcc.c
+++ b/mp/mp_mvcc.c
@@ -276,7 +276,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp)
#else
memcpy(frozen_bhp, bhp, SSZA(BH, buf));
#endif
- atomic_init(&frozen_bhp->ref, 0);
+ atomic_init_db(&frozen_bhp->ref, 0);
if (mutex != MUTEX_INVALID)
frozen_bhp->mtx_buf = mutex;
else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH,
@@ -428,7 +428,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp)
#endif
alloc_bhp->mtx_buf = mutex;
MUTEX_LOCK(env, alloc_bhp->mtx_buf);
- atomic_init(&alloc_bhp->ref, 1);
+ atomic_init_db(&alloc_bhp->ref, 1);
F_CLR(alloc_bhp, BH_FROZEN);
}
diff --git a/mp/mp_region.c b/mp/mp_region.c
index e6cece9..ddbe906 100644
--- a/mp/mp_region.c
+++ b/mp/mp_region.c
@@ -224,7 +224,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0)
return (ret);
SH_TAILQ_INIT(&htab[i].hash_bucket);
- atomic_init(&htab[i].hash_page_dirty, 0);
+ atomic_init_db(&htab[i].hash_page_dirty, 0);
}
/*
@@ -269,7 +269,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
hp->mtx_hash = (mtx_base == MUTEX_INVALID) ? MUTEX_INVALID :
mtx_base + i;
SH_TAILQ_INIT(&hp->hash_bucket);
- atomic_init(&hp->hash_page_dirty, 0);
+ atomic_init_db(&hp->hash_page_dirty, 0);
#ifdef HAVE_STATISTICS
hp->hash_io_wait = 0;
hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0;
diff --git a/mutex/mut_method.c b/mutex/mut_method.c
index 2588763..5c6d516 100644
--- a/mutex/mut_method.c
+++ b/mutex/mut_method.c
@@ -426,7 +426,7 @@ atomic_compare_exchange(env, v, oldval, newval)
MUTEX_LOCK(env, mtx);
ret = atomic_read(v) == oldval;
if (ret)
- atomic_init(v, newval);
+ atomic_init_db(v, newval);
MUTEX_UNLOCK(env, mtx);
return (ret);
diff --git a/mutex/mut_tas.c b/mutex/mut_tas.c
index f3922e0..e40fcdf 100644
--- a/mutex/mut_tas.c
+++ b/mutex/mut_tas.c
@@ -46,7 +46,7 @@ __db_tas_mutex_init(env, mutex, flags)
#ifdef HAVE_SHARED_LATCHES
if (F_ISSET(mutexp, DB_MUTEX_SHARED))
- atomic_init(&mutexp->sharecount, 0);
+ atomic_init_db(&mutexp->sharecount, 0);
else
#endif
if (MUTEX_INIT(&mutexp->tas)) {
@@ -486,7 +486,7 @@ __db_tas_mutex_unlock(env, mutex)
F_CLR(mutexp, DB_MUTEX_LOCKED);
/* Flush flag update before zeroing count */
MEMBAR_EXIT();
- atomic_init(&mutexp->sharecount, 0);
+ atomic_init_db(&mutexp->sharecount, 0);
} else {
DB_ASSERT(env, sharecount > 0);
MEMBAR_EXIT();

View File

@ -144,13 +144,6 @@ else()
set(ENABLE_WALLET ON CACHE BOOL "")
endif()
set(bdb_packages @bdb_packages@)
if("${wallet_packages}" STREQUAL "" OR "${bdb_packages}" STREQUAL "")
set(WITH_BDB OFF CACHE BOOL "")
else()
set(WITH_BDB ON CACHE BOOL "")
endif()
set(usdt_packages @usdt_packages@)
if("${usdt_packages}" STREQUAL "")
set(WITH_USDT OFF CACHE BOOL "")

View File

@ -34,33 +34,6 @@ Skip if you don't intend to use descriptor wallets.
pkg install sqlite3
```
###### Legacy Wallet Support
BerkeleyDB is only required if legacy wallet support is required.
It is required to use Berkeley DB 4.8. You **cannot** use the BerkeleyDB library
from ports. However, you can build DB 4.8 yourself [using depends](/depends).
```bash
pkg install gmake
gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_ZMQ=1 NO_USDT=1
```
When the build is complete, the Berkeley DB installation location will be displayed:
```
to: /path/to/bitcoin/depends/x86_64-unknown-freebsd[release-number]
```
Finally, set `BDB_PREFIX` to this path according to your shell:
```
csh: setenv BDB_PREFIX [path displayed above]
```
```
sh/bash: export BDB_PREFIX=[path displayed above]
```
#### GUI Dependencies
###### Qt6
@ -107,20 +80,13 @@ pkg install python3 databases/py-sqlite3 net/py-pyzmq
There are many ways to configure Bitcoin Core, here are a few common examples:
##### Descriptor Wallet and GUI:
This disables legacy wallet support and enables the GUI, assuming `sqlite` and `qt` are installed.
This enables the GUI, assuming `sqlite` and `qt` are installed.
```bash
cmake -B build -DWITH_BDB=OFF -DBUILD_GUI=ON
cmake -B build -DBUILD_GUI=ON
```
Run `cmake -B build -LH` to see the full list of available options.
##### Descriptor & Legacy Wallet. No GUI:
This enables support for both wallet types, assuming
`sqlite3` and `db4` are both installed.
```bash
cmake -B build -DBerkeleyDB_INCLUDE_DIR:PATH="${BDB_PREFIX}/include" -DWITH_BDB=ON
```
##### No Wallet or GUI
```bash
cmake -B build -DENABLE_WALLET=OFF

View File

@ -55,14 +55,6 @@ It is not necessary to build wallet functionality to run bitcoind or the GUI.
pkgin install sqlite3
```
###### Legacy Wallet Support
`db4` is required to enable support for legacy wallets.
```bash
pkgin install db4
```
#### GUI Dependencies
###### Qt6

View File

@ -26,35 +26,13 @@ git clone https://github.com/bitcoin/bitcoin.git
#### Wallet Dependencies
It is not necessary to build wallet functionality to run either `bitcoind` or `bitcoin-qt`.
SQLite is required to build the wallet.
###### Descriptor Wallet Support
SQLite is required to support [descriptor wallets](descriptors.md).
``` bash
pkg_add sqlite3
```
###### Legacy Wallet Support
BerkeleyDB is only required to support legacy wallets.
It is recommended to use Berkeley DB 4.8. You cannot use the BerkeleyDB library
from ports. However you can build it yourself, [using depends](/depends).
Refer to [depends/README.md](/depends/README.md) for detailed instructions.
```bash
gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_ZMQ=1 NO_USDT=1
...
to: /path/to/bitcoin/depends/*-unknown-openbsd*
```
Then set `BDB_PREFIX`:
```bash
export BDB_PREFIX="[path displayed above]"
```
#### GUI Dependencies
###### Qt6
@ -108,13 +86,6 @@ cmake -B build -DBUILD_GUI=ON
Run `cmake -B build -LH` to see the full list of available options.
##### Descriptor & Legacy Wallet. No GUI:
This enables support for both wallet types:
```bash
cmake -B build -DBerkeleyDB_INCLUDE_DIR:PATH="${BDB_PREFIX}/include" -DWITH_BDB=ON
```
### 2. Compile
```bash

View File

@ -74,14 +74,6 @@ It is not necessary to build wallet functionality to run `bitcoind` or `bitcoin
macOS ships with a useable `sqlite` package, meaning you don't need to
install anything.
###### Legacy Wallet Support
`berkeley-db@4` is only required to support for legacy wallets.
Skip if you don't intend to use legacy wallets.
``` bash
brew install berkeley-db@4
```
---
#### GUI Dependencies
@ -160,14 +152,6 @@ It is required that you have `python` and `zip` installed.
There are many ways to configure Bitcoin Core, here are a few common examples:
##### Wallet (BDB + SQlite) Support, No GUI:
If `berkeley-db@4` or `sqlite` are not installed, this will throw an error.
``` bash
cmake -B build -DWITH_BDB=ON
```
##### Wallet (only SQlite) and GUI Support:
This enables the GUI.

View File

@ -54,10 +54,6 @@ SQLite is required for the descriptor wallet:
sudo apt install libsqlite3-dev
Berkeley DB is only required for the legacy wallet. Ubuntu and Debian have their own `libdb-dev` and `libdb++-dev` packages,
but these will install Berkeley DB 5.3 or later. This will break binary wallet compatibility with the distributed
executables, which are based on BerkeleyDB 4.8. Otherwise, you can build Berkeley DB [yourself](#berkeley-db).
To build Bitcoin Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode)
ZMQ dependencies (provides ZMQ API):
@ -109,10 +105,6 @@ SQLite is required for the descriptor wallet:
sudo dnf install sqlite-devel
Berkeley DB is only required for the legacy wallet. Fedora releases have only `libdb-devel` and `libdb-cxx-devel` packages, but these will install
Berkeley DB 5.3 or later. This will break binary wallet compatibility with the distributed executables, which
are based on Berkeley DB 4.8. Otherwise, you can build Berkeley DB [yourself](#berkeley-db).
To build Bitcoin Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode)
ZMQ dependencies (provides ZMQ API):
@ -153,27 +145,6 @@ See [dependencies.md](dependencies.md) for a complete overview, and
[depends](/depends/README.md) on how to compile them yourself, if you wish to
not use the packages of your Linux distribution.
### Berkeley DB
The legacy wallet uses Berkeley DB. To ensure backwards compatibility it is
recommended to use Berkeley DB 4.8. If you have to build it yourself, and don't
want to use any other libraries built in depends, you can do:
```bash
make -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_ZMQ=1 NO_USDT=1
...
to: /path/to/bitcoin/depends/x86_64-pc-linux-gnu
```
and configure using the following:
```bash
export BDB_PREFIX="/path/to/bitcoin/depends/x86_64-pc-linux-gnu"
cmake -B build -DBerkeleyDB_INCLUDE_DIR:PATH="${BDB_PREFIX}/include" -DWITH_BDB=ON
```
**Note**: Make sure that `BDB_PREFIX` is an absolute path.
**Note**: You only need Berkeley DB if the legacy wallet is enabled (see [*Disable-wallet mode*](#disable-wallet-mode)).
Disable-wallet mode
--------------------
When the intention is to only run a P2P node, without a wallet, Bitcoin Core can
@ -181,7 +152,7 @@ be compiled in disable-wallet mode with:
cmake -B build -DENABLE_WALLET=OFF
In this case there is no dependency on SQLite or Berkeley DB.
In this case there is no dependency on SQLite.
Mining is also possible in disable-wallet mode using the `getblocktemplate` RPC call.
@ -204,4 +175,3 @@ This example lists the steps necessary to setup and build a command line only di
ctest --test-dir build
./build/bin/bitcoind
If you intend to work with legacy Berkeley DB wallets, see [Berkeley DB](#berkeley-db) section.

View File

@ -32,7 +32,6 @@ Bitcoin Core requires one of the following compilers.
| [qrencode](../depends/packages/qrencode.mk) (gui) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | N/A | No |
| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/archive/qt/) | [6.7.3](https://github.com/bitcoin/bitcoin/pull/30997) | [6.2](https://github.com/bitcoin/bitcoin/pull/30997) | No |
| [ZeroMQ](../depends/packages/zeromq.mk) (notifications) | [link](https://github.com/zeromq/libzmq/releases) | [4.3.4](https://github.com/bitcoin/bitcoin/pull/23956) | 4.0.0 | No |
| [Berkeley DB](../depends/packages/bdb.mk) (legacy wallet) | [link](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.30 | 4.8.x | No |
| [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No |
| Python (scripts, tests) | [link](https://www.python.org) | N/A | [3.10](https://github.com/bitcoin/bitcoin/pull/30527) | No |
| [systemtap](../depends/packages/systemtap.mk) ([tracing](tracing.md)) | [link](https://sourceware.org/systemtap/) | [4.8](https://github.com/bitcoin/bitcoin/pull/26945)| N/A | No |

View File

@ -11,8 +11,6 @@ Supporting RPCs are:
addresses.
- `listunspent` outputs a specialized descriptor for the reported unspent outputs.
- `getaddressinfo` outputs a descriptor for solvable addresses (since v0.18).
- `importmulti` takes as input descriptors to import into a legacy wallet
(since v0.18).
- `generatetodescriptor` takes as input a descriptor and generates coins to it
(`regtest` only, since v0.19).
- `utxoupdatepsbt` takes as input descriptors to add information to the psbt
@ -319,5 +317,5 @@ roughly 1 in a trillion chance of not detecting the errors.
All RPCs in Bitcoin Core will include the checksum in their output. Only
certain RPCs require checksums on input, including `deriveaddresses` and
`importmulti`. The checksum for a descriptor without one can be computed
`importdescriptors`. The checksum for a descriptor without one can be computed
using the `getdescriptorinfo` RPC.
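For reference, `getdescriptorinfo` remains the way to compute a missing checksum; a small usage sketch (the descriptor string is a placeholder, not taken from this diff):

```bash
# Ask the node to compute the checksum of a descriptor that lacks one.
bitcoin-cli getdescriptorinfo "wpkh([d34db33f/84h/0h/0h]xpub.../0/*)"
# The reply contains a "checksum" field; append it as "#<checksum>" when the
# descriptor is later passed to importdescriptors or deriveaddresses.
```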

View File

@ -8,14 +8,14 @@
- [Multi-wallet environment](#multi-wallet-environment)
- [Berkeley DB database based wallets](#berkeley-db-database-based-wallets)
- [SQLite database based wallets](#sqlite-database-based-wallets)
- [GUI settings](#gui-settings)
- [Legacy subdirectories and files](#legacy-subdirectories-and-files)
- [Berkeley DB database based wallets](#berkeley-db-database-based-wallets)
- [Notes](#notes)
## Data directory location
@ -75,7 +75,7 @@ Subdirectory | File(s) | Description
## Multi-wallet environment
Wallets are Berkeley DB (BDB) or SQLite databases.
Wallets are SQLite databases.
1. Each user-defined wallet named "wallet_name" resides in the `wallets/wallet_name/` subdirectory.
@ -88,15 +88,6 @@ Wallets are Berkeley DB (BDB) or SQLite databases.
5. Any copy or backup of the wallet should be done through a `backupwallet` call in order to update and lock the wallet, preventing any file corruption caused by updates during the copy.
### Berkeley DB database based wallets
Subdirectory | File(s) | Description
-------------|-------------------|-------------
`database/` | BDB logging files | Part of BDB environment; created at start and deleted on shutdown; a user *must keep it as safe* as personal wallet `wallet.dat`
`./` | `db.log` | BDB error file
`./` | `wallet.dat` | Personal wallet (a BDB database) with keys and transactions
`./` | `.walletlock` | BDB wallet lock file
### SQLite database based wallets
Subdirectory | File | Description
@ -123,6 +114,15 @@ Path | Description | Repository notes
`addr.dat` | Peer IP address BDB database; replaced by `peers.dat` in [0.7.0](https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-0.7.0.md) | [PR #1198](https://github.com/bitcoin/bitcoin/pull/1198), [`928d3a01`](https://github.com/bitcoin/bitcoin/commit/928d3a011cc66c7f907c4d053f674ea77dc611cc)
`onion_private_key` | Cached Tor onion service private key for `-listenonion` option. Was used for Tor v2 services; replaced by `onion_v3_private_key` in [0.21.0](https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-0.21.0.md) | [PR #19954](https://github.com/bitcoin/bitcoin/pull/19954)
### Berkeley DB database based wallets
Subdirectory | File(s) | Description
-------------|-------------------|-------------
`database/` | BDB logging files | Part of BDB environment; created at start and deleted on shutdown; a user *must keep it as safe* as personal wallet `wallet.dat`
`./` | `db.log` | BDB error file
`./` | `wallet.dat` | Personal wallet (a BDB database) with keys and transactions
`./` | `.walletlock` | BDB wallet lock file
## Notes
<a name="note1">1</a>. The `/` (slash, U+002F) is used as the platform-independent path component separator in this document.
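As the tables above note, copies of a wallet should go through the `backupwallet` RPC rather than a raw file copy; a minimal sketch (wallet name and destination path are placeholders):

```bash
# Produce a consistent snapshot of wallets/wallet_name/wallet.dat
bitcoin-cli -rpcwallet=wallet_name backupwallet /backups/wallet_name.bak
```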

View File

@ -52,8 +52,6 @@ During the generation of the build system only essential build options are enabl
Run `cmake -B build -LH` to see the full list of available options. GUI tools, such as `ccmake` and `cmake-gui`, can be also helpful.
If you do need the wallet enabled (`-DENABLE_WALLET=ON`), it is common for devs to use your system bdb version for the wallet, so you don't have to find a copy of bdb 4.8. Wallets from such a build will be incompatible with any release binary (and vice versa), so use with caution on mainnet.
### Make use of your threads with `-j`
If you have multiple threads on your machine, you can utilize all of them with:

View File

@ -97,54 +97,3 @@ hardware implementations will typically implement multiple roles simultaneously.
#### Multisig with multiple Bitcoin Core instances
For a quick start see [Basic M-of-N multisig example using descriptor wallets and PSBTs](./descriptors.md#basic-multisig-example).
If you are using legacy wallets feel free to continue with the example provided here.
Alice, Bob, and Carol want to create a 2-of-3 multisig address. They're all using
Bitcoin Core. We assume their wallets only contain the multisig funds. In case
they also have a personal wallet, this can be accomplished through the
multiwallet feature - possibly resulting in a need to add `-rpcwallet=name` to
the command line in case `bitcoin-cli` is used.
Setup:
- All three call `getnewaddress` to create a new address; call these addresses
*Aalice*, *Abob*, and *Acarol*.
- All three call `getaddressinfo "X"`, with *X* their respective address, and
remember the corresponding public keys. Call these public keys *Kalice*,
*Kbob*, and *Kcarol*.
- All three now run `addmultisigaddress 2 ["Kalice","Kbob","Kcarol"]` to teach
their wallet about the multisig script. Call the address produced by this
command *Amulti*. They may be required to explicitly specify the same
addresstype option each, to avoid constructing different versions due to
differences in configuration.
- They also run `importaddress "Amulti" "" false` to make their wallets treat
payments to *Amulti* as contributing to the watch-only balance.
- Others can verify the produced address by running
`createmultisig 2 ["Kalice","Kbob","Kcarol"]`, and expecting *Amulti* as
output. Again, it may be necessary to explicitly specify the addresstype
in order to get a result that matches. This command won't enable them to
initiate transactions later, however.
- They can now give out *Amulti* as address others can pay to.
Later, when *V* BTC has been received on *Amulti*, and Bob and Carol want to
move the coins in their entirety to address *Asend*, with no change. Alice
does not need to be involved.
- One of them - let's assume Carol here - initiates the creation. She runs
`walletcreatefundedpsbt [] {"Asend":V} 0 {"subtractFeeFromOutputs":[0], "includeWatching":true}`.
We call the resulting PSBT *P*. *P* does not contain any signatures.
- Carol needs to sign the transaction herself. In order to do so, she runs
`walletprocesspsbt "P"`, and gives the resulting PSBT *P2* to Bob.
- Bob inspects the PSBT using `decodepsbt "P2"` to determine if the transaction
has indeed just the expected input, and an output to *Asend*, and the fee is
reasonable. If he agrees, he calls `walletprocesspsbt "P2"` to sign. The
resulting PSBT *P3* contains both Carol's and Bob's signature.
- Now anyone can call `finalizepsbt "P3"` to extract a fully signed transaction
*T*.
- Finally anyone can broadcast the transaction using `sendrawtransaction "T"`.
In case there are more signers, it may be advantageous to let them all sign in
parallel, rather than passing the PSBT from one signer to the next one. In the
above example this would translate to Carol handing a copy of *P* to each signer
separately. They can then all invoke `walletprocesspsbt "P"`, and end up with
their individually-signed PSBT structures. They then all send those back to
Carol (or anyone) who can combine them using `combinepsbt`. The last two steps
(`finalizepsbt` and `sendrawtransaction`) remain unchanged.
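The PSBT round itself is unchanged for descriptor wallets; a condensed sketch of the `walletcreatefundedpsbt` / `walletprocesspsbt` / `finalizepsbt` / `sendrawtransaction` flow described above (wallet names and the destination address are placeholders, and `jq` is assumed to be installed):

```bash
# Carol funds and signs, Bob co-signs, anyone finalizes and broadcasts.
P=$(bitcoin-cli -rpcwallet=carol walletcreatefundedpsbt '[]' '{"ASEND_ADDRESS": 0.1}' | jq -r .psbt)
P2=$(bitcoin-cli -rpcwallet=carol walletprocesspsbt "$P" | jq -r .psbt)
P3=$(bitcoin-cli -rpcwallet=bob walletprocesspsbt "$P2" | jq -r .psbt)
T=$(bitcoin-cli finalizepsbt "$P3" | jq -r .hex)
bitcoin-cli sendrawtransaction "$T"
```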

View File

@ -41,7 +41,6 @@ static void SetupWalletToolArgs(ArgsManager& argsman)
argsman.AddArg("-descriptors", "Create descriptors wallet. Only for 'create'", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-legacy", "Create legacy wallet. Only for 'create'", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-withinternalbdb", "Use the internal Berkeley DB parser when dumping a Berkeley DB wallet file (default: false)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
argsman.AddCommand("info", "Get wallet info");
argsman.AddCommand("create", "Create new wallet file");

View File

@ -47,13 +47,9 @@ void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const
"-walletdir=<dir>",
"-walletnotify=<cmd>",
"-walletrbf",
"-dblogsize=<n>",
"-flushwallet",
"-privdb",
"-walletrejectlongchains",
"-walletcrosschain",
"-unsafesqlitesync",
"-swapbdbendian",
});
}

View File

@ -181,7 +181,7 @@ BOOST_AUTO_TEST_CASE(parse_hex)
result = TryParseHex<uint8_t>("12 34 56 78").value();
BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
// Leading space must be supported (used in BerkeleyEnvironment::Salvage)
// Leading space must be supported
expected = {0x89, 0x34, 0x56, 0x78};
result = ParseHex(" 89 34 56 78");
BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());

View File

@ -44,8 +44,3 @@ target_link_libraries(bitcoin_wallet
Boost::headers
$<TARGET_NAME_IF_EXISTS:USDT::headers>
)
if(USE_BDB)
target_sources(bitcoin_wallet PRIVATE bdb.cpp)
target_link_libraries(bitcoin_wallet PUBLIC BerkeleyDB::BerkeleyDB)
endif()

View File

@ -1,972 +0,0 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <compat/compat.h>
#include <logging.h>
#include <util/fs.h>
#include <util/time.h>
#include <wallet/bdb.h>
#include <wallet/db.h>
#include <sync.h>
#include <util/check.h>
#include <util/fs_helpers.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <stdint.h>
#include <db_cxx.h>
#include <sys/stat.h>
// Windows may not define S_IRUSR or S_IWUSR. We define both
// here, with the same values as glibc (see stat.h).
#ifdef WIN32
#ifndef S_IRUSR
#define S_IRUSR 0400
#define S_IWUSR 0200
#endif
#endif
static_assert(BDB_DB_FILE_ID_LEN == DB_FILE_ID_LEN, "DB_FILE_ID_LEN should be 20.");
namespace wallet {
namespace {
//! Make sure database has a unique fileid within the environment. If it
//! doesn't, throw an error. BDB caches do not work properly when more than one
//! open database has the same fileid (values written to one database may show
//! up in reads to other databases).
//!
//! BerkeleyDB generates unique fileids by default
//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html),
//! so bitcoin should never create different databases with the same fileid, but
//! this error can be triggered if users manually copy database files.
void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filename, Db& db, WalletDatabaseFileId& fileid)
{
if (env.IsMock()) return;
int ret = db.get_mpf()->get_fileid(fileid.value);
if (ret != 0) {
throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (get_fileid failed with %d)", filename, ret));
}
for (const auto& item : env.m_fileids) {
if (fileid == item.second && &fileid != &item.second) {
throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (duplicates fileid %s from %s)", filename,
HexStr(item.second.value), item.first));
}
}
}
RecursiveMutex cs_db;
std::map<std::string, std::weak_ptr<BerkeleyEnvironment>> g_dbenvs GUARDED_BY(cs_db); //!< Map from directory name to db environment.
} // namespace
static constexpr auto REVERSE_BYTE_ORDER{std::endian::native == std::endian::little ? 4321 : 1234};
bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId& rhs) const
{
return memcmp(value, &rhs.value, sizeof(value)) == 0;
}
/**
* @param[in] env_directory Path to environment directory
* @return A shared pointer to the BerkeleyEnvironment object for the wallet directory, never empty because ~BerkeleyEnvironment
* erases the weak pointer from the g_dbenvs map.
* @post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map.
*/
std::shared_ptr<BerkeleyEnvironment> GetBerkeleyEnv(const fs::path& env_directory, bool use_shared_memory)
{
LOCK(cs_db);
auto inserted = g_dbenvs.emplace(fs::PathToString(env_directory), std::weak_ptr<BerkeleyEnvironment>());
if (inserted.second) {
auto env = std::make_shared<BerkeleyEnvironment>(env_directory, use_shared_memory);
inserted.first->second = env;
return env;
}
return inserted.first->second.lock();
}
//
// BerkeleyBatch
//
void BerkeleyEnvironment::Close()
{
if (!fDbEnvInit)
return;
fDbEnvInit = false;
for (auto& db : m_databases) {
BerkeleyDatabase& database = db.second.get();
assert(database.m_refcount <= 0);
if (database.m_db) {
database.m_db->close(0);
database.m_db.reset();
}
}
FILE* error_file = nullptr;
dbenv->get_errfile(&error_file);
int ret = dbenv->close(0);
if (ret != 0)
LogPrintf("BerkeleyEnvironment::Close: Error %d closing database environment: %s\n", ret, DbEnv::strerror(ret));
if (!fMockDb)
DbEnv(uint32_t{0}).remove(strPath.c_str(), 0);
if (error_file) fclose(error_file);
UnlockDirectory(fs::PathFromString(strPath), ".walletlock");
}
void BerkeleyEnvironment::Reset()
{
dbenv.reset(new DbEnv(DB_CXX_NO_EXCEPTIONS));
fDbEnvInit = false;
fMockDb = false;
}
BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path, bool use_shared_memory) : strPath(fs::PathToString(dir_path)), m_use_shared_memory(use_shared_memory)
{
Reset();
}
BerkeleyEnvironment::~BerkeleyEnvironment()
{
LOCK(cs_db);
g_dbenvs.erase(strPath);
Close();
}
bool BerkeleyEnvironment::Open(bilingual_str& err)
{
if (fDbEnvInit) {
return true;
}
fs::path pathIn = fs::PathFromString(strPath);
TryCreateDirectories(pathIn);
if (util::LockDirectory(pathIn, ".walletlock") != util::LockResult::Success) {
LogPrintf("Cannot obtain a lock on wallet directory %s. Another instance may be using it.\n", strPath);
err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory())));
return false;
}
fs::path pathLogDir = pathIn / "database";
TryCreateDirectories(pathLogDir);
fs::path pathErrorFile = pathIn / "db.log";
LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", fs::PathToString(pathLogDir), fs::PathToString(pathErrorFile));
unsigned int nEnvFlags = 0;
if (!m_use_shared_memory) {
nEnvFlags |= DB_PRIVATE;
}
dbenv->set_lg_dir(fs::PathToString(pathLogDir).c_str());
dbenv->set_cachesize(0, 0x100000, 1); // 1 MiB should be enough for just the wallet
dbenv->set_lg_bsize(0x10000);
dbenv->set_lg_max(1048576);
dbenv->set_lk_max_locks(40000);
dbenv->set_lk_max_objects(40000);
dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a")); /// debug
dbenv->set_flags(DB_AUTO_COMMIT, 1);
dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1);
dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1);
int ret = dbenv->open(strPath.c_str(),
DB_CREATE |
DB_INIT_LOCK |
DB_INIT_LOG |
DB_INIT_MPOOL |
DB_INIT_TXN |
DB_THREAD |
DB_RECOVER |
nEnvFlags,
S_IRUSR | S_IWUSR);
if (ret != 0) {
LogPrintf("BerkeleyEnvironment::Open: Error %d opening database environment: %s\n", ret, DbEnv::strerror(ret));
int ret2 = dbenv->close(0);
if (ret2 != 0) {
LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2));
}
Reset();
err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory())));
if (ret == DB_RUNRECOVERY) {
err += Untranslated(" ") + _("This error could occur if this wallet was not shutdown cleanly and was last loaded using a build with a newer version of Berkeley DB. If so, please use the software that last loaded this wallet");
}
return false;
}
fDbEnvInit = true;
fMockDb = false;
return true;
}
//! Construct an in-memory mock Berkeley environment for testing
BerkeleyEnvironment::BerkeleyEnvironment() : m_use_shared_memory(false)
{
Reset();
LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::MakeMock\n");
dbenv->set_cachesize(1, 0, 1);
dbenv->set_lg_bsize(10485760 * 4);
dbenv->set_lg_max(10485760);
dbenv->set_lk_max_locks(10000);
dbenv->set_lk_max_objects(10000);
dbenv->set_flags(DB_AUTO_COMMIT, 1);
dbenv->log_set_config(DB_LOG_IN_MEMORY, 1);
int ret = dbenv->open(nullptr,
DB_CREATE |
DB_INIT_LOCK |
DB_INIT_LOG |
DB_INIT_MPOOL |
DB_INIT_TXN |
DB_THREAD |
DB_PRIVATE,
S_IRUSR | S_IWUSR);
if (ret > 0) {
throw std::runtime_error(strprintf("BerkeleyEnvironment::MakeMock: Error %d opening database environment.", ret));
}
fDbEnvInit = true;
fMockDb = true;
}
/** RAII class that automatically cleanses its data on destruction */
class SafeDbt final
{
Dbt m_dbt;
public:
// construct Dbt with internally-managed data
SafeDbt();
// construct Dbt with provided data
SafeDbt(void* data, size_t size);
~SafeDbt();
// delegate to Dbt
const void* get_data() const;
uint32_t get_size() const;
// conversion operator to access the underlying Dbt
operator Dbt*();
};
SafeDbt::SafeDbt()
{
m_dbt.set_flags(DB_DBT_MALLOC);
}
SafeDbt::SafeDbt(void* data, size_t size)
: m_dbt(data, size)
{
}
SafeDbt::~SafeDbt()
{
if (m_dbt.get_data() != nullptr) {
// Clear memory, e.g. in case it was a private key
memory_cleanse(m_dbt.get_data(), m_dbt.get_size());
// under DB_DBT_MALLOC, data is malloced by the Dbt, but must be
// freed by the caller.
// https://docs.oracle.com/cd/E17275_01/html/api_reference/C/dbt.html
if (m_dbt.get_flags() & DB_DBT_MALLOC) {
free(m_dbt.get_data());
}
}
}
const void* SafeDbt::get_data() const
{
return m_dbt.get_data();
}
uint32_t SafeDbt::get_size() const
{
return m_dbt.get_size();
}
SafeDbt::operator Dbt*()
{
return &m_dbt;
}
static std::span<const std::byte> SpanFromDbt(const SafeDbt& dbt)
{
return {reinterpret_cast<const std::byte*>(dbt.get_data()), dbt.get_size()};
}
BerkeleyDatabase::BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, fs::path filename, const DatabaseOptions& options) :
WalletDatabase(),
env(std::move(env)),
m_byteswap(options.require_format == DatabaseFormat::BERKELEY_SWAP),
m_filename(std::move(filename)),
m_max_log_mb(options.max_log_mb)
{
auto inserted = this->env->m_databases.emplace(m_filename, std::ref(*this));
assert(inserted.second);
}
bool BerkeleyDatabase::Verify(bilingual_str& errorStr)
{
fs::path walletDir = env->Directory();
fs::path file_path = walletDir / m_filename;
LogPrintf("Using BerkeleyDB version %s\n", BerkeleyDatabaseVersion());
LogPrintf("Using wallet %s\n", fs::PathToString(file_path));
if (!env->Open(errorStr)) {
return false;
}
if (fs::exists(file_path))
{
assert(m_refcount == 0);
Db db(env->dbenv.get(), 0);
const std::string strFile = fs::PathToString(m_filename);
int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
if (result != 0) {
errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), fs::quoted(fs::PathToString(file_path)));
return false;
}
}
// also return true if files does not exists
return true;
}
void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile)
{
dbenv->txn_checkpoint(0, 0, 0);
if (fMockDb)
return;
dbenv->lsn_reset(strFile.c_str(), 0);
}
BerkeleyDatabase::~BerkeleyDatabase()
{
if (env) {
LOCK(cs_db);
env->CloseDb(m_filename);
assert(!m_db);
size_t erased = env->m_databases.erase(m_filename);
assert(erased == 1);
env->m_fileids.erase(fs::PathToString(m_filename));
}
}
BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, bool fFlushOnCloseIn) : m_database(database)
{
database.AddRef();
database.Open();
fReadOnly = read_only;
fFlushOnClose = fFlushOnCloseIn;
env = database.env.get();
pdb = database.m_db.get();
strFile = fs::PathToString(database.m_filename);
}
void BerkeleyDatabase::Open()
{
unsigned int nFlags = DB_THREAD | DB_CREATE;
{
LOCK(cs_db);
bilingual_str open_err;
if (!env->Open(open_err))
throw std::runtime_error("BerkeleyDatabase: Failed to open database environment.");
if (m_db == nullptr) {
int ret;
std::unique_ptr<Db> pdb_temp = std::make_unique<Db>(env->dbenv.get(), 0);
const std::string strFile = fs::PathToString(m_filename);
bool fMockDb = env->IsMock();
if (fMockDb) {
DbMpoolFile* mpf = pdb_temp->get_mpf();
ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
if (ret != 0) {
throw std::runtime_error(strprintf("BerkeleyDatabase: Failed to configure for no temp file backing for database %s", strFile));
}
}
if (m_byteswap) {
pdb_temp->set_lorder(REVERSE_BYTE_ORDER);
}
ret = pdb_temp->open(nullptr, // Txn pointer
fMockDb ? nullptr : strFile.c_str(), // Filename
fMockDb ? strFile.c_str() : "main", // Logical db name
DB_BTREE, // Database type
nFlags, // Flags
0);
if (ret != 0) {
throw std::runtime_error(strprintf("BerkeleyDatabase: Error %d, can't open database %s", ret, strFile));
}
// Call CheckUniqueFileid on the containing BDB environment to
// avoid BDB data consistency bugs that happen when different data
// files in the same environment have the same fileid.
CheckUniqueFileid(*env, strFile, *pdb_temp, this->env->m_fileids[strFile]);
m_db.reset(pdb_temp.release());
}
}
}
void BerkeleyBatch::Flush()
{
if (activeTxn)
return;
// Flush database activity from memory pool to disk log
unsigned int nMinutes = 0;
if (fReadOnly)
nMinutes = 1;
if (env) { // env is nullptr for dummy databases (i.e. in tests). Don't actually flush if env is nullptr so we don't segfault
env->dbenv->txn_checkpoint(nMinutes ? m_database.m_max_log_mb * 1024 : 0, nMinutes, 0);
}
}
void BerkeleyDatabase::IncrementUpdateCounter()
{
++nUpdateCounter;
}
BerkeleyBatch::~BerkeleyBatch()
{
Close();
m_database.RemoveRef();
}
void BerkeleyBatch::Close()
{
if (!pdb)
return;
if (activeTxn)
activeTxn->abort();
activeTxn = nullptr;
pdb = nullptr;
if (fFlushOnClose)
Flush();
}
void BerkeleyEnvironment::CloseDb(const fs::path& filename)
{
{
LOCK(cs_db);
auto it = m_databases.find(filename);
assert(it != m_databases.end());
BerkeleyDatabase& database = it->second.get();
if (database.m_db) {
// Close the database handle
database.m_db->close(0);
database.m_db.reset();
}
}
}
void BerkeleyEnvironment::ReloadDbEnv()
{
// Make sure that no Db's are in use
AssertLockNotHeld(cs_db);
std::unique_lock<RecursiveMutex> lock(cs_db);
m_db_in_use.wait(lock, [this](){
for (auto& db : m_databases) {
if (db.second.get().m_refcount > 0) return false;
}
return true;
});
std::vector<fs::path> filenames;
filenames.reserve(m_databases.size());
for (const auto& it : m_databases) {
filenames.push_back(it.first);
}
// Close the individual Db's
for (const fs::path& filename : filenames) {
CloseDb(filename);
}
// Reset the environment
Flush(true); // This will flush and close the environment
Reset();
bilingual_str open_err;
Open(open_err);
}
DbTxn* BerkeleyEnvironment::TxnBegin(int flags)
{
DbTxn* ptxn = nullptr;
int ret = dbenv->txn_begin(nullptr, &ptxn, flags);
if (!ptxn || ret != 0)
return nullptr;
return ptxn;
}
bool BerkeleyDatabase::Rewrite(const char* pszSkip)
{
while (true) {
{
LOCK(cs_db);
const std::string strFile = fs::PathToString(m_filename);
if (m_refcount <= 0) {
// Flush log data to the dat file
env->CloseDb(m_filename);
env->CheckpointLSN(strFile);
m_refcount = -1;
bool fSuccess = true;
LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile);
std::string strFileRes = strFile + ".rewrite";
{ // surround usage of db with extra {}
BerkeleyBatch db(*this, true);
std::unique_ptr<Db> pdbCopy = std::make_unique<Db>(env->dbenv.get(), 0);
if (m_byteswap) {
pdbCopy->set_lorder(REVERSE_BYTE_ORDER);
}
int ret = pdbCopy->open(nullptr, // Txn pointer
strFileRes.c_str(), // Filename
"main", // Logical db name
DB_BTREE, // Database type
DB_CREATE, // Flags
0);
if (ret > 0) {
LogPrintf("BerkeleyBatch::Rewrite: Can't create database file %s\n", strFileRes);
fSuccess = false;
}
std::unique_ptr<DatabaseCursor> cursor = db.GetNewCursor();
if (cursor) {
while (fSuccess) {
DataStream ssKey{};
DataStream ssValue{};
DatabaseCursor::Status ret1 = cursor->Next(ssKey, ssValue);
if (ret1 == DatabaseCursor::Status::DONE) {
break;
} else if (ret1 == DatabaseCursor::Status::FAIL) {
fSuccess = false;
break;
}
if (pszSkip &&
strncmp((const char*)ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0)
continue;
if (strncmp((const char*)ssKey.data(), "\x07version", 8) == 0) {
// Update version:
ssValue.clear();
ssValue << CLIENT_VERSION;
}
Dbt datKey(ssKey.data(), ssKey.size());
Dbt datValue(ssValue.data(), ssValue.size());
int ret2 = pdbCopy->put(nullptr, &datKey, &datValue, DB_NOOVERWRITE);
if (ret2 > 0)
fSuccess = false;
}
cursor.reset();
}
if (fSuccess) {
db.Close();
env->CloseDb(m_filename);
if (pdbCopy->close(0))
fSuccess = false;
} else {
pdbCopy->close(0);
}
}
if (fSuccess) {
Db dbA(env->dbenv.get(), 0);
if (dbA.remove(strFile.c_str(), nullptr, 0))
fSuccess = false;
Db dbB(env->dbenv.get(), 0);
if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0))
fSuccess = false;
}
if (!fSuccess)
LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite database file %s\n", strFileRes);
return fSuccess;
}
}
UninterruptibleSleep(std::chrono::milliseconds{100});
}
}
void BerkeleyEnvironment::Flush(bool fShutdown)
{
const auto start{SteadyClock::now()};
// Flush log data to the actual data file on all files that are not in use
LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: [%s] Flush(%s)%s\n", strPath, fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
if (!fDbEnvInit)
return;
{
LOCK(cs_db);
bool no_dbs_accessed = true;
for (auto& db_it : m_databases) {
const fs::path& filename = db_it.first;
int nRefCount = db_it.second.get().m_refcount;
if (nRefCount < 0) continue;
const std::string strFile = fs::PathToString(filename);
LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
if (nRefCount == 0) {
// Move log data to the dat file
CloseDb(filename);
LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s checkpoint\n", strFile);
dbenv->txn_checkpoint(0, 0, 0);
LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s detach\n", strFile);
if (!fMockDb)
dbenv->lsn_reset(strFile.c_str(), 0);
LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile);
nRefCount = -1;
} else {
no_dbs_accessed = false;
}
}
LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
if (fShutdown) {
char** listp;
if (no_dbs_accessed) {
dbenv->log_archive(&listp, DB_ARCH_REMOVE);
Close();
if (!fMockDb) {
fs::remove_all(fs::PathFromString(strPath) / "database");
}
}
}
}
}
bool BerkeleyDatabase::PeriodicFlush()
{
// Don't flush if we can't acquire the lock.
TRY_LOCK(cs_db, lockDb);
if (!lockDb) return false;
// Don't flush if any databases are in use
for (auto& it : env->m_databases) {
if (it.second.get().m_refcount > 0) return false;
}
// Don't flush if there haven't been any batch writes for this database.
if (m_refcount < 0) return false;
const std::string strFile = fs::PathToString(m_filename);
LogDebug(BCLog::WALLETDB, "Flushing %s\n", strFile);
const auto start{SteadyClock::now()};
// Flush wallet file so it's self contained
env->CloseDb(m_filename);
env->CheckpointLSN(strFile);
m_refcount = -1;
LogDebug(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
return true;
}
bool BerkeleyDatabase::Backup(const std::string& strDest) const
{
const std::string strFile = fs::PathToString(m_filename);
while (true)
{
{
LOCK(cs_db);
if (m_refcount <= 0)
{
// Flush log data to the dat file
env->CloseDb(m_filename);
env->CheckpointLSN(strFile);
// Copy wallet file
fs::path pathSrc = env->Directory() / m_filename;
fs::path pathDest(fs::PathFromString(strDest));
if (fs::is_directory(pathDest))
pathDest /= m_filename;
try {
if (fs::exists(pathDest) && fs::equivalent(pathSrc, pathDest)) {
LogPrintf("cannot backup to wallet source file %s\n", fs::PathToString(pathDest));
return false;
}
fs::copy_file(pathSrc, pathDest, fs::copy_options::overwrite_existing);
LogPrintf("copied %s to %s\n", strFile, fs::PathToString(pathDest));
return true;
} catch (const fs::filesystem_error& e) {
LogWarning("error copying %s to %s - %s\n", strFile, fs::PathToString(pathDest), e.code().message());
return false;
}
}
}
UninterruptibleSleep(std::chrono::milliseconds{100});
}
}
void BerkeleyDatabase::Flush()
{
env->Flush(false);
}
void BerkeleyDatabase::Close()
{
env->Flush(true);
}
void BerkeleyDatabase::ReloadDbEnv()
{
env->ReloadDbEnv();
}
BerkeleyCursor::BerkeleyCursor(BerkeleyDatabase& database, const BerkeleyBatch& batch, std::span<const std::byte> prefix)
: m_key_prefix(prefix.begin(), prefix.end())
{
if (!database.m_db.get()) {
throw std::runtime_error(STR_INTERNAL_BUG("BerkeleyDatabase does not exist"));
}
// Transaction argument to cursor is only needed when using the cursor to
// write to the database. Read-only cursors do not need a txn pointer.
int ret = database.m_db->cursor(batch.txn(), &m_cursor, 0);
if (ret != 0) {
throw std::runtime_error(STR_INTERNAL_BUG(strprintf("BDB Cursor could not be created. Returned %d", ret)));
}
}
DatabaseCursor::Status BerkeleyCursor::Next(DataStream& ssKey, DataStream& ssValue)
{
if (m_cursor == nullptr) return Status::FAIL;
// Read at cursor
SafeDbt datKey(m_key_prefix.data(), m_key_prefix.size());
SafeDbt datValue;
int ret = -1;
if (m_first && !m_key_prefix.empty()) {
ret = m_cursor->get(datKey, datValue, DB_SET_RANGE);
} else {
ret = m_cursor->get(datKey, datValue, DB_NEXT);
}
m_first = false;
if (ret == DB_NOTFOUND) {
return Status::DONE;
}
if (ret != 0) {
return Status::FAIL;
}
std::span<const std::byte> raw_key = SpanFromDbt(datKey);
if (!m_key_prefix.empty() && std::mismatch(raw_key.begin(), raw_key.end(), m_key_prefix.begin(), m_key_prefix.end()).second != m_key_prefix.end()) {
return Status::DONE;
}
// Convert to streams
ssKey.clear();
ssKey.write(raw_key);
ssValue.clear();
ssValue.write(SpanFromDbt(datValue));
return Status::MORE;
}
BerkeleyCursor::~BerkeleyCursor()
{
if (!m_cursor) return;
m_cursor->close();
m_cursor = nullptr;
}
std::unique_ptr<DatabaseCursor> BerkeleyBatch::GetNewCursor()
{
if (!pdb) return nullptr;
return std::make_unique<BerkeleyCursor>(m_database, *this);
}
std::unique_ptr<DatabaseCursor> BerkeleyBatch::GetNewPrefixCursor(std::span<const std::byte> prefix)
{
if (!pdb) return nullptr;
return std::make_unique<BerkeleyCursor>(m_database, *this, prefix);
}
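A brief sketch of how these cursor factories would be consumed, assuming `batch` is a DatabaseBatch backed by this class and `prefix` is the std::span<const std::byte> key prefix of interest (illustrative only, not code from the tree):

    std::unique_ptr<DatabaseCursor> cursor = batch->GetNewPrefixCursor(prefix);
    if (cursor) {
        DataStream key, value;
        DatabaseCursor::Status status;
        while ((status = cursor->Next(key, value)) == DatabaseCursor::Status::MORE) {
            // key and value now hold the next record whose key starts with prefix
        }
        // status is DONE once the prefix range is exhausted, FAIL on a read error
    }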
bool BerkeleyBatch::TxnBegin()
{
if (!pdb || activeTxn)
return false;
DbTxn* ptxn = env->TxnBegin(DB_TXN_WRITE_NOSYNC);
if (!ptxn)
return false;
activeTxn = ptxn;
return true;
}
bool BerkeleyBatch::TxnCommit()
{
if (!pdb || !activeTxn)
return false;
int ret = activeTxn->commit(0);
activeTxn = nullptr;
return (ret == 0);
}
bool BerkeleyBatch::TxnAbort()
{
if (!pdb || !activeTxn)
return false;
int ret = activeTxn->abort();
activeTxn = nullptr;
return (ret == 0);
}
bool BerkeleyDatabaseSanityCheck()
{
int major, minor;
DbEnv::version(&major, &minor, nullptr);
/* If the major version differs, or the minor version of the library is *older*
 * than that of the headers we were compiled against, flag an error.
 */
if (major != DB_VERSION_MAJOR || minor < DB_VERSION_MINOR) {
LogPrintf("BerkeleyDB database version conflict: header version is %d.%d, library version is %d.%d\n",
DB_VERSION_MAJOR, DB_VERSION_MINOR, major, minor);
return false;
}
return true;
}
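As a concrete reading of the check above (illustrative version numbers only): with headers compiled against 4.8, a runtime library reporting 4.7 fails because its minor version is older than the headers', 4.8 or 4.9 passes, and any 5.x library fails because the major version differs.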
std::string BerkeleyDatabaseVersion()
{
return DbEnv::version(nullptr, nullptr, nullptr);
}
bool BerkeleyBatch::ReadKey(DataStream&& key, DataStream& value)
{
if (!pdb)
return false;
SafeDbt datKey(key.data(), key.size());
SafeDbt datValue;
int ret = pdb->get(activeTxn, datKey, datValue, 0);
if (ret == 0 && datValue.get_data() != nullptr) {
value.clear();
value.write(SpanFromDbt(datValue));
return true;
}
return false;
}
bool BerkeleyBatch::WriteKey(DataStream&& key, DataStream&& value, bool overwrite)
{
if (!pdb)
return false;
if (fReadOnly)
assert(!"Write called on database in read-only mode");
SafeDbt datKey(key.data(), key.size());
SafeDbt datValue(value.data(), value.size());
int ret = pdb->put(activeTxn, datKey, datValue, (overwrite ? 0 : DB_NOOVERWRITE));
return (ret == 0);
}
bool BerkeleyBatch::EraseKey(DataStream&& key)
{
if (!pdb)
return false;
if (fReadOnly)
assert(!"Erase called on database in read-only mode");
SafeDbt datKey(key.data(), key.size());
int ret = pdb->del(activeTxn, datKey, 0);
return (ret == 0 || ret == DB_NOTFOUND);
}
bool BerkeleyBatch::HasKey(DataStream&& key)
{
if (!pdb)
return false;
SafeDbt datKey(key.data(), key.size());
int ret = pdb->exists(activeTxn, datKey, 0);
return ret == 0;
}
bool BerkeleyBatch::ErasePrefix(std::span<const std::byte> prefix)
{
// Because this function erases records one by one, ensure that it is executed within a txn context.
// Otherwise, consistency is at risk; it's possible that certain records are removed while others
// remain due to an internal failure during the procedure.
// Additionally, the Dbc::del() cursor delete call below would fail without an active transaction.
if (!Assume(activeTxn)) return false;
auto cursor{std::make_unique<BerkeleyCursor>(m_database, *this)};
// const_cast is safe below even though prefix_key is an in/out parameter,
// because we are not using the DB_DBT_USERMEM flag, so BDB will allocate
// and return a different output data pointer
Dbt prefix_key{const_cast<std::byte*>(prefix.data()), static_cast<uint32_t>(prefix.size())}, prefix_value{};
int ret{cursor->dbc()->get(&prefix_key, &prefix_value, DB_SET_RANGE)};
for (int flag{DB_CURRENT}; ret == 0; flag = DB_NEXT) {
SafeDbt key, value;
ret = cursor->dbc()->get(key, value, flag);
if (ret != 0 || key.get_size() < prefix.size() || memcmp(key.get_data(), prefix.data(), prefix.size()) != 0) break;
ret = cursor->dbc()->del(0);
}
cursor.reset();
return ret == 0 || ret == DB_NOTFOUND;
}
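To make the transaction requirement above concrete, here is a minimal calling sketch (not taken from the tree; `database` and `prefix` are placeholders, and ErasePrefix is assumed to be invoked through the public DatabaseBatch interface):

    std::unique_ptr<DatabaseBatch> batch = database.MakeBatch();
    if (batch->TxnBegin()) {
        if (batch->ErasePrefix(prefix)) {
            batch->TxnCommit(); // all matching records removed atomically
        } else {
            batch->TxnAbort();  // leave the database unchanged on failure
        }
    }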
void BerkeleyDatabase::AddRef()
{
LOCK(cs_db);
if (m_refcount < 0) {
m_refcount = 1;
} else {
m_refcount++;
}
}
void BerkeleyDatabase::RemoveRef()
{
LOCK(cs_db);
m_refcount--;
if (env) env->m_db_in_use.notify_all();
}
std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(bool flush_on_close)
{
return std::make_unique<BerkeleyBatch>(*this, false, flush_on_close);
}
std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error)
{
fs::path data_file = BDBDataFile(path);
std::unique_ptr<BerkeleyDatabase> db;
{
LOCK(cs_db); // Lock env.m_databases until insert in BerkeleyDatabase constructor
fs::path data_filename = data_file.filename();
std::shared_ptr<BerkeleyEnvironment> env = GetBerkeleyEnv(data_file.parent_path(), options.use_shared_memory);
if (env->m_databases.count(data_filename)) {
error = Untranslated(strprintf("Refusing to load database. Data file '%s' is already loaded.", fs::PathToString(env->Directory() / data_filename)));
status = DatabaseStatus::FAILED_ALREADY_LOADED;
return nullptr;
}
db = std::make_unique<BerkeleyDatabase>(std::move(env), std::move(data_filename), options);
}
if (options.verify && !db->Verify(error)) {
status = DatabaseStatus::FAILED_VERIFY;
return nullptr;
}
status = DatabaseStatus::SUCCESS;
return db;
}
} // namespace wallet
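For context, a minimal sketch of how this factory fit together with the batch interface above; `wallet_path` is a placeholder, and the templated Read/Write helpers are assumed to be the ones DatabaseBatch provides in wallet/db.h:

    DatabaseOptions options;
    DatabaseStatus status;
    bilingual_str error;
    auto db = MakeBerkeleyDatabase(wallet_path, options, status, error);
    if (db) {
        std::unique_ptr<DatabaseBatch> batch = db->MakeBatch();
        batch->Write(std::string{"example-key"}, std::string{"example-value"});
        std::string value;
        if (batch->Read(std::string{"example-key"}, value)) {
            // value now holds "example-value"
        }
        batch.reset(); // flushes on close by default
        db->Close();   // also closes the environment if no other databases are open in it
    }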

View File

@ -1,225 +0,0 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_WALLET_BDB_H
#define BITCOIN_WALLET_BDB_H
#include <clientversion.h>
#include <common/system.h>
#include <serialize.h>
#include <streams.h>
#include <util/fs.h>
#include <wallet/db.h>
#include <atomic>
#include <condition_variable>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
struct bilingual_str;
class DbEnv;
class DbTxn;
class Db;
class Dbc;
// This constant was introduced in BDB 4.0.14 and has never changed, but there
// is a belt-and-suspenders check in the cpp file just in case.
#define BDB_DB_FILE_ID_LEN 20 /* Unique file ID length. */
namespace wallet {
struct WalletDatabaseFileId {
uint8_t value[BDB_DB_FILE_ID_LEN];
bool operator==(const WalletDatabaseFileId& rhs) const;
};
class BerkeleyDatabase;
class BerkeleyEnvironment
{
private:
bool fDbEnvInit;
bool fMockDb;
// Don't change into fs::path, as that can result in
// shutdown problems/crashes caused by a statically initialized internal pointer.
std::string strPath;
public:
std::unique_ptr<DbEnv> dbenv;
std::map<fs::path, std::reference_wrapper<BerkeleyDatabase>> m_databases;
std::unordered_map<std::string, WalletDatabaseFileId> m_fileids;
std::condition_variable_any m_db_in_use;
bool m_use_shared_memory;
explicit BerkeleyEnvironment(const fs::path& env_directory, bool use_shared_memory);
BerkeleyEnvironment();
~BerkeleyEnvironment();
void Reset();
bool IsMock() const { return fMockDb; }
bool IsInitialized() const { return fDbEnvInit; }
fs::path Directory() const { return fs::PathFromString(strPath); }
bool Open(bilingual_str& error);
void Close();
void Flush(bool fShutdown);
void CheckpointLSN(const std::string& strFile);
void CloseDb(const fs::path& filename);
void ReloadDbEnv();
DbTxn* TxnBegin(int flags);
};
/** Get BerkeleyEnvironment given a directory path. */
std::shared_ptr<BerkeleyEnvironment> GetBerkeleyEnv(const fs::path& env_directory, bool use_shared_memory);
class BerkeleyBatch;
/** An instance of this class represents one database.
* For BerkeleyDB this is just a (env, strFile) tuple.
**/
class BerkeleyDatabase : public WalletDatabase
{
public:
BerkeleyDatabase() = delete;
/** Create DB handle to real database */
BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, fs::path filename, const DatabaseOptions& options);
~BerkeleyDatabase() override;
/** Open the database if it is not already opened. */
void Open() override;
/** Rewrite the entire database on disk, skipping keys that begin with pszSkip when it is non-null
*/
bool Rewrite(const char* pszSkip=nullptr) override;
/** Indicate that a new database user has begun using the database. */
void AddRef() override;
/** Indicate that database user has stopped using the database and that it could be flushed or closed. */
void RemoveRef() override;
/** Back up the entire database to a file.
*/
bool Backup(const std::string& strDest) const override;
/** Make sure all changes are flushed to database file.
*/
void Flush() override;
/** Flush to the database file and close the database.
* Also close the environment if no other databases are open in it.
*/
void Close() override;
/* Flush the wallet passively (using TRY_LOCK);
intended to be called periodically. */
bool PeriodicFlush() override;
void IncrementUpdateCounter() override;
void ReloadDbEnv() override;
/** Verifies the environment and database file */
bool Verify(bilingual_str& error);
/** Return path to main database filename */
std::string Filename() override { return fs::PathToString(env->Directory() / m_filename); }
std::string Format() override { return "bdb"; }
/**
* Pointer to shared database environment.
*
* Normally there is only one BerkeleyDatabase object per
* BerkeleyEnvironment, but in the special, backwards compatible case where
* multiple wallet BDB data files are loaded from the same directory, this
* will point to a shared instance that gets freed when the last data file
* is closed.
*/
std::shared_ptr<BerkeleyEnvironment> env;
/** Database pointer. This is initialized lazily and reset during flushes, so it can be null. */
std::unique_ptr<Db> m_db;
// Whether to byteswap
bool m_byteswap;
fs::path m_filename;
int64_t m_max_log_mb;
/** Make a BerkeleyBatch connected to this database */
std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override;
};
class BerkeleyCursor : public DatabaseCursor
{
private:
Dbc* m_cursor;
std::vector<std::byte> m_key_prefix;
bool m_first{true};
public:
// Constructor for cursor for records matching the prefix
// To match all records, an empty prefix may be provided.
explicit BerkeleyCursor(BerkeleyDatabase& database, const BerkeleyBatch& batch, std::span<const std::byte> prefix = {});
~BerkeleyCursor() override;
Status Next(DataStream& key, DataStream& value) override;
Dbc* dbc() const { return m_cursor; }
};
/** RAII class that provides access to a Berkeley database */
class BerkeleyBatch : public DatabaseBatch
{
private:
bool ReadKey(DataStream&& key, DataStream& value) override;
bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override;
bool EraseKey(DataStream&& key) override;
bool HasKey(DataStream&& key) override;
bool ErasePrefix(std::span<const std::byte> prefix) override;
protected:
Db* pdb{nullptr};
std::string strFile;
DbTxn* activeTxn{nullptr};
bool fReadOnly;
bool fFlushOnClose;
BerkeleyEnvironment *env;
BerkeleyDatabase& m_database;
public:
explicit BerkeleyBatch(BerkeleyDatabase& database, const bool fReadOnly, bool fFlushOnCloseIn=true);
~BerkeleyBatch() override;
BerkeleyBatch(const BerkeleyBatch&) = delete;
BerkeleyBatch& operator=(const BerkeleyBatch&) = delete;
void Flush() override;
void Close() override;
std::unique_ptr<DatabaseCursor> GetNewCursor() override;
std::unique_ptr<DatabaseCursor> GetNewPrefixCursor(std::span<const std::byte> prefix) override;
bool TxnBegin() override;
bool TxnCommit() override;
bool TxnAbort() override;
bool HasActiveTxn() override { return activeTxn != nullptr; }
DbTxn* txn() const { return activeTxn; }
};
std::string BerkeleyDatabaseVersion();
/** Perform sanity check of runtime BDB version versus linked BDB version.
*/
bool BerkeleyDatabaseSanityCheck();
//! Return object giving access to Berkeley database at specified path.
std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
} // namespace wallet
#endif // BITCOIN_WALLET_BDB_H

View File

@ -155,8 +155,6 @@ void ReadDatabaseArgs(const ArgsManager& args, DatabaseOptions& options)
{
// Override current options with args values, if any were specified
options.use_unsafe_sync = args.GetBoolArg("-unsafesqlitesync", options.use_unsafe_sync);
options.use_shared_memory = !args.GetBoolArg("-privdb", !options.use_shared_memory);
options.max_log_mb = args.GetIntArg("-dblogsize", options.max_log_mb);
}
} // namespace wallet

View File

@ -184,7 +184,6 @@ public:
enum class DatabaseFormat {
SQLITE,
BERKELEY_RO,
BERKELEY_SWAP,
};
struct DatabaseOptions {

View File

@ -18,9 +18,6 @@
#include <util/check.h>
#include <util/moneystr.h>
#include <util/translation.h>
#ifdef USE_BDB
#include <wallet/bdb.h>
#endif
#include <wallet/coincontrol.h>
#include <wallet/wallet.h>
#include <walletinitinterface.h>
@ -81,15 +78,6 @@ void WalletInit::AddWalletOptions(ArgsManager& argsman) const
#endif
argsman.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
#ifdef USE_BDB
argsman.AddArg("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DatabaseOptions().max_log_mb), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
argsman.AddArg("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
argsman.AddArg("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", !DatabaseOptions().use_shared_memory), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
argsman.AddArg("-swapbdbendian", "Swaps the internal endianness of BDB wallet databases (default: false)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
#else
argsman.AddHiddenArgs({"-dblogsize", "-flushwallet", "-privdb", "-swapbdbendian"});
#endif
argsman.AddArg("-unsafesqlitesync", "Set SQLite synchronous=OFF to disable waiting for the database to sync to disk. This is unsafe and can cause data loss and corruption. This option is only used by tests to improve their performance (default: false)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
argsman.AddArg("-walletrejectlongchains", strprintf("Wallet will not create transactions that violate mempool chain limits (default: %u)", DEFAULT_WALLET_REJECT_LONG_CHAINS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);

View File

@ -159,10 +159,6 @@ void StartWallets(WalletContext& context)
pwallet->postInitProcess();
}
// Schedule periodic wallet flushes and tx rebroadcasts
if (context.args->GetBoolArg("-flushwallet", DEFAULT_FLUSHWALLET)) {
context.scheduler->scheduleEvery([&context] { MaybeCompactWalletDB(context); }, 500ms);
}
context.scheduler->scheduleEvery([&context] { MaybeResendWalletTxs(context); }, 1min);
}

View File

@ -50,7 +50,7 @@ static RPCHelpMan getwalletinfo()
{
{RPCResult::Type::STR, "walletname", "the wallet name"},
{RPCResult::Type::NUM, "walletversion", "the wallet version"},
{RPCResult::Type::STR, "format", "the database format (bdb or sqlite)"},
{RPCResult::Type::STR, "format", "the database format (only sqlite)"},
{RPCResult::Type::STR_AMOUNT, "balance", "DEPRECATED. Identical to getbalances().mine.trusted"},
{RPCResult::Type::STR_AMOUNT, "unconfirmed_balance", "DEPRECATED. Identical to getbalances().mine.untrusted_pending"},
{RPCResult::Type::STR_AMOUNT, "immature_balance", "DEPRECATED. Identical to getbalances().mine.immature"},

View File

@ -2,7 +2,6 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@ -10,7 +9,6 @@
#include <util/fs.h>
#include <util/time.h>
#include <util/translation.h>
#include <wallet/bdb.h>
#include <wallet/db.h>
#include <wallet/dump.h>
#include <wallet/migrate.h>
@ -18,13 +16,6 @@
#include <fstream>
#include <iostream>
// There is an inconsistency in BDB on Windows.
// See: https://github.com/bitcoin/bitcoin/pull/26606#issuecomment-2322763212
#undef USE_BDB_NON_MSVC
#if defined(USE_BDB) && !defined(_MSC_VER)
#define USE_BDB_NON_MSVC
#endif
using wallet::DatabaseOptions;
using wallet::DatabaseStatus;
@ -57,17 +48,10 @@ FUZZ_TARGET(wallet_bdb_parser, .init = initialize_wallet_bdb_parser)
}
g_setup->m_args.ForceSetArg("-dumpfile", fs::PathToString(bdb_ro_dumpfile));
#ifdef USE_BDB_NON_MSVC
bool bdb_ro_err = false;
bool bdb_ro_strict_err = false;
#endif
auto db{MakeBerkeleyRODatabase(wallet_path, options, status, error)};
if (db) {
assert(DumpWallet(g_setup->m_args, *db, error));
} else {
#ifdef USE_BDB_NON_MSVC
bdb_ro_err = true;
#endif
if (error.original.starts_with("AutoFile::ignore: end of file") ||
error.original.starts_with("AutoFile::read: end of file") ||
error.original.starts_with("AutoFile::seek: ") ||
@ -97,47 +81,8 @@ FUZZ_TARGET(wallet_bdb_parser, .init = initialize_wallet_bdb_parser)
error.original == "Subdatabase has an unexpected name" ||
error.original == "Unsupported BDB data file version number" ||
error.original == "BDB builtin encryption is not supported") {
#ifdef USE_BDB_NON_MSVC
bdb_ro_strict_err = true;
#endif
} else {
throw std::runtime_error(error.original);
}
}
#ifdef USE_BDB_NON_MSVC
// Try opening with BDB
fs::path bdb_dumpfile{g_setup->m_args.GetDataDirNet() / "fuzzed_dumpfile_bdb.dump"};
if (fs::exists(bdb_dumpfile)) { // Writing into an existing dump file will throw an exception
remove(bdb_dumpfile);
}
g_setup->m_args.ForceSetArg("-dumpfile", fs::PathToString(bdb_dumpfile));
try {
auto db{MakeBerkeleyDatabase(wallet_path, options, status, error)};
if (bdb_ro_err && !db) {
return;
}
assert(db);
if (bdb_ro_strict_err) {
// BerkeleyRO will be stricter than BDB. Ignore when those specific errors are hit.
return;
}
assert(!bdb_ro_err);
assert(DumpWallet(g_setup->m_args, *db, error));
} catch (const std::runtime_error& e) {
if (bdb_ro_err) return;
throw e;
}
// Make sure the dumpfiles match
if (fs::exists(bdb_ro_dumpfile) && fs::exists(bdb_dumpfile)) {
std::ifstream bdb_ro_dump(bdb_ro_dumpfile, std::ios_base::binary | std::ios_base::in);
std::ifstream bdb_dump(bdb_dumpfile, std::ios_base::binary | std::ios_base::in);
assert(std::equal(
std::istreambuf_iterator<char>(bdb_ro_dump.rdbuf()),
std::istreambuf_iterator<char>(),
std::istreambuf_iterator<char>(bdb_dump.rdbuf())));
}
#endif
}

View File

@ -18,9 +18,6 @@
#include <util/fs.h>
#include <util/time.h>
#include <util/translation.h>
#ifdef USE_BDB
#include <wallet/bdb.h>
#endif
#include <wallet/migrate.h>
#include <wallet/sqlite.h>
#include <wallet/wallet.h>

View File

@ -32,15 +32,8 @@ struct WalletContext;
*
* - WalletBatch is an abstract modifier object for the wallet database, and encapsulates a database
* batch update as well as methods to act on the database. It should be agnostic to the database implementation.
*
* The following classes are implementation specific:
* - BerkeleyEnvironment is an environment in which the database exists.
* - BerkeleyDatabase represents a wallet database.
* - BerkeleyBatch is a low-level database batch update.
*/
static const bool DEFAULT_FLUSHWALLET = true;
/** Error statuses for the wallet database.
* Values are in order of severity. When multiple errors occur, the most severe (highest value) will be returned.
*/

View File

@ -168,7 +168,7 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command)
options.require_existing = true;
DatabaseStatus status;
if (args.GetBoolArg("-withinternalbdb", false) && IsBDBFile(BDBDataFile(path))) {
if (IsBDBFile(BDBDataFile(path))) {
options.require_format = DatabaseFormat::BERKELEY_RO;
}

View File

@ -16,7 +16,6 @@ function(create_test_config)
endmacro()
set_configure_variable(ENABLE_WALLET ENABLE_WALLET)
set_configure_variable(WITH_BDB USE_BDB)
set_configure_variable(BUILD_CLI BUILD_BITCOIN_CLI)
set_configure_variable(BUILD_UTIL BUILD_BITCOIN_UTIL)
set_configure_variable(BUILD_UTIL_CHAINSTATE BUILD_BITCOIN_CHAINSTATE)

View File

@ -16,7 +16,6 @@ RPCAUTH=@abs_top_srcdir@/share/rpcauth/rpcauth.py
[components]
# Which components are enabled. These are commented out by cmake if they were disabled during configuration.
@ENABLE_WALLET_TRUE@ENABLE_WALLET=true
@USE_BDB_TRUE@USE_BDB=true
@BUILD_BITCOIN_CLI_TRUE@ENABLE_CLI=true
@BUILD_BITCOIN_UTIL_TRUE@ENABLE_BITCOIN_UTIL=true
@BUILD_BITCOIN_CHAINSTATE_TRUE@ENABLE_BITCOIN_CHAINSTATE=true

View File

@ -1,210 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Utilities for working directly with the wallet's BDB database file
This is specific to the configuration of BDB used in this project:
- Outer database contains a single subdatabase named 'main'
- btree
- btree internal, leaf and overflow pages
Each key-value pair is stored as two consecutive entries in a btree leaf; an entry refers to
overflow pages when its data does not fit into a single page. The first entry of a pair is the
key and the entry that follows is its value, and so on. Entry data is not itself stored in
order within a page; instead, the entry offsets are stored in order and are used to retrieve
the data. Note that this implementation currently only supports reading databases that are in
the same endianness as the host.
Page format can be found in BDB source code dbinc/db_page.h
`db_dump -da wallet.dat` is useful to see the data in a wallet.dat BDB file
"""
import struct
# Important constants
PAGE_HEADER_SIZE = 26
OUTER_META_PAGE = 0
# Page type values
BTREE_INTERNAL = 3
BTREE_LEAF = 5
OVERFLOW_DATA = 7
BTREE_META = 9
# Record type values
RECORD_KEYDATA = 1
RECORD_OVERFLOW_DATA = 3
# Some magic numbers for sanity checking
BTREE_MAGIC = 0x053162
DB_VERSION = 9
SUBDATABASE_NAME = b'main'
# Deserializes an internal, leaf or overflow page into a dict.
# In addition to the common page header fields, the result contains an 'entries'
# array of dicts with the following fields, depending on the page type:
# internal page [BTREE_INTERNAL]:
# - 'page_num': referenced page number (used to find further pages to process)
# leaf page [BTREE_LEAF]:
# - 'record_type': record type, must be RECORD_KEYDATA or RECORD_OVERFLOW_DATA
# - 'data': binary data (key or value payload), if record type is RECORD_KEYDATA
# - 'page_num': referenced overflow page number, if record type is RECORD_OVERFLOW_DATA
# overflow page [OVERFLOW_DATA]:
# - 'data': binary data (part of key or value payload)
def dump_page(data):
page_info = {}
page_header = data[0:26]
_, pgno, prev_pgno, next_pgno, entries, hf_offset, level, pg_type = struct.unpack('QIIIHHBB', page_header)
page_info['pgno'] = pgno
page_info['prev_pgno'] = prev_pgno
page_info['next_pgno'] = next_pgno
page_info['hf_offset'] = hf_offset
page_info['level'] = level
page_info['pg_type'] = pg_type
page_info['entry_offsets'] = struct.unpack('{}H'.format(entries), data[26:26 + entries * 2])
page_info['entries'] = []
assert pg_type in (BTREE_INTERNAL, BTREE_LEAF, OVERFLOW_DATA)
if pg_type == OVERFLOW_DATA:
assert entries == 1
page_info['entries'].append({'data': data[26:26 + hf_offset]})
return page_info
for i in range(0, entries):
entry = {}
offset = page_info['entry_offsets'][i]
record_header = data[offset:offset + 3]
offset += 3
e_len, record_type = struct.unpack('HB', record_header)
if pg_type == BTREE_INTERNAL:
assert record_type == RECORD_KEYDATA
internal_record_data = data[offset:offset + 9]
_, page_num, _ = struct.unpack('=BII', internal_record_data)
entry['page_num'] = page_num
elif pg_type == BTREE_LEAF:
assert record_type in (RECORD_KEYDATA, RECORD_OVERFLOW_DATA)
entry['record_type'] = record_type
if record_type == RECORD_KEYDATA:
entry['data'] = data[offset:offset + e_len]
elif record_type == RECORD_OVERFLOW_DATA:
overflow_record_data = data[offset:offset + 9]
_, page_num, _ = struct.unpack('=BII', overflow_record_data)
entry['page_num'] = page_num
page_info['entries'].append(entry)
return page_info
# Deserializes a btree metadata page into a dict.
# Does a simple sanity check on the magic value, type, and version
def dump_meta_page(page):
# metadata page
# general metadata
metadata = {}
meta_page = page[0:72]
_, pgno, magic, version, pagesize, encrypt_alg, pg_type, metaflags, _, free, last_pgno, nparts, key_count, record_count, flags, uid = struct.unpack('QIIIIBBBBIIIIII20s', meta_page)
metadata['pgno'] = pgno
metadata['magic'] = magic
metadata['version'] = version
metadata['pagesize'] = pagesize
metadata['encrypt_alg'] = encrypt_alg
metadata['pg_type'] = pg_type
metadata['metaflags'] = metaflags
metadata['free'] = free
metadata['last_pgno'] = last_pgno
metadata['nparts'] = nparts
metadata['key_count'] = key_count
metadata['record_count'] = record_count
metadata['flags'] = flags
metadata['uid'] = uid.hex().encode()
assert magic == BTREE_MAGIC, 'bdb magic does not match bdb btree magic'
assert pg_type == BTREE_META, 'Metadata page is not a btree metadata page'
assert version == DB_VERSION, 'Database too new'
# btree metadata
btree_meta_page = page[72:512]
_, minkey, re_len, re_pad, root, _, crypto_magic, _, iv, chksum = struct.unpack('IIIII368sI12s16s20s', btree_meta_page)
metadata['minkey'] = minkey
metadata['re_len'] = re_len
metadata['re_pad'] = re_pad
metadata['root'] = root
metadata['crypto_magic'] = crypto_magic
metadata['iv'] = iv.hex().encode()
metadata['chksum'] = chksum.hex().encode()
return metadata
# Given the dict produced by dump_page for a leaf page, get the key-value pairs and put them into a dict
def extract_kv_pairs(page_data, pages):
out = {}
last_key = None
for i, entry in enumerate(page_data['entries']):
data = b''
if entry['record_type'] == RECORD_KEYDATA:
data = entry['data']
elif entry['record_type'] == RECORD_OVERFLOW_DATA:
next_page = entry['page_num']
while next_page != 0:
opage = pages[next_page]
opage_info = dump_page(opage)
data += opage_info['entries'][0]['data']
next_page = opage_info['next_pgno']
# By virtue of these all being pairs, even-numbered entries are keys and odd-numbered entries are values
if i % 2 == 0:
    # Use the assembled data so keys stored in overflow pages are handled too
    out[data] = b''
    last_key = data
else:
    out[last_key] = data
return out
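As a concrete illustration of the pairing logic above (hypothetical data, not from a real wallet file): a leaf page whose entries deserialize to the byte strings k1, v1, k2, v2 yields

    {k1: v1, k2: v2}

because even-indexed entries are keys and the entry immediately after each key holds its value.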
# Extract the key-value pairs of the BDB file given in filename
def dump_bdb_kv(filename):
# Read in the BDB file and start deserializing it
pages = []
with open(filename, 'rb') as f:
# Determine pagesize first
data = f.read(PAGE_HEADER_SIZE)
pagesize = struct.unpack('I', data[20:24])[0]
assert pagesize in (512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
# Read rest of first page
data += f.read(pagesize - PAGE_HEADER_SIZE)
assert len(data) == pagesize
# Read all remaining pages
while len(data) > 0:
pages.append(data)
data = f.read(pagesize)
# Sanity check the meta pages, read root page
outer_meta_info = dump_meta_page(pages[OUTER_META_PAGE])
root_page_info = dump_page(pages[outer_meta_info['root']])
assert root_page_info['pg_type'] == BTREE_LEAF
assert len(root_page_info['entries']) == 2
assert root_page_info['entries'][0]['data'] == SUBDATABASE_NAME
assert len(root_page_info['entries'][1]['data']) == 4
inner_meta_page = int.from_bytes(root_page_info['entries'][1]['data'], 'big')
inner_meta_info = dump_meta_page(pages[inner_meta_page])
# Fetch the kv pairs from the pages
kv = {}
pages_to_process = [inner_meta_info['root']]
while len(pages_to_process) > 0:
curr_page_no = pages_to_process.pop()
assert curr_page_no <= outer_meta_info['last_pgno']
info = dump_page(pages[curr_page_no])
assert info['pg_type'] in (BTREE_INTERNAL, BTREE_LEAF)
if info['pg_type'] == BTREE_INTERNAL:
for entry in info['entries']:
pages_to_process.append(entry['page_num'])
elif info['pg_type'] == BTREE_LEAF:
info_kv = extract_kv_pairs(info, pages)
kv = {**kv, **info_kv}
return kv
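A minimal usage sketch for this module (not part of the original file; the wallet path is a placeholder and the file must have been written with the host's endianness):

    if __name__ == '__main__':
        import sys
        kv = dump_bdb_kv(sys.argv[1] if len(sys.argv) > 1 else 'wallet.dat')
        for key, value in kv.items():
            print(key.hex(), value.hex())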

View File

@ -982,11 +982,6 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_bdb(self):
"""Skip the running test if BDB has not been compiled."""
if not self.is_bdb_compiled():
raise SkipTest("BDB has not been compiled.")
def skip_if_no_wallet_tool(self):
"""Skip the running test if bitcoin-wallet has not been compiled."""
if not self.is_wallet_tool_compiled():
@ -1057,9 +1052,5 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Checks whether the USDT tracepoints were compiled."""
return self.config["components"].getboolean("ENABLE_USDT_TRACEPOINTS")
def is_bdb_compiled(self):
"""Checks whether the wallet module was compiled with BDB support."""
return self.config["components"].getboolean("USE_BDB")
def has_blockfile(self, node, filenum: str):
return (node.blocks_path/ f"blk{filenum}.dat").is_file()

View File

@ -15,7 +15,6 @@ from test_framework.address import (
script_to_p2sh,
script_to_p2wsh,
)
from test_framework.bdb import BTREE_MAGIC
from test_framework.descriptors import descsum_create
from test_framework.key import ECPubKey
from test_framework.test_framework import BitcoinTestFramework
@ -33,6 +32,8 @@ from test_framework.wallet_util import (
generate_keypair,
)
BTREE_MAGIC = 0x053162
class WalletMigrationTest(BitcoinTestFramework):
def set_test_params(self):

View File

@ -45,7 +45,6 @@ KNOWN_VIOLATIONS = [
"src/dbwrapper.cpp:.*vsnprintf",
"src/test/fuzz/locale.cpp:.*setlocale",
"src/test/util_tests.cpp:.*strtoll",
"src/wallet/bdb.cpp:.*DbEnv::strerror", # False positive
"src/util/syserror.cpp:.*strerror", # Outside this function use `SysErrorString`
]

View File

@ -7,7 +7,6 @@
race:LoadWallet
race:WalletBatch::WriteHDChain
race:BerkeleyBatch
race:BerkeleyDatabase
race:DatabaseBatch
race:zmq::*
race:bitcoin-qt
@ -25,8 +24,6 @@ race:src/qt/test/*
deadlock:src/qt/test/*
# External libraries
# https://github.com/bitcoin/bitcoin/pull/27658#issuecomment-1547639621
deadlock:libdb
race:libzmq
# Intermittent issues