diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 39ffff97d2..da4f99fb99 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -586,7 +586,7 @@ private:
      * @param[in] maybe_add_extra_compact_tx Whether this tx should be added to vExtraTxnForCompact.
      *                                        Set to false if the tx has already been rejected before,
      *                                        e.g. is an orphan, to avoid adding duplicate entries.
-     * Updates m_txrequest, m_recent_rejects, m_orphanage, and vExtraTxnForCompact. */
+     * Updates m_txrequest, m_recent_rejects, m_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. */
     void ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result, bool maybe_add_extra_compact_tx)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
 
@@ -596,6 +596,45 @@ private:
     void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
 
+    /** Handle the results of package validation: calls ProcessValidTx and ProcessInvalidTx for
+     * individual transactions, and caches rejection for the package as a group.
+     * @param[in]   senders     Must contain the nodeids of the peers that provided each transaction
+     *                          in package, in the same order.
+     * */
+    void ProcessPackageResult(const Package& package, const PackageMempoolAcceptResult& package_result, const std::vector<NodeId>& senders)
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
+
+    /** A package to validate */
+    struct PackageToValidate {
+        const Package m_txns;
+        const std::vector<NodeId> m_senders;
+        /** Construct a 1-parent-1-child package. */
+        explicit PackageToValidate(const CTransactionRef& parent,
+                                   const CTransactionRef& child,
+                                   NodeId parent_sender,
+                                   NodeId child_sender) :
+            m_txns{parent, child},
+            m_senders {parent_sender, child_sender}
+        {}
+
+        std::string ToString() const {
+            Assume(m_txns.size() == 2);
+            return strprintf("parent %s (wtxid=%s, sender=%d) + child %s (wtxid=%s, sender=%d)",
+                             m_txns.front()->GetHash().ToString(),
+                             m_txns.front()->GetWitnessHash().ToString(),
+                             m_senders.front(),
+                             m_txns.back()->GetHash().ToString(),
+                             m_txns.back()->GetWitnessHash().ToString(),
+                             m_senders.back());
+        }
+    };
+
+    /** Look for a child of this transaction in the orphanage to form a 1-parent-1-child package,
+     * skipping any combinations that have already been tried. Return the resulting package along with
+     * the senders of its respective transactions, or std::nullopt if no package is found. */
+    std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid)
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
+
     /**
      * Reconsider orphan transactions after a parent has been accepted to the mempool.
     *
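// Reviewer illustration (not part of the patch): how the declarations above are wired together
// later in this diff. When a transaction fails with TX_RECONSIDERABLE, or arrives while already
// in m_recent_rejects_reconsiderable, ProcessMessage() does roughly the following (all names are
// taken from this patch):
//
//     if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) {
//         const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool,
//                                                     package_to_validate->m_txns,
//                                                     /*test_accept=*/false,
//                                                     /*client_maxfeerate=*/std::nullopt)};
//         ProcessPackageResult(package_to_validate->m_txns, package_result, package_to_validate->m_senders);
//     }
//
// i.e. Find1P1CPackage() selects the candidate child, ProcessNewPackage() validates the pair as
// a package, and ProcessPackageResult() records the per-transaction and per-package outcomes.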
@@ -806,7 +845,16 @@ private:
     /** Stalling timeout for blocks in IBD */
     std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};
 
-    bool AlreadyHaveTx(const GenTxid& gtxid)
+    /** Check whether we already have this gtxid in:
+     *  - mempool
+     *  - orphanage
+     *  - m_recent_rejects
+     *  - m_recent_rejects_reconsiderable (if include_reconsiderable = true)
+     *  - m_recent_confirmed_transactions
+     *  Also responsible for resetting m_recent_rejects and m_recent_rejects_reconsiderable if the
+     *  chain tip has changed.
+     * */
+    bool AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);
 
     /**
@@ -844,8 +892,32 @@ private:
      * Memory used: 1.3 MB
      */
     CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
+    /** Block hash of chain tip the last time we reset m_recent_rejects and
+     * m_recent_rejects_reconsiderable. */
     uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
 
+    /**
+     * Filter for:
+     * (1) wtxids of transactions that were recently rejected by the mempool but are
+     * eligible for reconsideration if submitted with other transactions.
+     * (2) packages (see GetPackageHash) we have already rejected before and should not retry.
+     *
+     * Similar to m_recent_rejects, this filter is used to save bandwidth when e.g. all of our peers
+     * have larger mempools and thus lower minimum feerates than us.
+     *
+     * When a transaction's error is TxValidationResult::TX_RECONSIDERABLE (in a package or by
+     * itself), add its wtxid to this filter. When a package fails for any reason, add the combined
+     * hash to this filter.
+     *
+     * Upon receiving an announcement for a transaction, if it exists in this filter, do not
+     * download the txdata. When considering packages, if it exists in this filter, drop it.
+     *
+     * Reset this filter when the chain tip changes.
+     *
+     * Parameters are picked to be the same as m_recent_rejects, with the same rationale.
+     */
+    CRollingBloomFilter m_recent_rejects_reconsiderable GUARDED_BY(::cs_main){120'000, 0.000'001};
+
     /*
      * Filter for transactions that have been recently confirmed.
      * We use this to avoid requesting transactions that have already been
@@ -2194,7 +2266,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta
 //
 
-bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
+bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable)
 {
     if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
         // If the chain tip has changed previously rejected transactions
@@ -2203,12 +2275,15 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
         // txs a second chance.
         hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
         m_recent_rejects.reset();
+        m_recent_rejects_reconsiderable.reset();
     }
 
     const uint256& hash = gtxid.GetHash();
 
     if (m_orphanage.HaveTx(gtxid)) return true;
 
+    if (include_reconsiderable && m_recent_rejects_reconsiderable.contains(hash)) return true;
+
     {
         LOCK(m_recent_confirmed_transactions_mutex);
         if (m_recent_confirmed_transactions.contains(hash)) return true;
@@ -3097,7 +3172,14 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx
         // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
         // for concerns around weakening security of unupgraded nodes
        // if we start doing this too early.
-        m_recent_rejects.insert(ptx->GetWitnessHash().ToUint256());
+        if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) {
+            // If the result is TX_RECONSIDERABLE, add it to m_recent_rejects_reconsiderable
+            // because we should not download or submit this transaction by itself again, but may
+            // submit it as part of a package later.
+            m_recent_rejects_reconsiderable.insert(ptx->GetWitnessHash().ToUint256());
+        } else {
+            m_recent_rejects.insert(ptx->GetWitnessHash().ToUint256());
+        }
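// Reviewer illustration (not part of the patch): the practical difference between the two
// filters. A parent rejected only for a reconsiderable reason (e.g. too-low feerate) lands in
// m_recent_rejects_reconsiderable, so a child arriving later can still trigger Find1P1CPackage()
// and a package submission. A parent in m_recent_rejects is treated as rejected until the chain
// tip changes, and an orphan depending on it is not kept (fRejectedParents in ProcessMessage()).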
         m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
         // If the transaction failed for TX_INPUTS_NOT_STANDARD,
         // then we know that the witness was irrelevant to the policy
         // failure, since this check depends only on the txid
         // (the scriptPubKey being spent is covered by the txid).
         // Add the txid to the reject filter to prevent repeated
         // processing of this transaction in the event that child
         // transactions are later received (resulting in
         // parent-fetching by txid via the orphan-handling logic).
+        // We only add the txid if it differs from the wtxid, to avoid wasting entries in the
+        // rolling bloom filter.
         if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) {
             m_recent_rejects.insert(ptx->GetHash().ToUint256());
             m_txrequest.ForgetTxHash(ptx->GetHash());
@@ -3153,6 +3237,117 @@ void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, c
     }
 }
 
+void PeerManagerImpl::ProcessPackageResult(const Package& package, const PackageMempoolAcceptResult& package_result, const std::vector<NodeId>& senders)
+{
+    AssertLockNotHeld(m_peer_mutex);
+    AssertLockHeld(g_msgproc_mutex);
+    AssertLockHeld(cs_main);
+
+    if (package_result.m_state.IsInvalid()) {
+        m_recent_rejects_reconsiderable.insert(GetPackageHash(package));
+    }
+    // We currently only expect to process 1-parent-1-child packages. Remove if this changes.
+    if (!Assume(package.size() == 2)) return;
+
+    // No package results to look through for PCKG_POLICY or PCKG_MEMPOOL_ERROR
+    if (package_result.m_state.GetResult() == PackageValidationResult::PCKG_POLICY ||
+        package_result.m_state.GetResult() == PackageValidationResult::PCKG_MEMPOOL_ERROR) return;
+
+    // Iterate backwards to erase in-package descendants from the orphanage before they become
+    // relevant in AddChildrenToWorkSet.
+    auto package_iter = package.rbegin();
+    auto senders_iter = senders.rbegin();
+    while (package_iter != package.rend()) {
+        const auto& tx = *package_iter;
+        const NodeId nodeid = *senders_iter;
+        const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())};
+        if (Assume(it_result != package_result.m_tx_results.end())) {
+            const auto& tx_result = it_result->second;
+            switch (tx_result.m_result_type) {
+                case MempoolAcceptResult::ResultType::VALID:
+                {
+                    Assume(tx_result.m_replaced_transactions.has_value());
+                    std::list<CTransactionRef> empty_replacement_list;
+                    ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions.value_or(empty_replacement_list));
+                    break;
+                }
+                case MempoolAcceptResult::ResultType::INVALID:
+                case MempoolAcceptResult::ResultType::DIFFERENT_WITNESS:
+                {
+                    // Don't add to vExtraTxnForCompact, as these transactions should have already been
+                    // added there when added to the orphanage or rejected for TX_RECONSIDERABLE.
+                    // This should be updated if package submission is ever used for transactions
+                    // that haven't already been validated before.
+                    ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*maybe_add_extra_compact_tx=*/false);
+                    break;
+                }
+                case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY:
+                {
+                    // AlreadyHaveTx() should be catching transactions that are already in mempool.
+                    Assume(false);
+                    break;
+                }
+            }
+        }
+        package_iter++;
+        senders_iter++;
+    }
+}
+
+std::optional<PeerManagerImpl::PackageToValidate> PeerManagerImpl::Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid)
+{
+    AssertLockNotHeld(m_peer_mutex);
+    AssertLockHeld(g_msgproc_mutex);
+    AssertLockHeld(cs_main);
+
+    const auto& parent_wtxid{ptx->GetWitnessHash()};
+
+    Assume(m_recent_rejects_reconsiderable.contains(parent_wtxid.ToUint256()));
+
+    // Prefer children from this peer. This helps prevent censorship attempts in which an attacker
+    // sends lots of fake children for the parent, and we (unluckily) keep selecting the fake
+    // children instead of the real one provided by the honest peer.
+    const auto cpfp_candidates_same_peer{m_orphanage.GetChildrenFromSamePeer(ptx, nodeid)};
+
+    // These children should be sorted from newest to oldest. In the (probably uncommon) case
+    // of children that replace each other, this helps us accept the highest feerate (probably the
+    // most recent) one efficiently.
+    for (const auto& child : cpfp_candidates_same_peer) {
+        Package maybe_cpfp_package{ptx, child};
+        if (!m_recent_rejects_reconsiderable.contains(GetPackageHash(maybe_cpfp_package))) {
+            return PeerManagerImpl::PackageToValidate{ptx, child, nodeid, nodeid};
+        }
+    }
+
+    // If no suitable candidate from the same peer is found, also try children that were provided by
+    // a different peer. This is useful because sometimes multiple peers announce both transactions
+    // to us, and we happen to download them from different peers (we wouldn't have known that these
+    // 2 transactions are related). We still want to find 1p1c packages then.
+    //
+    // If we start tracking all announcers of orphans, we can restrict this logic to parent + child
+    // pairs in which both were provided by the same peer, i.e. delete this step.
+    const auto cpfp_candidates_different_peer{m_orphanage.GetChildrenFromDifferentPeer(ptx, nodeid)};
+
+    // Find the first 1p1c that hasn't already been rejected. We randomize the order to not
+    // create a bias that attackers can use to delay package acceptance.
+    //
+    // Create a random permutation of the indices.
+    std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
+    std::iota(tx_indices.begin(), tx_indices.end(), 0);
+    Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
+
+    for (const auto index : tx_indices) {
+        // If we already tried a package and failed for any reason, the combined hash was
+        // cached in m_recent_rejects_reconsiderable.
+        const auto [child_tx, child_sender] = cpfp_candidates_different_peer.at(index);
+        Package maybe_cpfp_package{ptx, child_tx};
+        if (!m_recent_rejects_reconsiderable.contains(GetPackageHash(maybe_cpfp_package))) {
+            return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid, child_sender};
+        }
+    }
+    return std::nullopt;
+}
+
 bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
 {
     AssertLockHeld(g_msgproc_mutex);
@@ -4013,7 +4208,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
                 return;
             }
             const GenTxid gtxid = ToGenTxid(inv);
-            const bool fAlreadyHave = AlreadyHaveTx(gtxid);
+            const bool fAlreadyHave = AlreadyHaveTx(gtxid, /*include_reconsiderable=*/true);
             LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
"have" : "new", pfrom.GetId()); AddKnownTx(*peer, inv.hash); @@ -4318,7 +4513,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // already; and an adversary can already relay us old transactions // (older than our recency filter) if trying to DoS us, without any need // for witness malleation. - if (AlreadyHaveTx(GenTxid::Wtxid(wtxid))) { + if (AlreadyHaveTx(GenTxid::Wtxid(wtxid), /*include_reconsiderable=*/true)) { if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) { // Always relay transactions received from peers with forcerelay // permission, even if they were already in the mempool, allowing @@ -4332,6 +4527,20 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); } } + + if (m_recent_rejects_reconsiderable.contains(wtxid)) { + // When a transaction is already in m_recent_rejects_reconsiderable, we shouldn't submit + // it by itself again. However, look for a matching child in the orphanage, as it is + // possible that they succeed as a package. + LogPrint(BCLog::TXPACKAGES, "found tx %s (wtxid=%s) in reconsiderable rejects, looking for child in orphanage\n", + txid.ToString(), wtxid.ToString()); + if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) { + const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; + LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), + package_result.m_state.IsValid() ? "package accepted" : "package rejected"); + ProcessPackageResult(package_to_validate->m_txns, package_result, package_to_validate->m_senders); + } + } // If a tx is detected by m_recent_rejects it is ignored. Because we haven't // submitted the tx to our mempool, we won't have computed a DoS // score for it or determined exactly why we consider it invalid. @@ -4354,7 +4563,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const TxValidationState& state = result.m_state; if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { - ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions.value()); + Assume(result.m_replaced_transactions.has_value()); + std::list empty_replacement_list; + ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions.value_or(empty_replacement_list)); pfrom.m_last_tx_time = GetTime(); } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) @@ -4371,10 +4582,23 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } std::sort(unique_parents.begin(), unique_parents.end()); unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); + + // Distinguish between parents in m_recent_rejects and m_recent_rejects_reconsiderable. + // We can tolerate having up to 1 parent in m_recent_rejects_reconsiderable since we + // submit 1p1c packages. However, fail immediately if any are in m_recent_rejects. + std::optional rejected_parent_reconsiderable; for (const uint256& parent_txid : unique_parents) { if (m_recent_rejects.contains(parent_txid)) { fRejectedParents = true; break; + } else if (m_recent_rejects_reconsiderable.contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) { + // More than 1 parent in m_recent_rejects_reconsiderable: 1p1c will not be + // sufficient to accept this package, so just give up here. 
+                    if (rejected_parent_reconsiderable.has_value()) {
+                        fRejectedParents = true;
+                        break;
+                    }
+                    rejected_parent_reconsiderable = parent_txid;
                 }
             }
             if (!fRejectedParents) {
@@ -4388,7 +4612,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
                     // protocol for getting all unconfirmed parents.
                     const auto gtxid{GenTxid::Txid(parent_txid)};
                     AddKnownTx(*peer, parent_txid);
-                    if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time);
+                    // Exclude m_recent_rejects_reconsiderable: the missing parent may have been
+                    // previously rejected for being too low feerate. This orphan might CPFP it.
+                    if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) AddTxAnnouncement(pfrom, gtxid, current_time);
                 }
 
                 if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
@@ -4420,6 +4646,19 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
         if (state.IsInvalid()) {
             ProcessInvalidTx(pfrom.GetId(), ptx, state, /*maybe_add_extra_compact_tx=*/true);
         }
+        // When a transaction fails for TX_RECONSIDERABLE, look for a matching child in the
+        // orphanage, as it is possible that they succeed as a package.
+        if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) {
+            LogPrint(BCLog::TXPACKAGES, "tx %s (wtxid=%s) failed but reconsiderable, looking for child in orphanage\n",
+                     txid.ToString(), wtxid.ToString());
+            if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) {
+                const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
+                LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
+                         package_result.m_state.IsValid() ? "package accepted" : "package rejected");
+                ProcessPackageResult(package_to_validate->m_txns, package_result, package_to_validate->m_senders);
+            }
+        }
+
         return;
     }
 
@@ -6029,7 +6268,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                 entry.second.GetHash().ToString(), entry.first);
         }
         for (const GenTxid& gtxid : requestable) {
-            if (!AlreadyHaveTx(gtxid)) {
+            // Exclude m_recent_rejects_reconsiderable: we may be requesting a missing parent
+            // that was previously rejected for being too low feerate.
+            if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) {
                 LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx", gtxid.GetHash().ToString(), pto->GetId());
                 vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
diff --git a/src/policy/packages.cpp b/src/policy/packages.cpp
index 3a63a9fe46..99d2a6d514 100644
--- a/src/policy/packages.cpp
+++ b/src/policy/packages.cpp
@@ -147,3 +147,21 @@ bool IsChildWithParentsTree(const Package& package)
         return true;
     });
 }
+
+uint256 GetPackageHash(const std::vector<CTransactionRef>& transactions)
+{
+    // Create a vector of the wtxids.
+    std::vector<Wtxid> wtxids_copy;
+    std::transform(transactions.cbegin(), transactions.cend(), std::back_inserter(wtxids_copy),
+                   [](const auto& tx){ return tx->GetWitnessHash(); });
+
+    // Sort in ascending order
+    std::sort(wtxids_copy.begin(), wtxids_copy.end(), [](const auto& lhs, const auto& rhs) { return lhs.GetHex() < rhs.GetHex(); });
+
+    // Get sha256 hash of the wtxids concatenated in this order
+    HashWriter hashwriter;
+    for (const auto& wtxid : wtxids_copy) {
+        hashwriter << wtxid;
+    }
+    return hashwriter.GetSHA256();
+}
diff --git a/src/policy/packages.h b/src/policy/packages.h
index 537d8476e2..3050320122 100644
--- a/src/policy/packages.h
+++ b/src/policy/packages.h
@@ -88,4 +88,9 @@ bool IsChildWithParents(const Package& package);
  * other (the package is a "tree").
  */
 bool IsChildWithParentsTree(const Package& package);
+
+/** Get the hash of these transactions' wtxids, concatenated in lexicographical order (treating the
+ * wtxids as little endian encoded uint256, smallest to largest). */
+uint256 GetPackageHash(const std::vector<CTransactionRef>& transactions);
+
 #endif // BITCOIN_POLICY_PACKAGES_H
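// Reviewer illustration (not part of the patch): because GetPackageHash() sorts the wtxids before
// hashing, the result does not depend on the order in which the transactions are passed in, so a
// given parent+child pair always maps to the same entry in m_recent_rejects_reconsiderable. A
// hypothetical unit-test-style check (tx_a and tx_b being any two CTransactionRefs):
//
//     BOOST_CHECK(GetPackageHash({tx_a, tx_b}) == GetPackageHash({tx_b, tx_a}));
//
// The hash also changes whenever either wtxid changes, so a same-txid-different-witness child
// forms a distinct package that is not blocked by an earlier cached rejection.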
diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp
index 5423ba8920..a44f47b00d 100644
--- a/src/test/fuzz/txorphan.cpp
+++ b/src/test/fuzz/txorphan.cpp
@@ -45,6 +45,8 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
     // if true, allow duplicate input when constructing tx
     const bool duplicate_input = fuzzed_data_provider.ConsumeBool();
 
+    CTransactionRef ptx_potential_parent = nullptr;
+
     LIMITED_WHILE(outpoints.size() < 200'000 && fuzzed_data_provider.ConsumeBool(), 10 * DEFAULT_MAX_ORPHAN_TRANSACTIONS)
     {
         // construct transaction
@@ -78,6 +80,27 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
             return new_tx;
         }();
 
+        // Trigger orphanage functions that are called using parents. ptx_potential_parent is a tx we constructed in a
+        // previous loop and potentially the parent of this tx.
+        if (ptx_potential_parent) {
+            // Set up future GetTxToReconsider call.
+            orphanage.AddChildrenToWorkSet(*ptx_potential_parent);
+
+            // Check that all txns returned from GetChildrenFrom* are indeed a direct child of this tx.
+            NodeId peer_id = fuzzed_data_provider.ConsumeIntegral<NodeId>();
+            for (const auto& child : orphanage.GetChildrenFromSamePeer(ptx_potential_parent, peer_id)) {
+                assert(std::any_of(child->vin.cbegin(), child->vin.cend(), [&](const auto& input) {
+                    return input.prevout.hash == ptx_potential_parent->GetHash();
+                }));
+            }
+            for (const auto& [child, peer] : orphanage.GetChildrenFromDifferentPeer(ptx_potential_parent, peer_id)) {
+                assert(std::any_of(child->vin.cbegin(), child->vin.cend(), [&](const auto& input) {
+                    return input.prevout.hash == ptx_potential_parent->GetHash();
+                }));
+                assert(peer != peer_id);
+            }
+        }
+
         // trigger orphanage functions
         LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10 * DEFAULT_MAX_ORPHAN_TRANSACTIONS)
         {
@@ -85,9 +108,6 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
             CallOneOf(
                 fuzzed_data_provider,
-                [&] {
-                    orphanage.AddChildrenToWorkSet(*tx);
-                },
                 [&] {
                     {
                         CTransactionRef ref = orphanage.GetTxToReconsider(peer_id);
@@ -136,6 +156,12 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
                     orphanage.LimitOrphans(limit, limit_orphans_rng);
                     Assert(orphanage.Size() <= limit);
                 });
+        }
+        // Set tx as potential parent to be used for future GetChildren() calls.
+        if (!ptx_potential_parent || fuzzed_data_provider.ConsumeBool()) {
+            ptx_potential_parent = tx;
+        }
+
     }
 }
diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp
index 4231fcc909..b2643cf678 100644
--- a/src/test/orphanage_tests.cpp
+++ b/src/test/orphanage_tests.cpp
@@ -38,14 +38,56 @@ public:
     }
 };
 
-static void MakeNewKeyWithFastRandomContext(CKey& key)
+static void MakeNewKeyWithFastRandomContext(CKey& key, FastRandomContext& rand_ctx = g_insecure_rand_ctx)
 {
     std::vector<unsigned char> keydata;
-    keydata = g_insecure_rand_ctx.randbytes(32);
+    keydata = rand_ctx.randbytes(32);
     key.Set(keydata.data(), keydata.data() + keydata.size(), /*fCompressedIn=*/true);
     assert(key.IsValid());
 }
 
+// Creates a transaction with 2 outputs. Spends all outpoints. If outpoints is empty, spends a random one.
+static CTransactionRef MakeTransactionSpending(const std::vector<COutPoint>& outpoints, FastRandomContext& det_rand)
+{
+    CKey key;
+    MakeNewKeyWithFastRandomContext(key, det_rand);
+    CMutableTransaction tx;
+    // If no outpoints are given, create a random one.
+    if (outpoints.empty()) {
+        tx.vin.emplace_back(Txid::FromUint256(det_rand.rand256()), 0);
+    } else {
+        for (const auto& outpoint : outpoints) {
+            tx.vin.emplace_back(outpoint);
+        }
+    }
+    // Ensure txid != wtxid
+    tx.vin[0].scriptWitness.stack.push_back({1});
+    tx.vout.resize(2);
+    tx.vout[0].nValue = CENT;
+    tx.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey()));
+    tx.vout[1].nValue = 3 * CENT;
+    tx.vout[1].scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(key.GetPubKey()));
+    return MakeTransactionRef(tx);
+}
+
+static bool EqualTxns(const std::set<CTransactionRef>& set_txns, const std::vector<CTransactionRef>& vec_txns)
+{
+    if (vec_txns.size() != set_txns.size()) return false;
+    for (const auto& tx : vec_txns) {
+        if (!set_txns.contains(tx)) return false;
+    }
+    return true;
+}
+static bool EqualTxns(const std::set<CTransactionRef>& set_txns,
+                      const std::vector<std::pair<CTransactionRef, NodeId>>& vec_txns)
+{
+    if (vec_txns.size() != set_txns.size()) return false;
+    for (const auto& [tx, nodeid] : vec_txns) {
+        if (!set_txns.contains(tx)) return false;
+    }
+    return true;
+}
+
 BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
 {
     // This test had non-deterministic coverage due to
@@ -138,4 +180,105 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
     BOOST_CHECK(orphanage.CountOrphans() == 0);
 }
 
+BOOST_AUTO_TEST_CASE(get_children)
+{
+    FastRandomContext det_rand{true};
+    std::vector<COutPoint> empty_outpoints;
+
+    auto parent1 = MakeTransactionSpending(empty_outpoints, det_rand);
+    auto parent2 = MakeTransactionSpending(empty_outpoints, det_rand);
+
+    // Make sure these parents have different txids otherwise this test won't make sense.
+    while (parent1->GetHash() == parent2->GetHash()) {
+        parent2 = MakeTransactionSpending(empty_outpoints, det_rand);
+    }
+
+    // Create children to go into orphanage.
+    auto child_p1n0 = MakeTransactionSpending({{parent1->GetHash(), 0}}, det_rand);
+    auto child_p2n1 = MakeTransactionSpending({{parent2->GetHash(), 1}}, det_rand);
+    // Spends the same tx twice. Should not cause duplicates.
+    auto child_p1n0_p1n1 = MakeTransactionSpending({{parent1->GetHash(), 0}, {parent1->GetHash(), 1}}, det_rand);
+    // Spends the same outpoint as previous tx. Should still be returned; don't assume outpoints are unique.
+    auto child_p1n0_p2n0 = MakeTransactionSpending({{parent1->GetHash(), 0}, {parent2->GetHash(), 0}}, det_rand);
+
+    const NodeId node1{1};
+    const NodeId node2{2};
+
+    // All orphans provided by node1
+    {
+        TxOrphanage orphanage;
+        BOOST_CHECK(orphanage.AddTx(child_p1n0, node1));
+        BOOST_CHECK(orphanage.AddTx(child_p2n1, node1));
+        BOOST_CHECK(orphanage.AddTx(child_p1n0_p1n1, node1));
+        BOOST_CHECK(orphanage.AddTx(child_p1n0_p2n0, node1));
+
+        std::set<CTransactionRef> expected_parent1_children{child_p1n0, child_p1n0_p2n0, child_p1n0_p1n1};
+        std::set<CTransactionRef> expected_parent2_children{child_p2n1, child_p1n0_p2n0};
+
+        BOOST_CHECK(EqualTxns(expected_parent1_children, orphanage.GetChildrenFromSamePeer(parent1, node1)));
+        BOOST_CHECK(EqualTxns(expected_parent2_children, orphanage.GetChildrenFromSamePeer(parent2, node1)));
+
+        BOOST_CHECK(EqualTxns(expected_parent1_children, orphanage.GetChildrenFromDifferentPeer(parent1, node2)));
+        BOOST_CHECK(EqualTxns(expected_parent2_children, orphanage.GetChildrenFromDifferentPeer(parent2, node2)));
+
+        // The peer must match
+        BOOST_CHECK(orphanage.GetChildrenFromSamePeer(parent1, node2).empty());
+        BOOST_CHECK(orphanage.GetChildrenFromSamePeer(parent2, node2).empty());
+
+        // There shouldn't be any children of this tx in the orphanage
+        BOOST_CHECK(orphanage.GetChildrenFromSamePeer(child_p1n0_p2n0, node1).empty());
+        BOOST_CHECK(orphanage.GetChildrenFromSamePeer(child_p1n0_p2n0, node2).empty());
+        BOOST_CHECK(orphanage.GetChildrenFromDifferentPeer(child_p1n0_p2n0, node1).empty());
+        BOOST_CHECK(orphanage.GetChildrenFromDifferentPeer(child_p1n0_p2n0, node2).empty());
+    }
+
+    // Orphans provided by node1 and node2
+    {
+        TxOrphanage orphanage;
+        BOOST_CHECK(orphanage.AddTx(child_p1n0, node1));
+        BOOST_CHECK(orphanage.AddTx(child_p2n1, node1));
+        BOOST_CHECK(orphanage.AddTx(child_p1n0_p1n1, node2));
+        BOOST_CHECK(orphanage.AddTx(child_p1n0_p2n0, node2));
+
+        // +----------------+---------------+----------------------------------+
+        // |                | sender=node1  | sender=node2                     |
+        // +----------------+---------------+----------------------------------+
+        // | spends parent1 | child_p1n0    | child_p1n0_p1n1, child_p1n0_p2n0 |
+        // | spends parent2 | child_p2n1    | child_p1n0_p2n0                  |
+        // +----------------+---------------+----------------------------------+
+
+        // Children of parent1 from node1:
+        {
+            std::set<CTransactionRef> expected_parent1_node1{child_p1n0};
+
+            BOOST_CHECK(EqualTxns(expected_parent1_node1, orphanage.GetChildrenFromSamePeer(parent1, node1)));
+            BOOST_CHECK(EqualTxns(expected_parent1_node1, orphanage.GetChildrenFromDifferentPeer(parent1, node2)));
+        }
+
+        // Children of parent2 from node1:
+        {
+            std::set<CTransactionRef> expected_parent2_node1{child_p2n1};
+
+            BOOST_CHECK(EqualTxns(expected_parent2_node1, orphanage.GetChildrenFromSamePeer(parent2, node1)));
+            BOOST_CHECK(EqualTxns(expected_parent2_node1, orphanage.GetChildrenFromDifferentPeer(parent2, node2)));
+        }
+
+        // Children of parent1 from node2:
+        {
+            std::set<CTransactionRef> expected_parent1_node2{child_p1n0_p1n1, child_p1n0_p2n0};
+
+            BOOST_CHECK(EqualTxns(expected_parent1_node2, orphanage.GetChildrenFromSamePeer(parent1, node2)));
+            BOOST_CHECK(EqualTxns(expected_parent1_node2, orphanage.GetChildrenFromDifferentPeer(parent1, node1)));
+        }
+
+        // Children of parent2 from node2:
+        {
+            std::set<CTransactionRef> expected_parent2_node2{child_p1n0_p2n0};
+
+            BOOST_CHECK(EqualTxns(expected_parent2_node2, orphanage.GetChildrenFromSamePeer(parent2, node2)));
+            BOOST_CHECK(EqualTxns(expected_parent2_node2, orphanage.GetChildrenFromDifferentPeer(parent2, node1)));
+        }
+    }
+}
+
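// Reviewer note (not part of the patch): EqualTxns() compares sizes before membership, so the
// checks above also implicitly assert that a child spending the same parent through two inputs
// (child_p1n0_p1n1) or spending a previously-seen outpoint (child_p1n0_p2n0) is returned exactly
// once by GetChildrenFromSamePeer()/GetChildrenFromDifferentPeer(); a duplicate entry would make
// the returned vector larger than the expected set and fail the comparison.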
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp
index b948ea8acb..8112f5f685 100644
--- a/src/test/txpackage_tests.cpp
+++ b/src/test/txpackage_tests.cpp
@@ -8,9 +8,12 @@
 #include
 #include
 #include