forked from bitcoin/bitcoin
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtxorphanage.cpp
More file actions
722 lines (617 loc) · 34.4 KB
/
txorphanage.cpp
File metadata and controls
722 lines (617 loc) · 34.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
// Copyright (c) 2021-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <node/txorphanage.h>
#include <consensus/validation.h>
#include <logging.h>
#include <policy/policy.h>
#include <primitives/transaction.h>
#include <util/feefrac.h>
#include <util/time.h>
#include <util/hasher.h>
#include <boost/multi_index/indexed_by.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/tag.hpp>
#include <boost/multi_index_container.hpp>
#include <cassert>
#include <cmath>
#include <unordered_map>
namespace node {
/** Minimum NodeId for lower_bound lookups (in practice, NodeIds start at 0). Used as a
 * sentinel to find the first announcement of a wtxid regardless of which peer announced it. */
static constexpr NodeId MIN_PEER{std::numeric_limits<NodeId>::min()};
/** Maximum NodeId for upper_bound lookups. Used as a sentinel to find the position just past
 * the last announcement of a wtxid. */
static constexpr NodeId MAX_PEER{std::numeric_limits<NodeId>::max()};
/** Concrete TxOrphanage backed by a boost::multi_index container of announcements,
 * indexed both by (wtxid, announcer) and by (announcer, reconsider flag, recency).
 * Per-peer resource accounting is kept in m_peer_orphanage_info so trimming can target
 * the peers consuming the most resources relative to their allowance. */
class TxOrphanageImpl final : public TxOrphanage {
    // Type alias for sequence numbers
    using SequenceNumber = uint64_t;

    /** Global sequence number, increment each time an announcement is added. */
    SequenceNumber m_current_sequence{0};

    /** One orphan announcement. Each announcement (i.e. combination of wtxid, nodeid) is unique. There may be multiple
     * announcements for the same tx, and multiple transactions with the same txid but different wtxid are possible. */
    struct Announcement
    {
        /** The orphan transaction (shared with any other announcements of the same tx). */
        const CTransactionRef m_tx;
        /** Which peer announced this tx */
        const NodeId m_announcer;
        /** What order this transaction entered the orphanage. */
        const SequenceNumber m_entry_sequence;
        /** Whether this tx should be reconsidered. Always starts out false. A peer's workset is the collection of all
         * announcements with m_reconsider=true. */
        bool m_reconsider{false};

        Announcement(const CTransactionRef& tx, NodeId peer, SequenceNumber seq) :
            m_tx{tx}, m_announcer{peer}, m_entry_sequence{seq}
        { }

        /** Get an approximation for "memory usage". The total memory is a function of the memory used to store the
         * transaction itself, each entry in m_orphans, and each entry in m_outpoint_to_orphan_it. We use weight because
         * it is often higher than the actual memory usage of the transaction. This metric conveniently encompasses
         * m_outpoint_to_orphan_it usage since input data does not get the witness discount, and makes it easier to
         * reason about each peer's limits using well-understood transaction attributes. */
        TxOrphanage::Usage GetMemUsage() const {
            return GetTransactionWeight(*m_tx);
        }

        /** Get an approximation of how much this transaction contributes to latency in EraseForBlock and EraseForPeer.
         * The computation time is a function of the number of entries in m_orphans (thus 1 per announcement) and the
         * number of entries in m_outpoint_to_orphan_it (thus an additional 1 for every 10 inputs). Transactions with a
         * small number of inputs (9 or fewer) are counted as 1 to make it easier to reason about each peer's limits in
         * terms of "normal" transactions. */
        TxOrphanage::Count GetLatencyScore() const {
            return 1 + (m_tx->vin.size() / 10);
        }
    };

    // Index by wtxid, then peer
    struct ByWtxid {};
    using ByWtxidView = std::tuple<Wtxid, NodeId>;
    /** Key extractor producing the (wtxid, announcer) sort key for the ByWtxid index. */
    struct WtxidExtractor
    {
        using result_type = ByWtxidView;
        result_type operator()(const Announcement& ann) const
        {
            return ByWtxidView{ann.m_tx->GetWitnessHash(), ann.m_announcer};
        }
    };

    // Sort by peer, then by whether it is ready to reconsider, then by recency.
    struct ByPeer {};
    using ByPeerView = std::tuple<NodeId, bool, SequenceNumber>;
    /** Key extractor producing the (announcer, reconsider, entry sequence) sort key for the ByPeer index. */
    struct ByPeerViewExtractor {
        using result_type = ByPeerView;
        result_type operator()(const Announcement& ann) const
        {
            return ByPeerView{ann.m_announcer, ann.m_reconsider, ann.m_entry_sequence};
        }
    };

    // Both indices are ordered_unique: announcements are unique by (wtxid, peer), and
    // (peer, reconsider, sequence) is unique because sequence numbers never repeat.
    struct OrphanIndices final : boost::multi_index::indexed_by<
        boost::multi_index::ordered_unique<boost::multi_index::tag<ByWtxid>, WtxidExtractor>,
        boost::multi_index::ordered_unique<boost::multi_index::tag<ByPeer>, ByPeerViewExtractor>
    >{};

    using AnnouncementMap = boost::multi_index::multi_index_container<Announcement, OrphanIndices>;
    template<typename Tag>
    using Iter = typename AnnouncementMap::index<Tag>::type::iterator;

    /** All announcements, i.e. all unique (wtxid, peer) pairs. */
    AnnouncementMap m_orphans;

    /** Fixed global latency-score budget (see MaxGlobalLatencyScore()). */
    const TxOrphanage::Count m_max_global_latency_score{DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE};
    /** Per-peer memory reservation (see ReservedPeerUsage()); the global usage cap scales with peer count. */
    const TxOrphanage::Usage m_reserved_usage_per_peer{DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER};

    /** Number of unique orphans by wtxid. Less than or equal to the number of entries in m_orphans. */
    TxOrphanage::Count m_unique_orphans{0};

    /** Memory used by orphans (see Announcement::GetMemUsage()), deduplicated by wtxid. */
    TxOrphanage::Usage m_unique_orphan_usage{0};

    /** The sum of each unique transaction's latency scores including the inputs only (see Announcement::GetLatencyScore
     * but subtract 1 for the announcements themselves). The total orphanage's latency score is given by this value +
     * the number of entries in m_orphans. */
    TxOrphanage::Count m_unique_rounded_input_scores{0};

    /** Index from the parents' outputs to wtxids that exist in m_orphans. Used to find children of
     * a transaction that can be reconsidered and to remove entries that conflict with a block.*/
    std::unordered_map<COutPoint, std::set<Wtxid>, SaltedOutpointHasher> m_outpoint_to_orphan_it;

    /** Aggregate resource accounting for one peer across all of its announcements. */
    struct PeerDoSInfo {
        TxOrphanage::Usage m_total_usage{0};
        TxOrphanage::Count m_count_announcements{0};
        TxOrphanage::Count m_total_latency_score{0};
        bool operator==(const PeerDoSInfo& other) const
        {
            return m_total_usage == other.m_total_usage &&
                   m_count_announcements == other.m_count_announcements &&
                   m_total_latency_score == other.m_total_latency_score;
        }
        /** Account for one more announcement from this peer. */
        void Add(const Announcement& ann)
        {
            m_total_usage += ann.GetMemUsage();
            m_total_latency_score += ann.GetLatencyScore();
            m_count_announcements += 1;
        }
        /** Remove one announcement's contribution. Returns true when the peer has no
         * announcements left, i.e. its entry can be erased from m_peer_orphanage_info. */
        bool Subtract(const Announcement& ann)
        {
            Assume(m_total_usage >= ann.GetMemUsage());
            Assume(m_total_latency_score >= ann.GetLatencyScore());
            Assume(m_count_announcements >= 1);

            m_total_usage -= ann.GetMemUsage();
            m_total_latency_score -= ann.GetLatencyScore();
            m_count_announcements -= 1;
            return m_count_announcements == 0;
        }
        /** There are 2 DoS scores:
         * - Latency score (ratio of total latency score / max allowed latency score)
         * - Memory score (ratio of total memory usage / max allowed memory usage).
         *
         * If the peer is using more than the allowed for either resource, its DoS score is > 1.
         * A peer having a DoS score > 1 does not necessarily mean that something is wrong, since we
         * do not trim unless the orphanage exceeds global limits, but it means that this peer will
         * be selected for trimming sooner. If the global latency score or global memory usage
         * limits are exceeded, it must be that there is a peer whose DoS score > 1. */
        FeeFrac GetDosScore(TxOrphanage::Count max_peer_latency_score, TxOrphanage::Usage max_peer_bytes) const
        {
            assert(max_peer_latency_score > 0);
            assert(max_peer_bytes > 0);
            const FeeFrac cpu_score(m_total_latency_score, max_peer_latency_score);
            const FeeFrac mem_score(m_total_usage, max_peer_bytes);
            return std::max<FeeFrac>(cpu_score, mem_score);
        }
    };

    /** Store per-peer statistics. Used to determine each peer's DoS score. The size of this map is used to determine the
     * number of peers and thus global {latency score, memory} limits. */
    std::unordered_map<NodeId, PeerDoSInfo> m_peer_orphanage_info;

    /** Erase from m_orphans and update m_peer_orphanage_info. */
    template<typename Tag>
    void Erase(Iter<Tag> it);

    /** Check if there is exactly one announcement with the same wtxid as it. */
    bool IsUnique(Iter<ByWtxid> it) const;

    /** Check if the orphanage needs trimming. */
    bool NeedsTrim() const;

public:
    TxOrphanageImpl() = default;
    TxOrphanageImpl(Count max_global_ann, Usage reserved_peer_usage) :
        m_max_global_latency_score{max_global_ann},
        m_reserved_usage_per_peer{reserved_peer_usage}
    {}
    ~TxOrphanageImpl() noexcept override = default;

    TxOrphanage::Count CountAnnouncements() const override;
    TxOrphanage::Count CountUniqueOrphans() const override;
    TxOrphanage::Count AnnouncementsFromPeer(NodeId peer) const override;
    TxOrphanage::Count LatencyScoreFromPeer(NodeId peer) const override;
    TxOrphanage::Usage UsageByPeer(NodeId peer) const override;
    TxOrphanage::Count MaxGlobalLatencyScore() const override;
    TxOrphanage::Count TotalLatencyScore() const override;
    TxOrphanage::Usage ReservedPeerUsage() const override;

    /** Maximum allowed (deduplicated) latency score for all transactions (see Announcement::GetLatencyScore()). Dynamic
     * based on number of peers. Each peer has an equal amount, but the global maximum latency score stays constant. The
     * number of peers times MaxPeerLatencyScore() (rounded) adds up to MaxGlobalLatencyScore(). As long as every peer's
     * m_total_latency_score / MaxPeerLatencyScore() < 1, MaxGlobalLatencyScore() is not exceeded. */
    TxOrphanage::Count MaxPeerLatencyScore() const override;

    /** Maximum allowed (deduplicated) memory usage for all transactions (see Announcement::GetMemUsage()). Dynamic based
     * on number of peers. More peers means more allowed memory usage. The number of peers times ReservedPeerUsage()
     * adds up to MaxGlobalUsage(). As long as every peer's m_total_usage / ReservedPeerUsage() < 1, MaxGlobalUsage() is
     * not exceeded. */
    TxOrphanage::Usage MaxGlobalUsage() const override;

    bool AddTx(const CTransactionRef& tx, NodeId peer) override;
    bool AddAnnouncer(const Wtxid& wtxid, NodeId peer) override;
    CTransactionRef GetTx(const Wtxid& wtxid) const override;
    bool HaveTx(const Wtxid& wtxid) const override;
    bool HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const override;
    CTransactionRef GetTxToReconsider(NodeId peer) override;
    bool EraseTx(const Wtxid& wtxid) override;
    void EraseForPeer(NodeId peer) override;
    void EraseForBlock(const CBlock& block) override;
    void LimitOrphans() override;
    std::vector<std::pair<Wtxid, NodeId>> AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng) override;
    bool HaveTxToReconsider(NodeId peer) override;
    std::vector<CTransactionRef> GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const override;
    size_t Size() const override { return m_unique_orphans; }
    std::vector<OrphanTxBase> GetOrphanTransactions() const override;
    TxOrphanage::Usage TotalOrphanUsage() const override;
    void SanityCheck() const override;
};
/** Erase one announcement (via an iterator into any index) and keep all bookkeeping
 * consistent: the per-peer stats in m_peer_orphanage_info, the deduplicated counters,
 * and (if this was the last announcement of the tx) m_outpoint_to_orphan_it. */
template<typename Tag>
void TxOrphanageImpl::Erase(Iter<Tag> it)
{
    // Update m_peer_orphanage_info and clean up entries if they point to an empty struct.
    // This means peers that are not storing any orphans do not have an entry in
    // m_peer_orphanage_info (they can be added back later if they announce another orphan) and
    // ensures disconnected peers are not tracked forever.
    auto peer_it = m_peer_orphanage_info.find(it->m_announcer);
    Assume(peer_it != m_peer_orphanage_info.end());
    if (peer_it->second.Subtract(*it)) m_peer_orphanage_info.erase(peer_it);

    // If this is the only announcement of this wtxid, the tx itself is leaving the
    // orphanage: update the deduplicated totals and drop its outpoint links.
    if (IsUnique(m_orphans.project<ByWtxid>(it))) {
        m_unique_orphans -= 1;
        m_unique_rounded_input_scores -= it->GetLatencyScore() - 1;
        m_unique_orphan_usage -= it->GetMemUsage();

        // Remove references in m_outpoint_to_orphan_it
        const auto& wtxid{it->m_tx->GetWitnessHash()};
        for (const auto& input : it->m_tx->vin) {
            auto it_prev = m_outpoint_to_orphan_it.find(input.prevout);
            if (it_prev != m_outpoint_to_orphan_it.end()) {
                it_prev->second.erase(wtxid);
                // Clean up keys if they point to an empty set.
                if (it_prev->second.empty()) {
                    m_outpoint_to_orphan_it.erase(it_prev);
                }
            }
        }
    }
    m_orphans.get<Tag>().erase(it);
}
/** Return true iff this is the only announcement with its wtxid. Announcements sharing
 * a wtxid are adjacent in the ByWtxid index, so comparing against the immediate
 * neighbors suffices. */
bool TxOrphanageImpl::IsUnique(Iter<ByWtxid> it) const
{
    auto& index = m_orphans.get<ByWtxid>();
    if (it == index.end()) return false;
    const auto& wtxid{it->m_tx->GetWitnessHash()};
    // Duplicate directly after?
    const auto successor = std::next(it);
    if (successor != index.end() && successor->m_tx->GetWitnessHash() == wtxid) return false;
    // Duplicate directly before?
    if (it != index.begin() && std::prev(it)->m_tx->GetWitnessHash() == wtxid) return false;
    return true;
}
/** Total (non-deduplicated) memory usage attributed to this peer's announcements. */
TxOrphanage::Usage TxOrphanageImpl::UsageByPeer(NodeId peer) const
{
    if (auto it = m_peer_orphanage_info.find(peer); it != m_peer_orphanage_info.end()) {
        return it->second.m_total_usage;
    }
    return 0;
}
/** Number of (wtxid, peer) announcements, i.e. every entry in m_orphans. */
TxOrphanage::Count TxOrphanageImpl::CountAnnouncements() const
{
    return m_orphans.size();
}
/** Memory usage of all orphans, deduplicated by wtxid (cached counter). */
TxOrphanage::Usage TxOrphanageImpl::TotalOrphanUsage() const
{
    return m_unique_orphan_usage;
}
/** Number of distinct orphan transactions by wtxid (cached counter). */
TxOrphanage::Count TxOrphanageImpl::CountUniqueOrphans() const
{
    return m_unique_orphans;
}
/** Number of announcements made by this peer, or 0 if the peer is not tracked. */
TxOrphanage::Count TxOrphanageImpl::AnnouncementsFromPeer(NodeId peer) const
{
    const auto entry = m_peer_orphanage_info.find(peer);
    if (entry == m_peer_orphanage_info.end()) return 0;
    return entry->second.m_count_announcements;
}
/** Total latency score of this peer's announcements, or 0 if the peer is not tracked. */
TxOrphanage::Count TxOrphanageImpl::LatencyScoreFromPeer(NodeId peer) const
{
    const auto entry = m_peer_orphanage_info.find(peer);
    if (entry == m_peer_orphanage_info.end()) return 0;
    return entry->second.m_total_latency_score;
}
/** Add a new announcement (tx, peer). Returns true iff the transaction is brand new to the
 * orphanage (no prior announcement for this wtxid existed). Returns false if the tx exceeds
 * the maximum standard weight, if this exact (wtxid, peer) announcement already exists, or
 * if the tx was already present via another peer (the new announcer is still recorded). */
bool TxOrphanageImpl::AddTx(const CTransactionRef& tx, NodeId peer)
{
    const auto& wtxid{tx->GetWitnessHash()};
    const auto& txid{tx->GetHash()};

    // Ignore transactions above max standard size to avoid a send-big-orphans memory exhaustion attack.
    TxOrphanage::Usage sz = GetTransactionWeight(*tx);
    if (sz > MAX_STANDARD_TX_WEIGHT) {
        LogDebug(BCLog::TXPACKAGES, "ignoring large orphan tx (size: %u, txid: %s, wtxid: %s)\n", sz, txid.ToString(), wtxid.ToString());
        return false;
    }

    // We will return false if the tx already exists under a different peer.
    // Note: must be checked before the emplace below changes the answer.
    const bool brand_new{!HaveTx(wtxid)};

    auto [iter, inserted] = m_orphans.get<ByWtxid>().emplace(tx, peer, m_current_sequence);
    // If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false.
    if (!inserted) return false;
    // Only consume a sequence number for a successful insertion.
    ++m_current_sequence;

    auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
    peer_info.Add(*iter);

    // Add links in m_outpoint_to_orphan_it
    if (brand_new) {
        for (const auto& input : tx->vin) {
            auto& wtxids_for_prevout = m_outpoint_to_orphan_it.try_emplace(input.prevout).first->second;
            wtxids_for_prevout.emplace(wtxid);
        }
        // A brand new tx also contributes to the deduplicated totals.
        m_unique_orphans += 1;
        m_unique_orphan_usage += iter->GetMemUsage();
        m_unique_rounded_input_scores += iter->GetLatencyScore() - 1;

        LogDebug(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n",
                 txid.ToString(), wtxid.ToString(), sz, m_orphans.size(), m_outpoint_to_orphan_it.size());
        Assume(IsUnique(iter));
    } else {
        LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n",
                 peer, txid.ToString(), wtxid.ToString());
        Assume(!IsUnique(iter));
    }
    return brand_new;
}
/** Record an additional announcer for a transaction already in the orphanage. Returns
 * false if the tx is unknown (we have no tx data to copy from an existing announcement)
 * or if this (wtxid, peer) announcement already exists. */
bool TxOrphanageImpl::AddAnnouncer(const Wtxid& wtxid, NodeId peer)
{
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();

    auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
    // Do nothing if this transaction isn't already present. We can't create an entry if we don't
    // have the tx data.
    if (it == index_by_wtxid.end()) return false;
    if (it->m_tx->GetWitnessHash() != wtxid) return false;

    // Add another announcement, copying the CTransactionRef from one that already exists.
    const auto& ptx = it->m_tx;
    auto [iter, inserted] = index_by_wtxid.emplace(ptx, peer, m_current_sequence);
    // If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false.
    if (!inserted) return false;
    // Only consume a sequence number for a successful insertion.
    ++m_current_sequence;

    auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
    peer_info.Add(*iter);

    const auto& txid = ptx->GetHash();
    LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n",
             peer, txid.ToString(), wtxid.ToString());
    // The tx already existed, so this announcement cannot be the only one.
    Assume(!IsUnique(iter));
    return true;
}
/** Erase every announcement of this wtxid (and thus the transaction itself).
 * Returns false if no announcement for the wtxid exists. */
bool TxOrphanageImpl::EraseTx(const Wtxid& wtxid)
{
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();

    auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
    if (it == index_by_wtxid.end() || it->m_tx->GetWitnessHash() != wtxid) return false;

    // Announcements for the same wtxid are contiguous in the ByWtxid index;
    // [it, it_end) spans all of them, regardless of announcer.
    auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER});

    unsigned int num_ann{0};
    // Copy the txid now; the tx data is gone once the last announcement is erased.
    const auto txid = it->m_tx->GetHash();
    while (it != it_end) {
        Assume(it->m_tx->GetWitnessHash() == wtxid);
        // Post-increment: advance before Erase invalidates the iterator.
        Erase<ByWtxid>(it++);
        num_ann += 1;
    }

    LogDebug(BCLog::TXPACKAGES, "removed orphan tx %s (wtxid=%s) (%u announcements)\n", txid.ToString(), wtxid.ToString(), num_ann);
    return true;
}
/** Erase all entries by this peer. Transactions announced only by this peer leave the
 * orphanage entirely; transactions with other announcers remain. */
void TxOrphanageImpl::EraseForPeer(NodeId peer)
{
    auto& index_by_peer = m_orphans.get<ByPeer>();
    // All of this peer's announcements are contiguous in the ByPeer index, starting at
    // (peer, reconsider=false, sequence=0).
    auto it = index_by_peer.lower_bound(ByPeerView{peer, false, 0});
    if (it == index_by_peer.end() || it->m_announcer != peer) return;

    unsigned int num_ann{0};
    while (it != index_by_peer.end() && it->m_announcer == peer) {
        // Delete item, cleaning up m_outpoint_to_orphan_it iff this entry is unique by wtxid.
        // Post-increment: advance before Erase invalidates the iterator.
        Erase<ByPeer>(it++);
        num_ann += 1;
    }
    // Erase() removes the peer's accounting entry once its last announcement is subtracted.
    Assume(!m_peer_orphanage_info.contains(peer));

    if (num_ann > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", num_ann, peer);
}
/** If the data structure needs trimming, evicts announcements by selecting the DoSiest peer and evicting its oldest
 * announcement (sorting non-reconsiderable orphans first, to give reconsiderable orphans a greater chance of being
 * processed). Does nothing if no global limits are exceeded. This eviction strategy effectively "reserves" an
 * amount of announcements and space for each peer. The reserved amount is protected from eviction even if there
 * are peers spamming the orphanage.
 */
void TxOrphanageImpl::LimitOrphans()
{
    if (!NeedsTrim()) return;
    const auto original_unique_txns{CountUniqueOrphans()};

    // Even though it's possible for MaxPeerLatencyScore to increase within this call to LimitOrphans
    // (e.g. if a peer's orphans are removed entirely, changing the number of peers), use consistent limits throughout.
    const auto max_ann{MaxPeerLatencyScore()};
    const auto max_mem{ReservedPeerUsage()};

    // We have exceeded the global limit(s). Now, identify who is using too much and evict their orphans.
    // Create a heap of pairs (NodeId, DoS score), sorted by descending DoS score.
    std::vector<std::pair<NodeId, FeeFrac>> heap_peer_dos;
    heap_peer_dos.reserve(m_peer_orphanage_info.size());
    for (const auto& [nodeid, entry] : m_peer_orphanage_info) {
        // Performance optimization: only consider peers with a DoS score > 1.
        const auto dos_score = entry.GetDosScore(max_ann, max_mem);
        if (dos_score >> FeeFrac{1, 1}) {
            heap_peer_dos.emplace_back(nodeid, dos_score);
        }
    }
    static constexpr auto compare_score = [](const auto& left, const auto& right) {
        if (left.second != right.second) return left.second < right.second;
        // Tiebreak by considering the more recent peer (higher NodeId) to be worse.
        return left.first < right.first;
    };
    std::make_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);

    unsigned int num_erased{0};
    // This outer loop finds the peer with the highest DoS score, which is a fraction of {usage, announcements} used
    // over the respective allowances. We continue until the orphanage is within global limits. That means some peers
    // might still have a DoS score > 1 at the end.
    // Note: if ratios are the same, FeeFrac tiebreaks by denominator. In practice, since the CPU denominator (number of
    // announcements) is always lower, this means that a peer with only high number of announcements will be targeted
    // before a peer using a lot of memory, even if they have the same ratios.
    do {
        Assume(!heap_peer_dos.empty());
        // This is a max-heap, so the worst peer is at the front. pop_heap()
        // moves it to the back, and the next worst peer is moved to the front.
        std::pop_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
        const auto [worst_peer, dos_score] = std::move(heap_peer_dos.back());
        heap_peer_dos.pop_back();

        // If needs trim, then at least one peer has a DoS score higher than 1.
        Assume(dos_score >> (FeeFrac{1, 1}));

        auto it_worst_peer = m_peer_orphanage_info.find(worst_peer);
        // This inner loop trims until this peer is no longer the DoSiest one or has a score within 1. The score 1 is
        // just a conservative fallback: once the last peer goes below ratio 1, NeedsTrim() will return false anyway.
        // We evict the oldest announcement(s) from this peer, sorting non-reconsiderable before reconsiderable.
        // The number of inner loop iterations is bounded by the total number of announcements.
        const auto& dos_threshold = heap_peer_dos.empty() ? FeeFrac{1, 1} : heap_peer_dos.front().second;

        auto it_ann = m_orphans.get<ByPeer>().lower_bound(ByPeerView{worst_peer, false, 0});
        while (NeedsTrim()) {
            // Bugfix: check for end() *before* dereferencing; the announcer check used to come
            // first, reading through a potentially past-the-end iterator.
            if (!Assume(it_ann != m_orphans.get<ByPeer>().end())) break;
            if (!Assume(it_ann->m_announcer == worst_peer)) break;
            Erase<ByPeer>(it_ann++);
            num_erased += 1;
            // If we erased the last orphan from this peer, it_worst_peer will be invalidated.
            it_worst_peer = m_peer_orphanage_info.find(worst_peer);
            if (it_worst_peer == m_peer_orphanage_info.end() || it_worst_peer->second.GetDosScore(max_ann, max_mem) <= dos_threshold) break;
        }
        if (!NeedsTrim()) break;

        // Unless this peer is empty, put it back in the heap so we continue to consider evicting its orphans.
        // We may select this peer for evictions again if there are multiple DoSy peers.
        if (it_worst_peer != m_peer_orphanage_info.end() && it_worst_peer->second.m_count_announcements > 0) {
            heap_peer_dos.emplace_back(worst_peer, it_worst_peer->second.GetDosScore(max_ann, max_mem));
            std::push_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
        }
    } while (true);

    const auto remaining_unique_orphans{CountUniqueOrphans()};
    LogDebug(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx (%u announcements)\n", original_unique_txns - remaining_unique_orphans, num_erased);
}
/** For each output of tx, find the orphans spending it and mark one randomly-chosen
 * announcement of each such orphan as reconsiderable. Returns the (wtxid, peer) pairs
 * that were newly flagged (already-flagged announcements are not returned again). */
std::vector<std::pair<Wtxid, NodeId>> TxOrphanageImpl::AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng)
{
    std::vector<std::pair<Wtxid, NodeId>> ret;
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();

    for (unsigned int i = 0; i < tx.vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& wtxid : it_by_prev->second) {
                // Belt and suspenders, each entry in m_outpoint_to_orphan_it should always have at least 1 announcement.
                auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
                if (!Assume(it != index_by_wtxid.end())) continue;

                // Select a random peer to assign orphan processing, reducing wasted work if the orphan is still missing
                // inputs. However, we don't want to create an issue in which the assigned peer can purposefully stop us
                // from processing the orphan by disconnecting.
                auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER});
                const auto num_announcers{std::distance(it, it_end)};
                if (!Assume(num_announcers > 0)) continue;
                std::advance(it, rng.randrange(num_announcers));
                if (!Assume(it->m_tx->GetWitnessHash() == wtxid)) break;

                // Mark this orphan as ready to be reconsidered.
                // multi_index requires modify() since m_reconsider participates in the ByPeer key.
                static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = true; };
                if (!it->m_reconsider) {
                    index_by_wtxid.modify(it, mark_reconsidered_modifier);
                    ret.emplace_back(wtxid, it->m_announcer);
                }
                LogDebug(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n",
                         it->m_tx->GetHash().ToString(), it->m_tx->GetWitnessHash().ToString(), it->m_announcer);
            }
        }
    }
    return ret;
}
/** Return whether any announcement for this wtxid exists, from any peer. */
bool TxOrphanageImpl::HaveTx(const Wtxid& wtxid) const
{
    const auto& index = m_orphans.get<ByWtxid>();
    const auto first_match = index.lower_bound(ByWtxidView{wtxid, MIN_PEER});
    if (first_match == index.end()) return false;
    return first_match->m_tx->GetWitnessHash() == wtxid;
}
/** Return the transaction with this wtxid, or nullptr if not present. */
CTransactionRef TxOrphanageImpl::GetTx(const Wtxid& wtxid) const
{
    const auto& index = m_orphans.get<ByWtxid>();
    const auto first_match = index.lower_bound(ByWtxidView{wtxid, MIN_PEER});
    if (first_match == index.end() || first_match->m_tx->GetWitnessHash() != wtxid) return nullptr;
    return first_match->m_tx;
}
/** Return whether this exact (wtxid, peer) announcement exists. Announcements are
 * unique by (wtxid, peer), so a direct lookup suffices. */
bool TxOrphanageImpl::HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const
{
    const auto& index = m_orphans.get<ByWtxid>();
    return index.find(ByWtxidView{wtxid, peer}) != index.end();
}
/** If there is a tx that can be reconsidered, return it and set it back to
 * non-reconsiderable. Otherwise, return a nullptr. */
CTransactionRef TxOrphanageImpl::GetTxToReconsider(NodeId peer)
{
    auto& peer_index = m_orphans.get<ByPeer>();
    // Within a peer, reconsiderable announcements sort after non-reconsiderable
    // ones, so the first entry at (peer, true, *) is the oldest reconsiderable one.
    auto it = peer_index.lower_bound(ByPeerView{peer, true, 0});
    const bool found{it != peer_index.end() && it->m_announcer == peer && it->m_reconsider};
    if (!found) return nullptr;

    // Flip m_reconsider. Even if this transaction stays in orphanage, it shouldn't be
    // reconsidered again until there is a new reason to do so.
    peer_index.modify(it, [](auto& ann) { ann.m_reconsider = false; });
    return it->m_tx;
}
/** Return whether there is a tx that can be reconsidered. */
bool TxOrphanageImpl::HaveTxToReconsider(NodeId peer)
{
    const auto& peer_index = m_orphans.get<ByPeer>();
    // The first entry at or after (peer, true, 0) is reconsiderable iff it still
    // belongs to this peer and carries the reconsider flag.
    const auto it = peer_index.lower_bound(ByPeerView{peer, true, 0});
    if (it == peer_index.end()) return false;
    return it->m_announcer == peer && it->m_reconsider;
}
/** Erase every orphan that spends an output consumed by a transaction in this block:
 * such orphans were either confirmed by the block or conflict with it. */
void TxOrphanageImpl::EraseForBlock(const CBlock& block)
{
    if (m_orphans.empty()) return;

    // First pass: collect the wtxids of all affected orphans. Collecting before erasing
    // avoids mutating m_outpoint_to_orphan_it while we are reading from it.
    std::set<Wtxid> wtxids_to_erase;
    for (const CTransactionRef& ptx : block.vtx) {
        // Which orphan pool entries must we evict?
        for (const auto& input : ptx->vin) {
            const auto it_prev = m_outpoint_to_orphan_it.find(input.prevout);
            if (it_prev == m_outpoint_to_orphan_it.end()) continue;
            wtxids_to_erase.insert(it_prev->second.cbegin(), it_prev->second.cend());
        }
    }

    // Second pass: erase them (all announcements of each wtxid).
    unsigned int num_erased{0};
    for (const auto& wtxid : wtxids_to_erase) {
        if (EraseTx(wtxid)) ++num_erased;
    }

    if (num_erased != 0) {
        LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", num_erased);
    }
    // Every collected wtxid should have been found and erased exactly once.
    Assume(wtxids_to_erase.size() == num_erased);
}
/** Get all children that spend from this tx and were received from nodeid. Sorted from most
 * recent to least recent. */
std::vector<CTransactionRef> TxOrphanageImpl::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId peer) const
{
    std::vector<CTransactionRef> children_found;
    const auto& parent_txid{parent->GetHash()};

    // Iterate through all orphans from this peer, in reverse order, so that more recent
    // transactions are added first. Doing so helps avoid work when one of the orphans replaced
    // an earlier one. Since we require the NodeId to match, one peer's announcement order does
    // not bias how we process other peer's orphans.
    auto& index_by_peer = m_orphans.get<ByPeer>();
    // [it_lower, it_upper) spans every announcement by this peer, reconsiderable or not:
    // (peer, false, 0) is the smallest possible key, (peer, true, max) the largest.
    auto it_upper = index_by_peer.upper_bound(ByPeerView{peer, true, std::numeric_limits<uint64_t>::max()});
    auto it_lower = index_by_peer.lower_bound(ByPeerView{peer, false, 0});

    while (it_upper != it_lower) {
        // Decrement first: it_upper starts one past the peer's newest announcement.
        --it_upper;
        if (!Assume(it_upper->m_announcer == peer)) break;
        // Check if this tx spends from parent.
        for (const auto& input : it_upper->m_tx->vin) {
            if (input.prevout.hash == parent_txid) {
                children_found.emplace_back(it_upper->m_tx);
                break;
            }
        }
    }
    return children_found;
}
/** Build one OrphanTxBase per unique orphan, collecting the full set of announcers for
 * each. Relies on announcements with the same wtxid being adjacent in the ByWtxid index. */
std::vector<TxOrphanage::OrphanTxBase> TxOrphanageImpl::GetOrphanTransactions() const
{
    std::vector<TxOrphanage::OrphanTxBase> result;
    result.reserve(m_unique_orphans);

    auto& index_by_wtxid = m_orphans.get<ByWtxid>();

    auto it = index_by_wtxid.begin();
    // Accumulates announcers of the current wtxid run; flushed at each run boundary.
    std::set<NodeId> this_orphan_announcers;
    while (it != index_by_wtxid.end()) {
        this_orphan_announcers.insert(it->m_announcer);
        // If this is the last entry, or the next entry has a different wtxid, build a OrphanTxBase.
        if (std::next(it) == index_by_wtxid.end() || std::next(it)->m_tx->GetWitnessHash() != it->m_tx->GetWitnessHash()) {
            result.emplace_back(it->m_tx, std::move(this_orphan_announcers));
            // Moved-from set: clear() restores it to a known-empty state for the next run.
            this_orphan_announcers.clear();
        }
        ++it;
    }

    Assume(m_unique_orphans == result.size());
    return result;
}
/** Recompute all cached accounting from scratch and assert it matches the incrementally
 * maintained state: per-peer stats, deduplicated counters, and outpoint links. */
void TxOrphanageImpl::SanityCheck() const
{
    std::unordered_map<NodeId, PeerDoSInfo> reconstructed_peer_info;
    // Maps each unique wtxid to its (memory usage, rounded input score) contribution.
    std::map<Wtxid, std::pair<TxOrphanage::Usage, TxOrphanage::Count>> unique_wtxids_to_scores;
    std::set<COutPoint> all_outpoints;

    // Rebuild peer stats and per-wtxid scores from the announcements themselves.
    for (auto it = m_orphans.begin(); it != m_orphans.end(); ++it) {
        for (const auto& input : it->m_tx->vin) {
            all_outpoints.insert(input.prevout);
        }
        unique_wtxids_to_scores.emplace(it->m_tx->GetWitnessHash(), std::make_pair(it->GetMemUsage(), it->GetLatencyScore() - 1));

        auto& peer_info = reconstructed_peer_info[it->m_announcer];
        peer_info.m_total_usage += it->GetMemUsage();
        peer_info.m_count_announcements += 1;
        peer_info.m_total_latency_score += it->GetLatencyScore();
    }
    assert(reconstructed_peer_info.size() == m_peer_orphanage_info.size());

    // All outpoints exist in m_outpoint_to_orphan_it, all keys in m_outpoint_to_orphan_it correspond to some
    // orphan, and all wtxids referenced in m_outpoint_to_orphan_it are also in m_orphans.
    // This ensures m_outpoint_to_orphan_it is cleaned up.
    assert(all_outpoints.size() == m_outpoint_to_orphan_it.size());
    for (const auto& [outpoint, wtxid_set] : m_outpoint_to_orphan_it) {
        assert(all_outpoints.contains(outpoint));
        for (const auto& wtxid : wtxid_set) {
            assert(unique_wtxids_to_scores.contains(wtxid));
        }
    }

    // Cached m_unique_orphans value is correct.
    assert(m_orphans.size() >= m_unique_orphans);
    assert(m_orphans.size() <= m_peer_orphanage_info.size() * m_unique_orphans);
    assert(unique_wtxids_to_scores.size() == m_unique_orphans);

    // Cached m_unique_orphan_usage value is correct.
    const auto calculated_dedup_usage = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(),
        TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.first; });
    assert(calculated_dedup_usage == m_unique_orphan_usage);

    // Global usage is deduplicated, should be less than or equal to the sum of all per-peer usages.
    const auto summed_peer_usage = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
        TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.m_total_usage; });
    assert(summed_peer_usage >= m_unique_orphan_usage);

    // Cached m_unique_rounded_input_scores value is correct.
    const auto calculated_total_latency_score = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(),
        TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.second; });
    assert(calculated_total_latency_score == m_unique_rounded_input_scores);

    // Global latency score is deduplicated, should be less than or equal to the sum of all per-peer latency scores.
    const auto summed_peer_latency_score = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
        TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.m_total_latency_score; });
    assert(summed_peer_latency_score >= m_unique_rounded_input_scores + m_orphans.size());
}
/** Configured global latency-score ceiling; fixed for the lifetime of the object. */
TxOrphanage::Count TxOrphanageImpl::MaxGlobalLatencyScore() const
{
    return m_max_global_latency_score;
}
/** Current deduplicated latency score: one per announcement plus the cached rounded input scores. */
TxOrphanage::Count TxOrphanageImpl::TotalLatencyScore() const
{
    return m_orphans.size() + m_unique_rounded_input_scores;
}
/** Memory allowance reserved per peer; fixed for the lifetime of the object. */
TxOrphanage::Usage TxOrphanageImpl::ReservedPeerUsage() const
{
    return m_reserved_usage_per_peer;
}
/** Per-peer share of the fixed global latency-score budget. Treats zero tracked peers
 * as one to avoid dividing by zero. */
TxOrphanage::Count TxOrphanageImpl::MaxPeerLatencyScore() const
{
    const auto num_peers{std::max<unsigned int>(m_peer_orphanage_info.size(), 1)};
    return m_max_global_latency_score / num_peers;
}
/** Global memory cap: the per-peer reservation times the number of tracked peers
 * (at least one, so the cap is never zero). */
TxOrphanage::Usage TxOrphanageImpl::MaxGlobalUsage() const
{
    const auto num_peers{std::max<int64_t>(m_peer_orphanage_info.size(), 1)};
    return m_reserved_usage_per_peer * num_peers;
}
/** Return whether either global limit (latency score or memory usage) is exceeded. */
bool TxOrphanageImpl::NeedsTrim() const
{
    if (TotalLatencyScore() > MaxGlobalLatencyScore()) return true;
    return TotalOrphanUsage() > MaxGlobalUsage();
}
/** Factory: construct a TxOrphanage with the default limits. */
std::unique_ptr<TxOrphanage> MakeTxOrphanage() noexcept
{
    auto orphanage = std::make_unique<TxOrphanageImpl>();
    return orphanage;
}
/** Factory: construct a TxOrphanage with caller-provided global latency-score and
 * per-peer usage limits. */
std::unique_ptr<TxOrphanage> MakeTxOrphanage(TxOrphanage::Count max_global_ann, TxOrphanage::Usage reserved_peer_usage) noexcept
{
    auto orphanage = std::make_unique<TxOrphanageImpl>(max_global_ann, reserved_peer_usage);
    return orphanage;
}
} // namespace node